Diffstat (limited to 'drivers/net/gianfar_ethtool.c')

 drivers/net/gianfar_ethtool.c | 376 +++++++++++++++++++++++++++++++++------
 1 file changed, 326 insertions(+), 50 deletions(-)
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 6c144b525b47..1010367695e4 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -7,8 +7,9 @@
  *
  * Author: Andy Fleming
  * Maintainer: Kumar Gala
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  *
- * Copyright (c) 2003,2004 Freescale Semiconductor, Inc.
+ * Copyright 2003-2006, 2008-2009 Freescale Semiconductor, Inc.
  *
  * This software may be used and distributed according to
  * the terms of the GNU Public License, Version 2, incorporated herein
@@ -41,7 +42,7 @@
 #include "gianfar.h"

 extern void gfar_start(struct net_device *dev);
-extern int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
+extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);

 #define GFAR_MAX_COAL_USECS 0xffff
 #define GFAR_MAX_COAL_FRAMES 0xff
@@ -136,10 +137,11 @@ static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 *buf)
 {
         int i;
         struct gfar_private *priv = netdev_priv(dev);
+        struct gfar __iomem *regs = priv->gfargrp[0].regs;
         u64 *extra = (u64 *) & priv->extra_stats;

         if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
-                u32 __iomem *rmon = (u32 __iomem *) & priv->regs->rmon;
+                u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;
                 struct gfar_stats *stats = (struct gfar_stats *) buf;

                 for (i = 0; i < GFAR_RMON_LEN; i++)
@@ -197,12 +199,18 @@ static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
         struct gfar_private *priv = netdev_priv(dev);
         struct phy_device *phydev = priv->phydev;
+        struct gfar_priv_rx_q *rx_queue = NULL;
+        struct gfar_priv_tx_q *tx_queue = NULL;

         if (NULL == phydev)
                 return -ENODEV;
+        tx_queue = priv->tx_queue[0];
+        rx_queue = priv->rx_queue[0];

-        cmd->maxtxpkt = get_icft_value(priv->txic);
-        cmd->maxrxpkt = get_icft_value(priv->rxic);
+        /* etsec-1.7 and older versions have only one pair of txic
+         * and rxic registers, although they support multiple queues */
+        cmd->maxtxpkt = get_icft_value(tx_queue->txic);
+        cmd->maxrxpkt = get_icft_value(rx_queue->rxic);

         return phy_ethtool_gset(phydev, cmd);
 }
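For reference, the get_icft_value()/get_ictt_value() helpers used above (and
mk_ic_value() further down) pack and unpack the eTSEC interrupt-coalescing
value kept in txic/rxic. A sketch of their likely definitions from gianfar.h
(shown for context only, not part of this patch):

    /* Assumed IC register layout: bit 31 enables coalescing, bits 28-21
     * hold the frame-count threshold (ICFT), bits 15-0 the timer
     * threshold (ICTT). */
    #define IC_ICEN                  0x80000000
    #define IC_ICFT_MASK             0x1fe00000
    #define IC_ICFT_SHIFT            21
    #define IC_ICTT_MASK             0x0000ffff

    #define mk_ic_icft(x)            ((unsigned int)(x) << IC_ICFT_SHIFT)
    #define mk_ic_ictt(x)            (x)
    #define mk_ic_value(count, time) (IC_ICEN | mk_ic_icft(count) | mk_ic_ictt(time))

    #define get_icft_value(ic)       (((unsigned long)(ic) & IC_ICFT_MASK) >> IC_ICFT_SHIFT)
    #define get_ictt_value(ic)       ((unsigned long)(ic) & IC_ICTT_MASK)

Note how the 8-bit ICFT and 16-bit ICTT fields line up with the
GFAR_MAX_COAL_FRAMES (0xff) and GFAR_MAX_COAL_USECS (0xffff) bounds defined
at the top of this file.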
@@ -218,7 +226,7 @@ static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
 {
         int i;
         struct gfar_private *priv = netdev_priv(dev);
-        u32 __iomem *theregs = (u32 __iomem *) priv->regs;
+        u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs;
         u32 *buf = (u32 *) regbuf;

         for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++)
@@ -279,6 +287,8 @@ static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int ticks)
 static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
 {
         struct gfar_private *priv = netdev_priv(dev);
+        struct gfar_priv_rx_q *rx_queue = NULL;
+        struct gfar_priv_tx_q *tx_queue = NULL;
         unsigned long rxtime;
         unsigned long rxcount;
         unsigned long txtime;
@@ -290,10 +300,13 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
         if (NULL == priv->phydev)
                 return -ENODEV;

-        rxtime = get_ictt_value(priv->rxic);
-        rxcount = get_icft_value(priv->rxic);
-        txtime = get_ictt_value(priv->txic);
-        txcount = get_icft_value(priv->txic);
+        rx_queue = priv->rx_queue[0];
+        tx_queue = priv->tx_queue[0];
+
+        rxtime = get_ictt_value(rx_queue->rxic);
+        rxcount = get_icft_value(rx_queue->rxic);
+        txtime = get_ictt_value(tx_queue->txic);
+        txcount = get_icft_value(tx_queue->txic);
         cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
         cvals->rx_max_coalesced_frames = rxcount;

@@ -339,16 +352,23 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
 static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
 {
         struct gfar_private *priv = netdev_priv(dev);
+        int i = 0;

         if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
                 return -EOPNOTSUPP;

         /* Set up rx coalescing */
+        /* As of now, we enable/disable coalescing for all queues
+         * together in case of eTSEC2; this will be modified along
+         * with the ethtool interface */
         if ((cvals->rx_coalesce_usecs == 0) ||
-            (cvals->rx_max_coalesced_frames == 0))
-                priv->rxcoalescing = 0;
-        else
-                priv->rxcoalescing = 1;
+            (cvals->rx_max_coalesced_frames == 0)) {
+                for (i = 0; i < priv->num_rx_queues; i++)
+                        priv->rx_queue[i]->rxcoalescing = 0;
+        } else {
+                for (i = 0; i < priv->num_rx_queues; i++)
+                        priv->rx_queue[i]->rxcoalescing = 1;
+        }

         if (NULL == priv->phydev)
                 return -ENODEV;
@@ -366,15 +386,21 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
                 return -EINVAL;
         }

-        priv->rxic = mk_ic_value(cvals->rx_max_coalesced_frames,
-                gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
+        for (i = 0; i < priv->num_rx_queues; i++) {
+                priv->rx_queue[i]->rxic = mk_ic_value(
+                        cvals->rx_max_coalesced_frames,
+                        gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
+        }

         /* Set up tx coalescing */
         if ((cvals->tx_coalesce_usecs == 0) ||
-            (cvals->tx_max_coalesced_frames == 0))
-                priv->txcoalescing = 0;
-        else
-                priv->txcoalescing = 1;
+            (cvals->tx_max_coalesced_frames == 0)) {
+                for (i = 0; i < priv->num_tx_queues; i++)
+                        priv->tx_queue[i]->txcoalescing = 0;
+        } else {
+                for (i = 0; i < priv->num_tx_queues; i++)
+                        priv->tx_queue[i]->txcoalescing = 1;
+        }

         /* Check the bounds of the values */
         if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
@@ -389,16 +415,13 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
                 return -EINVAL;
         }

-        priv->txic = mk_ic_value(cvals->tx_max_coalesced_frames,
-                gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
-
-        gfar_write(&priv->regs->rxic, 0);
-        if (priv->rxcoalescing)
-                gfar_write(&priv->regs->rxic, priv->rxic);
+        for (i = 0; i < priv->num_tx_queues; i++) {
+                priv->tx_queue[i]->txic = mk_ic_value(
+                        cvals->tx_max_coalesced_frames,
+                        gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
+        }

-        gfar_write(&priv->regs->txic, 0);
-        if (priv->txcoalescing)
-                gfar_write(&priv->regs->txic, priv->txic);
+        gfar_configure_coalescing(priv, 0xFF, 0xFF);

         return 0;
 }
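gfar_configure_coalescing() replaces the open-coded writes to the single
txic/rxic register pair that this hunk deletes; the 0xFF arguments are bit
masks selecting all TX and all RX queues. The helper is added to gianfar.c
elsewhere in this series; a minimal sketch of the expected behavior, assuming
the single-group register layout used in the hunks above:

    /* Hedged sketch, not the actual implementation: clear the IC register
     * to disable coalescing, then program the packed value for queue 0.
     * eTSEC 1.x has only one txic/rxic pair; an eTSEC2 implementation
     * would additionally walk tx_mask/rx_mask and program the per-queue
     * txic0../rxic0.. registers. */
    void gfar_configure_coalescing(struct gfar_private *priv,
                                   unsigned long tx_mask, unsigned long rx_mask)
    {
            struct gfar __iomem *regs = priv->gfargrp[0].regs;

            gfar_write(&regs->txic, 0);
            if (priv->tx_queue[0]->txcoalescing)
                    gfar_write(&regs->txic, priv->tx_queue[0]->txic);

            gfar_write(&regs->rxic, 0);
            if (priv->rx_queue[0]->rxcoalescing)
                    gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
    }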
@@ -409,6 +432,11 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
 static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
 {
         struct gfar_private *priv = netdev_priv(dev);
+        struct gfar_priv_tx_q *tx_queue = NULL;
+        struct gfar_priv_rx_q *rx_queue = NULL;
+
+        tx_queue = priv->tx_queue[0];
+        rx_queue = priv->rx_queue[0];

         rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
         rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
@@ -418,10 +446,10 @@ static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
         /* Values changeable by the user. The valid values are
          * in the range 1 to the "*_max_pending" counterpart above.
          */
-        rvals->rx_pending = priv->rx_ring_size;
-        rvals->rx_mini_pending = priv->rx_ring_size;
-        rvals->rx_jumbo_pending = priv->rx_ring_size;
-        rvals->tx_pending = priv->tx_ring_size;
+        rvals->rx_pending = rx_queue->rx_ring_size;
+        rvals->rx_mini_pending = rx_queue->rx_ring_size;
+        rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
+        rvals->tx_pending = tx_queue->tx_ring_size;
 }

 /* Change the current ring parameters, stopping the controller if
@@ -431,7 +459,7 @@ static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
 static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
 {
         struct gfar_private *priv = netdev_priv(dev);
-        int err = 0;
+        int err = 0, i = 0;

         if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
                 return -EINVAL;
@@ -451,34 +479,41 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
                 return -EINVAL;
         }

+
         if (dev->flags & IFF_UP) {
                 unsigned long flags;

                 /* Halt TX and RX, and process the frames which
                  * have already been received */
-                spin_lock_irqsave(&priv->txlock, flags);
-                spin_lock(&priv->rxlock);
+                local_irq_save(flags);
+                lock_tx_qs(priv);
+                lock_rx_qs(priv);

                 gfar_halt(dev);

-                spin_unlock(&priv->rxlock);
-                spin_unlock_irqrestore(&priv->txlock, flags);
+                unlock_rx_qs(priv);
+                unlock_tx_qs(priv);
+                local_irq_restore(flags);

-                gfar_clean_rx_ring(dev, priv->rx_ring_size);
+                for (i = 0; i < priv->num_rx_queues; i++)
+                        gfar_clean_rx_ring(priv->rx_queue[i],
+                                        priv->rx_queue[i]->rx_ring_size);

                 /* Now we take down the rings to rebuild them */
                 stop_gfar(dev);
         }

         /* Change the size */
-        priv->rx_ring_size = rvals->rx_pending;
-        priv->tx_ring_size = rvals->tx_pending;
-        priv->num_txbdfree = priv->tx_ring_size;
+        for (i = 0; i < priv->num_rx_queues; i++) {
+                priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
+                priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
+                priv->tx_queue[i]->num_txbdfree = priv->tx_queue[i]->tx_ring_size;
+        }

         /* Rebuild the rings with the new size */
         if (dev->flags & IFF_UP) {
                 err = startup_gfar(dev);
-                netif_wake_queue(dev);
+                netif_tx_wake_all_queues(dev);
         }
         return err;
 }
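The old code quiesced the controller by taking the single txlock/rxlock pair
with spin_lock_irqsave(). With one lock per queue, the irq state can no
longer ride along with a single lock, hence the explicit local_irq_save()/
local_irq_restore() bracketing above. The lock_tx_qs()/lock_rx_qs() helpers
are presumably inline functions in gianfar.h along these lines (a sketch,
not part of this hunk):

    /* Interrupts are already disabled by the caller via local_irq_save(),
     * so plain spin_lock() is sufficient here. */
    static inline void lock_rx_qs(struct gfar_private *priv)
    {
            int i;

            for (i = 0; i < priv->num_rx_queues; i++)
                    spin_lock(&priv->rx_queue[i]->rxlock);
    }

    static inline void unlock_rx_qs(struct gfar_private *priv)
    {
            int i;

            for (i = 0; i < priv->num_rx_queues; i++)
                    spin_unlock(&priv->rx_queue[i]->rxlock);
    }

The TX variants would mirror these over priv->tx_queue[i]->txlock.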
@@ -487,23 +522,28 @@ static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)
 {
         struct gfar_private *priv = netdev_priv(dev);
         unsigned long flags;
-        int err = 0;
+        int err = 0, i = 0;

         if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
                 return -EOPNOTSUPP;

+
         if (dev->flags & IFF_UP) {
                 /* Halt TX and RX, and process the frames which
                  * have already been received */
-                spin_lock_irqsave(&priv->txlock, flags);
-                spin_lock(&priv->rxlock);
+                local_irq_save(flags);
+                lock_tx_qs(priv);
+                lock_rx_qs(priv);

                 gfar_halt(dev);

-                spin_unlock(&priv->rxlock);
-                spin_unlock_irqrestore(&priv->txlock, flags);
+                unlock_tx_qs(priv);
+                unlock_rx_qs(priv);
+                local_irq_restore(flags);

-                gfar_clean_rx_ring(dev, priv->rx_ring_size);
+                for (i = 0; i < priv->num_rx_queues; i++)
+                        gfar_clean_rx_ring(priv->rx_queue[i],
+                                        priv->rx_queue[i]->rx_ring_size);

                 /* Now we take down the rings to rebuild them */
                 stop_gfar(dev);
@@ -515,7 +555,7 @@ static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)

         if (dev->flags & IFF_UP) {
                 err = startup_gfar(dev);
-                netif_wake_queue(dev);
+                netif_tx_wake_all_queues(dev);
         }
         return err;
 }
@@ -605,6 +645,241 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 }
 #endif

+static int gfar_ethflow_to_class(int flow_type, u64 *class)
+{
+        switch (flow_type) {
+        case TCP_V4_FLOW:
+                *class = CLASS_CODE_TCP_IPV4;
+                break;
+        case UDP_V4_FLOW:
+                *class = CLASS_CODE_UDP_IPV4;
+                break;
+        case AH_V4_FLOW:
+        case ESP_V4_FLOW:
+                *class = CLASS_CODE_AH_ESP_IPV4;
+                break;
+        case SCTP_V4_FLOW:
+                *class = CLASS_CODE_SCTP_IPV4;
+                break;
+        case TCP_V6_FLOW:
+                *class = CLASS_CODE_TCP_IPV6;
+                break;
+        case UDP_V6_FLOW:
+                *class = CLASS_CODE_UDP_IPV6;
+                break;
+        case AH_V6_FLOW:
+        case ESP_V6_FLOW:
+                *class = CLASS_CODE_AH_ESP_IPV6;
+                break;
+        case SCTP_V6_FLOW:
+                *class = CLASS_CODE_SCTP_IPV6;
+                break;
+        default:
+                return 0;
+        }
+
+        return 1;
+}
+
+static void ethflow_to_filer_rules(struct gfar_private *priv, u64 ethflow)
+{
+        u32 fcr = 0x0, fpr = FPR_FILER_MASK;
+
+        if (ethflow & RXH_L2DA) {
+                fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH |
+                        RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
+                ftp_rqfpr[priv->cur_filer_idx] = fpr;
+                ftp_rqfcr[priv->cur_filer_idx] = fcr;
+                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+                priv->cur_filer_idx = priv->cur_filer_idx - 1;
+
+                fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
+                        RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
+                ftp_rqfpr[priv->cur_filer_idx] = fpr;
+                ftp_rqfcr[priv->cur_filer_idx] = fcr;
+                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+                priv->cur_filer_idx = priv->cur_filer_idx - 1;
+        }
+
+        if (ethflow & RXH_VLAN) {
+                fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+                        RQFCR_AND | RQFCR_HASHTBL_0;
+                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+                ftp_rqfpr[priv->cur_filer_idx] = fpr;
+                ftp_rqfcr[priv->cur_filer_idx] = fcr;
+                priv->cur_filer_idx = priv->cur_filer_idx - 1;
+        }
+
+        if (ethflow & RXH_IP_SRC) {
+                fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+                        RQFCR_AND | RQFCR_HASHTBL_0;
+                ftp_rqfpr[priv->cur_filer_idx] = fpr;
+                ftp_rqfcr[priv->cur_filer_idx] = fcr;
+                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+                priv->cur_filer_idx = priv->cur_filer_idx - 1;
+        }
+
+        if (ethflow & (RXH_IP_DST)) {
+                fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+                        RQFCR_AND | RQFCR_HASHTBL_0;
+                ftp_rqfpr[priv->cur_filer_idx] = fpr;
+                ftp_rqfcr[priv->cur_filer_idx] = fcr;
+                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+                priv->cur_filer_idx = priv->cur_filer_idx - 1;
+        }
+
+        if (ethflow & RXH_L3_PROTO) {
+                fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+                        RQFCR_AND | RQFCR_HASHTBL_0;
+                ftp_rqfpr[priv->cur_filer_idx] = fpr;
+                ftp_rqfcr[priv->cur_filer_idx] = fcr;
+                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+                priv->cur_filer_idx = priv->cur_filer_idx - 1;
+        }
+
+        if (ethflow & RXH_L4_B_0_1) {
+                fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+                        RQFCR_AND | RQFCR_HASHTBL_0;
+                ftp_rqfpr[priv->cur_filer_idx] = fpr;
+                ftp_rqfcr[priv->cur_filer_idx] = fcr;
+                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+                priv->cur_filer_idx = priv->cur_filer_idx - 1;
+        }
+
+        if (ethflow & RXH_L4_B_2_3) {
+                fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+                        RQFCR_AND | RQFCR_HASHTBL_0;
+                ftp_rqfpr[priv->cur_filer_idx] = fpr;
+                ftp_rqfcr[priv->cur_filer_idx] = fcr;
+                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+                priv->cur_filer_idx = priv->cur_filer_idx - 1;
+        }
+}
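Each rule built above is mirrored into the ftp_rqfcr/ftp_rqfpr shadow arrays
and pushed to hardware with gfar_write_filer(), consuming filer entries from
cur_filer_idx downwards. The helper lives elsewhere in the driver; it
presumably programs the filer address/control/property registers roughly as
follows (a sketch assuming the rqfar/rqfcr/rqfpr register names in struct
gfar):

    static inline void gfar_write_filer(struct gfar_private *priv,
                    unsigned int far, unsigned int fcr, unsigned int fpr)
    {
            struct gfar __iomem *regs = priv->gfargrp[0].regs;

            /* Select the filer table entry (RQFAR), then write its
             * control (RQFCR) and property (RQFPR) words. */
            gfar_write(&regs->rqfar, far);
            gfar_write(&regs->rqfcr, fcr);
            gfar_write(&regs->rqfpr, fpr);
    }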
+
+static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u64 class)
+{
+        unsigned int last_rule_idx = priv->cur_filer_idx;
+        unsigned int cmp_rqfpr;
+        unsigned int local_rqfpr[MAX_FILER_IDX + 1];
+        unsigned int local_rqfcr[MAX_FILER_IDX + 1];
+        int i = 0x0, k = 0x0;
+        int j = MAX_FILER_IDX, l = 0x0;
+
+        switch (class) {
+        case TCP_V4_FLOW:
+                cmp_rqfpr = RQFPR_IPV4 | RQFPR_TCP;
+                break;
+        case UDP_V4_FLOW:
+                cmp_rqfpr = RQFPR_IPV4 | RQFPR_UDP;
+                break;
+        case TCP_V6_FLOW:
+                cmp_rqfpr = RQFPR_IPV6 | RQFPR_TCP;
+                break;
+        case UDP_V6_FLOW:
+                cmp_rqfpr = RQFPR_IPV6 | RQFPR_UDP;
+                break;
+        case IPV4_FLOW:
+                cmp_rqfpr = RQFPR_IPV4;
+                break;
+        case IPV6_FLOW:
+                cmp_rqfpr = RQFPR_IPV6;
+                break;
+        default:
+                printk(KERN_ERR "Right now this class is not supported\n");
+                return 0;
+        }
+
+        for (i = 0; i < MAX_FILER_IDX + 1; i++) {
+                local_rqfpr[j] = ftp_rqfpr[i];
+                local_rqfcr[j] = ftp_rqfcr[i];
+                j--;
+                if ((ftp_rqfcr[i] == (RQFCR_PID_PARSE |
+                        RQFCR_CLE | RQFCR_AND)) &&
+                        (ftp_rqfpr[i] == cmp_rqfpr))
+                        break;
+        }
+
+        if (i == MAX_FILER_IDX + 1) {
+                printk(KERN_ERR "No parse rule found, can't create hash rules\n");
+                return 0;
+        }
+
+        /* If a match was found, it marks the start of a cluster rule;
+         * if one was already programmed, we need to overwrite these rules
+         */
+        for (l = i+1; l < MAX_FILER_IDX; l++) {
+                if ((ftp_rqfcr[l] & RQFCR_CLE) &&
+                        !(ftp_rqfcr[l] & RQFCR_AND)) {
+                        ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
+                                RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
+                        ftp_rqfpr[l] = FPR_FILER_MASK;
+                        gfar_write_filer(priv, l, ftp_rqfcr[l], ftp_rqfpr[l]);
+                        break;
+                }
+
+                if (!(ftp_rqfcr[l] & RQFCR_CLE) && (ftp_rqfcr[l] & RQFCR_AND))
+                        continue;
+                else {
+                        local_rqfpr[j] = ftp_rqfpr[l];
+                        local_rqfcr[j] = ftp_rqfcr[l];
+                        j--;
+                }
+        }
+
+        priv->cur_filer_idx = l - 1;
+        last_rule_idx = l;
+
+        /* hash rules */
+        ethflow_to_filer_rules(priv, ethflow);
+
+        /* Write back the popped out rules again */
+        for (k = j+1; k < MAX_FILER_IDX; k++) {
+                ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
+                ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
+                gfar_write_filer(priv, priv->cur_filer_idx,
+                                local_rqfcr[k], local_rqfpr[k]);
+                if (!priv->cur_filer_idx)
+                        break;
+                priv->cur_filer_idx = priv->cur_filer_idx - 1;
+        }
+
+        return 1;
+}
+
+static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
+{
+        u64 class;
+
+        if (!gfar_ethflow_to_class(cmd->flow_type, &class))
+                return -EINVAL;
+
+        if (class < CLASS_CODE_USER_PROG1 ||
+                class > CLASS_CODE_SCTP_IPV6)
+                return -EINVAL;
+
+        /* write the filer rules here */
+        if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
+                return -EINVAL;
+
+        return 0;
+}
+
+static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+        struct gfar_private *priv = netdev_priv(dev);
+        int ret = 0;
+
+        switch (cmd->cmd) {
+        case ETHTOOL_SRXFH:
+                ret = gfar_set_hash_opts(priv, cmd);
+                break;
+        default:
+                ret = -EINVAL;
+        }
+
+        return ret;
+}
+
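With .set_rxnfc wired up below, the hash rules above become reachable from
user space via the standard ETHTOOL_SRXFH request. With a typical ethtool
binary this maps to something like "ethtool -N eth0 rx-flow-hash tcp4 sdfn"
(interface name hypothetical): hash TCP/IPv4 flows on source and destination
IP (RXH_IP_SRC/RXH_IP_DST) and both halves of the L4 ports (RXH_L4_B_0_1/
RXH_L4_B_2_3).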
 const struct ethtool_ops gfar_ethtool_ops = {
         .get_settings = gfar_gsettings,
         .set_settings = gfar_ssettings,
@@ -630,4 +905,5 @@ const struct ethtool_ops gfar_ethtool_ops = {
         .get_wol = gfar_get_wol,
         .set_wol = gfar_set_wol,
 #endif
+        .set_rxnfc = gfar_set_nfc,
 };