Diffstat (limited to 'drivers/net/gianfar_ethtool.c'):
 drivers/net/gianfar_ethtool.c | 377 +++++++++++++++++++++++++++++++++--------
 1 file changed, 326 insertions(+), 51 deletions(-)
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 6c144b525b47..9bda023c0235 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -7,8 +7,9 @@
  *
  * Author: Andy Fleming
  * Maintainer: Kumar Gala
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  *
- * Copyright (c) 2003,2004 Freescale Semiconductor, Inc.
+ * Copyright 2003-2006, 2008-2009 Freescale Semiconductor, Inc.
  *
  * This software may be used and distributed according to
  * the terms of the GNU Public License, Version 2, incorporated herein
@@ -18,7 +19,6 @@
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/errno.h>
-#include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/delay.h>
@@ -41,7 +41,7 @@
 #include "gianfar.h"
 
 extern void gfar_start(struct net_device *dev);
-extern int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
+extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
 
 #define GFAR_MAX_COAL_USECS 0xffff
 #define GFAR_MAX_COAL_FRAMES 0xff
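For context, get_icft_value(), get_ictt_value() and mk_ic_value(), used throughout
this patch, unpack and pack the frame-count and timer thresholds of a single
interrupt-coalescing register. A rough sketch of these helpers (the authoritative
macros live in gianfar.h; the exact bit positions here are an assumption inferred
from the 0xff/0xffff bounds above):

	/* Sketch only -- see gianfar.h for the real definitions. */
	#define IC_ICEN			0x80000000	/* coalescing enable */
	#define IC_ICFT_MASK		0x1fe00000	/* 8-bit frame-count threshold */
	#define IC_ICFT_SHIFT		21
	#define IC_ICTT_MASK		0x0000ffff	/* 16-bit timer threshold */

	#define mk_ic_value(count, time) (IC_ICEN | \
		(((count) << IC_ICFT_SHIFT) & IC_ICFT_MASK) | \
		((time) & IC_ICTT_MASK))
	#define get_icft_value(ic)	(((unsigned long)(ic) & IC_ICFT_MASK) >> IC_ICFT_SHIFT)
	#define get_ictt_value(ic)	((unsigned long)(ic) & IC_ICTT_MASK)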
@@ -136,10 +136,11 @@ static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 *buf)
 {
 	int i;
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	u64 *extra = (u64 *) & priv->extra_stats;
 
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
-		u32 __iomem *rmon = (u32 __iomem *) & priv->regs->rmon;
+		u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;
 		struct gfar_stats *stats = (struct gfar_stats *) buf;
 
 		for (i = 0; i < GFAR_RMON_LEN; i++)
@@ -197,12 +198,18 @@ static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct gfar_private *priv = netdev_priv(dev);
 	struct phy_device *phydev = priv->phydev;
+	struct gfar_priv_rx_q *rx_queue = NULL;
+	struct gfar_priv_tx_q *tx_queue = NULL;
 
 	if (NULL == phydev)
 		return -ENODEV;
+	tx_queue = priv->tx_queue[0];
+	rx_queue = priv->rx_queue[0];
 
-	cmd->maxtxpkt = get_icft_value(priv->txic);
-	cmd->maxrxpkt = get_icft_value(priv->rxic);
+	/* etsec-1.7 and older versions have only one txic
+	 * and rxic regs although they support multiple queues */
+	cmd->maxtxpkt = get_icft_value(tx_queue->txic);
+	cmd->maxrxpkt = get_icft_value(rx_queue->rxic);
 
 	return phy_ethtool_gset(phydev, cmd);
 }
@@ -218,7 +225,7 @@ static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
 {
 	int i;
 	struct gfar_private *priv = netdev_priv(dev);
-	u32 __iomem *theregs = (u32 __iomem *) priv->regs;
+	u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs;
 	u32 *buf = (u32 *) regbuf;
 
 	for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++)
@@ -279,6 +286,8 @@ static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int ticks)
 static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_rx_q *rx_queue = NULL;
+	struct gfar_priv_tx_q *tx_queue = NULL;
 	unsigned long rxtime;
 	unsigned long rxcount;
 	unsigned long txtime;
@@ -290,10 +299,13 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
 	if (NULL == priv->phydev)
 		return -ENODEV;
 
-	rxtime = get_ictt_value(priv->rxic);
-	rxcount = get_icft_value(priv->rxic);
-	txtime = get_ictt_value(priv->txic);
-	txcount = get_icft_value(priv->txic);
+	rx_queue = priv->rx_queue[0];
+	tx_queue = priv->tx_queue[0];
+
+	rxtime = get_ictt_value(rx_queue->rxic);
+	rxcount = get_icft_value(rx_queue->rxic);
+	txtime = get_ictt_value(tx_queue->txic);
+	txcount = get_icft_value(tx_queue->txic);
 	cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
 	cvals->rx_max_coalesced_frames = rxcount;
 
@@ -339,16 +351,23 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
 static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	int i = 0;
 
 	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
 		return -EOPNOTSUPP;
 
 	/* Set up rx coalescing */
+	/* As of now, we will enable/disable coalescing for all
+	 * queues together in case of eTSEC2, this will be modified
+	 * along with the ethtool interface */
 	if ((cvals->rx_coalesce_usecs == 0) ||
-	    (cvals->rx_max_coalesced_frames == 0))
-		priv->rxcoalescing = 0;
-	else
-		priv->rxcoalescing = 1;
+	    (cvals->rx_max_coalesced_frames == 0)) {
+		for (i = 0; i < priv->num_rx_queues; i++)
+			priv->rx_queue[i]->rxcoalescing = 0;
+	} else {
+		for (i = 0; i < priv->num_rx_queues; i++)
+			priv->rx_queue[i]->rxcoalescing = 1;
+	}
 
 	if (NULL == priv->phydev)
 		return -ENODEV;
@@ -366,15 +385,21 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
 		return -EINVAL;
 	}
 
-	priv->rxic = mk_ic_value(cvals->rx_max_coalesced_frames,
-			gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
+	for (i = 0; i < priv->num_rx_queues; i++) {
+		priv->rx_queue[i]->rxic = mk_ic_value(
+			cvals->rx_max_coalesced_frames,
+			gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
+	}
 
 	/* Set up tx coalescing */
 	if ((cvals->tx_coalesce_usecs == 0) ||
-	    (cvals->tx_max_coalesced_frames == 0))
-		priv->txcoalescing = 0;
-	else
-		priv->txcoalescing = 1;
+	    (cvals->tx_max_coalesced_frames == 0)) {
+		for (i = 0; i < priv->num_tx_queues; i++)
+			priv->tx_queue[i]->txcoalescing = 0;
+	} else {
+		for (i = 0; i < priv->num_tx_queues; i++)
+			priv->tx_queue[i]->txcoalescing = 1;
+	}
 
 	/* Check the bounds of the values */
 	if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
@@ -389,16 +414,13 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
 		return -EINVAL;
 	}
 
-	priv->txic = mk_ic_value(cvals->tx_max_coalesced_frames,
-			gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
-
-	gfar_write(&priv->regs->rxic, 0);
-	if (priv->rxcoalescing)
-		gfar_write(&priv->regs->rxic, priv->rxic);
+	for (i = 0; i < priv->num_tx_queues; i++) {
+		priv->tx_queue[i]->txic = mk_ic_value(
+			cvals->tx_max_coalesced_frames,
+			gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
+	}
 
-	gfar_write(&priv->regs->txic, 0);
-	if (priv->txcoalescing)
-		gfar_write(&priv->regs->txic, priv->txic);
+	gfar_configure_coalescing(priv, 0xFF, 0xFF);
 
 	return 0;
 }
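gfar_scoalesce() is reached through the ETHTOOL_SCOALESCE ioctl. A minimal,
self-contained userspace sketch that exercises it (equivalent to
`ethtool -C eth0 rx-usecs 30 rx-frames 16 tx-usecs 30 tx-frames 16`; the interface
name and values are examples, bounded by the GFAR_MAX_COAL_USECS and
GFAR_MAX_COAL_FRAMES limits above):

	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ethtool_coalesce ec = { .cmd = ETHTOOL_SCOALESCE };
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return 1;

		ec.rx_coalesce_usecs = 30;	 /* <= GFAR_MAX_COAL_USECS */
		ec.rx_max_coalesced_frames = 16; /* <= GFAR_MAX_COAL_FRAMES */
		ec.tx_coalesce_usecs = 30;
		ec.tx_max_coalesced_frames = 16;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&ec;

		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
			perror("ETHTOOL_SCOALESCE");
		return 0;
	}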
@@ -409,6 +431,11 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
 static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
+
+	tx_queue = priv->tx_queue[0];
+	rx_queue = priv->rx_queue[0];
 
 	rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
 	rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
@@ -418,10 +445,10 @@ static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
 	/* Values changeable by the user. The valid values are
 	 * in the range 1 to the "*_max_pending" counterpart above.
 	 */
-	rvals->rx_pending = priv->rx_ring_size;
-	rvals->rx_mini_pending = priv->rx_ring_size;
-	rvals->rx_jumbo_pending = priv->rx_ring_size;
-	rvals->tx_pending = priv->tx_ring_size;
+	rvals->rx_pending = rx_queue->rx_ring_size;
+	rvals->rx_mini_pending = rx_queue->rx_ring_size;
+	rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
+	rvals->tx_pending = tx_queue->tx_ring_size;
 }
 
 /* Change the current ring parameters, stopping the controller if
@@ -431,7 +458,7 @@ static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
 static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
 {
 	struct gfar_private *priv = netdev_priv(dev);
-	int err = 0;
+	int err = 0, i = 0;
 
 	if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
 		return -EINVAL;
@@ -451,34 +478,41 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
 		return -EINVAL;
 	}
 
+
 	if (dev->flags & IFF_UP) {
 		unsigned long flags;
 
 		/* Halt TX and RX, and process the frames which
 		 * have already been received */
-		spin_lock_irqsave(&priv->txlock, flags);
-		spin_lock(&priv->rxlock);
+		local_irq_save(flags);
+		lock_tx_qs(priv);
+		lock_rx_qs(priv);
 
 		gfar_halt(dev);
 
-		spin_unlock(&priv->rxlock);
-		spin_unlock_irqrestore(&priv->txlock, flags);
+		unlock_rx_qs(priv);
+		unlock_tx_qs(priv);
+		local_irq_restore(flags);
 
-		gfar_clean_rx_ring(dev, priv->rx_ring_size);
+		for (i = 0; i < priv->num_rx_queues; i++)
+			gfar_clean_rx_ring(priv->rx_queue[i],
+					priv->rx_queue[i]->rx_ring_size);
 
 		/* Now we take down the rings to rebuild them */
 		stop_gfar(dev);
 	}
 
 	/* Change the size */
-	priv->rx_ring_size = rvals->rx_pending;
-	priv->tx_ring_size = rvals->tx_pending;
-	priv->num_txbdfree = priv->tx_ring_size;
+	for (i = 0; i < priv->num_rx_queues; i++) {
+		priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
+		priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
+		priv->tx_queue[i]->num_txbdfree = priv->tx_queue[i]->tx_ring_size;
+	}
 
 	/* Rebuild the rings with the new size */
 	if (dev->flags & IFF_UP) {
 		err = startup_gfar(dev);
-		netif_wake_queue(dev);
+		netif_tx_wake_all_queues(dev);
 	}
 	return err;
 }
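The resize path above is driven by the ETHTOOL_SRINGPARAM ioctl, which is what
`ethtool -G eth0 rx 512 tx 512` issues. A minimal sketch of the request it builds
(device name and sizes are examples; rx_pending/tx_pending are bounded by
GFAR_RX_MAX_RING_SIZE and its TX counterpart, as checked at the top of
gfar_sringparam()):

	/* Userspace sketch: the new sizes are applied to every queue by the
	 * "Change the size" loop above. */
	struct ethtool_ringparam ring = {
		.cmd = ETHTOOL_SRINGPARAM,
		.rx_pending = 512,
		.tx_pending = 512,
	};
	/* Wrap in a struct ifreq and issue ioctl(fd, SIOCETHTOOL, &ifr),
	 * as in the coalescing example above. */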
@@ -487,23 +521,28 @@ static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)
 {
 	struct gfar_private *priv = netdev_priv(dev);
 	unsigned long flags;
-	int err = 0;
+	int err = 0, i = 0;
 
 	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
 		return -EOPNOTSUPP;
 
+
 	if (dev->flags & IFF_UP) {
 		/* Halt TX and RX, and process the frames which
 		 * have already been received */
-		spin_lock_irqsave(&priv->txlock, flags);
-		spin_lock(&priv->rxlock);
+		local_irq_save(flags);
+		lock_tx_qs(priv);
+		lock_rx_qs(priv);
 
 		gfar_halt(dev);
 
-		spin_unlock(&priv->rxlock);
-		spin_unlock_irqrestore(&priv->txlock, flags);
+		unlock_tx_qs(priv);
+		unlock_rx_qs(priv);
+		local_irq_restore(flags);
 
-		gfar_clean_rx_ring(dev, priv->rx_ring_size);
+		for (i = 0; i < priv->num_rx_queues; i++)
+			gfar_clean_rx_ring(priv->rx_queue[i],
+					priv->rx_queue[i]->rx_ring_size);
 
 		/* Now we take down the rings to rebuild them */
 		stop_gfar(dev);
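The old per-device txlock/rxlock pair is replaced here by lock_tx_qs()/lock_rx_qs()
and their unlock counterparts. A sketch of what these helpers are assumed to look
like (they are introduced in gianfar.h by the multi-queue patch set; shown for
reference, not verbatim):

	static inline void lock_tx_qs(struct gfar_private *priv)
	{
		int i;

		/* Take every per-queue tx lock; callers disable local
		 * interrupts first via local_irq_save(). */
		for (i = 0; i < priv->num_tx_queues; i++)
			spin_lock(&priv->tx_queue[i]->txlock);
	}

	static inline void unlock_tx_qs(struct gfar_private *priv)
	{
		int i;

		for (i = 0; i < priv->num_tx_queues; i++)
			spin_unlock(&priv->tx_queue[i]->txlock);
	}

The rx variants are assumed to do the same over priv->rx_queue[i]->rxlock.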
@@ -515,7 +554,7 @@ static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)
 
 	if (dev->flags & IFF_UP) {
 		err = startup_gfar(dev);
-		netif_wake_queue(dev);
+		netif_tx_wake_all_queues(dev);
 	}
 	return err;
 }
@@ -605,6 +644,241 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 }
 #endif
 
+static int gfar_ethflow_to_class(int flow_type, u64 *class)
+{
+	switch (flow_type) {
+	case TCP_V4_FLOW:
+		*class = CLASS_CODE_TCP_IPV4;
+		break;
+	case UDP_V4_FLOW:
+		*class = CLASS_CODE_UDP_IPV4;
+		break;
+	case AH_V4_FLOW:
+	case ESP_V4_FLOW:
+		*class = CLASS_CODE_AH_ESP_IPV4;
+		break;
+	case SCTP_V4_FLOW:
+		*class = CLASS_CODE_SCTP_IPV4;
+		break;
+	case TCP_V6_FLOW:
+		*class = CLASS_CODE_TCP_IPV6;
+		break;
+	case UDP_V6_FLOW:
+		*class = CLASS_CODE_UDP_IPV6;
+		break;
+	case AH_V6_FLOW:
+	case ESP_V6_FLOW:
+		*class = CLASS_CODE_AH_ESP_IPV6;
+		break;
+	case SCTP_V6_FLOW:
+		*class = CLASS_CODE_SCTP_IPV6;
+		break;
+	default:
+		return 0;
+	}
+
+	return 1;
+}
+
+static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
+{
+	u32 fcr = 0x0, fpr = FPR_FILER_MASK;
+
+	if (ethflow & RXH_L2DA) {
+		fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH |
+			RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
+		ftp_rqfpr[priv->cur_filer_idx] = fpr;
+		ftp_rqfcr[priv->cur_filer_idx] = fcr;
+		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+		priv->cur_filer_idx = priv->cur_filer_idx - 1;
+
+		fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
+			RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
+		ftp_rqfpr[priv->cur_filer_idx] = fpr;
+		ftp_rqfcr[priv->cur_filer_idx] = fcr;
+		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+		priv->cur_filer_idx = priv->cur_filer_idx - 1;
+	}
+
+	if (ethflow & RXH_VLAN) {
+		fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+			RQFCR_AND | RQFCR_HASHTBL_0;
+		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+		ftp_rqfpr[priv->cur_filer_idx] = fpr;
+		ftp_rqfcr[priv->cur_filer_idx] = fcr;
+		priv->cur_filer_idx = priv->cur_filer_idx - 1;
+	}
+
+	if (ethflow & RXH_IP_SRC) {
+		fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+			RQFCR_AND | RQFCR_HASHTBL_0;
+		ftp_rqfpr[priv->cur_filer_idx] = fpr;
+		ftp_rqfcr[priv->cur_filer_idx] = fcr;
+		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+		priv->cur_filer_idx = priv->cur_filer_idx - 1;
+	}
+
+	if (ethflow & (RXH_IP_DST)) {
+		fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+			RQFCR_AND | RQFCR_HASHTBL_0;
+		ftp_rqfpr[priv->cur_filer_idx] = fpr;
+		ftp_rqfcr[priv->cur_filer_idx] = fcr;
+		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+		priv->cur_filer_idx = priv->cur_filer_idx - 1;
+	}
+
+	if (ethflow & RXH_L3_PROTO) {
+		fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+			RQFCR_AND | RQFCR_HASHTBL_0;
+		ftp_rqfpr[priv->cur_filer_idx] = fpr;
+		ftp_rqfcr[priv->cur_filer_idx] = fcr;
+		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+		priv->cur_filer_idx = priv->cur_filer_idx - 1;
+	}
+
+	if (ethflow & RXH_L4_B_0_1) {
+		fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+			RQFCR_AND | RQFCR_HASHTBL_0;
+		ftp_rqfpr[priv->cur_filer_idx] = fpr;
+		ftp_rqfcr[priv->cur_filer_idx] = fcr;
+		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+		priv->cur_filer_idx = priv->cur_filer_idx - 1;
+	}
+
+	if (ethflow & RXH_L4_B_2_3) {
+		fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+			RQFCR_AND | RQFCR_HASHTBL_0;
+		ftp_rqfpr[priv->cur_filer_idx] = fpr;
+		ftp_rqfcr[priv->cur_filer_idx] = fcr;
+		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+		priv->cur_filer_idx = priv->cur_filer_idx - 1;
+	}
+}
+
+static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u64 class)
+{
+	unsigned int last_rule_idx = priv->cur_filer_idx;
+	unsigned int cmp_rqfpr;
+	unsigned int local_rqfpr[MAX_FILER_IDX + 1];
+	unsigned int local_rqfcr[MAX_FILER_IDX + 1];
+	int i = 0x0, k = 0x0;
+	int j = MAX_FILER_IDX, l = 0x0;
+
+	switch (class) {
+	case TCP_V4_FLOW:
+		cmp_rqfpr = RQFPR_IPV4 |RQFPR_TCP;
+		break;
+	case UDP_V4_FLOW:
+		cmp_rqfpr = RQFPR_IPV4 |RQFPR_UDP;
+		break;
+	case TCP_V6_FLOW:
+		cmp_rqfpr = RQFPR_IPV6 |RQFPR_TCP;
+		break;
+	case UDP_V6_FLOW:
+		cmp_rqfpr = RQFPR_IPV6 |RQFPR_UDP;
+		break;
+	case IPV4_FLOW:
+		cmp_rqfpr = RQFPR_IPV4;
+		break;
+	case IPV6_FLOW:
+		cmp_rqfpr = RQFPR_IPV6;
+		break;
+	default:
+		printk(KERN_ERR "Right now this class is not supported\n");
+		return 0;
+	}
+
+	for (i = 0; i < MAX_FILER_IDX + 1; i++) {
+		local_rqfpr[j] = ftp_rqfpr[i];
+		local_rqfcr[j] = ftp_rqfcr[i];
+		j--;
+		if ((ftp_rqfcr[i] == (RQFCR_PID_PARSE |
+			RQFCR_CLE |RQFCR_AND)) &&
+			(ftp_rqfpr[i] == cmp_rqfpr))
+			break;
+	}
+
+	if (i == MAX_FILER_IDX + 1) {
+		printk(KERN_ERR "No parse rule found, ");
+		printk(KERN_ERR "can't create hash rules\n");
+		return 0;
+	}
+
+	/* If a match was found, then it begins the starting of a cluster rule
+	 * if it was already programmed, we need to overwrite these rules
+	 */
+	for (l = i+1; l < MAX_FILER_IDX; l++) {
+		if ((ftp_rqfcr[l] & RQFCR_CLE) &&
+			!(ftp_rqfcr[l] & RQFCR_AND)) {
+			ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
+				RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
+			ftp_rqfpr[l] = FPR_FILER_MASK;
+			gfar_write_filer(priv, l, ftp_rqfcr[l], ftp_rqfpr[l]);
+			break;
+		}
+
+		if (!(ftp_rqfcr[l] & RQFCR_CLE) && (ftp_rqfcr[l] & RQFCR_AND))
+			continue;
+		else {
+			local_rqfpr[j] = ftp_rqfpr[l];
+			local_rqfcr[j] = ftp_rqfcr[l];
+			j--;
+		}
+	}
+
+	priv->cur_filer_idx = l - 1;
+	last_rule_idx = l;
+
+	/* hash rules */
+	ethflow_to_filer_rules(priv, ethflow);
+
+	/* Write back the popped out rules again */
+	for (k = j+1; k < MAX_FILER_IDX; k++) {
+		ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
+		ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
+		gfar_write_filer(priv, priv->cur_filer_idx,
+				local_rqfcr[k], local_rqfpr[k]);
+		if (!priv->cur_filer_idx)
+			break;
+		priv->cur_filer_idx = priv->cur_filer_idx - 1;
+	}
+
+	return 1;
+}
+
+static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
+{
+	u64 class;
+
+	if (!gfar_ethflow_to_class(cmd->flow_type, &class))
+		return -EINVAL;
+
+	if (class < CLASS_CODE_USER_PROG1 ||
+		class > CLASS_CODE_SCTP_IPV6)
+		return -EINVAL;
+
+	/* write the filer rules here */
+	if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
+		return -1;
+
+	return 0;
+}
+
+static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	int ret = 0;
+
+	switch(cmd->cmd) {
+	case ETHTOOL_SRXFH:
+		ret = gfar_set_hash_opts(priv, cmd);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
 const struct ethtool_ops gfar_ethtool_ops = {
 	.get_settings = gfar_gsettings,
 	.set_settings = gfar_ssettings,
@@ -630,4 +904,5 @@ const struct ethtool_ops gfar_ethtool_ops = {
 	.get_wol = gfar_get_wol,
 	.set_wol = gfar_set_wol,
 #endif
+	.set_rxnfc = gfar_set_nfc,
 };
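The new .set_rxnfc hook is reached through the ETHTOOL_SRXFH ioctl, which is what
`ethtool -N eth0 rx-flow-hash tcp4 sdfn` issues. A minimal sketch of the request
that selects source/destination IP and both TCP port halves as hash inputs for
TCP/IPv4 (device name is an example):

	struct ethtool_rxnfc nfc = {
		.cmd = ETHTOOL_SRXFH,
		.flow_type = TCP_V4_FLOW,
		/* hash on src/dst IP address and src/dst TCP port */
		.data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3,
	};
	/* Wrap in a struct ifreq and issue ioctl(fd, SIOCETHTOOL, &ifr), as in
	 * the coalescing example earlier; gfar_set_nfc() dispatches to
	 * gfar_set_hash_opts(), which programs the filer hash rules above. */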