Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/floppy.c | 8
-rw-r--r--  drivers/block/nbd.c | 112
-rw-r--r--  drivers/char/hpet.c | 22
-rw-r--r--  drivers/char/random.c | 3
-rw-r--r--  drivers/char/synclink_gt.c | 58
-rw-r--r--  drivers/char/tty_io.c | 6
-rw-r--r--  drivers/crypto/hifn_795x.c | 2
-rw-r--r--  drivers/edac/Kconfig | 36
-rw-r--r--  drivers/edac/Makefile | 2
-rw-r--r--  drivers/edac/amd8111_edac.c | 595
-rw-r--r--  drivers/edac/amd8111_edac.h | 130
-rw-r--r--  drivers/edac/amd8131_edac.c | 379
-rw-r--r--  drivers/edac/amd8131_edac.h | 119
-rw-r--r--  drivers/edac/edac_core.h | 16
-rw-r--r--  drivers/edac/edac_pci.c | 14
-rw-r--r--  drivers/edac/ppc4xx_edac.c | 1448
-rw-r--r--  drivers/edac/ppc4xx_edac.h | 172
-rw-r--r--  drivers/gpio/gpiolib.c | 19
-rw-r--r--  drivers/input/mouse/hgpk.c | 2
-rw-r--r--  drivers/misc/Kconfig | 4
-rw-r--r--  drivers/misc/eeprom/at24.c | 67
-rw-r--r--  drivers/misc/eeprom/at25.c | 58
-rw-r--r--  drivers/misc/sgi-gru/Makefile | 2
-rw-r--r--  drivers/misc/sgi-gru/gru_instructions.h | 22
-rw-r--r--  drivers/misc/sgi-gru/grufault.c | 130
-rw-r--r--  drivers/misc/sgi-gru/grufile.c | 36
-rw-r--r--  drivers/misc/sgi-gru/gruhandles.c | 183
-rw-r--r--  drivers/misc/sgi-gru/gruhandles.h | 178
-rw-r--r--  drivers/misc/sgi-gru/grukservices.c | 131
-rw-r--r--  drivers/misc/sgi-gru/grukservices.h | 33
-rw-r--r--  drivers/misc/sgi-gru/grumain.c | 84
-rw-r--r--  drivers/misc/sgi-gru/gruprocfs.c | 45
-rw-r--r--  drivers/misc/sgi-gru/grutables.h | 41
-rw-r--r--  drivers/misc/sgi-gru/grutlbpurge.c | 7
-rw-r--r--  drivers/misc/sgi-xp/xpc.h | 33
-rw-r--r--  drivers/misc/sgi-xp/xpc_channel.c | 8
-rw-r--r--  drivers/misc/sgi-xp/xpc_main.c | 6
-rw-r--r--  drivers/misc/sgi-xp/xpc_sn2.c | 20
-rw-r--r--  drivers/misc/sgi-xp/xpc_uv.c | 229
-rw-r--r--  drivers/net/Kconfig | 19
-rw-r--r--  drivers/net/Makefile | 1
-rw-r--r--  drivers/net/dm9000.c | 2
-rw-r--r--  drivers/net/dnet.c | 1
-rw-r--r--  drivers/net/fec_mpc52xx.c | 77
-rw-r--r--  drivers/net/fsl_pq_mdio.c | 3
-rw-r--r--  drivers/net/gianfar.h | 1
-rw-r--r--  drivers/net/hamradio/yam.c | 64
-rw-r--r--  drivers/net/hamradio/yam1200.h | 343
-rw-r--r--  drivers/net/hamradio/yam9600.h | 343
-rw-r--r--  drivers/net/igb/e1000_phy.c | 7
-rw-r--r--  drivers/net/igb/igb_ethtool.c | 14
-rw-r--r--  drivers/net/igb/igb_main.c | 54
-rw-r--r--  drivers/net/ixgbe/ixgbe_82598.c | 3
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.c | 3
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.h | 9
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_nl.c | 6
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c | 129
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 110
-rw-r--r--  drivers/net/ixgbe/ixgbe_type.h | 1
-rw-r--r--  drivers/net/mlx4/en_netdev.c | 2
-rw-r--r--  drivers/net/mlx4/en_rx.c | 2
-rw-r--r--  drivers/net/mlx4/sense.c | 2
-rw-r--r--  drivers/net/pcmcia/ositech.h | 358
-rw-r--r--  drivers/net/pcmcia/smc91c92_cs.c | 44
-rw-r--r--  drivers/net/phy/phy.c | 3
-rw-r--r--  drivers/net/qlge/qlge_ethtool.c | 1
-rw-r--r--  drivers/net/r8169.c | 5
-rw-r--r--  drivers/net/sfc/efx.c | 7
-rw-r--r--  drivers/net/tc35815.c | 2
-rw-r--r--  drivers/net/tokenring/3c359.c | 63
-rw-r--r--  drivers/net/tokenring/3c359.h | 3
-rw-r--r--  drivers/net/tokenring/3c359_microcode.h | 1581
-rw-r--r--  drivers/net/ucc_geth.c | 22
-rw-r--r--  drivers/net/ucc_geth.h | 4
-rw-r--r--  drivers/net/ucc_geth_ethtool.c | 1
-rw-r--r--  drivers/net/usb/hso.c | 40
-rw-r--r--  drivers/net/usb/kaweth.c | 7
-rw-r--r--  drivers/net/vxge/Makefile | 7
-rw-r--r--  drivers/net/vxge/vxge-config.c | 5264
-rw-r--r--  drivers/net/vxge/vxge-config.h | 2259
-rw-r--r--  drivers/net/vxge/vxge-ethtool.c | 1148
-rw-r--r--  drivers/net/vxge/vxge-ethtool.h | 67
-rw-r--r--  drivers/net/vxge/vxge-main.c | 4502
-rw-r--r--  drivers/net/vxge/vxge-main.h | 557
-rw-r--r--  drivers/net/vxge/vxge-reg.h | 4608
-rw-r--r--  drivers/net/vxge/vxge-traffic.c | 2528
-rw-r--r--  drivers/net/vxge/vxge-traffic.h | 2409
-rw-r--r--  drivers/net/vxge/vxge-version.h | 23
-rw-r--r--  drivers/parport/parport_serial.c | 30
-rw-r--r--  drivers/pnp/pnpbios/core.c | 16
-rw-r--r--  drivers/rtc/Kconfig | 4
-rw-r--r--  drivers/rtc/rtc-m41t80.c | 18
-rw-r--r--  drivers/rtc/rtc-v3020.c | 190
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c | 2
-rw-r--r--  drivers/spi/spi_gpio.c | 21
-rw-r--r--  drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c | 8
-rw-r--r--  drivers/staging/rtl8187se/r8180_core.c | 8
-rw-r--r--  drivers/usb/wusbcore/devconnect.c | 2
-rw-r--r--  drivers/video/nvidia/nv_setup.c | 1
-rw-r--r--  drivers/w1/w1_io.c | 16
100 files changed, 28052 insertions, 3433 deletions
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index c2c95e614506..1300df6f1642 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -177,6 +177,7 @@ static int print_unex = 1;
 #include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
 #include <linux/buffer_head.h>	/* for invalidate_buffers() */
 #include <linux/mutex.h>
 
@@ -4597,6 +4598,13 @@ MODULE_AUTHOR("Alain L. Knaff");
 MODULE_SUPPORTED_DEVICE("fd");
 MODULE_LICENSE("GPL");
 
+/* This doesn't actually get used other than for module information */
+static const struct pnp_device_id floppy_pnpids[] = {
+	{ "PNP0700", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(pnp, floppy_pnpids);
+
 #else
 
 __setup("floppy=", floppy_setup);
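
The floppy hunk above adds a PNP ID table that exists only to generate a module alias: MODULE_DEVICE_TABLE(pnp, ...) embeds the IDs in the module image so userspace can autoload the module when the firmware reports a matching device, even though the driver never binds a pnp_driver. A minimal standalone sketch of the same idiom, assuming a hypothetical out-of-tree module (only the "PNP0700" ID comes from the hunk; the rest is illustrative):

	#include <linux/module.h>
	#include <linux/mod_devicetable.h>

	/* "PNP0700" is the standard PC floppy controller ID used above */
	static const struct pnp_device_id demo_pnpids[] = {
		{ "PNP0700", 0 },
		{ }			/* terminating entry */
	};
	/*
	 * Emits a modalias (roughly "pnp:dPNP0700*") into the module so
	 * udev/modprobe can load it on device enumeration; no pnp_driver
	 * needs to be registered for the alias to work.
	 */
	MODULE_DEVICE_TABLE(pnp, demo_pnpids);

	MODULE_LICENSE("GPL");
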
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 8299e2d3b611..4d6de4f15ccb 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -4,7 +4,7 @@
  * Note that you can not swap over this thing, yet. Seems to work but
  * deadlocks sometimes - you can not swap over TCP in general.
  *
- * Copyright 1997-2000 Pavel Machek <pavel@ucw.cz>
+ * Copyright 1997-2000, 2008 Pavel Machek <pavel@suse.cz>
  * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
  *
  * This file is released under GPLv2 or later.
@@ -276,7 +276,7 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
 	return 0;
 
 error_out:
-	return 1;
+	return -EIO;
 }
 
 static struct request *nbd_find_request(struct nbd_device *lo,
@@ -467,9 +467,7 @@ static void nbd_handle_req(struct nbd_device *lo, struct request *req)
 		mutex_unlock(&lo->tx_lock);
 		printk(KERN_ERR "%s: Attempted send on closed socket\n",
 			lo->disk->disk_name);
-		req->errors++;
-		nbd_end_request(req);
-		return;
+		goto error_out;
 	}
 
 	lo->active_req = req;
@@ -531,7 +529,7 @@ static int nbd_thread(void *data)
  * { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
  */
 
-static void do_nbd_request(struct request_queue * q)
+static void do_nbd_request(struct request_queue *q)
 {
 	struct request *req;
 
@@ -568,27 +566,17 @@ static void do_nbd_request(struct request_queue * q)
 	}
 }
 
-static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
-		     unsigned int cmd, unsigned long arg)
-{
-	struct nbd_device *lo = bdev->bd_disk->private_data;
-	struct file *file;
-	int error;
-	struct request sreq ;
-	struct task_struct *thread;
-
-	if (!capable(CAP_SYS_ADMIN))
-		return -EPERM;
-
-	BUG_ON(lo->magic != LO_MAGIC);
-
-	/* Anyone capable of this syscall can do *real bad* things */
-	dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
-		lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);
-
+/* Must be called with tx_lock held */
+
+static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
+		       unsigned int cmd, unsigned long arg)
+{
 	switch (cmd) {
-	case NBD_DISCONNECT:
+	case NBD_DISCONNECT: {
+		struct request sreq;
+
 		printk(KERN_INFO "%s: NBD_DISCONNECT\n", lo->disk->disk_name);
+
 		blk_rq_init(NULL, &sreq);
 		sreq.cmd_type = REQ_TYPE_SPECIAL;
 		nbd_cmd(&sreq) = NBD_CMD_DISC;
@@ -599,29 +587,29 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
 	 */
 		sreq.sector = 0;
 		sreq.nr_sectors = 0;
 		if (!lo->sock)
 			return -EINVAL;
-		mutex_lock(&lo->tx_lock);
 		nbd_send_req(lo, &sreq);
-		mutex_unlock(&lo->tx_lock);
 		return 0;
+	}
 
-	case NBD_CLEAR_SOCK:
-		error = 0;
-		mutex_lock(&lo->tx_lock);
+	case NBD_CLEAR_SOCK: {
+		struct file *file;
+
 		lo->sock = NULL;
-		mutex_unlock(&lo->tx_lock);
 		file = lo->file;
 		lo->file = NULL;
 		nbd_clear_que(lo);
 		BUG_ON(!list_empty(&lo->queue_head));
 		if (file)
 			fput(file);
-		return error;
-	case NBD_SET_SOCK:
+		return 0;
+	}
+
+	case NBD_SET_SOCK: {
+		struct file *file;
 		if (lo->file)
 			return -EBUSY;
-		error = -EINVAL;
 		file = fget(arg);
 		if (file) {
 			struct inode *inode = file->f_path.dentry->d_inode;
@@ -630,12 +618,14 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
 			lo->sock = SOCKET_I(inode);
 			if (max_part > 0)
 				bdev->bd_invalidated = 1;
-			error = 0;
+			return 0;
 		} else {
 			fput(file);
 		}
 	}
-	return error;
+		return -EINVAL;
+	}
+
 	case NBD_SET_BLKSIZE:
 		lo->blksize = arg;
 		lo->bytesize &= ~(lo->blksize-1);
@@ -643,35 +633,50 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
 		set_blocksize(bdev, lo->blksize);
 		set_capacity(lo->disk, lo->bytesize >> 9);
 		return 0;
+
 	case NBD_SET_SIZE:
 		lo->bytesize = arg & ~(lo->blksize-1);
 		bdev->bd_inode->i_size = lo->bytesize;
 		set_blocksize(bdev, lo->blksize);
 		set_capacity(lo->disk, lo->bytesize >> 9);
 		return 0;
+
 	case NBD_SET_TIMEOUT:
 		lo->xmit_timeout = arg * HZ;
 		return 0;
+
 	case NBD_SET_SIZE_BLOCKS:
 		lo->bytesize = ((u64) arg) * lo->blksize;
 		bdev->bd_inode->i_size = lo->bytesize;
 		set_blocksize(bdev, lo->blksize);
 		set_capacity(lo->disk, lo->bytesize >> 9);
 		return 0;
-	case NBD_DO_IT:
+
+	case NBD_DO_IT: {
+		struct task_struct *thread;
+		struct file *file;
+		int error;
+
 		if (lo->pid)
 			return -EBUSY;
 		if (!lo->file)
 			return -EINVAL;
+
+		mutex_unlock(&lo->tx_lock);
+
 		thread = kthread_create(nbd_thread, lo, lo->disk->disk_name);
-		if (IS_ERR(thread))
+		if (IS_ERR(thread)) {
+			mutex_lock(&lo->tx_lock);
 			return PTR_ERR(thread);
+		}
 		wake_up_process(thread);
 		error = nbd_do_it(lo);
 		kthread_stop(thread);
+
+		mutex_lock(&lo->tx_lock);
 		if (error)
 			return error;
-		sock_shutdown(lo, 1);
+		sock_shutdown(lo, 0);
 		file = lo->file;
 		lo->file = NULL;
 		nbd_clear_que(lo);
@@ -684,6 +689,8 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
 		if (max_part > 0)
 			ioctl_by_bdev(bdev, BLKRRPART, 0);
 		return lo->harderror;
+	}
+
 	case NBD_CLEAR_QUE:
 		/*
 		 * This is for compatibility only. The queue is always cleared
@@ -691,6 +698,7 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
 		 */
 		BUG_ON(!lo->sock && !list_empty(&lo->queue_head));
 		return 0;
+
 	case NBD_PRINT_DEBUG:
 		printk(KERN_INFO "%s: next = %p, prev = %p, head = %p\n",
 			bdev->bd_disk->disk_name,
@@ -698,7 +706,29 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
 			&lo->queue_head);
 		return 0;
 	}
-	return -EINVAL;
+	return -ENOTTY;
+}
+
+static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
+		     unsigned int cmd, unsigned long arg)
+{
+	struct nbd_device *lo = bdev->bd_disk->private_data;
+	int error;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	BUG_ON(lo->magic != LO_MAGIC);
+
+	/* Anyone capable of this syscall can do *real bad* things */
+	dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
+		lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);
+
+	mutex_lock(&lo->tx_lock);
+	error = __nbd_ioctl(bdev, lo, cmd, arg);
+	mutex_unlock(&lo->tx_lock);
+
+	return error;
 }
 
 static struct block_device_operations nbd_fops =
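
The nbd restructuring above concentrates locking in one place: nbd_ioctl() takes tx_lock around a call to __nbd_ioctl(), which is documented to run with the lock held, and the long-blocking NBD_DO_IT case drops the lock around nbd_do_it() and re-takes it before returning, so the outer unlock stays unconditional. A minimal userspace sketch of that locked-wrapper pattern, using pthreads and hypothetical names (dev_lock, dev_op, __dev_op); compile with -lpthread:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Must be called with dev_lock held (mirrors __nbd_ioctl). */
	static int __dev_op(int cmd)
	{
		if (cmd == 1) {
			/*
			 * Long-blocking work: drop the lock first, exactly as
			 * NBD_DO_IT drops tx_lock around nbd_do_it(), then
			 * re-take it so the caller can unlock unconditionally.
			 */
			pthread_mutex_unlock(&dev_lock);
			printf("blocking work runs unlocked\n");
			pthread_mutex_lock(&dev_lock);
			return 0;
		}
		return -1;	/* unknown command (the -ENOTTY case) */
	}

	/* Outer wrapper owns all the locking (mirrors nbd_ioctl). */
	static int dev_op(int cmd)
	{
		int ret;

		pthread_mutex_lock(&dev_lock);
		ret = __dev_op(cmd);
		pthread_mutex_unlock(&dev_lock);
		return ret;
	}

	int main(void)
	{
		return dev_op(1);
	}
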
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 32b8bbf5003e..50dfa3bc71ce 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -713,7 +713,7 @@ static struct ctl_table_header *sysctl_header;
  */
 #define	TICK_CALIBRATE	(1000UL)
 
-static unsigned long hpet_calibrate(struct hpets *hpetp)
+static unsigned long __hpet_calibrate(struct hpets *hpetp)
 {
 	struct hpet_timer __iomem *timer = NULL;
 	unsigned long t, m, count, i, flags, start;
@@ -750,6 +750,26 @@ static unsigned long hpet_calibrate(struct hpets *hpetp)
 	return (m - start) / i;
 }
 
+static unsigned long hpet_calibrate(struct hpets *hpetp)
+{
+	unsigned long ret = -1;
+	unsigned long tmp;
+
+	/*
+	 * Try to calibrate until the return value becomes a stable, small
+	 * value.  If an SMI occurs during the calibration loop, the return
+	 * value will be big.  This avoids its impact.
+	 */
+	for ( ; ; ) {
+		tmp = __hpet_calibrate(hpetp);
+		if (ret <= tmp)
+			break;
+		ret = tmp;
+	}
+
+	return ret;
+}
+
 int hpet_alloc(struct hpet_data *hdp)
 {
 	u64 cap, mcfg;
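
The hpet change splits calibration in two: __hpet_calibrate() takes one measurement, and the new hpet_calibrate() wrapper keeps re-measuring while each sample is strictly smaller than the best so far. The reasoning is that an SMI can only inflate a sample, never shrink it, so the running minimum converges on the undisturbed value at the cost of one extra, non-improving sample. A standalone sketch of that take-the-minimum-until-stable loop, with rand() standing in for the hardware measurement:

	#include <stdio.h>
	#include <stdlib.h>

	/* Stand-in for __hpet_calibrate(): a true cost plus occasional spikes. */
	static unsigned long noisy_measure(void)
	{
		unsigned long base = 100;
		/* one sample in four is inflated, as if an SMI interrupted it */
		return base + (rand() % 4 == 0 ? 500 : rand() % 3);
	}

	static unsigned long calibrate(void)
	{
		unsigned long ret = ~0UL;	/* like the driver's (unsigned long)-1 */
		unsigned long tmp;

		/* keep sampling while each new sample is strictly smaller */
		for (;;) {
			tmp = noisy_measure();
			if (ret <= tmp)
				break;
			ret = tmp;
		}
		return ret;
	}

	int main(void)
	{
		printf("calibrated: %lu\n", calibrate());
		return 0;
	}

Note the loop stops at the first sample that fails to improve, so it can settle slightly above the true minimum when noise is dense; for calibration purposes a near-minimum is good enough.
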
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 7c43ae782b26..f824ef8a9273 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1488,7 +1488,8 @@ static void rekey_seq_generator(struct work_struct *work)
 	keyptr->count = (ip_cnt & COUNT_MASK) << HASH_BITS;
 	smp_wmb();
 	ip_cnt++;
-	schedule_delayed_work(&rekey_work, REKEY_INTERVAL);
+	schedule_delayed_work(&rekey_work,
+			      round_jiffies_relative(REKEY_INTERVAL));
 }
 
 static inline struct keydata *get_keyptr(void)
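
round_jiffies_relative() rounds a relative timeout so the absolute expiry lands on a whole-second boundary; periodic work items from unrelated drivers then expire together and the CPU wakes once instead of many times. A userspace analogue under simplified assumptions (round down to the next full second, no per-CPU skew; the kernel helper rounds to the nearest second and skews the boundary per CPU):

	#include <stdio.h>
	#include <time.h>

	/*
	 * Round a relative delay in milliseconds so that the absolute
	 * expiry lands on a whole-second boundary, letting independent
	 * periodic timers batch their wakeups.
	 */
	static long round_delay_ms(long delay_ms)
	{
		struct timespec now;
		long now_ms, expires;

		clock_gettime(CLOCK_MONOTONIC, &now);
		now_ms = now.tv_sec * 1000L + now.tv_nsec / 1000000L;
		expires = now_ms + delay_ms;
		expires -= expires % 1000;	/* snap to the second */
		if (expires <= now_ms)		/* never shorten to zero */
			expires += 1000;
		return expires - now_ms;
	}

	int main(void)
	{
		printf("5300ms rounds to %ldms\n", round_delay_ms(5300));
		return 0;
	}
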
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 6ec6e13d47d7..5e256494686a 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -298,6 +298,7 @@ struct slgt_info {
 
 	unsigned int rbuf_fill_level;
 	unsigned int if_mode;
+	unsigned int base_clock;
 
 	/* device status */
 
@@ -1156,22 +1157,26 @@ static long set_params32(struct slgt_info *info, struct MGSL_PARAMS32 __user *ne
 		return -EFAULT;
 
 	spin_lock(&info->lock);
-	info->params.mode = tmp_params.mode;
-	info->params.loopback = tmp_params.loopback;
-	info->params.flags = tmp_params.flags;
-	info->params.encoding = tmp_params.encoding;
-	info->params.clock_speed = tmp_params.clock_speed;
-	info->params.addr_filter = tmp_params.addr_filter;
-	info->params.crc_type = tmp_params.crc_type;
-	info->params.preamble_length = tmp_params.preamble_length;
-	info->params.preamble = tmp_params.preamble;
-	info->params.data_rate = tmp_params.data_rate;
-	info->params.data_bits = tmp_params.data_bits;
-	info->params.stop_bits = tmp_params.stop_bits;
-	info->params.parity = tmp_params.parity;
+	if (tmp_params.mode == MGSL_MODE_BASE_CLOCK) {
+		info->base_clock = tmp_params.clock_speed;
+	} else {
+		info->params.mode = tmp_params.mode;
+		info->params.loopback = tmp_params.loopback;
+		info->params.flags = tmp_params.flags;
+		info->params.encoding = tmp_params.encoding;
+		info->params.clock_speed = tmp_params.clock_speed;
+		info->params.addr_filter = tmp_params.addr_filter;
+		info->params.crc_type = tmp_params.crc_type;
+		info->params.preamble_length = tmp_params.preamble_length;
+		info->params.preamble = tmp_params.preamble;
+		info->params.data_rate = tmp_params.data_rate;
+		info->params.data_bits = tmp_params.data_bits;
+		info->params.stop_bits = tmp_params.stop_bits;
+		info->params.parity = tmp_params.parity;
+	}
 	spin_unlock(&info->lock);
 
-	change_params(info);
+	program_hw(info);
 
 	return 0;
 }
@@ -2559,10 +2564,13 @@ static int set_params(struct slgt_info *info, MGSL_PARAMS __user *new_params)
 		return -EFAULT;
 
 	spin_lock_irqsave(&info->lock, flags);
-	memcpy(&info->params, &tmp_params, sizeof(MGSL_PARAMS));
+	if (tmp_params.mode == MGSL_MODE_BASE_CLOCK)
+		info->base_clock = tmp_params.clock_speed;
+	else
+		memcpy(&info->params, &tmp_params, sizeof(MGSL_PARAMS));
 	spin_unlock_irqrestore(&info->lock, flags);
 
-	change_params(info);
+	program_hw(info);
 
 	return 0;
 }
@@ -3432,6 +3440,7 @@ static struct slgt_info *alloc_dev(int adapter_num, int port_num, struct pci_dev
 	info->magic = MGSL_MAGIC;
 	INIT_WORK(&info->task, bh_handler);
 	info->max_frame_size = 4096;
+	info->base_clock = 14745600;
 	info->rbuf_fill_level = DMABUFSIZE;
 	info->port.close_delay = 5*HZ/10;
 	info->port.closing_wait = 30*HZ;
@@ -3779,7 +3788,7 @@ static void enable_loopback(struct slgt_info *info)
 static void set_rate(struct slgt_info *info, u32 rate)
 {
 	unsigned int div;
-	static unsigned int osc = 14745600;
+	unsigned int osc = info->base_clock;
 
 	/* div = osc/rate - 1
 	 *
@@ -4083,18 +4092,27 @@ static void async_mode(struct slgt_info *info)
  * 06  CTS IRQ enable
  * 05  DCD IRQ enable
  * 04  RI IRQ enable
- * 03  reserved, must be zero
+ * 03  0=16x sampling, 1=8x sampling
  * 02  1=txd->rxd internal loopback enable
  * 01  reserved, must be zero
  * 00  1=master IRQ enable
  */
 	val = BIT15 + BIT14 + BIT0;
+	/* JCR[8] : 1 = x8 async mode feature available */
+	if ((rd_reg32(info, JCR) & BIT8) && info->params.data_rate &&
+	    ((info->base_clock < (info->params.data_rate * 16)) ||
+	     (info->base_clock % (info->params.data_rate * 16)))) {
+		/* use 8x sampling */
+		val |= BIT3;
+		set_rate(info, info->params.data_rate * 8);
+	} else {
+		/* use 16x sampling */
+		set_rate(info, info->params.data_rate * 16);
+	}
 	wr_reg16(info, SCR, val);
 
 	slgt_irq_on(info, IRQ_RXBREAK | IRQ_RXOVER);
 
-	set_rate(info, info->params.data_rate * 16);
-
 	if (info->params.loopback)
 		enable_loopback(info);
 }
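
The async_mode() change above selects the bit-sampling factor from the now per-device base clock: 16x sampling is kept when the base clock can form a clean 16x divisor for the requested data rate, and the hardware's 8x mode (advertised by JCR[8]) is used when 16x would be out of range or leave a remainder. The decision reduces to a small pure function; a standalone sketch with the JCR capability probe replaced by a plain flag:

	#include <stdio.h>

	/*
	 * Mirror of the 16x-vs-8x sampling decision: fall back to 8x only
	 * when the hardware supports it (the JCR[8] probe, here have_x8)
	 * and 16x is either out of range or leaves a divisor remainder.
	 */
	static unsigned int pick_oversampling(unsigned int base_clock,
					      unsigned int data_rate,
					      int have_x8)
	{
		if (have_x8 && data_rate &&
		    (base_clock < data_rate * 16 ||
		     base_clock % (data_rate * 16)))
			return 8;
		return 16;
	}

	int main(void)
	{
		/* 14.7456 MHz divides evenly for 115200 * 16, so 16x wins */
		printf("%u\n", pick_oversampling(14745600, 115200, 1));
		/* 250000 baud needs 4 MHz at 16x; 14.7456 MHz leaves a remainder */
		printf("%u\n", pick_oversampling(14745600, 250000, 1));
		return 0;
	}
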
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 33dac94922a7..66b99a2049e3 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1758,7 +1758,7 @@ static int __tty_open(struct inode *inode, struct file *filp)
 	struct tty_driver *driver;
 	int index;
 	dev_t device = inode->i_rdev;
-	unsigned short saved_flags = filp->f_flags;
+	unsigned saved_flags = filp->f_flags;
 
 	nonseekable_open(inode, filp);
 
@@ -2681,7 +2681,7 @@ void __do_SAK(struct tty_struct *tty)
 	/* Kill the entire session */
 	do_each_pid_task(session, PIDTYPE_SID, p) {
 		printk(KERN_NOTICE "SAK: killed process %d"
-			" (%s): task_session_nr(p)==tty->session\n",
+			" (%s): task_session(p)==tty->session\n",
 			task_pid_nr(p), p->comm);
 		send_sig(SIGKILL, p, 1);
 	} while_each_pid_task(session, PIDTYPE_SID, p);
@@ -2691,7 +2691,7 @@ void __do_SAK(struct tty_struct *tty)
 	do_each_thread(g, p) {
 		if (p->signal->tty == tty) {
 			printk(KERN_NOTICE "SAK: killed process %d"
-				" (%s): task_session_nr(p)==tty->session\n",
+				" (%s): task_session(p)==tty->session\n",
 				task_pid_nr(p), p->comm);
 			send_sig(SIGKILL, p, 1);
 			continue;
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 0c79fe7f1567..4d85402a9e4a 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -1882,7 +1882,7 @@ static void hifn_clear_rings(struct hifn_device *dev, int error)
 
 static void hifn_work(struct work_struct *work)
 {
-	struct delayed_work *dw = container_of(work, struct delayed_work, work);
+	struct delayed_work *dw = to_delayed_work(work);
 	struct hifn_device *dev = container_of(dw, struct hifn_device, work);
 	unsigned long flags;
 	int reset = 0;
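
to_delayed_work() is a thin wrapper that recovers the delayed_work from its embedded work_struct with container_of(), i.e. by subtracting the member offset; the second container_of() in the hunk then climbs one level further to the hifn_device. A standalone sketch of that recovery-by-offset idiom, with container_of() reimplemented and hypothetical structures in place of the kernel's:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct work { int pending; };

	/* a delayed_work embeds a plain work item, as in the kernel */
	struct delayed_work { struct work work; long expires; };

	/* the helper the hunk above switches to, reimplemented here */
	static struct delayed_work *to_delayed_work(struct work *w)
	{
		return container_of(w, struct delayed_work, work);
	}

	struct device { struct delayed_work work; int id; };

	int main(void)
	{
		struct device dev = {
			.work = { .work = { 0 }, .expires = 42 },
			.id = 7,
		};
		struct work *w = &dev.work.work;   /* what a callback receives */

		/* climb back out: work -> delayed_work -> device */
		struct delayed_work *dw = to_delayed_work(w);
		struct device *d = container_of(dw, struct device, work);

		printf("expires=%ld id=%d\n", dw->expires, d->id);
		return 0;
	}
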
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index eee47fd16d79..e5f5c5a8ba6c 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -1,13 +1,12 @@
 #
 #	EDAC Kconfig
-#	Copyright (c) 2003 Linux Networx
+#	Copyright (c) 2008 Doug Thompson www.softwarebitmaker.com
 #	Licensed and distributed under the GPL
 #
 
 menuconfig EDAC
-	bool "EDAC - error detection and reporting (EXPERIMENTAL)"
+	bool "EDAC - error detection and reporting"
 	depends on HAS_IOMEM
-	depends on EXPERIMENTAL
 	depends on X86 || PPC
 	help
 	  EDAC is designed to report errors in the core system.
@@ -40,6 +39,14 @@ config EDAC_DEBUG
 	  there're four debug levels (x=0,1,2,3 from low to high).
 	  Usually you should select 'N'.
 
+config EDAC_DEBUG_VERBOSE
+	bool "More verbose debugging"
+	depends on EDAC_DEBUG
+	help
+	  This option makes debugging information more verbose.
+	  The source file name and line number where each debugging
+	  message was printed will be added to the message.
+
 config EDAC_MM_EDAC
 	tristate "Main Memory EDAC (Error Detection And Correction) reporting"
 	default y
@@ -174,4 +181,27 @@ config EDAC_CELL
 	  Cell Broadband Engine internal memory controller
 	  on platform without a hypervisor
 
+config EDAC_PPC4XX
+	tristate "PPC4xx IBM DDR2 Memory Controller"
+	depends on EDAC_MM_EDAC && 4xx
+	help
+	  This enables support for EDAC on the ECC memory used
+	  with the IBM DDR2 memory controller found in various
+	  PowerPC 4xx embedded processors such as the 405EX[r],
+	  440SP, 440SPe, 460EX, 460GT and 460SX.
+
+config EDAC_AMD8131
+	tristate "AMD8131 HyperTransport PCI-X Tunnel"
+	depends on EDAC_MM_EDAC && PCI
+	help
+	  Support for error detection and correction on the
+	  AMD8131 HyperTransport PCI-X Tunnel chip.
+
+config EDAC_AMD8111
+	tristate "AMD8111 HyperTransport I/O Hub"
+	depends on EDAC_MM_EDAC && PCI
+	help
+	  Support for error detection and correction on the
+	  AMD8111 HyperTransport I/O Hub chip.
+
 endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index b75196927de3..a5fdcf02f591 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -34,4 +34,4 @@ obj-$(CONFIG_EDAC_PASEMI) += pasemi_edac.o
 obj-$(CONFIG_EDAC_MPC85XX)	+= mpc85xx_edac.o
 obj-$(CONFIG_EDAC_MV64X60)	+= mv64x60_edac.o
 obj-$(CONFIG_EDAC_CELL)		+= cell_edac.o
-
+obj-$(CONFIG_EDAC_PPC4XX)	+= ppc4xx_edac.o
diff --git a/drivers/edac/amd8111_edac.c b/drivers/edac/amd8111_edac.c
new file mode 100644
index 000000000000..614692181120
--- /dev/null
+++ b/drivers/edac/amd8111_edac.c
@@ -0,0 +1,595 @@
+/*
+ * amd8111_edac.c, AMD8111 Hyper Transport chip EDAC kernel module
+ *
+ * Copyright (c) 2008 Wind River Systems, Inc.
+ *
+ * Authors:	Cao Qingtao <qingtao.cao@windriver.com>
+ *		Benjamin Walsh <benjamin.walsh@windriver.com>
+ *		Hu Yongqi <yongqi.hu@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/edac.h>
+#include <linux/pci_ids.h>
+#include <asm/io.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+#include "amd8111_edac.h"
+
+#define AMD8111_EDAC_REVISION	" Ver: 1.0.0 " __DATE__
+#define AMD8111_EDAC_MOD_STR	"amd8111_edac"
+
+#define PCI_DEVICE_ID_AMD_8111_PCI	0x7460
+static int edac_dev_idx;
+
+enum amd8111_edac_devs {
+	LPC_BRIDGE = 0,
+};
+
+enum amd8111_edac_pcis {
+	PCI_BRIDGE = 0,
+};
+
+/* Wrapper functions for accessing PCI configuration space */
+static int edac_pci_read_dword(struct pci_dev *dev, int reg, u32 *val32)
+{
+	int ret;
+
+	ret = pci_read_config_dword(dev, reg, val32);
+	if (ret != 0)
+		printk(KERN_ERR AMD8111_EDAC_MOD_STR
+			" PCI Access Read Error at 0x%x\n", reg);
+
+	return ret;
+}
+
+static void edac_pci_read_byte(struct pci_dev *dev, int reg, u8 *val8)
+{
+	int ret;
+
+	ret = pci_read_config_byte(dev, reg, val8);
+	if (ret != 0)
+		printk(KERN_ERR AMD8111_EDAC_MOD_STR
+			" PCI Access Read Error at 0x%x\n", reg);
+}
+
+static void edac_pci_write_dword(struct pci_dev *dev, int reg, u32 val32)
+{
+	int ret;
+
+	ret = pci_write_config_dword(dev, reg, val32);
+	if (ret != 0)
+		printk(KERN_ERR AMD8111_EDAC_MOD_STR
+			" PCI Access Write Error at 0x%x\n", reg);
+}
+
+static void edac_pci_write_byte(struct pci_dev *dev, int reg, u8 val8)
+{
+	int ret;
+
+	ret = pci_write_config_byte(dev, reg, val8);
+	if (ret != 0)
+		printk(KERN_ERR AMD8111_EDAC_MOD_STR
+			" PCI Access Write Error at 0x%x\n", reg);
+}
+
+/*
+ * device-specific methods for amd8111 PCI Bridge Controller
+ *
+ * Error Reporting and Handling for the amd8111 chipset can be found
+ * in section 3.1.2 of its datasheet, p. 37
+ */
+static void amd8111_pci_bridge_init(struct amd8111_pci_info *pci_info)
+{
+	u32 val32;
+	struct pci_dev *dev = pci_info->dev;
+
+	/* First, clear error detection flags on the host interface */
+
+	/* Clear SSE/SMA/STA flags in the global status register */
+	edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32);
+	if (val32 & PCI_STSCMD_CLEAR_MASK)
+		edac_pci_write_dword(dev, REG_PCI_STSCMD, val32);
+
+	/* Clear CRC and Link Fail flags in HT Link Control reg */
+	edac_pci_read_dword(dev, REG_HT_LINK, &val32);
+	if (val32 & HT_LINK_CLEAR_MASK)
+		edac_pci_write_dword(dev, REG_HT_LINK, val32);
+
+	/* Second, clear all faults on the secondary interface */
+
+	/* Clear error flags in the memory-base limit reg. */
+	edac_pci_read_dword(dev, REG_MEM_LIM, &val32);
+	if (val32 & MEM_LIMIT_CLEAR_MASK)
+		edac_pci_write_dword(dev, REG_MEM_LIM, val32);
+
+	/* Clear Discard Timer Expired flag in Interrupt/Bridge Control reg */
+	edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32);
+	if (val32 & PCI_INTBRG_CTRL_CLEAR_MASK)
+		edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32);
+
+	/* Last, enable error detections */
+	if (edac_op_state == EDAC_OPSTATE_POLL) {
+		/* Enable System Error reporting in global status register */
+		edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32);
+		val32 |= PCI_STSCMD_SERREN;
+		edac_pci_write_dword(dev, REG_PCI_STSCMD, val32);
+
+		/* Enable CRC Sync flood packets to HyperTransport Link */
+		edac_pci_read_dword(dev, REG_HT_LINK, &val32);
+		val32 |= HT_LINK_CRCFEN;
+		edac_pci_write_dword(dev, REG_HT_LINK, val32);
+
+		/* Enable SSE reporting etc in Interrupt control reg */
+		edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32);
+		val32 |= PCI_INTBRG_CTRL_POLL_MASK;
+		edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32);
+	}
+}
+
+static void amd8111_pci_bridge_exit(struct amd8111_pci_info *pci_info)
+{
+	u32 val32;
+	struct pci_dev *dev = pci_info->dev;
+
+	if (edac_op_state == EDAC_OPSTATE_POLL) {
+		/* Disable System Error reporting */
+		edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32);
+		val32 &= ~PCI_STSCMD_SERREN;
+		edac_pci_write_dword(dev, REG_PCI_STSCMD, val32);
+
+		/* Disable CRC flood packets */
+		edac_pci_read_dword(dev, REG_HT_LINK, &val32);
+		val32 &= ~HT_LINK_CRCFEN;
+		edac_pci_write_dword(dev, REG_HT_LINK, val32);
+
+		/* Disable DTSERREN/MARSP/SERREN in Interrupt Control reg */
+		edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32);
+		val32 &= ~PCI_INTBRG_CTRL_POLL_MASK;
+		edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32);
+	}
+}
+
+static void amd8111_pci_bridge_check(struct edac_pci_ctl_info *edac_dev)
+{
+	struct amd8111_pci_info *pci_info = edac_dev->pvt_info;
+	struct pci_dev *dev = pci_info->dev;
+	u32 val32;
+
+	/* Check out PCI Bridge Status and Command Register */
+	edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32);
+	if (val32 & PCI_STSCMD_CLEAR_MASK) {
+		printk(KERN_INFO "Error(s) in PCI bridge status and command "
+			"register on device %s\n", pci_info->ctl_name);
+		printk(KERN_INFO "SSE: %d, RMA: %d, RTA: %d\n",
+			(val32 & PCI_STSCMD_SSE) != 0,
+			(val32 & PCI_STSCMD_RMA) != 0,
+			(val32 & PCI_STSCMD_RTA) != 0);
+
+		val32 |= PCI_STSCMD_CLEAR_MASK;
+		edac_pci_write_dword(dev, REG_PCI_STSCMD, val32);
+
+		edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
+	}
+
+	/* Check out HyperTransport Link Control Register */
+	edac_pci_read_dword(dev, REG_HT_LINK, &val32);
+	if (val32 & HT_LINK_LKFAIL) {
+		printk(KERN_INFO "Error(s) in hypertransport link control "
+			"register on device %s\n", pci_info->ctl_name);
+		printk(KERN_INFO "LKFAIL: %d\n",
+			(val32 & HT_LINK_LKFAIL) != 0);
+
+		val32 |= HT_LINK_LKFAIL;
+		edac_pci_write_dword(dev, REG_HT_LINK, val32);
+
+		edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
+	}
+
+	/* Check out PCI Interrupt and Bridge Control Register */
+	edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32);
+	if (val32 & PCI_INTBRG_CTRL_DTSTAT) {
+		printk(KERN_INFO "Error(s) in PCI interrupt and bridge control "
+			"register on device %s\n", pci_info->ctl_name);
+		printk(KERN_INFO "DTSTAT: %d\n",
+			(val32 & PCI_INTBRG_CTRL_DTSTAT) != 0);
+
+		val32 |= PCI_INTBRG_CTRL_DTSTAT;
+		edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32);
+
+		edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
+	}
+
+	/* Check out PCI Bridge Memory Base-Limit Register */
+	edac_pci_read_dword(dev, REG_MEM_LIM, &val32);
+	if (val32 & MEM_LIMIT_CLEAR_MASK) {
+		printk(KERN_INFO
+			"Error(s) in mem limit register on %s device\n",
+			pci_info->ctl_name);
+		printk(KERN_INFO "DPE: %d, RSE: %d, RMA: %d\n"
+			"RTA: %d, STA: %d, MDPE: %d\n",
+			(val32 & MEM_LIMIT_DPE) != 0,
+			(val32 & MEM_LIMIT_RSE) != 0,
+			(val32 & MEM_LIMIT_RMA) != 0,
+			(val32 & MEM_LIMIT_RTA) != 0,
+			(val32 & MEM_LIMIT_STA) != 0,
+			(val32 & MEM_LIMIT_MDPE) != 0);
+
+		val32 |= MEM_LIMIT_CLEAR_MASK;
+		edac_pci_write_dword(dev, REG_MEM_LIM, val32);
+
+		edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
+	}
+}
+
+static struct resource *legacy_io_res;
+static int at_compat_reg_broken;
+#define LEGACY_NR_PORTS	1
+
+/* device-specific methods for amd8111 LPC Bridge device */
+static void amd8111_lpc_bridge_init(struct amd8111_dev_info *dev_info)
+{
+	u8 val8;
+	struct pci_dev *dev = dev_info->dev;
+
+	/* First clear REG_AT_COMPAT[SERR, IOCHK] if necessary */
+	legacy_io_res = request_region(REG_AT_COMPAT, LEGACY_NR_PORTS,
+					AMD8111_EDAC_MOD_STR);
+	if (!legacy_io_res)
+		printk(KERN_INFO "%s: failed to request legacy I/O region "
+			"start %d, len %d\n", __func__,
+			REG_AT_COMPAT, LEGACY_NR_PORTS);
+	else {
+		val8 = __do_inb(REG_AT_COMPAT);
+		if (val8 == 0xff) { /* buggy port */
+			printk(KERN_INFO "%s: port %d is buggy, not supported"
+				" by hardware?\n", __func__, REG_AT_COMPAT);
+			at_compat_reg_broken = 1;
+			release_region(REG_AT_COMPAT, LEGACY_NR_PORTS);
+			legacy_io_res = NULL;
+		} else {
+			u8 out8 = 0;
+			if (val8 & AT_COMPAT_SERR)
+				out8 = AT_COMPAT_CLRSERR;
+			if (val8 & AT_COMPAT_IOCHK)
+				out8 |= AT_COMPAT_CLRIOCHK;
+			if (out8 > 0)
+				__do_outb(out8, REG_AT_COMPAT);
+		}
+	}
+
+	/* Second clear error flags on LPC bridge */
+	edac_pci_read_byte(dev, REG_IO_CTRL_1, &val8);
+	if (val8 & IO_CTRL_1_CLEAR_MASK)
+		edac_pci_write_byte(dev, REG_IO_CTRL_1, val8);
+}
+
+static void amd8111_lpc_bridge_exit(struct amd8111_dev_info *dev_info)
+{
+	if (legacy_io_res)
+		release_region(REG_AT_COMPAT, LEGACY_NR_PORTS);
+}
+
+static void amd8111_lpc_bridge_check(struct edac_device_ctl_info *edac_dev)
+{
+	struct amd8111_dev_info *dev_info = edac_dev->pvt_info;
+	struct pci_dev *dev = dev_info->dev;
+	u8 val8;
+
+	edac_pci_read_byte(dev, REG_IO_CTRL_1, &val8);
+	if (val8 & IO_CTRL_1_CLEAR_MASK) {
+		printk(KERN_INFO
+			"Error(s) in IO control register on %s device\n",
+			dev_info->ctl_name);
+		printk(KERN_INFO "LPC ERR: %d, PW2LPC: %d\n",
+			(val8 & IO_CTRL_1_LPC_ERR) != 0,
+			(val8 & IO_CTRL_1_PW2LPC) != 0);
+
+		val8 |= IO_CTRL_1_CLEAR_MASK;
+		edac_pci_write_byte(dev, REG_IO_CTRL_1, val8);
+
+		edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
+	}
+
+	if (at_compat_reg_broken == 0) {
+		u8 out8 = 0;
+		val8 = __do_inb(REG_AT_COMPAT);
+		if (val8 & AT_COMPAT_SERR)
+			out8 = AT_COMPAT_CLRSERR;
+		if (val8 & AT_COMPAT_IOCHK)
+			out8 |= AT_COMPAT_CLRIOCHK;
+		if (out8 > 0) {
+			__do_outb(out8, REG_AT_COMPAT);
+			edac_device_handle_ue(edac_dev, 0, 0,
+						edac_dev->ctl_name);
+		}
+	}
+}
+
+/* General devices represented by edac_device_ctl_info */
+static struct amd8111_dev_info amd8111_devices[] = {
+	[LPC_BRIDGE] = {
+		.err_dev = PCI_DEVICE_ID_AMD_8111_LPC,
+		.ctl_name = "lpc",
+		.init = amd8111_lpc_bridge_init,
+		.exit = amd8111_lpc_bridge_exit,
+		.check = amd8111_lpc_bridge_check,
+	},
+	{0},
+};
+
+/* PCI controllers represented by edac_pci_ctl_info */
+static struct amd8111_pci_info amd8111_pcis[] = {
+	[PCI_BRIDGE] = {
+		.err_dev = PCI_DEVICE_ID_AMD_8111_PCI,
+		.ctl_name = "AMD8111_PCI_Controller",
+		.init = amd8111_pci_bridge_init,
+		.exit = amd8111_pci_bridge_exit,
+		.check = amd8111_pci_bridge_check,
+	},
+	{0},
+};
+
+static int amd8111_dev_probe(struct pci_dev *dev,
+				const struct pci_device_id *id)
+{
+	struct amd8111_dev_info *dev_info = &amd8111_devices[id->driver_data];
+
+	dev_info->dev = pci_get_device(PCI_VENDOR_ID_AMD,
+					dev_info->err_dev, NULL);
+
+	if (!dev_info->dev) {
+		printk(KERN_ERR "EDAC device not found: "
+			"vendor %x, device %x, name %s\n",
+			PCI_VENDOR_ID_AMD, dev_info->err_dev,
+			dev_info->ctl_name);
+		return -ENODEV;
+	}
+
+	if (pci_enable_device(dev_info->dev)) {
+		pci_dev_put(dev_info->dev);
+		printk(KERN_ERR "failed to enable: "
+			"vendor %x, device %x, name %s\n",
+			PCI_VENDOR_ID_AMD, dev_info->err_dev,
+			dev_info->ctl_name);
+		return -ENODEV;
+	}
+
+	/*
+	 * We do not allocate an extra private structure for
+	 * edac_device_ctl_info; we make use of the existing
+	 * one instead.
+	 */
+	dev_info->edac_idx = edac_dev_idx++;
+	dev_info->edac_dev =
+		edac_device_alloc_ctl_info(0, dev_info->ctl_name, 1,
+					   NULL, 0, 0,
+					   NULL, 0, dev_info->edac_idx);
+	if (!dev_info->edac_dev)
+		return -ENOMEM;
+
+	dev_info->edac_dev->pvt_info = dev_info;
+	dev_info->edac_dev->dev = &dev_info->dev->dev;
+	dev_info->edac_dev->mod_name = AMD8111_EDAC_MOD_STR;
+	dev_info->edac_dev->ctl_name = dev_info->ctl_name;
+	dev_info->edac_dev->dev_name = dev_info->dev->dev.bus_id;
+
+	if (edac_op_state == EDAC_OPSTATE_POLL)
+		dev_info->edac_dev->edac_check = dev_info->check;
+
+	if (dev_info->init)
+		dev_info->init(dev_info);
+
+	if (edac_device_add_device(dev_info->edac_dev) > 0) {
+		printk(KERN_ERR "failed to add edac_dev for %s\n",
+			dev_info->ctl_name);
+		edac_device_free_ctl_info(dev_info->edac_dev);
+		return -ENODEV;
+	}
+
+	printk(KERN_INFO "added one edac_dev on AMD8111 "
+		"vendor %x, device %x, name %s\n",
+		PCI_VENDOR_ID_AMD, dev_info->err_dev,
+		dev_info->ctl_name);
+
+	return 0;
+}
+
+static void amd8111_dev_remove(struct pci_dev *dev)
+{
+	struct amd8111_dev_info *dev_info;
+
+	for (dev_info = amd8111_devices; dev_info->err_dev; dev_info++)
+		if (dev_info->dev->device == dev->device)
+			break;
+
+	if (!dev_info->err_dev)	/* should never happen */
+		return;
+
+	if (dev_info->edac_dev) {
+		edac_device_del_device(dev_info->edac_dev->dev);
+		edac_device_free_ctl_info(dev_info->edac_dev);
+	}
+
+	if (dev_info->exit)
+		dev_info->exit(dev_info);
+
+	pci_dev_put(dev_info->dev);
+}
+
+static int amd8111_pci_probe(struct pci_dev *dev,
+				const struct pci_device_id *id)
+{
+	struct amd8111_pci_info *pci_info = &amd8111_pcis[id->driver_data];
+
+	pci_info->dev = pci_get_device(PCI_VENDOR_ID_AMD,
+					pci_info->err_dev, NULL);
+
+	if (!pci_info->dev) {
+		printk(KERN_ERR "EDAC device not found: "
+			"vendor %x, device %x, name %s\n",
+			PCI_VENDOR_ID_AMD, pci_info->err_dev,
+			pci_info->ctl_name);
+		return -ENODEV;
+	}
+
+	if (pci_enable_device(pci_info->dev)) {
+		pci_dev_put(pci_info->dev);
+		printk(KERN_ERR "failed to enable: "
+			"vendor %x, device %x, name %s\n",
+			PCI_VENDOR_ID_AMD, pci_info->err_dev,
+			pci_info->ctl_name);
+		return -ENODEV;
+	}
+
+	/*
+	 * We do not allocate an extra private structure for
+	 * edac_pci_ctl_info; we make use of the existing
+	 * one instead.
+	 */
+	pci_info->edac_idx = edac_pci_alloc_index();
+	pci_info->edac_dev = edac_pci_alloc_ctl_info(0, pci_info->ctl_name);
+	if (!pci_info->edac_dev)
+		return -ENOMEM;
+
+	pci_info->edac_dev->pvt_info = pci_info;
+	pci_info->edac_dev->dev = &pci_info->dev->dev;
+	pci_info->edac_dev->mod_name = AMD8111_EDAC_MOD_STR;
+	pci_info->edac_dev->ctl_name = pci_info->ctl_name;
+	pci_info->edac_dev->dev_name = pci_info->dev->dev.bus_id;
+
+	if (edac_op_state == EDAC_OPSTATE_POLL)
+		pci_info->edac_dev->edac_check = pci_info->check;
+
+	if (pci_info->init)
+		pci_info->init(pci_info);
+
+	if (edac_pci_add_device(pci_info->edac_dev, pci_info->edac_idx) > 0) {
+		printk(KERN_ERR "failed to add edac_pci for %s\n",
+			pci_info->ctl_name);
+		edac_pci_free_ctl_info(pci_info->edac_dev);
+		return -ENODEV;
+	}
+
+	printk(KERN_INFO "added one edac_pci on AMD8111 "
+		"vendor %x, device %x, name %s\n",
+		PCI_VENDOR_ID_AMD, pci_info->err_dev,
+		pci_info->ctl_name);
+
+	return 0;
+}
+
+static void amd8111_pci_remove(struct pci_dev *dev)
+{
+	struct amd8111_pci_info *pci_info;
+
+	for (pci_info = amd8111_pcis; pci_info->err_dev; pci_info++)
+		if (pci_info->dev->device == dev->device)
+			break;
+
+	if (!pci_info->err_dev)	/* should never happen */
+		return;
+
+	if (pci_info->edac_dev) {
+		edac_pci_del_device(pci_info->edac_dev->dev);
+		edac_pci_free_ctl_info(pci_info->edac_dev);
+	}
+
+	if (pci_info->exit)
+		pci_info->exit(pci_info);
+
+	pci_dev_put(pci_info->dev);
+}
+
+/* PCI Device ID table for general EDAC device */
+static const struct pci_device_id amd8111_edac_dev_tbl[] = {
+	{
+	PCI_VEND_DEV(AMD, 8111_LPC),
+	.subvendor = PCI_ANY_ID,
+	.subdevice = PCI_ANY_ID,
+	.class = 0,
+	.class_mask = 0,
+	.driver_data = LPC_BRIDGE,
+	},
+	{
+	0,
+	}	/* table is NULL-terminated */
+};
+MODULE_DEVICE_TABLE(pci, amd8111_edac_dev_tbl);
+
+static struct pci_driver amd8111_edac_dev_driver = {
+	.name = "AMD8111_EDAC_DEV",
+	.probe = amd8111_dev_probe,
+	.remove = amd8111_dev_remove,
+	.id_table = amd8111_edac_dev_tbl,
+};
+
+/* PCI Device ID table for EDAC PCI controller */
+static const struct pci_device_id amd8111_edac_pci_tbl[] = {
+	{
+	PCI_VEND_DEV(AMD, 8111_PCI),
+	.subvendor = PCI_ANY_ID,
+	.subdevice = PCI_ANY_ID,
+	.class = 0,
+	.class_mask = 0,
+	.driver_data = PCI_BRIDGE,
+	},
+	{
+	0,
+	}	/* table is NULL-terminated */
+};
+MODULE_DEVICE_TABLE(pci, amd8111_edac_pci_tbl);
+
+static struct pci_driver amd8111_edac_pci_driver = {
+	.name = "AMD8111_EDAC_PCI",
+	.probe = amd8111_pci_probe,
+	.remove = amd8111_pci_remove,
+	.id_table = amd8111_edac_pci_tbl,
+};
+
+static int __init amd8111_edac_init(void)
+{
+	int val;
+
+	printk(KERN_INFO "AMD8111 EDAC driver " AMD8111_EDAC_REVISION "\n");
+	printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc.\n");
+
+	/* Only POLL mode supported so far */
+	edac_op_state = EDAC_OPSTATE_POLL;
+
+	val = pci_register_driver(&amd8111_edac_dev_driver);
+	val |= pci_register_driver(&amd8111_edac_pci_driver);
+
+	return val;
+}
+
+static void __exit amd8111_edac_exit(void)
+{
+	pci_unregister_driver(&amd8111_edac_pci_driver);
+	pci_unregister_driver(&amd8111_edac_dev_driver);
+}
+
+
+module_init(amd8111_edac_init);
+module_exit(amd8111_edac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>\n");
+MODULE_DESCRIPTION("AMD8111 HyperTransport I/O Hub EDAC kernel module");
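
All of the *_check() routines in the new driver follow one write-1-to-clear convention: read the status register, decode and log the latched error bits, write those bits back to acknowledge them, then report to the EDAC core. A standalone sketch of the pattern against a fake register, with hypothetical bit names (the real masks live in amd8111_edac.h below):

	#include <stdint.h>
	#include <stdio.h>

	#define STS_SSE (1u << 30)	/* signalled system error (hypothetical) */
	#define STS_RMA (1u << 29)	/* received master abort  (hypothetical) */
	#define STS_CLEAR_MASK (STS_SSE | STS_RMA)

	static uint32_t fake_reg = STS_RMA;	/* pretend hardware latched an error */

	static uint32_t reg_read(void) { return fake_reg; }

	/* write-1-to-clear: set bits written back drop the latched flags */
	static void reg_write(uint32_t v) { fake_reg &= ~(v & STS_CLEAR_MASK); }

	static void check(void)
	{
		uint32_t val = reg_read();

		if (!(val & STS_CLEAR_MASK))
			return;		/* nothing latched, nothing to report */

		printf("SSE: %d, RMA: %d\n",
		       (val & STS_SSE) != 0, (val & STS_RMA) != 0);

		/* acknowledge exactly what we saw, then report upward */
		reg_write(val | STS_CLEAR_MASK);
		printf("(report to EDAC core here)\n");
	}

	int main(void)
	{
		check();	/* logs and clears */
		check();	/* silent: flags were cleared */
		return 0;
	}
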
diff --git a/drivers/edac/amd8111_edac.h b/drivers/edac/amd8111_edac.h
new file mode 100644
index 000000000000..35794331debc
--- /dev/null
+++ b/drivers/edac/amd8111_edac.h
@@ -0,0 +1,130 @@
+/*
+ * amd8111_edac.h, EDAC defs for AMD8111 hypertransport chip
+ *
+ * Copyright (c) 2008 Wind River Systems, Inc.
+ *
+ * Authors:	Cao Qingtao <qingtao.cao@windriver.com>
+ *		Benjamin Walsh <benjamin.walsh@windriver.com>
+ *		Hu Yongqi <yongqi.hu@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _AMD8111_EDAC_H_
+#define _AMD8111_EDAC_H_
+
+/************************************************************
+ *	PCI Bridge Status and Command Register, DevA:0x04
+ ************************************************************/
+#define REG_PCI_STSCMD	0x04
+enum pci_stscmd_bits {
+	PCI_STSCMD_SSE		= BIT(30),
+	PCI_STSCMD_RMA		= BIT(29),
+	PCI_STSCMD_RTA		= BIT(28),
+	PCI_STSCMD_SERREN	= BIT(8),
+	PCI_STSCMD_CLEAR_MASK	= (PCI_STSCMD_SSE |
+				   PCI_STSCMD_RMA |
+				   PCI_STSCMD_RTA)
+};
+
+/************************************************************
+ *	PCI Bridge Memory Base-Limit Register, DevA:0x1c
+ ************************************************************/
+#define REG_MEM_LIM	0x1c
+enum mem_limit_bits {
+	MEM_LIMIT_DPE		= BIT(31),
+	MEM_LIMIT_RSE		= BIT(30),
+	MEM_LIMIT_RMA		= BIT(29),
+	MEM_LIMIT_RTA		= BIT(28),
+	MEM_LIMIT_STA		= BIT(27),
+	MEM_LIMIT_MDPE		= BIT(24),
+	MEM_LIMIT_CLEAR_MASK	= (MEM_LIMIT_DPE |
+				   MEM_LIMIT_RSE |
+				   MEM_LIMIT_RMA |
+				   MEM_LIMIT_RTA |
+				   MEM_LIMIT_STA |
+				   MEM_LIMIT_MDPE)
+};
+
+/************************************************************
+ *	HyperTransport Link Control Register, DevA:0xc4
+ ************************************************************/
+#define REG_HT_LINK	0xc4
+enum ht_link_bits {
+	HT_LINK_LKFAIL		= BIT(4),
+	HT_LINK_CRCFEN		= BIT(1),
+	HT_LINK_CLEAR_MASK	= (HT_LINK_LKFAIL)
+};
+
+/************************************************************
+ *	PCI Bridge Interrupt and Bridge Control, DevA:0x3c
+ ************************************************************/
+#define REG_PCI_INTBRG_CTRL	0x3c
+enum pci_intbrg_ctrl_bits {
+	PCI_INTBRG_CTRL_DTSERREN	= BIT(27),
+	PCI_INTBRG_CTRL_DTSTAT		= BIT(26),
+	PCI_INTBRG_CTRL_MARSP		= BIT(21),
+	PCI_INTBRG_CTRL_SERREN		= BIT(17),
+	PCI_INTBRG_CTRL_PEREN		= BIT(16),
+	PCI_INTBRG_CTRL_CLEAR_MASK	= (PCI_INTBRG_CTRL_DTSTAT),
+	PCI_INTBRG_CTRL_POLL_MASK	= (PCI_INTBRG_CTRL_DTSERREN |
+					   PCI_INTBRG_CTRL_MARSP |
+					   PCI_INTBRG_CTRL_SERREN)
+};
+
+/************************************************************
+ *		I/O Control 1 Register, DevB:0x40
+ ************************************************************/
+#define REG_IO_CTRL_1	0x40
+enum io_ctrl_1_bits {
+	IO_CTRL_1_NMIONERR	= BIT(7),
+	IO_CTRL_1_LPC_ERR	= BIT(6),
+	IO_CTRL_1_PW2LPC	= BIT(1),
+	IO_CTRL_1_CLEAR_MASK	= (IO_CTRL_1_LPC_ERR | IO_CTRL_1_PW2LPC)
+};
+
+/************************************************************
+ *		Legacy I/O Space Registers
+ ************************************************************/
+#define REG_AT_COMPAT	0x61
+enum at_compat_bits {
+	AT_COMPAT_SERR		= BIT(7),
+	AT_COMPAT_IOCHK		= BIT(6),
+	AT_COMPAT_CLRIOCHK	= BIT(3),
+	AT_COMPAT_CLRSERR	= BIT(2),
+};
+
+struct amd8111_dev_info {
+	u16 err_dev;	/* PCI Device ID */
+	struct pci_dev *dev;
+	int edac_idx;	/* device index */
+	char *ctl_name;
+	struct edac_device_ctl_info *edac_dev;
+	void (*init)(struct amd8111_dev_info *dev_info);
+	void (*exit)(struct amd8111_dev_info *dev_info);
+	void (*check)(struct edac_device_ctl_info *edac_dev);
+};
+
+struct amd8111_pci_info {
+	u16 err_dev;	/* PCI Device ID */
+	struct pci_dev *dev;
+	int edac_idx;	/* pci index */
+	const char *ctl_name;
+	struct edac_pci_ctl_info *edac_dev;
+	void (*init)(struct amd8111_pci_info *dev_info);
+	void (*exit)(struct amd8111_pci_info *dev_info);
+	void (*check)(struct edac_pci_ctl_info *edac_dev);
+};
+
+#endif /* _AMD8111_EDAC_H_ */
diff --git a/drivers/edac/amd8131_edac.c b/drivers/edac/amd8131_edac.c
new file mode 100644
index 000000000000..c083b31cac5a
--- /dev/null
+++ b/drivers/edac/amd8131_edac.c
@@ -0,0 +1,379 @@
1/*
2 * amd8131_edac.c, AMD8131 hypertransport chip EDAC kernel module
3 *
4 * Copyright (c) 2008 Wind River Systems, Inc.
5 *
6 * Authors: Cao Qingtao <qingtao.cao@windriver.com>
7 * Benjamin Walsh <benjamin.walsh@windriver.com>
8 * Hu Yongqi <yongqi.hu@windriver.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
17 * See the GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24#include <linux/module.h>
25#include <linux/init.h>
26#include <linux/interrupt.h>
27#include <linux/io.h>
28#include <linux/bitops.h>
29#include <linux/edac.h>
30#include <linux/pci_ids.h>
31
32#include "edac_core.h"
33#include "edac_module.h"
34#include "amd8131_edac.h"
35
36#define AMD8131_EDAC_REVISION " Ver: 1.0.0 " __DATE__
37#define AMD8131_EDAC_MOD_STR "amd8131_edac"
38
39/* Wrapper functions for accessing PCI configuration space */
40static void edac_pci_read_dword(struct pci_dev *dev, int reg, u32 *val32)
41{
42 int ret;
43
44 ret = pci_read_config_dword(dev, reg, val32);
45 if (ret != 0)
46 printk(KERN_ERR AMD8131_EDAC_MOD_STR
47 " PCI Access Read Error at 0x%x\n", reg);
48}
49
50static void edac_pci_write_dword(struct pci_dev *dev, int reg, u32 val32)
51{
52 int ret;
53
54 ret = pci_write_config_dword(dev, reg, val32);
55 if (ret != 0)
56 printk(KERN_ERR AMD8131_EDAC_MOD_STR
57 " PCI Access Write Error at 0x%x\n", reg);
58}
59
60static char * const bridge_str[] = {
61 [NORTH_A] = "NORTH A",
62 [NORTH_B] = "NORTH B",
63 [SOUTH_A] = "SOUTH A",
64 [SOUTH_B] = "SOUTH B",
65 [NO_BRIDGE] = "NO BRIDGE",
66};
67
68/* Support up to two AMD8131 chipsets on a platform */
69static struct amd8131_dev_info amd8131_devices[] = {
70 {
71 .inst = NORTH_A,
72 .devfn = DEVFN_PCIX_BRIDGE_NORTH_A,
73 .ctl_name = "AMD8131_PCIX_NORTH_A",
74 },
75 {
76 .inst = NORTH_B,
77 .devfn = DEVFN_PCIX_BRIDGE_NORTH_B,
78 .ctl_name = "AMD8131_PCIX_NORTH_B",
79 },
80 {
81 .inst = SOUTH_A,
82 .devfn = DEVFN_PCIX_BRIDGE_SOUTH_A,
83 .ctl_name = "AMD8131_PCIX_SOUTH_A",
84 },
85 {
86 .inst = SOUTH_B,
87 .devfn = DEVFN_PCIX_BRIDGE_SOUTH_B,
88 .ctl_name = "AMD8131_PCIX_SOUTH_B",
89 },
90 {.inst = NO_BRIDGE,},
91};
92
93static void amd8131_pcix_init(struct amd8131_dev_info *dev_info)
94{
95 u32 val32;
96 struct pci_dev *dev = dev_info->dev;
97
98 /* First clear error detection flags */
99 edac_pci_read_dword(dev, REG_MEM_LIM, &val32);
100 if (val32 & MEM_LIMIT_MASK)
101 edac_pci_write_dword(dev, REG_MEM_LIM, val32);
102
103 /* Clear Discard Timer Timedout flag */
104 edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
105 if (val32 & INT_CTLR_DTS)
106 edac_pci_write_dword(dev, REG_INT_CTLR, val32);
107
108 /* Clear CRC Error flag on link side A */
109 edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
110 if (val32 & LNK_CTRL_CRCERR_A)
111 edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
112
113 /* Clear CRC Error flag on link side B */
114 edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
115 if (val32 & LNK_CTRL_CRCERR_B)
116 edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
117
118 /*
119 * Then enable all error detections.
120 *
121 * Setup Discard Timer Sync Flood Enable,
122 * System Error Enable and Parity Error Enable.
123 */
124 edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
125 val32 |= INT_CTLR_PERR | INT_CTLR_SERR | INT_CTLR_DTSE;
126 edac_pci_write_dword(dev, REG_INT_CTLR, val32);
127
128 /* Enable overall SERR Error detection */
129 edac_pci_read_dword(dev, REG_STS_CMD, &val32);
130 val32 |= STS_CMD_SERREN;
131 edac_pci_write_dword(dev, REG_STS_CMD, val32);
132
133 /* Setup CRC Flood Enable for link side A */
134 edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
135 val32 |= LNK_CTRL_CRCFEN;
136 edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
137
138 /* Setup CRC Flood Enable for link side B */
139 edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
140 val32 |= LNK_CTRL_CRCFEN;
141 edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
142}
143
144static void amd8131_pcix_exit(struct amd8131_dev_info *dev_info)
145{
146 u32 val32;
147 struct pci_dev *dev = dev_info->dev;
148
149 /* Disable SERR, PERR and DTSE Error detection */
150 edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
151 val32 &= ~(INT_CTLR_PERR | INT_CTLR_SERR | INT_CTLR_DTSE);
152 edac_pci_write_dword(dev, REG_INT_CTLR, val32);
153
154 /* Disable overall System Error detection */
155 edac_pci_read_dword(dev, REG_STS_CMD, &val32);
156 val32 &= ~STS_CMD_SERREN;
157 edac_pci_write_dword(dev, REG_STS_CMD, val32);
158
159 /* Disable CRC Sync Flood on link side A */
160 edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
161 val32 &= ~LNK_CTRL_CRCFEN;
162 edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
163
164 /* Disable CRC Sync Flood on link side B */
165 edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
166 val32 &= ~LNK_CTRL_CRCFEN;
167 edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
168}
169
170static void amd8131_pcix_check(struct edac_pci_ctl_info *edac_dev)
171{
172 struct amd8131_dev_info *dev_info = edac_dev->pvt_info;
173 struct pci_dev *dev = dev_info->dev;
174 u32 val32;
175
176 /* Check PCI-X Bridge Memory Base-Limit Register for errors */
177 edac_pci_read_dword(dev, REG_MEM_LIM, &val32);
178 if (val32 & MEM_LIMIT_MASK) {
179 printk(KERN_INFO "Error(s) in mem limit register "
180 "on %s bridge\n", dev_info->ctl_name);
181 printk(KERN_INFO "DPE: %d, RSE: %d, RMA: %d\n"
182 "RTA: %d, STA: %d, MDPE: %d\n",
183 val32 & MEM_LIMIT_DPE,
184 val32 & MEM_LIMIT_RSE,
185 val32 & MEM_LIMIT_RMA,
186 val32 & MEM_LIMIT_RTA,
187 val32 & MEM_LIMIT_STA,
188 val32 & MEM_LIMIT_MDPE);
189
190 val32 |= MEM_LIMIT_MASK;
191 edac_pci_write_dword(dev, REG_MEM_LIM, val32);
192
193 edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
194 }
195
196 /* Check if Discard Timer timed out */
197 edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
198 if (val32 & INT_CTLR_DTS) {
199 printk(KERN_INFO "Error(s) in interrupt and control register "
200 "on %s bridge\n", dev_info->ctl_name);
201 printk(KERN_INFO "DTS: %d\n", val32 & INT_CTLR_DTS);
202
203 val32 |= INT_CTLR_DTS;
204 edac_pci_write_dword(dev, REG_INT_CTLR, val32);
205
206 edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
207 }
208
209 /* Check if CRC error happens on link side A */
210 edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
211 if (val32 & LNK_CTRL_CRCERR_A) {
212 printk(KERN_INFO "Error(s) in link conf and control register "
213 "on %s bridge\n", dev_info->ctl_name);
214 printk(KERN_INFO "CRCERR: %d\n", val32 & LNK_CTRL_CRCERR_A);
215
216 val32 |= LNK_CTRL_CRCERR_A;
217 edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
218
219 edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
220 }
221
222 /* Check if CRC error happens on link side B */
223 edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
224 if (val32 & LNK_CTRL_CRCERR_B) {
225 printk(KERN_INFO "Error(s) in link conf and control register "
226 "on %s bridge\n", dev_info->ctl_name);
227 printk(KERN_INFO "CRCERR: %d\n", val32 & LNK_CTRL_CRCERR_B);
228
229 val32 |= LNK_CTRL_CRCERR_B;
230 edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
231
232 edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
233 }
234}
235
236static struct amd8131_info amd8131_chipset = {
237 .err_dev = PCI_DEVICE_ID_AMD_8131_APIC,
238 .devices = amd8131_devices,
239 .init = amd8131_pcix_init,
240 .exit = amd8131_pcix_exit,
241 .check = amd8131_pcix_check,
242};
243
244/*
245 * There are four PCI-X bridges on the ATCA-6101 that share the same PCI
246 * Device ID, so the kernel calls amd8131_probe() four times, each time
247 * with a different pci_dev address.
248 */
249static int amd8131_probe(struct pci_dev *dev, const struct pci_device_id *id)
250{
251 struct amd8131_dev_info *dev_info;
252
253 for (dev_info = amd8131_chipset.devices; dev_info->inst != NO_BRIDGE;
254 dev_info++)
255 if (dev_info->devfn == dev->devfn)
256 break;
257
258 if (dev_info->inst == NO_BRIDGE) /* should never happen */
259 return -ENODEV;
260
261	/*
262	 * We can't look this device up with pci_get_device() as usual, since
263	 * all four bridges share one ID; take a reference with pci_dev_get().
264	 */
265 dev_info->dev = pci_dev_get(dev);
266
267 if (pci_enable_device(dev_info->dev)) {
268 pci_dev_put(dev_info->dev);
269		printk(KERN_ERR "failed to enable: "
270 "vendor %x, device %x, devfn %x, name %s\n",
271 PCI_VENDOR_ID_AMD, amd8131_chipset.err_dev,
272 dev_info->devfn, dev_info->ctl_name);
273 return -ENODEV;
274 }
275
276	/*
277	 * We do not allocate an extra private structure for the
278	 * edac_pci_ctl_info, but make use of the existing per-bridge
279	 * dev_info instead.
280	 */
281 dev_info->edac_idx = edac_pci_alloc_index();
282 dev_info->edac_dev = edac_pci_alloc_ctl_info(0, dev_info->ctl_name);
283 if (!dev_info->edac_dev)
284 return -ENOMEM;
285
286 dev_info->edac_dev->pvt_info = dev_info;
287 dev_info->edac_dev->dev = &dev_info->dev->dev;
288 dev_info->edac_dev->mod_name = AMD8131_EDAC_MOD_STR;
289 dev_info->edac_dev->ctl_name = dev_info->ctl_name;
290 dev_info->edac_dev->dev_name = dev_info->dev->dev.bus_id;
291
292 if (edac_op_state == EDAC_OPSTATE_POLL)
293 dev_info->edac_dev->edac_check = amd8131_chipset.check;
294
295 if (amd8131_chipset.init)
296 amd8131_chipset.init(dev_info);
297
298 if (edac_pci_add_device(dev_info->edac_dev, dev_info->edac_idx) > 0) {
299 printk(KERN_ERR "failed edac_pci_add_device() for %s\n",
300 dev_info->ctl_name);
301 edac_pci_free_ctl_info(dev_info->edac_dev);
302 return -ENODEV;
303 }
304
305 printk(KERN_INFO "added one device on AMD8131 "
306 "vendor %x, device %x, devfn %x, name %s\n",
307 PCI_VENDOR_ID_AMD, amd8131_chipset.err_dev,
308 dev_info->devfn, dev_info->ctl_name);
309
310 return 0;
311}
312
313static void amd8131_remove(struct pci_dev *dev)
314{
315 struct amd8131_dev_info *dev_info;
316
317 for (dev_info = amd8131_chipset.devices; dev_info->inst != NO_BRIDGE;
318 dev_info++)
319 if (dev_info->devfn == dev->devfn)
320 break;
321
322 if (dev_info->inst == NO_BRIDGE) /* should never happen */
323 return;
324
325 if (dev_info->edac_dev) {
326 edac_pci_del_device(dev_info->edac_dev->dev);
327 edac_pci_free_ctl_info(dev_info->edac_dev);
328 }
329
330 if (amd8131_chipset.exit)
331 amd8131_chipset.exit(dev_info);
332
333 pci_dev_put(dev_info->dev);
334}
335
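Both amd8131_probe() and amd8131_remove() above resolve their per-bridge private data with the same sentinel-terminated scan of the devices[] table, keyed on pci_dev->devfn. Factored out, the lookup would look roughly like this (a sketch, not part of the driver):

	static struct amd8131_dev_info *
	amd8131_find_bridge(struct amd8131_dev_info *table, unsigned int devfn)
	{
		/* The table ends with a {.inst = NO_BRIDGE} sentinel. */
		while (table->inst != NO_BRIDGE && table->devfn != devfn)
			table++;

		return (table->inst == NO_BRIDGE) ? NULL : table;
	}
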
336static const struct pci_device_id amd8131_edac_pci_tbl[] = {
337 {
338 PCI_VEND_DEV(AMD, 8131_BRIDGE),
339 .subvendor = PCI_ANY_ID,
340 .subdevice = PCI_ANY_ID,
341 .class = 0,
342 .class_mask = 0,
343 .driver_data = 0,
344 },
345 {
346 0,
347 } /* table is NULL-terminated */
348};
349MODULE_DEVICE_TABLE(pci, amd8131_edac_pci_tbl);
350
351static struct pci_driver amd8131_edac_driver = {
352 .name = AMD8131_EDAC_MOD_STR,
353 .probe = amd8131_probe,
354 .remove = amd8131_remove,
355 .id_table = amd8131_edac_pci_tbl,
356};
357
358static int __init amd8131_edac_init(void)
359{
360 printk(KERN_INFO "AMD8131 EDAC driver " AMD8131_EDAC_REVISION "\n");
361 printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc.\n");
362
363 /* Only POLL mode supported so far */
364 edac_op_state = EDAC_OPSTATE_POLL;
365
366 return pci_register_driver(&amd8131_edac_driver);
367}
368
369static void __exit amd8131_edac_exit(void)
370{
371 pci_unregister_driver(&amd8131_edac_driver);
372}
373
374module_init(amd8131_edac_init);
375module_exit(amd8131_edac_exit);
376
377MODULE_LICENSE("GPL");
378MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>");
379MODULE_DESCRIPTION("AMD8131 HyperTransport PCI-X Tunnel EDAC kernel module");
diff --git a/drivers/edac/amd8131_edac.h b/drivers/edac/amd8131_edac.h
new file mode 100644
index 000000000000..60e0d1c72dee
--- /dev/null
+++ b/drivers/edac/amd8131_edac.h
@@ -0,0 +1,119 @@
1/*
2 * amd8131_edac.h, EDAC defs for AMD8131 hypertransport chip
3 *
4 * Copyright (c) 2008 Wind River Systems, Inc.
5 *
6 * Authors: Cao Qingtao <qingtao.cao@windriver.com>
7 * Benjamin Walsh <benjamin.walsh@windriver.com>
8 * Hu Yongqi <yongqi.hu@windriver.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
17 * See the GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24#ifndef _AMD8131_EDAC_H_
25#define _AMD8131_EDAC_H_
26
27#define DEVFN_PCIX_BRIDGE_NORTH_A 8
28#define DEVFN_PCIX_BRIDGE_NORTH_B 16
29#define DEVFN_PCIX_BRIDGE_SOUTH_A 24
30#define DEVFN_PCIX_BRIDGE_SOUTH_B 32
31
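These values follow the standard PCI devfn packing, devfn = (device << 3) | function, so the four bridges sit at function 0 of devices 1 through 4. For example:

	DEVFN_PCIX_BRIDGE_NORTH_A == PCI_DEVFN(1, 0)	/* (1 << 3) | 0 ==  8 */
	DEVFN_PCIX_BRIDGE_SOUTH_B == PCI_DEVFN(4, 0)	/* (4 << 3) | 0 == 32 */
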
32/************************************************************
33 * PCI-X Bridge Status and Command Register, DevA:0x04
34 ************************************************************/
35#define REG_STS_CMD 0x04
36enum sts_cmd_bits {
37 STS_CMD_SSE = BIT(30),
38 STS_CMD_SERREN = BIT(8)
39};
40
41/************************************************************
42 * PCI-X Bridge Interrupt and Bridge Control Register,
43 ************************************************************/
44#define REG_INT_CTLR 0x3c
45enum int_ctlr_bits {
46 INT_CTLR_DTSE = BIT(27),
47 INT_CTLR_DTS = BIT(26),
48 INT_CTLR_SERR = BIT(17),
49 INT_CTLR_PERR = BIT(16)
50};
51
52/************************************************************
53 * PCI-X Bridge Memory Base-Limit Register, DevA:0x1C
54 ************************************************************/
55#define REG_MEM_LIM 0x1c
56enum mem_limit_bits {
57 MEM_LIMIT_DPE = BIT(31),
58 MEM_LIMIT_RSE = BIT(30),
59 MEM_LIMIT_RMA = BIT(29),
60 MEM_LIMIT_RTA = BIT(28),
61 MEM_LIMIT_STA = BIT(27),
62 MEM_LIMIT_MDPE = BIT(24),
63 MEM_LIMIT_MASK = MEM_LIMIT_DPE|MEM_LIMIT_RSE|MEM_LIMIT_RMA|
64 MEM_LIMIT_RTA|MEM_LIMIT_STA|MEM_LIMIT_MDPE
65};
66
67/************************************************************
68 * Link Configuration And Control Register, side A
69 ************************************************************/
70#define REG_LNK_CTRL_A 0xc4
71
72/************************************************************
73 * Link Configuration And Control Register, side B
74 ************************************************************/
75#define REG_LNK_CTRL_B 0xc8
76
77enum lnk_ctrl_bits {
78 LNK_CTRL_CRCERR_A = BIT(9),
79 LNK_CTRL_CRCERR_B = BIT(8),
80 LNK_CTRL_CRCFEN = BIT(1)
81};
82
83enum pcix_bridge_inst {
84 NORTH_A = 0,
85 NORTH_B = 1,
86 SOUTH_A = 2,
87 SOUTH_B = 3,
88 NO_BRIDGE = 4
89};
90
91struct amd8131_dev_info {
92 int devfn;
93 enum pcix_bridge_inst inst;
94 struct pci_dev *dev;
95 int edac_idx; /* pci device index */
96 char *ctl_name;
97 struct edac_pci_ctl_info *edac_dev;
98};
99
100/*
101 * The AMD8131 chipset has two pairs of PCI-X bridge and associated
102 * IOAPIC controller, and the ATCA-6101 carries two AMD8131 chipsets,
103 * so there are four PCI-X bridges on the ATCA-6101 altogether.
104 *
105 * These PCI-X bridges share the same PCI Device ID and are all of
106 * function zero; they can be discriminated by their pci_dev->devfn.
107 * They share the same set of init/check/exit methods, and their
108 * private structures are collected in the devices[] array.
109 */
110struct amd8131_info {
111 u16 err_dev; /* PCI Device ID for AMD8131 APIC*/
112 struct amd8131_dev_info *devices;
113 void (*init)(struct amd8131_dev_info *dev_info);
114 void (*exit)(struct amd8131_dev_info *dev_info);
115 void (*check)(struct edac_pci_ctl_info *edac_dev);
116};
117
118#endif /* _AMD8131_EDAC_H_ */
119
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 4b55ec607a88..28f2c3f959b5 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -49,6 +49,10 @@
49#define edac_printk(level, prefix, fmt, arg...) \
50	printk(level "EDAC " prefix ": " fmt, ##arg)
51
52#define edac_printk_verbose(level, prefix, fmt, arg...) \
53 printk(level "EDAC " prefix ": " "in %s, line at %d: " fmt, \
54 __FILE__, __LINE__, ##arg)
55
56#define edac_mc_printk(mci, level, fmt, arg...) \
57	printk(level "EDAC MC%d: " fmt, mci->mc_idx, ##arg)
58
@@ -71,11 +75,20 @@
75#ifdef CONFIG_EDAC_DEBUG
76extern int edac_debug_level;
77
78#ifndef CONFIG_EDAC_DEBUG_VERBOSE
79#define edac_debug_printk(level, fmt, arg...) \
80	do { \
81		if (level <= edac_debug_level) \
82			edac_printk(KERN_DEBUG, EDAC_DEBUG, fmt, ##arg); \
83	} while (0)
84#else /* CONFIG_EDAC_DEBUG_VERBOSE */
85#define edac_debug_printk(level, fmt, arg...) \
86 do { \
87 if (level <= edac_debug_level) \
88 edac_printk_verbose(KERN_DEBUG, EDAC_DEBUG, fmt, \
89 ##arg); \
90 } while (0)
91#endif
92
93#define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ )
94#define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ )
@@ -831,6 +844,7 @@ extern void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci);
844extern void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci,
845				unsigned long value);
846
847extern int edac_pci_alloc_index(void);
848extern int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx);
849extern struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev);
850
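The net effect of the hunk above: with both CONFIG_EDAC_DEBUG and CONFIG_EDAC_DEBUG_VERBOSE enabled, every debugfN() call site is prefixed with the file and line it was issued from. A hedged example of a call and the kind of line it would produce, assuming EDAC_DEBUG expands to "DEBUG" (the path and line number here are illustrative):

	debugf0("MC: %s()\n", __func__);
	/*
	 * Would log something like:
	 *   EDAC DEBUG: in drivers/edac/edac_mc.c, line at 123: MC: edac_mc_alloc()
	 */
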
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index 5d3c8083a40e..5b150aea703a 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -30,6 +30,7 @@
30
31static DEFINE_MUTEX(edac_pci_ctls_mutex);
32static LIST_HEAD(edac_pci_list);
33static atomic_t pci_indexes = ATOMIC_INIT(0);
34
35/*
36 * edac_pci_alloc_ctl_info
@@ -318,6 +319,19 @@ void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci,
319EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
320
321/*
322 * edac_pci_alloc_index: Allocate a unique PCI index number
323 *
324 * Return:
325 * allocated index number
326 *
327 */
328int edac_pci_alloc_index(void)
329{
330 return atomic_inc_return(&pci_indexes) - 1;
331}
332EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
333
334/*
335 * edac_pci_add_device: Insert the 'edac_dev' structure into the
336 * edac_pci global list and create sysfs entries associated with
337 * edac_pci structure.
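edac_pci_alloc_index() hands out a unique, monotonically increasing index (0, 1, 2, ...) backed by nothing heavier than an atomic counter; the amd8131 probe above uses it. A minimal usage sketch following the same sequence as that probe (the control name is illustrative):

	struct edac_pci_ctl_info *pci;
	int idx;

	idx = edac_pci_alloc_index();
	pci = edac_pci_alloc_ctl_info(0, "MY_PCIX_BRIDGE");
	if (!pci)
		return -ENOMEM;

	/* As in amd8131_probe(), a return > 0 indicates failure. */
	if (edac_pci_add_device(pci, idx) > 0) {
		edac_pci_free_ctl_info(pci);
		return -ENODEV;
	}
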
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
new file mode 100644
index 000000000000..11f2172aa1e6
--- /dev/null
+++ b/drivers/edac/ppc4xx_edac.c
@@ -0,0 +1,1448 @@
1/*
2 * Copyright (c) 2008 Nuovation System Designs, LLC
3 * Grant Erickson <gerickson@nuovations.com>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation; version 2 of the
8 * License.
9 *
10 */
11
12#include <linux/edac.h>
13#include <linux/interrupt.h>
14#include <linux/irq.h>
15#include <linux/kernel.h>
16#include <linux/mm.h>
17#include <linux/module.h>
18#include <linux/of_device.h>
19#include <linux/of_platform.h>
20#include <linux/types.h>
21
22#include <asm/dcr.h>
23
24#include "edac_core.h"
25#include "ppc4xx_edac.h"
26
27/*
28 * This file implements a driver for monitoring and handling events
29 * associated with the IBM DDR2 ECC controller found in the AMCC/IBM
30 * 405EX[r], 440SP, 440SPe, 460EX, 460GT and 460SX.
31 *
32 * As realized in the 405EX[r], this controller features:
33 *
34 * - Support for registered- and non-registered DDR1 and DDR2 memory.
35 * - 32-bit or 16-bit memory interface with optional ECC.
36 *
37 * o ECC support includes:
38 *
39 * - 4-bit SEC/DED
40 * - Aligned-nibble error detect
41 * - Bypass mode
42 *
43 * - Two (2) memory banks/ranks.
44 * - Up to 1 GiB per bank/rank in 32-bit mode and up to 512 MiB per
45 * bank/rank in 16-bit mode.
46 *
47 * As realized in the 440SP and 440SPe, this controller changes/adds:
48 *
49 * - 64-bit or 32-bit memory interface with optional ECC.
50 *
51 * o ECC support includes:
52 *
53 * - 8-bit SEC/DED
54 * - Aligned-nibble error detect
55 * - Bypass mode
56 *
57 * - Up to 4 GiB per bank/rank in 64-bit mode and up to 2 GiB
58 * per bank/rank in 32-bit mode.
59 *
60 * As realized in the 460EX and 460GT, this controller changes/adds:
61 *
62 * - 64-bit or 32-bit memory interface with optional ECC.
63 *
64 * o ECC support includes:
65 *
66 * - 8-bit SEC/DED
67 * - Aligned-nibble error detect
68 * - Bypass mode
69 *
70 * - Four (4) memory banks/ranks.
71 * - Up to 16 GiB per bank/rank in 64-bit mode and up to 8 GiB
72 * per bank/rank in 32-bit mode.
73 *
74 * At present, this driver has ONLY been tested against the controller
75 * realization in the 405EX[r] on the AMCC Kilauea and Haleakala
76 * boards (256 MiB w/o ECC memory soldered onto the board) and a
77 * proprietary board based on those designs (128 MiB ECC memory, also
78 * soldered onto the board).
79 *
80 * Dynamic feature detection and handling needs to be added for the
81 * other realizations of this controller listed above.
82 *
83 * Eventually, this driver will likely be adapted to the above variant
84 * realizations of this controller as well as broken apart to handle
85 * the other known ECC-capable controllers prevalent in other 4xx
86 * processors:
87 *
88 * - IBM SDRAM (405GP, 405CR and 405EP) "ibm,sdram-4xx"
89 * - IBM DDR1 (440GP, 440GX, 440EP and 440GR) "ibm,sdram-4xx-ddr"
90 * - Denali DDR1/DDR2 (440EPX and 440GRX) "denali,sdram-4xx-ddr2"
91 *
92 * For this controller, unfortunately, correctable errors report
93 * nothing more than the beat/cycle and byte/lane the correction
94 * occurred on and the check bit group that covered the error.
95 *
96 * In contrast, uncorrectable errors also report the failing address,
97 * the bus master and the transaction direction (i.e. read or write)
98 * the bus master and the transaction direction (i.e. read or write).
99 * Regardless of whether the error is a CE or a UE, we report the
100 * following pieces of information in the driver-unique message to the
101 * EDAC subsystem:
102 *
103 * - Device tree path
104 * - Bank(s)
105 * - Check bit error group
106 * - Beat(s)/lane(s)
107 */
108
109/* Preprocessor Definitions */
110
111#define EDAC_OPSTATE_INT_STR "interrupt"
112#define EDAC_OPSTATE_POLL_STR "polled"
113#define EDAC_OPSTATE_UNKNOWN_STR "unknown"
114
115#define PPC4XX_EDAC_MODULE_NAME "ppc4xx_edac"
116#define PPC4XX_EDAC_MODULE_REVISION "v1.0.0 " __DATE__
117
118#define PPC4XX_EDAC_MESSAGE_SIZE 256
119
120/*
121 * Kernel logging without an EDAC instance
122 */
123#define ppc4xx_edac_printk(level, fmt, arg...) \
124 edac_printk(level, "PPC4xx MC", fmt, ##arg)
125
126/*
127 * Kernel logging with an EDAC instance
128 */
129#define ppc4xx_edac_mc_printk(level, mci, fmt, arg...) \
130 edac_mc_chipset_printk(mci, level, "PPC4xx", fmt, ##arg)
131
132/*
133 * Macros to convert bank configuration size enumerations into MiB and
134 * page values.
135 */
136#define SDRAM_MBCF_SZ_MiB_MIN 4
137#define SDRAM_MBCF_SZ_TO_MiB(n) (SDRAM_MBCF_SZ_MiB_MIN \
138 << (SDRAM_MBCF_SZ_DECODE(n)))
139#define SDRAM_MBCF_SZ_TO_PAGES(n) (SDRAM_MBCF_SZ_MiB_MIN \
140 << (20 - PAGE_SHIFT + \
141 SDRAM_MBCF_SZ_DECODE(n)))
142
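A quick worked example of the conversion macros, assuming 4 KiB pages (PAGE_SHIFT == 12) and a bank whose size field decodes to 6:

	SDRAM_MBCF_SZ_TO_MiB(n)   = 4 << 6             = 256 MiB
	SDRAM_MBCF_SZ_TO_PAGES(n) = 4 << (20 - 12 + 6) = 65536 pages

and 65536 pages at 4 KiB each is indeed 256 MiB.
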
143/*
144 * The ibm,sdram-4xx-ddr2 Device Control Registers (DCRs) are
145 * indirectly accessed and have a base and length defined by the
146 * device tree. The base can be anything; however, we expect the
147 * length to be precisely two registers, the first for the address
148 * window and the second for the data window.
149 */
150#define SDRAM_DCR_RESOURCE_LEN 2
151#define SDRAM_DCR_ADDR_OFFSET 0
152#define SDRAM_DCR_DATA_OFFSET 1
153
154/*
155 * Device tree interrupt indices
156 */
157#define INTMAP_ECCDED_INDEX 0 /* Double-bit Error Detect */
158#define INTMAP_ECCSEC_INDEX 1 /* Single-bit Error Correct */
159
160/* Type Definitions */
161
162/*
163 * PPC4xx SDRAM memory controller private instance data
164 */
165struct ppc4xx_edac_pdata {
166 dcr_host_t dcr_host; /* Indirect DCR address/data window mapping */
167 struct {
168 int sec; /* Single-bit correctable error IRQ assigned */
169 int ded; /* Double-bit detectable error IRQ assigned */
170 } irqs;
171};
172
173/*
174 * Various status data gathered and manipulated when checking and
175 * reporting ECC status.
176 */
177struct ppc4xx_ecc_status {
178 u32 ecces;
179 u32 besr;
180 u32 bearh;
181 u32 bearl;
182 u32 wmirq;
183};
184
185/* Function Prototypes */
186
187static int ppc4xx_edac_probe(struct of_device *device,
188 const struct of_device_id *device_id);
189static int ppc4xx_edac_remove(struct of_device *device);
190
191/* Global Variables */
192
193/*
194 * Device tree node type and compatible tuples this driver can match
195 * on.
196 */
197static struct of_device_id ppc4xx_edac_match[] = {
198 {
199 .compatible = "ibm,sdram-4xx-ddr2"
200 },
201 { }
202};
203
204static struct of_platform_driver ppc4xx_edac_driver = {
205 .match_table = ppc4xx_edac_match,
206 .probe = ppc4xx_edac_probe,
207 .remove = ppc4xx_edac_remove,
208 .driver = {
209 .owner = THIS_MODULE,
210 .name = PPC4XX_EDAC_MODULE_NAME
211 }
212};
213
214/*
215 * TODO: The row and channel parameters likely need to be dynamically
216 * set based on the aforementioned variant controller realizations.
217 */
218static const unsigned ppc4xx_edac_nr_csrows = 2;
219static const unsigned ppc4xx_edac_nr_chans = 1;
220
221/*
222 * Strings associated with PLB master IDs capable of being posted in
223 * SDRAM_BESR or SDRAM_WMIRQ on uncorrectable ECC errors.
224 */
225static const char * const ppc4xx_plb_masters[9] = {
226 [SDRAM_PLB_M0ID_ICU] = "ICU",
227 [SDRAM_PLB_M0ID_PCIE0] = "PCI-E 0",
228 [SDRAM_PLB_M0ID_PCIE1] = "PCI-E 1",
229 [SDRAM_PLB_M0ID_DMA] = "DMA",
230 [SDRAM_PLB_M0ID_DCU] = "DCU",
231 [SDRAM_PLB_M0ID_OPB] = "OPB",
232 [SDRAM_PLB_M0ID_MAL] = "MAL",
233 [SDRAM_PLB_M0ID_SEC] = "SEC",
234 [SDRAM_PLB_M0ID_AHB] = "AHB"
235};
236
237/**
238 * mfsdram - read and return controller register data
239 * @dcr_host: A pointer to the DCR mapping.
240 * @idcr_n: The indirect DCR register to read.
241 *
242 * This routine reads and returns the data associated with the
243 * controller's specified indirect DCR register.
244 *
245 * Returns the read data.
246 */
247static inline u32
248mfsdram(const dcr_host_t *dcr_host, unsigned int idcr_n)
249{
250 return __mfdcri(dcr_host->base + SDRAM_DCR_ADDR_OFFSET,
251 dcr_host->base + SDRAM_DCR_DATA_OFFSET,
252 idcr_n);
253}
254
255/**
256 * mtsdram - write controller register data
257 * @dcr_host: A pointer to the DCR mapping.
258 * @idcr_n: The indirect DCR register to write.
259 * @value: The data to write.
260 *
261 * This routine writes the provided data to the controller's specified
262 * indirect DCR register.
263 */
264static inline void
265mtsdram(const dcr_host_t *dcr_host, unsigned int idcr_n, u32 value)
266{
267	__mtdcri(dcr_host->base + SDRAM_DCR_ADDR_OFFSET,
268		 dcr_host->base + SDRAM_DCR_DATA_OFFSET,
269		 idcr_n,
270		 value);
271}
272
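The two accessors above implement a classic index/data register window: each access first programs the address register of the DCR window and then moves data through the adjacent data register, which __mfdcri()/__mtdcri() do as one operation. Typical uses, taken from elsewhere in this file:

	u32 mcopt1 = mfsdram(&dcr_host, SDRAM_MCOPT1);	/* read controller options */
	mtsdram(&dcr_host, SDRAM_BEARL, 0);		/* clear low error address */
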
273/**
274 * ppc4xx_edac_check_bank_error - check a bank for an ECC bank error
275 * @status: A pointer to the ECC status structure to check for an
276 * ECC bank error.
277 * @bank: The bank to check for an ECC error.
278 *
279 * This routine determines whether the specified bank has an ECC
280 * error.
281 *
282 * Returns true if the specified bank has an ECC error; otherwise,
283 * false.
284 */
285static bool
286ppc4xx_edac_check_bank_error(const struct ppc4xx_ecc_status *status,
287 unsigned int bank)
288{
289 switch (bank) {
290 case 0:
291 return status->ecces & SDRAM_ECCES_BK0ER;
292 case 1:
293 return status->ecces & SDRAM_ECCES_BK1ER;
294 default:
295 return false;
296 }
297}
298
299/**
300 * ppc4xx_edac_generate_bank_message - generate interpreted bank status message
301 * @mci: A pointer to the EDAC memory controller instance associated
302 * with the bank message being generated.
303 * @status: A pointer to the ECC status structure to generate the
304 * message from.
305 * @buffer: A pointer to the buffer in which to generate the
306 * message.
307 * @size: The size, in bytes, of space available in buffer.
308 *
309 * This routine generates to the provided buffer the portion of the
310 * driver-unique report message associated with the ECCES[BKnER]
311 * field of the specified ECC status.
312 *
313 * Returns the number of characters generated on success; otherwise, <
314 * 0 on error.
315 */
316static int
317ppc4xx_edac_generate_bank_message(const struct mem_ctl_info *mci,
318 const struct ppc4xx_ecc_status *status,
319 char *buffer,
320 size_t size)
321{
322 int n, total = 0;
323 unsigned int row, rows;
324
325 n = snprintf(buffer, size, "%s: Banks: ", mci->dev_name);
326
327 if (n < 0 || n >= size)
328 goto fail;
329
330 buffer += n;
331 size -= n;
332 total += n;
333
334 for (rows = 0, row = 0; row < mci->nr_csrows; row++) {
335 if (ppc4xx_edac_check_bank_error(status, row)) {
336 n = snprintf(buffer, size, "%s%u",
337 (rows++ ? ", " : ""), row);
338
339 if (n < 0 || n >= size)
340 goto fail;
341
342 buffer += n;
343 size -= n;
344 total += n;
345 }
346 }
347
348 n = snprintf(buffer, size, "%s; ", rows ? "" : "None");
349
350 if (n < 0 || n >= size)
351 goto fail;
352
353 buffer += n;
354 size -= n;
355 total += n;
356
357 fail:
358 return total;
359}
360
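The remaining generator routines repeat the snprintf accumulation idiom used here: bail out on error or truncation (n < 0 || n >= size), otherwise advance the buffer and shrink the remaining size. Factored into a standalone helper, it would look roughly like this (a sketch, not part of the driver):

	static int append(char **buffer, size_t *size, const char *fmt, ...)
	{
		va_list args;
		int n;

		va_start(args, fmt);
		n = vsnprintf(*buffer, *size, fmt, args);
		va_end(args);

		/* Error or truncated output: stop accumulating. */
		if (n < 0 || n >= *size)
			return -1;

		*buffer += n;
		*size -= n;

		return n;
	}
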
361/**
362 * ppc4xx_edac_generate_checkbit_message - generate interpreted checkbit message
363 * @mci: A pointer to the EDAC memory controller instance associated
364 * with the checkbit message being generated.
365 * @status: A pointer to the ECC status structure to generate the
366 * message from.
367 * @buffer: A pointer to the buffer in which to generate the
368 * message.
369 * @size: The size, in bytes, of space available in buffer.
370 *
371 * This routine generates to the provided buffer the portion of the
372 * driver-unique report message associated with the ECCES[CKBER]
373 * field of the specified ECC status.
374 *
375 * Returns the number of characters generated on success; otherwise, <
376 * 0 on error.
377 */
378static int
379ppc4xx_edac_generate_checkbit_message(const struct mem_ctl_info *mci,
380 const struct ppc4xx_ecc_status *status,
381 char *buffer,
382 size_t size)
383{
384 const struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
385 const char *ckber = NULL;
386
387 switch (status->ecces & SDRAM_ECCES_CKBER_MASK) {
388 case SDRAM_ECCES_CKBER_NONE:
389 ckber = "None";
390 break;
391 case SDRAM_ECCES_CKBER_32_ECC_0_3:
392 ckber = "ECC0:3";
393 break;
394 case SDRAM_ECCES_CKBER_32_ECC_4_8:
395 switch (mfsdram(&pdata->dcr_host, SDRAM_MCOPT1) &
396 SDRAM_MCOPT1_WDTH_MASK) {
397 case SDRAM_MCOPT1_WDTH_16:
398 ckber = "ECC0:3";
399 break;
400 case SDRAM_MCOPT1_WDTH_32:
401 ckber = "ECC4:8";
402 break;
403 default:
404 ckber = "Unknown";
405 break;
406 }
407 break;
408 case SDRAM_ECCES_CKBER_32_ECC_0_8:
409 ckber = "ECC0:8";
410 break;
411 default:
412 ckber = "Unknown";
413 break;
414 }
415
416 return snprintf(buffer, size, "Checkbit Error: %s", ckber);
417}
418
419/**
420 * ppc4xx_edac_generate_lane_message - generate interpreted byte lane message
421 * @mci: A pointer to the EDAC memory controller instance associated
422 * with the byte lane message being generated.
423 * @status: A pointer to the ECC status structure to generate the
424 * message from.
425 * @buffer: A pointer to the buffer in which to generate the
426 * message.
427 * @size: The size, in bytes, of space available in buffer.
428 *
429 * This routine generates to the provided buffer the portion of the
430 * driver-unique report message associated with the ECCES[BNCE]
431 * field of the specified ECC status.
432 *
433 * Returns the number of characters generated on success; otherwise, <
434 * 0 on error.
435 */
436static int
437ppc4xx_edac_generate_lane_message(const struct mem_ctl_info *mci,
438 const struct ppc4xx_ecc_status *status,
439 char *buffer,
440 size_t size)
441{
442 int n, total = 0;
443 unsigned int lane, lanes;
444 const unsigned int first_lane = 0;
445 const unsigned int lane_count = 16;
446
447 n = snprintf(buffer, size, "; Byte Lane Errors: ");
448
449 if (n < 0 || n >= size)
450 goto fail;
451
452 buffer += n;
453 size -= n;
454 total += n;
455
456 for (lanes = 0, lane = first_lane; lane < lane_count; lane++) {
457 if ((status->ecces & SDRAM_ECCES_BNCE_ENCODE(lane)) != 0) {
458 n = snprintf(buffer, size,
459 "%s%u",
460 (lanes++ ? ", " : ""), lane);
461
462 if (n < 0 || n >= size)
463 goto fail;
464
465 buffer += n;
466 size -= n;
467 total += n;
468 }
469 }
470
471 n = snprintf(buffer, size, "%s; ", lanes ? "" : "None");
472
473 if (n < 0 || n >= size)
474 goto fail;
475
476 buffer += n;
477 size -= n;
478 total += n;
479
480 fail:
481 return total;
482}
483
484/**
485 * ppc4xx_edac_generate_ecc_message - generate interpreted ECC status message
486 * @mci: A pointer to the EDAC memory controller instance associated
487 * with the ECCES message being generated.
488 * @status: A pointer to the ECC status structure to generate the
489 * message from.
490 * @buffer: A pointer to the buffer in which to generate the
491 * message.
492 * @size: The size, in bytes, of space available in buffer.
493 *
494 * This routine generates to the provided buffer the portion of the
495 * driver-unique report message associated with the ECCES register of
496 * the specified ECC status.
497 *
498 * Returns the number of characters generated on success; otherwise, <
499 * 0 on error.
500 */
501static int
502ppc4xx_edac_generate_ecc_message(const struct mem_ctl_info *mci,
503 const struct ppc4xx_ecc_status *status,
504 char *buffer,
505 size_t size)
506{
507 int n, total = 0;
508
509 n = ppc4xx_edac_generate_bank_message(mci, status, buffer, size);
510
511 if (n < 0 || n >= size)
512 goto fail;
513
514 buffer += n;
515 size -= n;
516 total += n;
517
518 n = ppc4xx_edac_generate_checkbit_message(mci, status, buffer, size);
519
520 if (n < 0 || n >= size)
521 goto fail;
522
523 buffer += n;
524 size -= n;
525 total += n;
526
527 n = ppc4xx_edac_generate_lane_message(mci, status, buffer, size);
528
529 if (n < 0 || n >= size)
530 goto fail;
531
532 buffer += n;
533 size -= n;
534 total += n;
535
536 fail:
537 return total;
538}
539
540/**
541 * ppc4xx_edac_generate_plb_message - generate interpreted PLB status message
542 * @mci: A pointer to the EDAC memory controller instance associated
543 * with the PLB message being generated.
544 * @status: A pointer to the ECC status structure to generate the
545 * message from.
546 * @buffer: A pointer to the buffer in which to generate the
547 * message.
548 * @size: The size, in bytes, of space available in buffer.
549 *
550 * This routine generates to the provided buffer the portion of the
551 * driver-unique report message associated with the PLB-related BESR
552 * and/or WMIRQ registers of the specified ECC status.
553 *
554 * Returns the number of characters generated on success; otherwise, <
555 * 0 on error.
556 */
557static int
558ppc4xx_edac_generate_plb_message(const struct mem_ctl_info *mci,
559 const struct ppc4xx_ecc_status *status,
560 char *buffer,
561 size_t size)
562{
563 unsigned int master;
564 bool read;
565
566 if ((status->besr & SDRAM_BESR_MASK) == 0)
567 return 0;
568
569 if ((status->besr & SDRAM_BESR_M0ET_MASK) == SDRAM_BESR_M0ET_NONE)
570 return 0;
571
572 read = ((status->besr & SDRAM_BESR_M0RW_MASK) == SDRAM_BESR_M0RW_READ);
573
574 master = SDRAM_BESR_M0ID_DECODE(status->besr);
575
576 return snprintf(buffer, size,
577 "%s error w/ PLB master %u \"%s\"; ",
578 (read ? "Read" : "Write"),
579 master,
580 (((master >= SDRAM_PLB_M0ID_FIRST) &&
581 (master <= SDRAM_PLB_M0ID_LAST)) ?
582 ppc4xx_plb_masters[master] : "UNKNOWN"));
583}
584
585/**
586 * ppc4xx_edac_generate_message - generate interpreted status message
587 * @mci: A pointer to the EDAC memory controller instance associated
588 * with the driver-unique message being generated.
589 * @status: A pointer to the ECC status structure to generate the
590 * message from.
591 * @buffer: A pointer to the buffer in which to generate the
592 * message.
593 * @size: The size, in bytes, of space available in buffer.
594 *
595 * This routine generates to the provided buffer the driver-unique
596 * EDAC report message from the specified ECC status.
597 */
598static void
599ppc4xx_edac_generate_message(const struct mem_ctl_info *mci,
600 const struct ppc4xx_ecc_status *status,
601 char *buffer,
602 size_t size)
603{
604 int n;
605
606 if (buffer == NULL || size == 0)
607 return;
608
609 n = ppc4xx_edac_generate_ecc_message(mci, status, buffer, size);
610
611 if (n < 0 || n >= size)
612 return;
613
614 buffer += n;
615 size -= n;
616
617 ppc4xx_edac_generate_plb_message(mci, status, buffer, size);
618}
619
620#ifdef DEBUG
621/**
622 * ppc4xx_ecc_dump_status - dump controller ECC status registers
623 * @mci: A pointer to the EDAC memory controller instance
624 * associated with the status being dumped.
625 * @status: A pointer to the ECC status structure to generate the
626 * dump from.
627 *
628 * This routine dumps the raw and interpreted forms of the specified
629 * ECC status to the kernel log buffer.
630 */
631static void
632ppc4xx_ecc_dump_status(const struct mem_ctl_info *mci,
633 const struct ppc4xx_ecc_status *status)
634{
635 char message[PPC4XX_EDAC_MESSAGE_SIZE];
636
637 ppc4xx_edac_generate_message(mci, status, message, sizeof(message));
638
639 ppc4xx_edac_mc_printk(KERN_INFO, mci,
640 "\n"
641 "\tECCES: 0x%08x\n"
642 "\tWMIRQ: 0x%08x\n"
643 "\tBESR: 0x%08x\n"
644 "\tBEAR: 0x%08x%08x\n"
645 "\t%s\n",
646 status->ecces,
647 status->wmirq,
648 status->besr,
649 status->bearh,
650 status->bearl,
651 message);
652}
653#endif /* DEBUG */
654
655/**
656 * ppc4xx_ecc_get_status - get controller ECC status
657 * @mci: A pointer to the EDAC memory controller instance
658 * associated with the status being retrieved.
659 * @status: A pointer to the ECC status structure to populate the
660 * ECC status with.
661 *
662 * This routine reads and masks, as appropriate, all the relevant
663 * status registers that deal with ibm,sdram-4xx-ddr2 ECC errors.
664 * While we read all of them, for correctable errors, we only expect
665 * to deal with ECCES. For uncorrectable errors, we expect to deal
666 * with all of them.
667 */
668static void
669ppc4xx_ecc_get_status(const struct mem_ctl_info *mci,
670 struct ppc4xx_ecc_status *status)
671{
672 const struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
673 const dcr_host_t *dcr_host = &pdata->dcr_host;
674
675 status->ecces = mfsdram(dcr_host, SDRAM_ECCES) & SDRAM_ECCES_MASK;
676 status->wmirq = mfsdram(dcr_host, SDRAM_WMIRQ) & SDRAM_WMIRQ_MASK;
677 status->besr = mfsdram(dcr_host, SDRAM_BESR) & SDRAM_BESR_MASK;
678 status->bearl = mfsdram(dcr_host, SDRAM_BEARL);
679 status->bearh = mfsdram(dcr_host, SDRAM_BEARH);
680}
681
682/**
683 * ppc4xx_ecc_clear_status - clear controller ECC status
684 * @mci: A pointer to the EDAC memory controller instance
685 * associated with the status being cleared.
686 * @status: A pointer to the ECC status structure containing the
687 * values to write to clear the ECC status.
688 *
689 * This routine clears--by writing the masked (as appropriate) status
690 * values back to--the status registers that deal with
691 * ibm,sdram-4xx-ddr2 ECC errors.
692 */
693static void
694ppc4xx_ecc_clear_status(const struct mem_ctl_info *mci,
695 const struct ppc4xx_ecc_status *status)
696{
697 const struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
698 const dcr_host_t *dcr_host = &pdata->dcr_host;
699
700 mtsdram(dcr_host, SDRAM_ECCES, status->ecces & SDRAM_ECCES_MASK);
701 mtsdram(dcr_host, SDRAM_WMIRQ, status->wmirq & SDRAM_WMIRQ_MASK);
702 mtsdram(dcr_host, SDRAM_BESR, status->besr & SDRAM_BESR_MASK);
703 mtsdram(dcr_host, SDRAM_BEARL, 0);
704 mtsdram(dcr_host, SDRAM_BEARH, 0);
705}
706
707/**
708 * ppc4xx_edac_handle_ce - handle controller correctable ECC error (CE)
709 * @mci: A pointer to the EDAC memory controller instance
710 * associated with the correctable error being handled and reported.
711 * @status: A pointer to the ECC status structure associated with
712 * the correctable error being handled and reported.
713 *
714 * This routine handles an ibm,sdram-4xx-ddr2 controller ECC
715 * correctable error. Per the aforementioned discussion, there's not
716 * enough status available to use the full EDAC correctable error
717 * interface, so we just pass the driver-unique message to the
718 * "no info" interface.
719 */
720static void
721ppc4xx_edac_handle_ce(struct mem_ctl_info *mci,
722 const struct ppc4xx_ecc_status *status)
723{
724 int row;
725 char message[PPC4XX_EDAC_MESSAGE_SIZE];
726
727 ppc4xx_edac_generate_message(mci, status, message, sizeof(message));
728
729 for (row = 0; row < mci->nr_csrows; row++)
730 if (ppc4xx_edac_check_bank_error(status, row))
731 edac_mc_handle_ce_no_info(mci, message);
732}
733
734/**
735 * ppc4xx_edac_handle_ue - handle controller uncorrectable ECC error (UE)
736 * @mci: A pointer to the EDAC memory controller instance
737 * associated with the uncorrectable error being handled and
738 * reported.
739 * @status: A pointer to the ECC status structure associated with
740 * the uncorrectable error being handled and reported.
741 *
742 * This routine handles an ibm,sdram-4xx-ddr2 controller ECC
743 * uncorrectable error.
744 */
745static void
746ppc4xx_edac_handle_ue(struct mem_ctl_info *mci,
747 const struct ppc4xx_ecc_status *status)
748{
749 const u64 bear = ((u64)status->bearh << 32 | status->bearl);
750 const unsigned long page = bear >> PAGE_SHIFT;
751 const unsigned long offset = bear & ~PAGE_MASK;
752 int row;
753 char message[PPC4XX_EDAC_MESSAGE_SIZE];
754
755 ppc4xx_edac_generate_message(mci, status, message, sizeof(message));
756
757 for (row = 0; row < mci->nr_csrows; row++)
758 if (ppc4xx_edac_check_bank_error(status, row))
759 edac_mc_handle_ue(mci, page, offset, row, message);
760}
761
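For concreteness, the page/offset arithmetic above applied to a hypothetical captured address, assuming 4 KiB pages (PAGE_SHIFT == 12):

	/* BEARH:BEARL == 0x00000000:0x1234a678 */
	bear   = 0x000000001234a678ULL;
	page   = bear >> PAGE_SHIFT;	/* 0x1234a */
	offset = bear & ~PAGE_MASK;	/* 0x678 */
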
762/**
763 * ppc4xx_edac_check - check controller for ECC errors
764 * @mci: A pointer to the EDAC memory controller instance
765 * associated with the ibm,sdram-4xx-ddr2 controller being
766 * checked.
767 *
768 * This routine is used to check and post ECC errors and is called by
769 * both the EDAC polling thread and this driver's CE and UE interrupt
770 * handler.
771 */
772static void
773ppc4xx_edac_check(struct mem_ctl_info *mci)
774{
775#ifdef DEBUG
776 static unsigned int count;
777#endif
778 struct ppc4xx_ecc_status status;
779
780 ppc4xx_ecc_get_status(mci, &status);
781
782#ifdef DEBUG
783 if (count++ % 30 == 0)
784 ppc4xx_ecc_dump_status(mci, &status);
785#endif
786
787 if (status.ecces & SDRAM_ECCES_UE)
788 ppc4xx_edac_handle_ue(mci, &status);
789
790 if (status.ecces & SDRAM_ECCES_CE)
791 ppc4xx_edac_handle_ce(mci, &status);
792
793 ppc4xx_ecc_clear_status(mci, &status);
794}
795
796/**
797 * ppc4xx_edac_isr - SEC (CE) and DED (UE) interrupt service routine
798 * @irq: The virtual interrupt number being serviced.
799 * @dev_id: A pointer to the EDAC memory controller instance
800 * associated with the interrupt being handled.
801 *
802 * This routine implements the interrupt handler for both correctable
803 * (CE) and uncorrectable (UE) ECC errors for the ibm,sdram-4xx-ddr2
804 * controller. It simply calls through to the same routine used during
805 * polling to check, report and clear the ECC status.
806 *
807 * Unconditionally returns IRQ_HANDLED.
808 */
809static irqreturn_t
810ppc4xx_edac_isr(int irq, void *dev_id)
811{
812 struct mem_ctl_info *mci = dev_id;
813
814 ppc4xx_edac_check(mci);
815
816 return IRQ_HANDLED;
817}
818
819/**
820 * ppc4xx_edac_get_dtype - return the controller memory width
821 * @mcopt1: The 32-bit Memory Controller Option 1 register value
822 * currently set for the controller, from which the width
823 * is derived.
824 *
825 * This routine returns the EDAC device type width appropriate for the
826 * current controller configuration.
827 *
828 * TODO: This needs to be conditioned dynamically through feature
829 * flags or some such when other controller variants are supported as
830 * the 405EX[r] is 16-/32-bit and the others are 32-/64-bit with the
831 * 16- and 64-bit field definition/value/enumeration (b1) overloaded
832 * among them.
833 *
834 * Returns a device type width enumeration.
835 */
836static enum dev_type __devinit
837ppc4xx_edac_get_dtype(u32 mcopt1)
838{
839 switch (mcopt1 & SDRAM_MCOPT1_WDTH_MASK) {
840 case SDRAM_MCOPT1_WDTH_16:
841 return DEV_X2;
842 case SDRAM_MCOPT1_WDTH_32:
843 return DEV_X4;
844 default:
845 return DEV_UNKNOWN;
846 }
847}
848
849/**
850 * ppc4xx_edac_get_mtype - return controller memory type
851 * @mcopt1: The 32-bit Memory Controller Option 1 register value
852 * currently set for the controller, from which the memory type
853 * is derived.
854 *
855 * This routine returns the EDAC memory type appropriate for the
856 * current controller configuration.
857 *
858 * Returns a memory type enumeration.
859 */
860static enum mem_type __devinit
861ppc4xx_edac_get_mtype(u32 mcopt1)
862{
863 bool rden = ((mcopt1 & SDRAM_MCOPT1_RDEN_MASK) == SDRAM_MCOPT1_RDEN);
864
865 switch (mcopt1 & SDRAM_MCOPT1_DDR_TYPE_MASK) {
866 case SDRAM_MCOPT1_DDR2_TYPE:
867 return rden ? MEM_RDDR2 : MEM_DDR2;
868 case SDRAM_MCOPT1_DDR1_TYPE:
869 return rden ? MEM_RDDR : MEM_DDR;
870 default:
871 return MEM_UNKNOWN;
872 }
873}
874
875/**
876 * ppc4xx_edac_init_csrows - initialize driver instance rows
877 * @mci: A pointer to the EDAC memory controller instance
878 * associated with the ibm,sdram-4xx-ddr2 controller for which
879 * the csrows (i.e. banks/ranks) are being initialized.
880 * @mcopt1: The 32-bit Memory Controller Option 1 register value
881 * currently set for the controller, from which bank width
882 * and memory type information is derived.
883 *
884 * This routine initializes the virtual "chip select rows" associated
885 * with the EDAC memory controller instance. An ibm,sdram-4xx-ddr2
886 * controller bank/rank is mapped to a row.
887 *
888 * Returns 0 if OK; otherwise, -EINVAL if the memory bank size
889 * configuration cannot be determined.
890 */
891static int __devinit
892ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
893{
894 const struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
895 int status = 0;
896 enum mem_type mtype;
897 enum dev_type dtype;
898 enum edac_type edac_mode;
899 int row;
900 u32 mbxcf, size;
901 static u32 ppc4xx_last_page;
902
903 /* Establish the memory type and width */
904
905 mtype = ppc4xx_edac_get_mtype(mcopt1);
906 dtype = ppc4xx_edac_get_dtype(mcopt1);
907
908 /* Establish EDAC mode */
909
910 if (mci->edac_cap & EDAC_FLAG_SECDED)
911 edac_mode = EDAC_SECDED;
912 else if (mci->edac_cap & EDAC_FLAG_EC)
913 edac_mode = EDAC_EC;
914 else
915 edac_mode = EDAC_NONE;
916
917 /*
918	 * Initialize each chip select row structure, each of which
919	 * corresponds 1:1 with a controller bank/rank.
920 */
921
922 for (row = 0; row < mci->nr_csrows; row++) {
923 struct csrow_info *csi = &mci->csrows[row];
924
925 /*
926 * Get the configuration settings for this
927 * row/bank/rank and skip disabled banks.
928 */
929
930 mbxcf = mfsdram(&pdata->dcr_host, SDRAM_MBXCF(row));
931
932 if ((mbxcf & SDRAM_MBCF_BE_MASK) != SDRAM_MBCF_BE_ENABLE)
933 continue;
934
935 /* Map the bank configuration size setting to pages. */
936
937 size = mbxcf & SDRAM_MBCF_SZ_MASK;
938
939 switch (size) {
940 case SDRAM_MBCF_SZ_4MB:
941 case SDRAM_MBCF_SZ_8MB:
942 case SDRAM_MBCF_SZ_16MB:
943 case SDRAM_MBCF_SZ_32MB:
944 case SDRAM_MBCF_SZ_64MB:
945 case SDRAM_MBCF_SZ_128MB:
946 case SDRAM_MBCF_SZ_256MB:
947 case SDRAM_MBCF_SZ_512MB:
948 case SDRAM_MBCF_SZ_1GB:
949 case SDRAM_MBCF_SZ_2GB:
950 case SDRAM_MBCF_SZ_4GB:
951 case SDRAM_MBCF_SZ_8GB:
952 csi->nr_pages = SDRAM_MBCF_SZ_TO_PAGES(size);
953 break;
954 default:
955 ppc4xx_edac_mc_printk(KERN_ERR, mci,
956 "Unrecognized memory bank %d "
957 "size 0x%08x\n",
958 row, SDRAM_MBCF_SZ_DECODE(size));
959 status = -EINVAL;
960 goto done;
961 }
962
963 csi->first_page = ppc4xx_last_page;
964 csi->last_page = csi->first_page + csi->nr_pages - 1;
965 csi->page_mask = 0;
966
967 /*
968 * It's unclear exactly what grain should be set to
969 * here. The SDRAM_ECCES register allows resolution of
970 * an error down to a nibble which would potentially
971 * argue for a grain of '1' byte, even though we only
972 * know the associated address for uncorrectable
973 * errors. This value is not used at present for
974 * anything other than error reporting so getting it
975 * wrong should be of little consequence. Other
976 * possible values would be the PLB width (16), the
977 * page size (PAGE_SIZE) or the memory width (2 or 4).
978 */
979
980 csi->grain = 1;
981
982 csi->mtype = mtype;
983 csi->dtype = dtype;
984
985 csi->edac_mode = edac_mode;
986
987 ppc4xx_last_page += csi->nr_pages;
988 }
989
990 done:
991 return status;
992}
993
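The static ppc4xx_last_page counter above lays enabled banks out back to back in EDAC's page space. For instance (illustrative), two enabled 256 MiB banks with 4 KiB pages end up as:

	bank 0: first_page = 0x00000, last_page = 0x0ffff	/* 65536 pages */
	bank 1: first_page = 0x10000, last_page = 0x1ffff
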
994/**
995 * ppc4xx_edac_mc_init - initialize driver instance
996 * @mci: A pointer to the EDAC memory controller instance being
997 * initialized.
998 * @op: A pointer to the OpenFirmware device tree node associated
999 * with the controller this EDAC instance is bound to.
1000 * @match: A pointer to the OpenFirmware device tree match
1001 * information associated with the controller this EDAC instance
1002 * is bound to.
1003 * @dcr_host: A pointer to the DCR data containing the DCR mapping
1004 * for this controller instance.
1005 * @mcopt1: The 32-bit Memory Controller Option 1 register value
1006 * currently set for the controller, from which ECC capabilities
1007 * and scrub mode are derived.
1008 *
1009 * This routine performs initialization of the EDAC memory controller
1010 * instance and related driver-private data associated with the
1011 * ibm,sdram-4xx-ddr2 memory controller the instance is bound to.
1012 *
1013 * Returns 0 if OK; otherwise, < 0 on error.
1014 */
1015static int __devinit
1016ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
1017 struct of_device *op,
1018 const struct of_device_id *match,
1019 const dcr_host_t *dcr_host,
1020 u32 mcopt1)
1021{
1022 int status = 0;
1023 const u32 memcheck = (mcopt1 & SDRAM_MCOPT1_MCHK_MASK);
1024 struct ppc4xx_edac_pdata *pdata = NULL;
1025 const struct device_node *np = op->node;
1026
1027 if (match == NULL)
1028 return -EINVAL;
1029
1030 /* Initial driver pointers and private data */
1031
1032 mci->dev = &op->dev;
1033
1034 dev_set_drvdata(mci->dev, mci);
1035
1036 pdata = mci->pvt_info;
1037
1038 pdata->dcr_host = *dcr_host;
1039 pdata->irqs.sec = NO_IRQ;
1040 pdata->irqs.ded = NO_IRQ;
1041
1042 /* Initialize controller capabilities and configuration */
1043
1044 mci->mtype_cap = (MEM_FLAG_DDR | MEM_FLAG_RDDR |
1045 MEM_FLAG_DDR2 | MEM_FLAG_RDDR2);
1046
1047 mci->edac_ctl_cap = (EDAC_FLAG_NONE |
1048 EDAC_FLAG_EC |
1049 EDAC_FLAG_SECDED);
1050
1051 mci->scrub_cap = SCRUB_NONE;
1052 mci->scrub_mode = SCRUB_NONE;
1053
1054 /*
1055	 * Update the actual capabilities based on the MCOPT1[MCHK]
1056 * settings. Scrubbing is only useful if reporting is enabled.
1057 */
1058
1059 switch (memcheck) {
1060 case SDRAM_MCOPT1_MCHK_CHK:
1061 mci->edac_cap = EDAC_FLAG_EC;
1062 break;
1063 case SDRAM_MCOPT1_MCHK_CHK_REP:
1064 mci->edac_cap = (EDAC_FLAG_EC | EDAC_FLAG_SECDED);
1065 mci->scrub_mode = SCRUB_SW_SRC;
1066 break;
1067 default:
1068 mci->edac_cap = EDAC_FLAG_NONE;
1069 break;
1070 }
1071
1072 /* Initialize strings */
1073
1074 mci->mod_name = PPC4XX_EDAC_MODULE_NAME;
1075 mci->mod_ver = PPC4XX_EDAC_MODULE_REVISION;
1076	mci->ctl_name = match->compatible;
1077 mci->dev_name = np->full_name;
1078
1079 /* Initialize callbacks */
1080
1081 mci->edac_check = ppc4xx_edac_check;
1082 mci->ctl_page_to_phys = NULL;
1083
1084 /* Initialize chip select rows */
1085
1086 status = ppc4xx_edac_init_csrows(mci, mcopt1);
1087
1088 if (status)
1089 ppc4xx_edac_mc_printk(KERN_ERR, mci,
1090 "Failed to initialize rows!\n");
1091
1092 return status;
1093}
1094
1095/**
1096 * ppc4xx_edac_register_irq - setup and register controller interrupts
1097 * @op: A pointer to the OpenFirmware device tree node associated
1098 * with the controller this EDAC instance is bound to.
1099 * @mci: A pointer to the EDAC memory controller instance
1100 * associated with the ibm,sdram-4xx-ddr2 controller for which
1101 * interrupts are being registered.
1102 *
1103 * This routine parses the correctable (CE) and uncorrectable error (UE)
1104 * interrupts from the device tree node and maps and assigns them to
1105 * the associated EDAC memory controller instance.
1106 *
1107 * Returns 0 if OK; otherwise, -ENODEV if the interrupts could not be
1108 * mapped and assigned.
1109 */
1110static int __devinit
1111ppc4xx_edac_register_irq(struct of_device *op, struct mem_ctl_info *mci)
1112{
1113 int status = 0;
1114 int ded_irq, sec_irq;
1115 struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
1116 struct device_node *np = op->node;
1117
1118 ded_irq = irq_of_parse_and_map(np, INTMAP_ECCDED_INDEX);
1119 sec_irq = irq_of_parse_and_map(np, INTMAP_ECCSEC_INDEX);
1120
1121 if (ded_irq == NO_IRQ || sec_irq == NO_IRQ) {
1122 ppc4xx_edac_mc_printk(KERN_ERR, mci,
1123 "Unable to map interrupts.\n");
1124 status = -ENODEV;
1125 goto fail;
1126 }
1127
1128 status = request_irq(ded_irq,
1129 ppc4xx_edac_isr,
1130 IRQF_DISABLED,
1131 "[EDAC] MC ECCDED",
1132 mci);
1133
1134 if (status < 0) {
1135 ppc4xx_edac_mc_printk(KERN_ERR, mci,
1136 "Unable to request irq %d for ECC DED",
1137 ded_irq);
1138 status = -ENODEV;
1139		goto fail;
1140 }
1141
1142 status = request_irq(sec_irq,
1143 ppc4xx_edac_isr,
1144 IRQF_DISABLED,
1145 "[EDAC] MC ECCSEC",
1146 mci);
1147
1148 if (status < 0) {
1149 ppc4xx_edac_mc_printk(KERN_ERR, mci,
1150 "Unable to request irq %d for ECC SEC",
1151 sec_irq);
1152 status = -ENODEV;
1153		goto fail1;
1154 }
1155
1156 ppc4xx_edac_mc_printk(KERN_INFO, mci, "ECCDED irq is %d\n", ded_irq);
1157 ppc4xx_edac_mc_printk(KERN_INFO, mci, "ECCSEC irq is %d\n", sec_irq);
1158
1159 pdata->irqs.ded = ded_irq;
1160 pdata->irqs.sec = sec_irq;
1161
1162 return 0;
1163
1164 fail1:
1165	free_irq(ded_irq, mci);
1169
1170 fail:
1171 return status;
1172}
1173
1174/**
1175 * ppc4xx_edac_map_dcrs - locate and map controller registers
1176 * @np: A pointer to the device tree node containing the DCR
1177 * resources to map.
1178 * @dcr_host: A pointer to the DCR data to populate with the
1179 * DCR mapping.
1180 *
1181 * This routine attempts to locate in the device tree and map the DCR
1182 * register resources associated with the controller's indirect DCR
1183 * address and data windows.
1184 *
1185 * Returns 0 if the DCRs were successfully mapped; otherwise, < 0 on
1186 * error.
1187 */
1188static int __devinit
1189ppc4xx_edac_map_dcrs(const struct device_node *np, dcr_host_t *dcr_host)
1190{
1191 unsigned int dcr_base, dcr_len;
1192
1193 if (np == NULL || dcr_host == NULL)
1194 return -EINVAL;
1195
1196 /* Get the DCR resource extent and sanity check the values. */
1197
1198 dcr_base = dcr_resource_start(np, 0);
1199 dcr_len = dcr_resource_len(np, 0);
1200
1201 if (dcr_base == 0 || dcr_len == 0) {
1202 ppc4xx_edac_printk(KERN_ERR,
1203 "Failed to obtain DCR property.\n");
1204 return -ENODEV;
1205 }
1206
1207 if (dcr_len != SDRAM_DCR_RESOURCE_LEN) {
1208 ppc4xx_edac_printk(KERN_ERR,
1209 "Unexpected DCR length %d, expected %d.\n",
1210 dcr_len, SDRAM_DCR_RESOURCE_LEN);
1211 return -ENODEV;
1212 }
1213
1214 /* Attempt to map the DCR extent. */
1215
1216 *dcr_host = dcr_map(np, dcr_base, dcr_len);
1217
1218 if (!DCR_MAP_OK(*dcr_host)) {
1219 ppc4xx_edac_printk(KERN_INFO, "Failed to map DCRs.\n");
1220 return -ENODEV;
1221 }
1222
1223 return 0;
1224}
1225
1226/**
1227 * ppc4xx_edac_probe - check controller and bind driver
1228 * @op: A pointer to the OpenFirmware device tree node associated
1229 * with the controller being probed for driver binding.
1230 * @match: A pointer to the OpenFirmware device tree match
1231 * information associated with the controller being probed
1232 * for driver binding.
1233 *
1234 * This routine probes a specific ibm,sdram-4xx-ddr2 controller
1235 * instance for binding with the driver.
1236 *
1237 * Returns 0 if the controller instance was successfully bound to the
1238 * driver; otherwise, < 0 on error.
1239 */
1240static int __devinit
1241ppc4xx_edac_probe(struct of_device *op, const struct of_device_id *match)
1242{
1243 int status = 0;
1244 u32 mcopt1, memcheck;
1245 dcr_host_t dcr_host;
1246 const struct device_node *np = op->node;
1247 struct mem_ctl_info *mci = NULL;
1248 static int ppc4xx_edac_instance;
1249
1250 /*
1251 * At this point, we only support the controller realized on
1252 * the AMCC PPC 405EX[r]. Reject anything else.
1253 */
1254
1255 if (!of_device_is_compatible(np, "ibm,sdram-405ex") &&
1256 !of_device_is_compatible(np, "ibm,sdram-405exr")) {
1257 ppc4xx_edac_printk(KERN_NOTICE,
1258 "Only the PPC405EX[r] is supported.\n");
1259 return -ENODEV;
1260 }
1261
1262 /*
1263 * Next, get the DCR property and attempt to map it so that we
1264 * can probe the controller.
1265 */
1266
1267 status = ppc4xx_edac_map_dcrs(np, &dcr_host);
1268
1269 if (status)
1270 return status;
1271
1272 /*
1273 * First determine whether ECC is enabled at all. If not,
1274 * there is no useful checking or monitoring that can be done
1275 * for this controller.
1276 */
1277
1278 mcopt1 = mfsdram(&dcr_host, SDRAM_MCOPT1);
1279 memcheck = (mcopt1 & SDRAM_MCOPT1_MCHK_MASK);
1280
1281 if (memcheck == SDRAM_MCOPT1_MCHK_NON) {
1282 ppc4xx_edac_printk(KERN_INFO, "%s: No ECC memory detected or "
1283 "ECC is disabled.\n", np->full_name);
1284 status = -ENODEV;
1285 goto done;
1286 }
1287
1288 /*
1289 * At this point, we know ECC is enabled, allocate an EDAC
1290 * controller instance and perform the appropriate
1291 * initialization.
1292 */
1293
1294 mci = edac_mc_alloc(sizeof(struct ppc4xx_edac_pdata),
1295 ppc4xx_edac_nr_csrows,
1296 ppc4xx_edac_nr_chans,
1297 ppc4xx_edac_instance);
1298
1299 if (mci == NULL) {
1300 ppc4xx_edac_printk(KERN_ERR, "%s: "
1301 "Failed to allocate EDAC MC instance!\n",
1302 np->full_name);
1303 status = -ENOMEM;
1304 goto done;
1305 }
1306
1307 status = ppc4xx_edac_mc_init(mci, op, match, &dcr_host, mcopt1);
1308
1309 if (status) {
1310 ppc4xx_edac_mc_printk(KERN_ERR, mci,
1311 "Failed to initialize instance!\n");
1312 goto fail;
1313 }
1314
1315 /*
1316 * We have a valid, initialized EDAC instance bound to the
1317 * controller. Attempt to register it with the EDAC subsystem
1318 * and, if necessary, register interrupts.
1319 */
1320
1321 if (edac_mc_add_mc(mci)) {
1322 ppc4xx_edac_mc_printk(KERN_ERR, mci,
1323 "Failed to add instance!\n");
1324 status = -ENODEV;
1325 goto fail;
1326 }
1327
1328 if (edac_op_state == EDAC_OPSTATE_INT) {
1329 status = ppc4xx_edac_register_irq(op, mci);
1330
1331 if (status)
1332 goto fail1;
1333 }
1334
1335 ppc4xx_edac_instance++;
1336
1337 return 0;
1338
1339 fail1:
1340 edac_mc_del_mc(mci->dev);
1341
1342 fail:
1343 edac_mc_free(mci);
1344
1345 done:
1346 return status;
1347}
1348
1349/**
1350 * ppc4xx_edac_remove - unbind driver from controller
1351 * @op: A pointer to the OpenFirmware device tree node associated
1352 * with the controller this EDAC instance is to be unbound/removed
1353 * from.
1354 *
1355 * This routine unbinds the EDAC memory controller instance associated
1356 * with the specified ibm,sdram-4xx-ddr2 controller described by the
1357 * OpenFirmware device tree node passed as a parameter.
1358 *
1359 * Unconditionally returns 0.
1360 */
1361static int
1362ppc4xx_edac_remove(struct of_device *op)
1363{
1364 struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
1365 struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
1366
1367 if (edac_op_state == EDAC_OPSTATE_INT) {
1368 free_irq(pdata->irqs.sec, mci);
1369 free_irq(pdata->irqs.ded, mci);
1370 }
1371
1372 dcr_unmap(pdata->dcr_host, SDRAM_DCR_RESOURCE_LEN);
1373
1374 edac_mc_del_mc(mci->dev);
1375 edac_mc_free(mci);
1376
1377 return 0;
1378}
1379
1380/**
1381 * ppc4xx_edac_opstate_init - initialize EDAC reporting method
1382 *
1383 * This routine ensures that the EDAC memory controller reporting
1384 * method is mapped to a sane value as the EDAC core defines the value
1385 * to EDAC_OPSTATE_INVAL by default. We don't call the global
1386 * opstate_init as that defaults to polling and we want interrupt as
1387 * the default.
1388 */
1389static inline void __init
1390ppc4xx_edac_opstate_init(void)
1391{
1392 switch (edac_op_state) {
1393 case EDAC_OPSTATE_POLL:
1394 case EDAC_OPSTATE_INT:
1395 break;
1396 default:
1397 edac_op_state = EDAC_OPSTATE_INT;
1398 break;
1399 }
1400
1401 ppc4xx_edac_printk(KERN_INFO, "Reporting type: %s\n",
1402 ((edac_op_state == EDAC_OPSTATE_POLL) ?
1403 EDAC_OPSTATE_POLL_STR :
1404 ((edac_op_state == EDAC_OPSTATE_INT) ?
1405 EDAC_OPSTATE_INT_STR :
1406 EDAC_OPSTATE_UNKNOWN_STR)));
1407}
1408
1409/**
1410 * ppc4xx_edac_init - driver/module insertion entry point
1411 *
1412 * This routine is the driver/module insertion entry point. It
1413 * initializes the EDAC memory controller reporting state and
1414 * registers the driver as an OpenFirmware device tree platform
1415 * driver.
1416 */
1417static int __init
1418ppc4xx_edac_init(void)
1419{
1420 ppc4xx_edac_printk(KERN_INFO, PPC4XX_EDAC_MODULE_REVISION "\n");
1421
1422 ppc4xx_edac_opstate_init();
1423
1424 return of_register_platform_driver(&ppc4xx_edac_driver);
1425}
1426
1427/**
1428 * ppc4xx_edac_exit - driver/module removal entry point
1429 *
1430 * This routine is the driver/module removal entry point. It
1431 * unregisters the driver as an OpenFirmware device tree platform
1432 * driver.
1433 */
1434static void __exit
1435ppc4xx_edac_exit(void)
1436{
1437 of_unregister_platform_driver(&ppc4xx_edac_driver);
1438}
1439
1440module_init(ppc4xx_edac_init);
1441module_exit(ppc4xx_edac_exit);
1442
1443MODULE_LICENSE("GPL v2");
1444MODULE_AUTHOR("Grant Erickson <gerickson@nuovations.com>");
1445MODULE_DESCRIPTION("EDAC MC Driver for the PPC4xx IBM DDR2 Memory Controller");
1446module_param(edac_op_state, int, 0444);
1447MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting State: "
1448 "0=" EDAC_OPSTATE_POLL_STR ", 2=" EDAC_OPSTATE_INT_STR);
diff --git a/drivers/edac/ppc4xx_edac.h b/drivers/edac/ppc4xx_edac.h
new file mode 100644
index 000000000000..d3154764c449
--- /dev/null
+++ b/drivers/edac/ppc4xx_edac.h
@@ -0,0 +1,172 @@
1/*
2 * Copyright (c) 2008 Nuovation System Designs, LLC
3 * Grant Erickson <gerickson@nuovations.com>
4 *
5 * This file defines processor mnemonics for accessing and managing
6 * the IBM DDR1/DDR2 ECC controller found in the 405EX[r], 440SP,
7 * 440SPe, 460EX, 460GT and 460SX.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 */
15
16#ifndef __PPC4XX_EDAC_H
17#define __PPC4XX_EDAC_H
18
19#include <linux/types.h>
20
21/*
22 * Macro for generating register field mnemonics
23 */
24#define PPC_REG_BITS 32
25#define PPC_REG_VAL(bit, val) ((val) << ((PPC_REG_BITS - 1) - (bit)))
26#define PPC_REG_DECODE(bit, val) ((val) >> ((PPC_REG_BITS - 1) - (bit)))
27
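These helpers use the IBM convention in which bit 0 is the most significant bit of a 32-bit register, so a field mnemonic shifts its value down from the top. A self-contained userspace sketch using only the two macros above (the sample MCOPT1 value and the program itself are illustrative, not part of the driver):

#include <stdio.h>

#define PPC_REG_BITS			32
#define PPC_REG_VAL(bit, val)		((val) << ((PPC_REG_BITS - 1) - (bit)))
#define PPC_REG_DECODE(bit, val)	((val) >> ((PPC_REG_BITS - 1) - (bit)))

int main(void)
{
	unsigned int mcopt1 = 0x10000000;	/* sample: ECC gen and check */

	/* Field at IBM bits 2..3 == conventional bits 29..28 */
	printf("mask  = 0x%08x\n", PPC_REG_VAL(3, 0x3));	/* 0x30000000 */
	printf("field = %u\n",
	       PPC_REG_DECODE(3, mcopt1 & PPC_REG_VAL(3, 0x3)));	/* 1 */
	return 0;
}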
28/*
29 * IBM 4xx DDR1/DDR2 SDRAM memory controller registers (at least those
30 * relevant to ECC)
31 */
32#define SDRAM_BESR 0x00 /* Error status (read/clear) */
 33#define SDRAM_BESRT			0x01	/* Error status (test/set) */
34#define SDRAM_BEARL 0x02 /* Error address low */
35#define SDRAM_BEARH 0x03 /* Error address high */
36#define SDRAM_WMIRQ 0x06 /* Write master (read/clear) */
37#define SDRAM_WMIRQT 0x07 /* Write master (test/set) */
38#define SDRAM_MCOPT1 0x20 /* Controller options 1 */
39#define SDRAM_MBXCF_BASE 0x40 /* Bank n configuration base */
40#define SDRAM_MBXCF(n) (SDRAM_MBXCF_BASE + (4 * (n)))
41#define SDRAM_MB0CF SDRAM_MBXCF(0)
42#define SDRAM_MB1CF SDRAM_MBXCF(1)
43#define SDRAM_MB2CF SDRAM_MBXCF(2)
44#define SDRAM_MB3CF SDRAM_MBXCF(3)
45#define SDRAM_ECCCR 0x98 /* ECC error status */
46#define SDRAM_ECCES SDRAM_ECCCR
47
48/*
49 * PLB Master IDs
50 */
51#define SDRAM_PLB_M0ID_FIRST 0
52#define SDRAM_PLB_M0ID_ICU SDRAM_PLB_M0ID_FIRST
53#define SDRAM_PLB_M0ID_PCIE0 1
54#define SDRAM_PLB_M0ID_PCIE1 2
55#define SDRAM_PLB_M0ID_DMA 3
56#define SDRAM_PLB_M0ID_DCU 4
57#define SDRAM_PLB_M0ID_OPB 5
58#define SDRAM_PLB_M0ID_MAL 6
59#define SDRAM_PLB_M0ID_SEC 7
60#define SDRAM_PLB_M0ID_AHB 8
61#define SDRAM_PLB_M0ID_LAST SDRAM_PLB_M0ID_AHB
62#define SDRAM_PLB_M0ID_COUNT (SDRAM_PLB_M0ID_LAST - \
63 SDRAM_PLB_M0ID_FIRST + 1)
64
65/*
66 * Memory Controller Bus Error Status Register
67 */
68#define SDRAM_BESR_MASK PPC_REG_VAL(7, 0xFF)
69#define SDRAM_BESR_M0ID_MASK PPC_REG_VAL(3, 0xF)
70#define SDRAM_BESR_M0ID_DECODE(n) PPC_REG_DECODE(3, n)
71#define SDRAM_BESR_M0ID_ICU PPC_REG_VAL(3, SDRAM_PLB_M0ID_ICU)
72#define SDRAM_BESR_M0ID_PCIE0 PPC_REG_VAL(3, SDRAM_PLB_M0ID_PCIE0)
73#define SDRAM_BESR_M0ID_PCIE1 PPC_REG_VAL(3, SDRAM_PLB_M0ID_PCIE1)
74#define SDRAM_BESR_M0ID_DMA PPC_REG_VAL(3, SDRAM_PLB_M0ID_DMA)
75#define SDRAM_BESR_M0ID_DCU PPC_REG_VAL(3, SDRAM_PLB_M0ID_DCU)
76#define SDRAM_BESR_M0ID_OPB PPC_REG_VAL(3, SDRAM_PLB_M0ID_OPB)
77#define SDRAM_BESR_M0ID_MAL PPC_REG_VAL(3, SDRAM_PLB_M0ID_MAL)
78#define SDRAM_BESR_M0ID_SEC PPC_REG_VAL(3, SDRAM_PLB_M0ID_SEC)
79#define SDRAM_BESR_M0ID_AHB PPC_REG_VAL(3, SDRAM_PLB_M0ID_AHB)
80#define SDRAM_BESR_M0ET_MASK PPC_REG_VAL(6, 0x7)
81#define SDRAM_BESR_M0ET_NONE PPC_REG_VAL(6, 0)
82#define SDRAM_BESR_M0ET_ECC PPC_REG_VAL(6, 1)
83#define SDRAM_BESR_M0RW_MASK PPC_REG_VAL(7, 1)
84#define SDRAM_BESR_M0RW_WRITE PPC_REG_VAL(7, 0)
85#define SDRAM_BESR_M0RW_READ PPC_REG_VAL(7, 1)
86
87/*
88 * Memory Controller PLB Write Master Interrupt Register
89 */
90#define SDRAM_WMIRQ_MASK PPC_REG_VAL(8, 0x1FF)
91#define SDRAM_WMIRQ_ENCODE(id) PPC_REG_VAL((id % \
92 SDRAM_PLB_M0ID_COUNT), 1)
93#define SDRAM_WMIRQ_ICU PPC_REG_VAL(SDRAM_PLB_M0ID_ICU, 1)
94#define SDRAM_WMIRQ_PCIE0 PPC_REG_VAL(SDRAM_PLB_M0ID_PCIE0, 1)
95#define SDRAM_WMIRQ_PCIE1 PPC_REG_VAL(SDRAM_PLB_M0ID_PCIE1, 1)
96#define SDRAM_WMIRQ_DMA PPC_REG_VAL(SDRAM_PLB_M0ID_DMA, 1)
97#define SDRAM_WMIRQ_DCU PPC_REG_VAL(SDRAM_PLB_M0ID_DCU, 1)
98#define SDRAM_WMIRQ_OPB PPC_REG_VAL(SDRAM_PLB_M0ID_OPB, 1)
99#define SDRAM_WMIRQ_MAL PPC_REG_VAL(SDRAM_PLB_M0ID_MAL, 1)
100#define SDRAM_WMIRQ_SEC PPC_REG_VAL(SDRAM_PLB_M0ID_SEC, 1)
101#define SDRAM_WMIRQ_AHB PPC_REG_VAL(SDRAM_PLB_M0ID_AHB, 1)
102
103/*
104 * Memory Controller Options 1 Register
105 */
106#define SDRAM_MCOPT1_MCHK_MASK PPC_REG_VAL(3, 0x3) /* ECC mask */
107#define SDRAM_MCOPT1_MCHK_NON PPC_REG_VAL(3, 0x0) /* No ECC gen */
108#define SDRAM_MCOPT1_MCHK_GEN PPC_REG_VAL(3, 0x2) /* ECC gen */
109#define SDRAM_MCOPT1_MCHK_CHK PPC_REG_VAL(3, 0x1) /* ECC gen and chk */
110#define SDRAM_MCOPT1_MCHK_CHK_REP PPC_REG_VAL(3, 0x3) /* ECC gen/chk/rpt */
111#define SDRAM_MCOPT1_MCHK_DECODE(n) ((((u32)(n)) >> 28) & 0x3)
112#define SDRAM_MCOPT1_RDEN_MASK PPC_REG_VAL(4, 0x1) /* Rgstrd DIMM mask */
113#define SDRAM_MCOPT1_RDEN PPC_REG_VAL(4, 0x1) /* Rgstrd DIMM enbl */
114#define SDRAM_MCOPT1_WDTH_MASK PPC_REG_VAL(7, 0x1) /* Width mask */
115#define SDRAM_MCOPT1_WDTH_32 PPC_REG_VAL(7, 0x0) /* 32 bits */
116#define SDRAM_MCOPT1_WDTH_16 PPC_REG_VAL(7, 0x1) /* 16 bits */
117#define SDRAM_MCOPT1_DDR_TYPE_MASK PPC_REG_VAL(11, 0x1) /* DDR type mask */
118#define SDRAM_MCOPT1_DDR1_TYPE PPC_REG_VAL(11, 0x0) /* DDR1 type */
119#define SDRAM_MCOPT1_DDR2_TYPE PPC_REG_VAL(11, 0x1) /* DDR2 type */
120
121/*
122 * Memory Bank 0 - n Configuration Register
123 */
124#define SDRAM_MBCF_BA_MASK PPC_REG_VAL(12, 0x1FFF)
125#define SDRAM_MBCF_SZ_MASK PPC_REG_VAL(19, 0xF)
126#define SDRAM_MBCF_SZ_DECODE(mbxcf) PPC_REG_DECODE(19, mbxcf)
127#define SDRAM_MBCF_SZ_4MB PPC_REG_VAL(19, 0x0)
128#define SDRAM_MBCF_SZ_8MB PPC_REG_VAL(19, 0x1)
129#define SDRAM_MBCF_SZ_16MB PPC_REG_VAL(19, 0x2)
130#define SDRAM_MBCF_SZ_32MB PPC_REG_VAL(19, 0x3)
131#define SDRAM_MBCF_SZ_64MB PPC_REG_VAL(19, 0x4)
132#define SDRAM_MBCF_SZ_128MB PPC_REG_VAL(19, 0x5)
133#define SDRAM_MBCF_SZ_256MB PPC_REG_VAL(19, 0x6)
134#define SDRAM_MBCF_SZ_512MB PPC_REG_VAL(19, 0x7)
135#define SDRAM_MBCF_SZ_1GB PPC_REG_VAL(19, 0x8)
136#define SDRAM_MBCF_SZ_2GB PPC_REG_VAL(19, 0x9)
137#define SDRAM_MBCF_SZ_4GB PPC_REG_VAL(19, 0xA)
138#define SDRAM_MBCF_SZ_8GB PPC_REG_VAL(19, 0xB)
139#define SDRAM_MBCF_AM_MASK PPC_REG_VAL(23, 0xF)
140#define SDRAM_MBCF_AM_MODE0 PPC_REG_VAL(23, 0x0)
141#define SDRAM_MBCF_AM_MODE1 PPC_REG_VAL(23, 0x1)
142#define SDRAM_MBCF_AM_MODE2 PPC_REG_VAL(23, 0x2)
143#define SDRAM_MBCF_AM_MODE3 PPC_REG_VAL(23, 0x3)
144#define SDRAM_MBCF_AM_MODE4 PPC_REG_VAL(23, 0x4)
145#define SDRAM_MBCF_AM_MODE5 PPC_REG_VAL(23, 0x5)
146#define SDRAM_MBCF_AM_MODE6 PPC_REG_VAL(23, 0x6)
147#define SDRAM_MBCF_AM_MODE7 PPC_REG_VAL(23, 0x7)
148#define SDRAM_MBCF_AM_MODE8 PPC_REG_VAL(23, 0x8)
149#define SDRAM_MBCF_AM_MODE9 PPC_REG_VAL(23, 0x9)
150#define SDRAM_MBCF_BE_MASK PPC_REG_VAL(31, 0x1)
151#define SDRAM_MBCF_BE_DISABLE PPC_REG_VAL(31, 0x0)
152#define SDRAM_MBCF_BE_ENABLE PPC_REG_VAL(31, 0x1)
153
154/*
155 * ECC Error Status
156 */
157#define SDRAM_ECCES_MASK PPC_REG_VAL(21, 0x3FFFFF)
158#define SDRAM_ECCES_BNCE_MASK PPC_REG_VAL(15, 0xFFFF)
159#define SDRAM_ECCES_BNCE_ENCODE(lane) PPC_REG_VAL(((lane) & 0xF), 1)
160#define SDRAM_ECCES_CKBER_MASK PPC_REG_VAL(17, 0x3)
161#define SDRAM_ECCES_CKBER_NONE PPC_REG_VAL(17, 0)
162#define SDRAM_ECCES_CKBER_16_ECC_0_3 PPC_REG_VAL(17, 2)
163#define SDRAM_ECCES_CKBER_32_ECC_0_3 PPC_REG_VAL(17, 1)
164#define SDRAM_ECCES_CKBER_32_ECC_4_8 PPC_REG_VAL(17, 2)
165#define SDRAM_ECCES_CKBER_32_ECC_0_8 PPC_REG_VAL(17, 3)
166#define SDRAM_ECCES_CE PPC_REG_VAL(18, 1)
167#define SDRAM_ECCES_UE PPC_REG_VAL(19, 1)
168#define SDRAM_ECCES_BKNER_MASK PPC_REG_VAL(21, 0x3)
169#define SDRAM_ECCES_BK0ER PPC_REG_VAL(20, 1)
170#define SDRAM_ECCES_BK1ER PPC_REG_VAL(21, 1)
171
172#endif /* __PPC4XX_EDAC_H */
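
As a usage illustration for the bank-configuration mnemonics, a hedged sketch of how a consumer of this header might turn an MBxCF value into a bank size in bytes (the helper name is made up; the EDAC driver performs an equivalent decode when sizing csrows):

static unsigned long long mbxcf_to_bytes(u32 mbxcf)
{
	/* SZ encodings run 0x0 = 4 MiB up through 0xB = 8 GiB */
	u32 sz = SDRAM_MBCF_SZ_DECODE(mbxcf & SDRAM_MBCF_SZ_MASK);

	return 4ULL << (20 + sz);
}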
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 42fb2fd24c0c..51a8d4103be5 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -69,20 +69,24 @@ static inline void desc_set_label(struct gpio_desc *d, const char *label)
69 * those calls have no teeth) we can't avoid autorequesting. This nag 69 * those calls have no teeth) we can't avoid autorequesting. This nag
70 * message should motivate switching to explicit requests... so should 70 * message should motivate switching to explicit requests... so should
71 * the weaker cleanup after faults, compared to gpio_request(). 71 * the weaker cleanup after faults, compared to gpio_request().
72 *
73 * NOTE: the autorequest mechanism is going away; at this point it's
74 * only "legal" in the sense that (old) code using it won't break yet,
75 * but instead only triggers a WARN() stack dump.
72 */ 76 */
73static int gpio_ensure_requested(struct gpio_desc *desc, unsigned offset) 77static int gpio_ensure_requested(struct gpio_desc *desc, unsigned offset)
74{ 78{
75 if (test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0) { 79 const struct gpio_chip *chip = desc->chip;
76 struct gpio_chip *chip = desc->chip; 80 const int gpio = chip->base + offset;
77 int gpio = chip->base + offset;
78 81
82 if (WARN(test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0,
83 "autorequest GPIO-%d\n", gpio)) {
79 if (!try_module_get(chip->owner)) { 84 if (!try_module_get(chip->owner)) {
80 pr_err("GPIO-%d: module can't be gotten \n", gpio); 85 pr_err("GPIO-%d: module can't be gotten \n", gpio);
81 clear_bit(FLAG_REQUESTED, &desc->flags); 86 clear_bit(FLAG_REQUESTED, &desc->flags);
82 /* lose */ 87 /* lose */
83 return -EIO; 88 return -EIO;
84 } 89 }
85 pr_warning("GPIO-%d autorequested\n", gpio);
86 desc_set_label(desc, "[auto]"); 90 desc_set_label(desc, "[auto]");
87 /* caller must chip->request() w/o spinlock */ 91 /* caller must chip->request() w/o spinlock */
88 if (chip->request) 92 if (chip->request)
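
With autorequest now demoted to a WARN(), callers are expected to claim lines explicitly before use. A minimal consumer sketch against the legacy integer GPIO API (the GPIO number and label are placeholders):

#include <linux/gpio.h>

static int example_claim_gpio(void)
{
	int err;

	err = gpio_request(42, "example");	/* placeholder line/label */
	if (err)
		return err;

	err = gpio_direction_input(42);
	if (err)
		gpio_free(42);			/* release on failure */
	return err;
}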
@@ -438,6 +442,7 @@ int gpio_export(unsigned gpio, bool direction_may_change)
438 unsigned long flags; 442 unsigned long flags;
439 struct gpio_desc *desc; 443 struct gpio_desc *desc;
440 int status = -EINVAL; 444 int status = -EINVAL;
445 char *ioname = NULL;
441 446
442 /* can't export until sysfs is available ... */ 447 /* can't export until sysfs is available ... */
443 if (!gpio_class.p) { 448 if (!gpio_class.p) {
@@ -461,11 +466,14 @@ int gpio_export(unsigned gpio, bool direction_may_change)
461 } 466 }
462 spin_unlock_irqrestore(&gpio_lock, flags); 467 spin_unlock_irqrestore(&gpio_lock, flags);
463 468
469 if (desc->chip->names && desc->chip->names[gpio - desc->chip->base])
470 ioname = desc->chip->names[gpio - desc->chip->base];
471
464 if (status == 0) { 472 if (status == 0) {
465 struct device *dev; 473 struct device *dev;
466 474
467 dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0), 475 dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
468 desc, "gpio%d", gpio); 476 desc, ioname ? ioname : "gpio%d", gpio);
469 if (dev) { 477 if (dev) {
470 if (direction_may_change) 478 if (direction_may_change)
471 status = sysfs_create_group(&dev->kobj, 479 status = sysfs_create_group(&dev->kobj,
@@ -513,6 +521,7 @@ void gpio_unexport(unsigned gpio)
513 mutex_lock(&sysfs_lock); 521 mutex_lock(&sysfs_lock);
514 522
515 desc = &gpio_desc[gpio]; 523 desc = &gpio_desc[gpio];
524
516 if (test_bit(FLAG_EXPORT, &desc->flags)) { 525 if (test_bit(FLAG_EXPORT, &desc->flags)) {
517 struct device *dev = NULL; 526 struct device *dev = NULL;
518 527
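
The gpio_export() change above honors an optional per-line name supplied by the chip. A hedged provider sketch filling in the names member (introduced alongside this change) so exported lines appear under those names in sysfs; labels and base are placeholders, and NULL entries fall back to the usual gpioN:

static char *example_names[] = {
	"led-red", "led-green", NULL, "button",
};

static struct gpio_chip example_chip = {
	.label	= "example",
	.base	= 160,
	.ngpio	= 4,
	.names	= example_names,
};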
diff --git a/drivers/input/mouse/hgpk.c b/drivers/input/mouse/hgpk.c
index 81e6ebf323e9..55cd0fa68339 100644
--- a/drivers/input/mouse/hgpk.c
+++ b/drivers/input/mouse/hgpk.c
@@ -381,7 +381,7 @@ static void hgpk_disconnect(struct psmouse *psmouse)
381 381
382static void hgpk_recalib_work(struct work_struct *work) 382static void hgpk_recalib_work(struct work_struct *work)
383{ 383{
384 struct delayed_work *w = container_of(work, struct delayed_work, work); 384 struct delayed_work *w = to_delayed_work(work);
385 struct hgpk_data *priv = container_of(w, struct hgpk_data, recalib_wq); 385 struct hgpk_data *priv = container_of(w, struct hgpk_data, recalib_wq);
386 struct psmouse *psmouse = priv->psmouse; 386 struct psmouse *psmouse = priv->psmouse;
387 387
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 5f3bff434621..0b92b2f6ea68 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -165,7 +165,7 @@ config SGI_XP
165 depends on (IA64_GENERIC || IA64_SGI_SN2 || IA64_SGI_UV || X86_UV) && SMP 165 depends on (IA64_GENERIC || IA64_SGI_SN2 || IA64_SGI_UV || X86_UV) && SMP
166 select IA64_UNCACHED_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2 166 select IA64_UNCACHED_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
167 select GENERIC_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2 167 select GENERIC_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
168 select SGI_GRU if (IA64_GENERIC || IA64_SGI_UV || X86_64) && SMP 168 select SGI_GRU if X86_64 && SMP
169 ---help--- 169 ---help---
170 An SGI machine can be divided into multiple Single System 170 An SGI machine can be divided into multiple Single System
171 Images which act independently of each other and have 171 Images which act independently of each other and have
@@ -189,7 +189,7 @@ config HP_ILO
189 189
190config SGI_GRU 190config SGI_GRU
191 tristate "SGI GRU driver" 191 tristate "SGI GRU driver"
192 depends on (X86_UV || IA64_SGI_UV || IA64_GENERIC) && SMP 192 depends on X86_UV && SMP
193 default n 193 default n
194 select MMU_NOTIFIER 194 select MMU_NOTIFIER
195 ---help--- 195 ---help---
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index d4775528abc6..d184dfab9631 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -53,6 +53,7 @@
53 53
54struct at24_data { 54struct at24_data {
55 struct at24_platform_data chip; 55 struct at24_platform_data chip;
56 struct memory_accessor macc;
56 bool use_smbus; 57 bool use_smbus;
57 58
58 /* 59 /*
@@ -225,14 +226,11 @@ static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf,
225 return status; 226 return status;
226} 227}
227 228
228static ssize_t at24_bin_read(struct kobject *kobj, struct bin_attribute *attr, 229static ssize_t at24_read(struct at24_data *at24,
229 char *buf, loff_t off, size_t count) 230 char *buf, loff_t off, size_t count)
230{ 231{
231 struct at24_data *at24;
232 ssize_t retval = 0; 232 ssize_t retval = 0;
233 233
234 at24 = dev_get_drvdata(container_of(kobj, struct device, kobj));
235
236 if (unlikely(!count)) 234 if (unlikely(!count))
237 return count; 235 return count;
238 236
@@ -262,12 +260,14 @@ static ssize_t at24_bin_read(struct kobject *kobj, struct bin_attribute *attr,
262 return retval; 260 return retval;
263} 261}
264 262
263static ssize_t at24_bin_read(struct kobject *kobj, struct bin_attribute *attr,
264 char *buf, loff_t off, size_t count)
265{
266 struct at24_data *at24;
265 267
266/* 268 at24 = dev_get_drvdata(container_of(kobj, struct device, kobj));
267 * REVISIT: export at24_bin{read,write}() to let other kernel code use 269 return at24_read(at24, buf, off, count);
268 * eeprom data. For example, it might hold a board's Ethernet address, or 270}
269 * board-specific calibration data generated on the manufacturing floor.
270 */
271 271
272 272
273/* 273/*
@@ -347,14 +347,11 @@ static ssize_t at24_eeprom_write(struct at24_data *at24, char *buf,
347 return -ETIMEDOUT; 347 return -ETIMEDOUT;
348} 348}
349 349
350static ssize_t at24_bin_write(struct kobject *kobj, struct bin_attribute *attr, 350static ssize_t at24_write(struct at24_data *at24,
351 char *buf, loff_t off, size_t count) 351 char *buf, loff_t off, size_t count)
352{ 352{
353 struct at24_data *at24;
354 ssize_t retval = 0; 353 ssize_t retval = 0;
355 354
356 at24 = dev_get_drvdata(container_of(kobj, struct device, kobj));
357
358 if (unlikely(!count)) 355 if (unlikely(!count))
359 return count; 356 return count;
360 357
@@ -384,6 +381,39 @@ static ssize_t at24_bin_write(struct kobject *kobj, struct bin_attribute *attr,
384 return retval; 381 return retval;
385} 382}
386 383
384static ssize_t at24_bin_write(struct kobject *kobj, struct bin_attribute *attr,
385 char *buf, loff_t off, size_t count)
386{
387 struct at24_data *at24;
388
389 at24 = dev_get_drvdata(container_of(kobj, struct device, kobj));
390 return at24_write(at24, buf, off, count);
391}
392
393/*-------------------------------------------------------------------------*/
394
395/*
396 * This lets other kernel code access the eeprom data. For example, it
397 * might hold a board's Ethernet address, or board-specific calibration
398 * data generated on the manufacturing floor.
399 */
400
401static ssize_t at24_macc_read(struct memory_accessor *macc, char *buf,
402 off_t offset, size_t count)
403{
404 struct at24_data *at24 = container_of(macc, struct at24_data, macc);
405
406 return at24_read(at24, buf, offset, count);
407}
408
409static ssize_t at24_macc_write(struct memory_accessor *macc, char *buf,
410 off_t offset, size_t count)
411{
412 struct at24_data *at24 = container_of(macc, struct at24_data, macc);
413
414 return at24_write(at24, buf, offset, count);
415}
416
387/*-------------------------------------------------------------------------*/ 417/*-------------------------------------------------------------------------*/
388 418
389static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) 419static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
@@ -413,6 +443,9 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
413 * is recommended anyhow. 443 * is recommended anyhow.
414 */ 444 */
415 chip.page_size = 1; 445 chip.page_size = 1;
446
447 chip.setup = NULL;
448 chip.context = NULL;
416 } 449 }
417 450
418 if (!is_power_of_2(chip.byte_len)) 451 if (!is_power_of_2(chip.byte_len))
@@ -463,6 +496,8 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
463 at24->bin.read = at24_bin_read; 496 at24->bin.read = at24_bin_read;
464 at24->bin.size = chip.byte_len; 497 at24->bin.size = chip.byte_len;
465 498
499 at24->macc.read = at24_macc_read;
500
466 writable = !(chip.flags & AT24_FLAG_READONLY); 501 writable = !(chip.flags & AT24_FLAG_READONLY);
467 if (writable) { 502 if (writable) {
468 if (!use_smbus || i2c_check_functionality(client->adapter, 503 if (!use_smbus || i2c_check_functionality(client->adapter,
@@ -470,6 +505,8 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
470 505
471 unsigned write_max = chip.page_size; 506 unsigned write_max = chip.page_size;
472 507
508 at24->macc.write = at24_macc_write;
509
473 at24->bin.write = at24_bin_write; 510 at24->bin.write = at24_bin_write;
474 at24->bin.attr.mode |= S_IWUSR; 511 at24->bin.attr.mode |= S_IWUSR;
475 512
@@ -520,6 +557,10 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
520 at24->write_max, 557 at24->write_max,
521 use_smbus ? ", use_smbus" : ""); 558 use_smbus ? ", use_smbus" : "");
522 559
560 /* export data to kernel code */
561 if (chip.setup)
562 chip.setup(&at24->macc, chip.context);
563
523 return 0; 564 return 0;
524 565
525err_clients: 566err_clients:
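
The restored comment block above is now backed by a real hook: board code can pass a setup() callback in at24_platform_data and receive the memory_accessor at probe time. A hedged board-code sketch (offsets, sizes and the MAC-address use are illustrative; header paths assumed for this kernel era):

#include <linux/memory.h>	/* struct memory_accessor */
#include <linux/i2c/at24.h>

static char board_mac[6];

static void board_eeprom_setup(struct memory_accessor *macc, void *context)
{
	/* e.g. pull an Ethernet address out of the first six bytes */
	macc->read(macc, board_mac, 0, sizeof(board_mac));
}

static struct at24_platform_data board_eeprom = {
	.byte_len	= 256,
	.page_size	= 8,
	.setup		= board_eeprom_setup,
};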
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 290dbe99647a..6bc0dac5c1e8 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -30,6 +30,7 @@
30 30
31struct at25_data { 31struct at25_data {
32 struct spi_device *spi; 32 struct spi_device *spi;
33 struct memory_accessor mem;
33 struct mutex lock; 34 struct mutex lock;
34 struct spi_eeprom chip; 35 struct spi_eeprom chip;
35 struct bin_attribute bin; 36 struct bin_attribute bin;
@@ -75,6 +76,13 @@ at25_ee_read(
75 struct spi_transfer t[2]; 76 struct spi_transfer t[2];
76 struct spi_message m; 77 struct spi_message m;
77 78
79 if (unlikely(offset >= at25->bin.size))
80 return 0;
81 if ((offset + count) > at25->bin.size)
82 count = at25->bin.size - offset;
83 if (unlikely(!count))
84 return count;
85
78 cp = command; 86 cp = command;
79 *cp++ = AT25_READ; 87 *cp++ = AT25_READ;
80 88
@@ -127,13 +135,6 @@ at25_bin_read(struct kobject *kobj, struct bin_attribute *bin_attr,
127 dev = container_of(kobj, struct device, kobj); 135 dev = container_of(kobj, struct device, kobj);
128 at25 = dev_get_drvdata(dev); 136 at25 = dev_get_drvdata(dev);
129 137
130 if (unlikely(off >= at25->bin.size))
131 return 0;
132 if ((off + count) > at25->bin.size)
133 count = at25->bin.size - off;
134 if (unlikely(!count))
135 return count;
136
137 return at25_ee_read(at25, buf, off, count); 138 return at25_ee_read(at25, buf, off, count);
138} 139}
139 140
@@ -146,6 +147,13 @@ at25_ee_write(struct at25_data *at25, char *buf, loff_t off, size_t count)
146 unsigned buf_size; 147 unsigned buf_size;
147 u8 *bounce; 148 u8 *bounce;
148 149
150 if (unlikely(off >= at25->bin.size))
151 return -EFBIG;
152 if ((off + count) > at25->bin.size)
153 count = at25->bin.size - off;
154 if (unlikely(!count))
155 return count;
156
149 /* Temp buffer starts with command and address */ 157 /* Temp buffer starts with command and address */
150 buf_size = at25->chip.page_size; 158 buf_size = at25->chip.page_size;
151 if (buf_size > io_limit) 159 if (buf_size > io_limit)
@@ -253,18 +261,31 @@ at25_bin_write(struct kobject *kobj, struct bin_attribute *bin_attr,
253 dev = container_of(kobj, struct device, kobj); 261 dev = container_of(kobj, struct device, kobj);
254 at25 = dev_get_drvdata(dev); 262 at25 = dev_get_drvdata(dev);
255 263
256 if (unlikely(off >= at25->bin.size))
257 return -EFBIG;
258 if ((off + count) > at25->bin.size)
259 count = at25->bin.size - off;
260 if (unlikely(!count))
261 return count;
262
263 return at25_ee_write(at25, buf, off, count); 264 return at25_ee_write(at25, buf, off, count);
264} 265}
265 266
266/*-------------------------------------------------------------------------*/ 267/*-------------------------------------------------------------------------*/
267 268
269/* Let in-kernel code access the eeprom data. */
270
271static ssize_t at25_mem_read(struct memory_accessor *mem, char *buf,
272 off_t offset, size_t count)
273{
274 struct at25_data *at25 = container_of(mem, struct at25_data, mem);
275
276 return at25_ee_read(at25, buf, offset, count);
277}
278
279static ssize_t at25_mem_write(struct memory_accessor *mem, char *buf,
280 off_t offset, size_t count)
281{
282 struct at25_data *at25 = container_of(mem, struct at25_data, mem);
283
284 return at25_ee_write(at25, buf, offset, count);
285}
286
287/*-------------------------------------------------------------------------*/
288
268static int at25_probe(struct spi_device *spi) 289static int at25_probe(struct spi_device *spi)
269{ 290{
270 struct at25_data *at25 = NULL; 291 struct at25_data *at25 = NULL;
@@ -317,6 +338,10 @@ static int at25_probe(struct spi_device *spi)
317 at25->addrlen = addrlen; 338 at25->addrlen = addrlen;
318 339
319 /* Export the EEPROM bytes through sysfs, since that's convenient. 340 /* Export the EEPROM bytes through sysfs, since that's convenient.
341 * And maybe to other kernel code; it might hold a board's Ethernet
342 * address, or board-specific calibration data generated on the
343 * manufacturing floor.
344 *
320 * Default to root-only access to the data; EEPROMs often hold data 345 * Default to root-only access to the data; EEPROMs often hold data
321 * that's sensitive for read and/or write, like ethernet addresses, 346 * that's sensitive for read and/or write, like ethernet addresses,
322 * security codes, board-specific manufacturing calibrations, etc. 347 * security codes, board-specific manufacturing calibrations, etc.
@@ -324,17 +349,22 @@ static int at25_probe(struct spi_device *spi)
324 at25->bin.attr.name = "eeprom"; 349 at25->bin.attr.name = "eeprom";
325 at25->bin.attr.mode = S_IRUSR; 350 at25->bin.attr.mode = S_IRUSR;
326 at25->bin.read = at25_bin_read; 351 at25->bin.read = at25_bin_read;
352 at25->mem.read = at25_mem_read;
327 353
328 at25->bin.size = at25->chip.byte_len; 354 at25->bin.size = at25->chip.byte_len;
329 if (!(chip->flags & EE_READONLY)) { 355 if (!(chip->flags & EE_READONLY)) {
330 at25->bin.write = at25_bin_write; 356 at25->bin.write = at25_bin_write;
331 at25->bin.attr.mode |= S_IWUSR; 357 at25->bin.attr.mode |= S_IWUSR;
358 at25->mem.write = at25_mem_write;
332 } 359 }
333 360
334 err = sysfs_create_bin_file(&spi->dev.kobj, &at25->bin); 361 err = sysfs_create_bin_file(&spi->dev.kobj, &at25->bin);
335 if (err) 362 if (err)
336 goto fail; 363 goto fail;
337 364
365 if (chip->setup)
366 chip->setup(&at25->mem, chip->context);
367
338 dev_info(&spi->dev, "%Zd %s %s eeprom%s, pagesize %u\n", 368 dev_info(&spi->dev, "%Zd %s %s eeprom%s, pagesize %u\n",
339 (at25->bin.size < 1024) 369 (at25->bin.size < 1024)
340 ? at25->bin.size 370 ? at25->bin.size
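
The same export works on the SPI side through spi_eeprom. A short sketch reusing the callback style from the at24 example above (field values are placeholders):

#include <linux/spi/eeprom.h>

static struct spi_eeprom board_spi_eeprom = {
	.name		= "at25",
	.byte_len	= 8192,
	.page_size	= 32,
	.flags		= EE_ADDR2,
	.setup		= board_eeprom_setup,	/* as in the at24 sketch */
};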
diff --git a/drivers/misc/sgi-gru/Makefile b/drivers/misc/sgi-gru/Makefile
index 9e9170b3599a..bcd8136d2f98 100644
--- a/drivers/misc/sgi-gru/Makefile
+++ b/drivers/misc/sgi-gru/Makefile
@@ -3,5 +3,5 @@ ifdef CONFIG_SGI_GRU_DEBUG
3endif 3endif
4 4
5obj-$(CONFIG_SGI_GRU) := gru.o 5obj-$(CONFIG_SGI_GRU) := gru.o
6gru-y := grufile.o grumain.o grufault.o grutlbpurge.o gruprocfs.o grukservices.o 6gru-y := grufile.o grumain.o grufault.o grutlbpurge.o gruprocfs.o grukservices.o gruhandles.o
7 7
diff --git a/drivers/misc/sgi-gru/gru_instructions.h b/drivers/misc/sgi-gru/gru_instructions.h
index 48762e7b98be..3fde33c1e8f3 100644
--- a/drivers/misc/sgi-gru/gru_instructions.h
+++ b/drivers/misc/sgi-gru/gru_instructions.h
@@ -19,8 +19,11 @@
19#ifndef __GRU_INSTRUCTIONS_H__ 19#ifndef __GRU_INSTRUCTIONS_H__
20#define __GRU_INSTRUCTIONS_H__ 20#define __GRU_INSTRUCTIONS_H__
21 21
22#define gru_flush_cache_hook(p) 22extern int gru_check_status_proc(void *cb);
23#define gru_emulator_wait_hook(p, w) 23extern int gru_wait_proc(void *cb);
24extern void gru_wait_abort_proc(void *cb);
25
26
24 27
25/* 28/*
26 * Architecture dependent functions 29 * Architecture dependent functions
@@ -29,16 +32,16 @@
29#if defined(CONFIG_IA64) 32#if defined(CONFIG_IA64)
30#include <linux/compiler.h> 33#include <linux/compiler.h>
31#include <asm/intrinsics.h> 34#include <asm/intrinsics.h>
32#define __flush_cache(p) ia64_fc(p) 35#define __flush_cache(p) ia64_fc((unsigned long)p)
33/* Use volatile on IA64 to ensure ordering via st4.rel */ 36/* Use volatile on IA64 to ensure ordering via st4.rel */
34#define gru_ordered_store_int(p,v) \ 37#define gru_ordered_store_int(p, v) \
35 do { \ 38 do { \
36 barrier(); \ 39 barrier(); \
37 *((volatile int *)(p)) = v; /* force st.rel */ \ 40 *((volatile int *)(p)) = v; /* force st.rel */ \
38 } while (0) 41 } while (0)
39#elif defined(CONFIG_X86_64) 42#elif defined(CONFIG_X86_64)
40#define __flush_cache(p) clflush(p) 43#define __flush_cache(p) clflush(p)
41#define gru_ordered_store_int(p,v) \ 44#define gru_ordered_store_int(p, v) \
42 do { \ 45 do { \
43 barrier(); \ 46 barrier(); \
44 *(int *)p = v; \ 47 *(int *)p = v; \
@@ -558,20 +561,19 @@ extern int gru_get_cb_exception_detail(void *cb,
558 561
559#define GRU_EXC_STR_SIZE 256 562#define GRU_EXC_STR_SIZE 256
560 563
561extern int gru_check_status_proc(void *cb);
562extern int gru_wait_proc(void *cb);
563extern void gru_wait_abort_proc(void *cb);
564 564
565/* 565/*
566 * Control block definition for checking status 566 * Control block definition for checking status
567 */ 567 */
568struct gru_control_block_status { 568struct gru_control_block_status {
569 unsigned int icmd :1; 569 unsigned int icmd :1;
570 unsigned int unused1 :31; 570 unsigned int ima :3;
571 unsigned int reserved0 :4;
572 unsigned int unused1 :24;
571 unsigned int unused2 :24; 573 unsigned int unused2 :24;
572 unsigned int istatus :2; 574 unsigned int istatus :2;
573 unsigned int isubstatus :4; 575 unsigned int isubstatus :4;
574 unsigned int inused3 :2; 576 unsigned int unused3 :2;
575}; 577};
576 578
577/* Get CB status */ 579/* Get CB status */
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 3ee698ad8599..ab118558552e 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -32,6 +32,7 @@
32#include <linux/device.h> 32#include <linux/device.h>
33#include <linux/io.h> 33#include <linux/io.h>
34#include <linux/uaccess.h> 34#include <linux/uaccess.h>
35#include <linux/security.h>
35#include <asm/pgtable.h> 36#include <asm/pgtable.h>
36#include "gru.h" 37#include "gru.h"
37#include "grutables.h" 38#include "grutables.h"
@@ -266,6 +267,44 @@ err:
266 return 1; 267 return 1;
267} 268}
268 269
270static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
271 int write, int atomic, unsigned long *gpa, int *pageshift)
272{
273 struct mm_struct *mm = gts->ts_mm;
274 struct vm_area_struct *vma;
275 unsigned long paddr;
276 int ret, ps;
277
278 vma = find_vma(mm, vaddr);
279 if (!vma)
280 goto inval;
281
282 /*
283 * Atomic lookup is faster & usually works even if called in non-atomic
284 * context.
285 */
286 rmb(); /* Must/check ms_range_active before loading PTEs */
287 ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
288 if (ret) {
289 if (atomic)
290 goto upm;
291 if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
292 goto inval;
293 }
294 if (is_gru_paddr(paddr))
295 goto inval;
296 paddr = paddr & ~((1UL << ps) - 1);
297 *gpa = uv_soc_phys_ram_to_gpa(paddr);
298 *pageshift = ps;
299 return 0;
300
301inval:
302 return -1;
303upm:
304 return -2;
305}
306
307
269/* 308/*
270 * Drop a TLB entry into the GRU. The fault is described by info in an TFH. 309 * Drop a TLB entry into the GRU. The fault is described by info in an TFH.
271 * Input: 310 * Input:
@@ -280,10 +319,8 @@ static int gru_try_dropin(struct gru_thread_state *gts,
280 struct gru_tlb_fault_handle *tfh, 319 struct gru_tlb_fault_handle *tfh,
281 unsigned long __user *cb) 320 unsigned long __user *cb)
282{ 321{
283 struct mm_struct *mm = gts->ts_mm; 322 int pageshift = 0, asid, write, ret, atomic = !cb;
284 struct vm_area_struct *vma; 323 unsigned long gpa = 0, vaddr = 0;
285 int pageshift, asid, write, ret;
286 unsigned long paddr, gpa, vaddr;
287 324
288 /* 325 /*
289 * NOTE: The GRU contains magic hardware that eliminates races between 326 * NOTE: The GRU contains magic hardware that eliminates races between
@@ -317,28 +354,19 @@ static int gru_try_dropin(struct gru_thread_state *gts,
317 if (atomic_read(&gts->ts_gms->ms_range_active)) 354 if (atomic_read(&gts->ts_gms->ms_range_active))
318 goto failactive; 355 goto failactive;
319 356
320 vma = find_vma(mm, vaddr); 357 ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
321 if (!vma) 358 if (ret == -1)
322 goto failinval; 359 goto failinval;
360 if (ret == -2)
361 goto failupm;
323 362
324 /* 363 if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
325 * Atomic lookup is faster & usually works even if called in non-atomic 364 gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
326 * context. 365 if (atomic || !gru_update_cch(gts, 0)) {
327 */ 366 gts->ts_force_cch_reload = 1;
328 rmb(); /* Must/check ms_range_active before loading PTEs */
329 ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &pageshift);
330 if (ret) {
331 if (!cb)
332 goto failupm; 367 goto failupm;
333 if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, 368 }
334 &pageshift))
335 goto failinval;
336 } 369 }
337 if (is_gru_paddr(paddr))
338 goto failinval;
339
340 paddr = paddr & ~((1UL << pageshift) - 1);
341 gpa = uv_soc_phys_ram_to_gpa(paddr);
342 gru_cb_set_istatus_active(cb); 370 gru_cb_set_istatus_active(cb);
343 tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write, 371 tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
344 GRU_PAGESIZE(pageshift)); 372 GRU_PAGESIZE(pageshift));
@@ -368,6 +396,7 @@ failupm:
368 396
369failfmm: 397failfmm:
370 /* FMM state on UPM call */ 398 /* FMM state on UPM call */
399 gru_flush_cache(tfh);
371 STAT(tlb_dropin_fail_fmm); 400 STAT(tlb_dropin_fail_fmm);
372 gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state); 401 gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
373 return 0; 402 return 0;
@@ -448,6 +477,7 @@ irqreturn_t gru_intr(int irq, void *dev_id)
448 up_read(&gts->ts_mm->mmap_sem); 477 up_read(&gts->ts_mm->mmap_sem);
449 } else { 478 } else {
450 tfh_user_polling_mode(tfh); 479 tfh_user_polling_mode(tfh);
480 STAT(intr_mm_lock_failed);
451 } 481 }
452 } 482 }
453 return IRQ_HANDLED; 483 return IRQ_HANDLED;
@@ -497,10 +527,8 @@ int gru_handle_user_call_os(unsigned long cb)
497 if (!gts) 527 if (!gts)
498 return -EINVAL; 528 return -EINVAL;
499 529
500 if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) { 530 if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
501 ret = -EINVAL;
502 goto exit; 531 goto exit;
503 }
504 532
505 /* 533 /*
506 * If force_unload is set, the UPM TLB fault is phony. The task 534 * If force_unload is set, the UPM TLB fault is phony. The task
@@ -508,6 +536,20 @@ int gru_handle_user_call_os(unsigned long cb)
508 * unload the context. The task will page fault and assign a new 536 * unload the context. The task will page fault and assign a new
509 * context. 537 * context.
510 */ 538 */
539 if (gts->ts_tgid_owner == current->tgid && gts->ts_blade >= 0 &&
540 gts->ts_blade != uv_numa_blade_id()) {
541 STAT(call_os_offnode_reference);
542 gts->ts_force_unload = 1;
543 }
544
545 /*
546 * CCH may contain stale data if ts_force_cch_reload is set.
547 */
548 if (gts->ts_gru && gts->ts_force_cch_reload) {
549 gru_update_cch(gts, 0);
550 gts->ts_force_cch_reload = 0;
551 }
552
511 ret = -EAGAIN; 553 ret = -EAGAIN;
512 cbrnum = thread_cbr_number(gts, ucbnum); 554 cbrnum = thread_cbr_number(gts, ucbnum);
513 if (gts->ts_force_unload) { 555 if (gts->ts_force_unload) {
@@ -541,11 +583,13 @@ int gru_get_exception_detail(unsigned long arg)
541 if (!gts) 583 if (!gts)
542 return -EINVAL; 584 return -EINVAL;
543 585
544 if (gts->ts_gru) { 586 ucbnum = get_cb_number((void *)excdet.cb);
545 ucbnum = get_cb_number((void *)excdet.cb); 587 if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
588 ret = -EINVAL;
589 } else if (gts->ts_gru) {
546 cbrnum = thread_cbr_number(gts, ucbnum); 590 cbrnum = thread_cbr_number(gts, ucbnum);
547 cbe = get_cbe_by_index(gts->ts_gru, cbrnum); 591 cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
548 prefetchw(cbe); /* Harmless on hardware, required for emulator */ 592 prefetchw(cbe);/* Harmless on hardware, required for emulator */
549 excdet.opc = cbe->opccpy; 593 excdet.opc = cbe->opccpy;
550 excdet.exopc = cbe->exopccpy; 594 excdet.exopc = cbe->exopccpy;
551 excdet.ecause = cbe->ecause; 595 excdet.ecause = cbe->ecause;
@@ -567,6 +611,31 @@ int gru_get_exception_detail(unsigned long arg)
567/* 611/*
568 * User request to unload a context. Content is saved for possible reload. 612 * User request to unload a context. Content is saved for possible reload.
569 */ 613 */
614static int gru_unload_all_contexts(void)
615{
616 struct gru_thread_state *gts;
617 struct gru_state *gru;
618 int gid, ctxnum;
619
620 if (!capable(CAP_SYS_ADMIN))
621 return -EPERM;
622 foreach_gid(gid) {
623 gru = GID_TO_GRU(gid);
624 spin_lock(&gru->gs_lock);
625 for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
626 gts = gru->gs_gts[ctxnum];
627 if (gts && mutex_trylock(&gts->ts_ctxlock)) {
628 spin_unlock(&gru->gs_lock);
629 gru_unload_context(gts, 1);
630 gru_unlock_gts(gts);
631 spin_lock(&gru->gs_lock);
632 }
633 }
634 spin_unlock(&gru->gs_lock);
635 }
636 return 0;
637}
638
570int gru_user_unload_context(unsigned long arg) 639int gru_user_unload_context(unsigned long arg)
571{ 640{
572 struct gru_thread_state *gts; 641 struct gru_thread_state *gts;
@@ -578,6 +647,9 @@ int gru_user_unload_context(unsigned long arg)
578 647
579 gru_dbg(grudev, "gseg 0x%lx\n", req.gseg); 648 gru_dbg(grudev, "gseg 0x%lx\n", req.gseg);
580 649
650 if (!req.gseg)
651 return gru_unload_all_contexts();
652
581 gts = gru_find_lock_gts(req.gseg); 653 gts = gru_find_lock_gts(req.gseg);
582 if (!gts) 654 if (!gts)
583 return -EINVAL; 655 return -EINVAL;
@@ -609,7 +681,7 @@ int gru_user_flush_tlb(unsigned long arg)
609 if (!gts) 681 if (!gts)
610 return -EINVAL; 682 return -EINVAL;
611 683
612 gru_flush_tlb_range(gts->ts_gms, req.vaddr, req.vaddr + req.len); 684 gru_flush_tlb_range(gts->ts_gms, req.vaddr, req.len);
613 gru_unlock_gts(gts); 685 gru_unlock_gts(gts);
614 686
615 return 0; 687 return 0;
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index c67e4e8bd62c..3e6e42d2f01b 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -45,7 +45,9 @@
45#include <asm/uv/uv_mmrs.h> 45#include <asm/uv/uv_mmrs.h>
46 46
47struct gru_blade_state *gru_base[GRU_MAX_BLADES] __read_mostly; 47struct gru_blade_state *gru_base[GRU_MAX_BLADES] __read_mostly;
48unsigned long gru_start_paddr, gru_end_paddr __read_mostly; 48unsigned long gru_start_paddr __read_mostly;
49unsigned long gru_end_paddr __read_mostly;
50unsigned int gru_max_gids __read_mostly;
49struct gru_stats_s gru_stats; 51struct gru_stats_s gru_stats;
50 52
51/* Guaranteed user available resources on each node */ 53/* Guaranteed user available resources on each node */
@@ -101,7 +103,7 @@ static int gru_file_mmap(struct file *file, struct vm_area_struct *vma)
101 return -EPERM; 103 return -EPERM;
102 104
103 if (vma->vm_start & (GRU_GSEG_PAGESIZE - 1) || 105 if (vma->vm_start & (GRU_GSEG_PAGESIZE - 1) ||
104 vma->vm_end & (GRU_GSEG_PAGESIZE - 1)) 106 vma->vm_end & (GRU_GSEG_PAGESIZE - 1))
105 return -EINVAL; 107 return -EINVAL;
106 108
107 vma->vm_flags |= 109 vma->vm_flags |=
@@ -273,8 +275,11 @@ static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr,
273 gru->gs_blade_id = bid; 275 gru->gs_blade_id = bid;
274 gru->gs_cbr_map = (GRU_CBR_AU == 64) ? ~0 : (1UL << GRU_CBR_AU) - 1; 276 gru->gs_cbr_map = (GRU_CBR_AU == 64) ? ~0 : (1UL << GRU_CBR_AU) - 1;
275 gru->gs_dsr_map = (1UL << GRU_DSR_AU) - 1; 277 gru->gs_dsr_map = (1UL << GRU_DSR_AU) - 1;
278 gru->gs_asid_limit = MAX_ASID;
276 gru_tgh_flush_init(gru); 279 gru_tgh_flush_init(gru);
277 gru_dbg(grudev, "bid %d, nid %d, gru %x, vaddr %p (0x%lx)\n", 280 if (gru->gs_gid >= gru_max_gids)
281 gru_max_gids = gru->gs_gid + 1;
282 gru_dbg(grudev, "bid %d, nid %d, gid %d, vaddr %p (0x%lx)\n",
278 bid, nid, gru->gs_gid, gru->gs_gru_base_vaddr, 283 bid, nid, gru->gs_gid, gru->gs_gru_base_vaddr,
279 gru->gs_gru_base_paddr); 284 gru->gs_gru_base_paddr);
280 gru_kservices_init(gru); 285 gru_kservices_init(gru);
@@ -295,7 +300,7 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
295 for_each_online_node(nid) { 300 for_each_online_node(nid) {
296 bid = uv_node_to_blade_id(nid); 301 bid = uv_node_to_blade_id(nid);
297 pnode = uv_node_to_pnode(nid); 302 pnode = uv_node_to_pnode(nid);
298 if (gru_base[bid]) 303 if (bid < 0 || gru_base[bid])
299 continue; 304 continue;
300 page = alloc_pages_node(nid, GFP_KERNEL, order); 305 page = alloc_pages_node(nid, GFP_KERNEL, order);
301 if (!page) 306 if (!page)
@@ -308,11 +313,11 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
308 dsrbytes = 0; 313 dsrbytes = 0;
309 cbrs = 0; 314 cbrs = 0;
310 for (gru = gru_base[bid]->bs_grus, chip = 0; 315 for (gru = gru_base[bid]->bs_grus, chip = 0;
311 chip < GRU_CHIPLETS_PER_BLADE; 316 chip < GRU_CHIPLETS_PER_BLADE;
312 chip++, gru++) { 317 chip++, gru++) {
313 paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip); 318 paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip);
314 vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip); 319 vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip);
315 gru_init_chiplet(gru, paddr, vaddr, bid, nid, chip); 320 gru_init_chiplet(gru, paddr, vaddr, nid, bid, chip);
316 n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE; 321 n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;
317 cbrs = max(cbrs, n); 322 cbrs = max(cbrs, n);
318 n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES; 323 n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES;
@@ -370,26 +375,26 @@ static int __init gru_init(void)
370 void *gru_start_vaddr; 375 void *gru_start_vaddr;
371 376
372 if (!is_uv_system()) 377 if (!is_uv_system())
373 return 0; 378 return -ENODEV;
374 379
375#if defined CONFIG_IA64 380#if defined CONFIG_IA64
376 gru_start_paddr = 0xd000000000UL; /* ZZZZZZZZZZZZZZZZZZZ fixme */ 381 gru_start_paddr = 0xd000000000UL; /* ZZZZZZZZZZZZZZZZZZZ fixme */
377#else 382#else
378 gru_start_paddr = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR) & 383 gru_start_paddr = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR) &
379 0x7fffffffffffUL; 384 0x7fffffffffffUL;
380
381#endif 385#endif
382 gru_start_vaddr = __va(gru_start_paddr); 386 gru_start_vaddr = __va(gru_start_paddr);
383 gru_end_paddr = gru_start_paddr + MAX_NUMNODES * GRU_SIZE; 387 gru_end_paddr = gru_start_paddr + GRU_MAX_BLADES * GRU_SIZE;
384 printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n", 388 printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n",
385 gru_start_paddr, gru_end_paddr); 389 gru_start_paddr, gru_end_paddr);
386 irq = get_base_irq(); 390 irq = get_base_irq();
387 for (chip = 0; chip < GRU_CHIPLETS_PER_BLADE; chip++) { 391 for (chip = 0; chip < GRU_CHIPLETS_PER_BLADE; chip++) {
388 ret = request_irq(irq + chip, gru_intr, 0, id, NULL); 392 ret = request_irq(irq + chip, gru_intr, 0, id, NULL);
389 /* TODO: fix irq handling on x86. For now ignore failures because 393 /* TODO: fix irq handling on x86. For now ignore failure because
390 * interrupts are not required & not yet fully supported */ 394 * interrupts are not required & not yet fully supported */
391 if (ret) { 395 if (ret) {
392 printk("!!!WARNING: GRU ignoring request failure!!!\n"); 396 printk(KERN_WARNING
397 "!!!WARNING: GRU ignoring request failure!!!\n");
393 ret = 0; 398 ret = 0;
394 } 399 }
395 if (ret) { 400 if (ret) {
@@ -435,7 +440,7 @@ exit1:
435 440
436static void __exit gru_exit(void) 441static void __exit gru_exit(void)
437{ 442{
438 int i, bid; 443 int i, bid, gid;
439 int order = get_order(sizeof(struct gru_state) * 444 int order = get_order(sizeof(struct gru_state) *
440 GRU_CHIPLETS_PER_BLADE); 445 GRU_CHIPLETS_PER_BLADE);
441 446
@@ -445,6 +450,9 @@ static void __exit gru_exit(void)
445 for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++) 450 for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++)
446 free_irq(IRQ_GRU + i, NULL); 451 free_irq(IRQ_GRU + i, NULL);
447 452
453 foreach_gid(gid)
454 gru_kservices_exit(GID_TO_GRU(gid));
455
448 for (bid = 0; bid < GRU_MAX_BLADES; bid++) 456 for (bid = 0; bid < GRU_MAX_BLADES; bid++)
449 free_pages((unsigned long)gru_base[bid], order); 457 free_pages((unsigned long)gru_base[bid], order);
450 458
@@ -469,7 +477,11 @@ struct vm_operations_struct gru_vm_ops = {
469 .fault = gru_fault, 477 .fault = gru_fault,
470}; 478};
471 479
480#ifndef MODULE
472fs_initcall(gru_init); 481fs_initcall(gru_init);
482#else
483module_init(gru_init);
484#endif
473module_exit(gru_exit); 485module_exit(gru_exit);
474 486
475module_param(gru_options, ulong, 0644); 487module_param(gru_options, ulong, 0644);
diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
new file mode 100644
index 000000000000..9b7ccb328697
--- /dev/null
+++ b/drivers/misc/sgi-gru/gruhandles.c
@@ -0,0 +1,183 @@
1/*
2 * GRU KERNEL MCS INSTRUCTIONS
3 *
4 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/kernel.h>
22#include "gru.h"
23#include "grulib.h"
24#include "grutables.h"
25
26/* 10 sec */
27#ifdef CONFIG_IA64
28#include <asm/processor.h>
29#define GRU_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10)
30#else
31#include <asm/tsc.h>
32#define GRU_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
33#endif
34
35/* Extract the status field from a kernel handle */
36#define GET_MSEG_HANDLE_STATUS(h) (((*(unsigned long *)(h)) >> 16) & 3)
37
38struct mcs_op_statistic mcs_op_statistics[mcsop_last];
39
40static void update_mcs_stats(enum mcs_op op, unsigned long clks)
41{
42 atomic_long_inc(&mcs_op_statistics[op].count);
43 atomic_long_add(clks, &mcs_op_statistics[op].total);
44 if (mcs_op_statistics[op].max < clks)
45 mcs_op_statistics[op].max = clks;
46}
47
48static void start_instruction(void *h)
49{
50 unsigned long *w0 = h;
51
52 wmb(); /* setting CMD bit must be last */
53 *w0 = *w0 | 1;
54 gru_flush_cache(h);
55}
56
57static int wait_instruction_complete(void *h, enum mcs_op opc)
58{
59 int status;
60 cycles_t start_time = get_cycles();
61
62 while (1) {
63 cpu_relax();
64 status = GET_MSEG_HANDLE_STATUS(h);
65 if (status != CCHSTATUS_ACTIVE)
66 break;
67 if (GRU_OPERATION_TIMEOUT < (get_cycles() - start_time))
68 panic("GRU %p is malfunctioning\n", h);
69 }
70 if (gru_options & OPT_STATS)
71 update_mcs_stats(opc, get_cycles() - start_time);
72 return status;
73}
74
75int cch_allocate(struct gru_context_configuration_handle *cch,
76 int asidval, int sizeavail, unsigned long cbrmap,
77 unsigned long dsrmap)
78{
79 int i;
80
81 for (i = 0; i < 8; i++) {
82 cch->asid[i] = (asidval++);
83 cch->sizeavail[i] = sizeavail;
84 }
85 cch->dsr_allocation_map = dsrmap;
86 cch->cbr_allocation_map = cbrmap;
87 cch->opc = CCHOP_ALLOCATE;
88 start_instruction(cch);
89 return wait_instruction_complete(cch, cchop_allocate);
90}
91
92int cch_start(struct gru_context_configuration_handle *cch)
93{
94 cch->opc = CCHOP_START;
95 start_instruction(cch);
96 return wait_instruction_complete(cch, cchop_start);
97}
98
99int cch_interrupt(struct gru_context_configuration_handle *cch)
100{
101 cch->opc = CCHOP_INTERRUPT;
102 start_instruction(cch);
103 return wait_instruction_complete(cch, cchop_interrupt);
104}
105
106int cch_deallocate(struct gru_context_configuration_handle *cch)
107{
108 cch->opc = CCHOP_DEALLOCATE;
109 start_instruction(cch);
110 return wait_instruction_complete(cch, cchop_deallocate);
111}
112
113int cch_interrupt_sync(struct gru_context_configuration_handle
114 *cch)
115{
116 cch->opc = CCHOP_INTERRUPT_SYNC;
117 start_instruction(cch);
118 return wait_instruction_complete(cch, cchop_interrupt_sync);
119}
120
121int tgh_invalidate(struct gru_tlb_global_handle *tgh,
122 unsigned long vaddr, unsigned long vaddrmask,
123 int asid, int pagesize, int global, int n,
124 unsigned short ctxbitmap)
125{
126 tgh->vaddr = vaddr;
127 tgh->asid = asid;
128 tgh->pagesize = pagesize;
129 tgh->n = n;
130 tgh->global = global;
131 tgh->vaddrmask = vaddrmask;
132 tgh->ctxbitmap = ctxbitmap;
133 tgh->opc = TGHOP_TLBINV;
134 start_instruction(tgh);
135 return wait_instruction_complete(tgh, tghop_invalidate);
136}
137
138void tfh_write_only(struct gru_tlb_fault_handle *tfh,
139 unsigned long pfn, unsigned long vaddr,
140 int asid, int dirty, int pagesize)
141{
142 tfh->fillasid = asid;
143 tfh->fillvaddr = vaddr;
144 tfh->pfn = pfn;
145 tfh->dirty = dirty;
146 tfh->pagesize = pagesize;
147 tfh->opc = TFHOP_WRITE_ONLY;
148 start_instruction(tfh);
149}
150
151void tfh_write_restart(struct gru_tlb_fault_handle *tfh,
152 unsigned long paddr, int gaa,
153 unsigned long vaddr, int asid, int dirty,
154 int pagesize)
155{
156 tfh->fillasid = asid;
157 tfh->fillvaddr = vaddr;
158 tfh->pfn = paddr >> GRU_PADDR_SHIFT;
159 tfh->gaa = gaa;
160 tfh->dirty = dirty;
161 tfh->pagesize = pagesize;
162 tfh->opc = TFHOP_WRITE_RESTART;
163 start_instruction(tfh);
164}
165
166void tfh_restart(struct gru_tlb_fault_handle *tfh)
167{
168 tfh->opc = TFHOP_RESTART;
169 start_instruction(tfh);
170}
171
172void tfh_user_polling_mode(struct gru_tlb_fault_handle *tfh)
173{
174 tfh->opc = TFHOP_USER_POLLING_MODE;
175 start_instruction(tfh);
176}
177
178void tfh_exception(struct gru_tlb_fault_handle *tfh)
179{
180 tfh->opc = TFHOP_EXCEPTION;
181 start_instruction(tfh);
182}
183
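Since update_mcs_stats() only accumulates counts and totals, a consumer has to derive averages itself. A hedged helper sketch (mcs_op_statistics and enum mcs_op come from grutables.h; the function name is made up):

static unsigned long mcs_avg_clks(enum mcs_op op)
{
	unsigned long count = atomic_long_read(&mcs_op_statistics[op].count);

	if (!count)
		return 0;
	return atomic_long_read(&mcs_op_statistics[op].total) / count;
}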
diff --git a/drivers/misc/sgi-gru/gruhandles.h b/drivers/misc/sgi-gru/gruhandles.h
index b63018d60fe1..1ed74d7508c8 100644
--- a/drivers/misc/sgi-gru/gruhandles.h
+++ b/drivers/misc/sgi-gru/gruhandles.h
@@ -489,170 +489,28 @@ enum gru_cbr_state {
489 * 64m 26 8 489 * 64m 26 8
490 * ... 490 * ...
491 */ 491 */
492#define GRU_PAGESIZE(sh) ((((sh) > 20 ? (sh) + 2: (sh)) >> 1) - 6) 492#define GRU_PAGESIZE(sh) ((((sh) > 20 ? (sh) + 2 : (sh)) >> 1) - 6)
493#define GRU_SIZEAVAIL(sh) (1UL << GRU_PAGESIZE(sh)) 493#define GRU_SIZEAVAIL(sh) (1UL << GRU_PAGESIZE(sh))
494 494
495/* minimum TLB purge count to ensure a full purge */ 495/* minimum TLB purge count to ensure a full purge */
496#define GRUMAXINVAL 1024UL 496#define GRUMAXINVAL 1024UL
497 497
498 498int cch_allocate(struct gru_context_configuration_handle *cch,
499/* Extract the status field from a kernel handle */ 499 int asidval, int sizeavail, unsigned long cbrmap, unsigned long dsrmap);
500#define GET_MSEG_HANDLE_STATUS(h) (((*(unsigned long *)(h)) >> 16) & 3) 500
501 501int cch_start(struct gru_context_configuration_handle *cch);
502static inline void start_instruction(void *h) 502int cch_interrupt(struct gru_context_configuration_handle *cch);
503{ 503int cch_deallocate(struct gru_context_configuration_handle *cch);
504 unsigned long *w0 = h; 504int cch_interrupt_sync(struct gru_context_configuration_handle *cch);
505 505int tgh_invalidate(struct gru_tlb_global_handle *tgh, unsigned long vaddr,
506 wmb(); /* setting CMD bit must be last */ 506 unsigned long vaddrmask, int asid, int pagesize, int global, int n,
507 *w0 = *w0 | 1; 507 unsigned short ctxbitmap);
508 gru_flush_cache(h); 508void tfh_write_only(struct gru_tlb_fault_handle *tfh, unsigned long pfn,
509} 509 unsigned long vaddr, int asid, int dirty, int pagesize);
510 510void tfh_write_restart(struct gru_tlb_fault_handle *tfh, unsigned long paddr,
511static inline int wait_instruction_complete(void *h) 511 int gaa, unsigned long vaddr, int asid, int dirty, int pagesize);
512{ 512void tfh_restart(struct gru_tlb_fault_handle *tfh);
513 int status; 513void tfh_user_polling_mode(struct gru_tlb_fault_handle *tfh);
514 514void tfh_exception(struct gru_tlb_fault_handle *tfh);
515 do {
516 cpu_relax();
517 barrier();
518 status = GET_MSEG_HANDLE_STATUS(h);
519 } while (status == CCHSTATUS_ACTIVE);
520 return status;
521}
522
523#if defined CONFIG_IA64
524static inline void cch_allocate_set_asids(
525 struct gru_context_configuration_handle *cch, int asidval)
526{
527 int i;
528
529 for (i = 0; i <= RGN_HPAGE; i++) { /* assume HPAGE is last region */
530 cch->asid[i] = (asidval++);
531#if 0
532 /* ZZZ hugepages not supported yet */
533 if (i == RGN_HPAGE)
534 cch->sizeavail[i] = GRU_SIZEAVAIL(hpage_shift);
535 else
536#endif
537 cch->sizeavail[i] = GRU_SIZEAVAIL(PAGE_SHIFT);
538 }
539}
540#elif defined CONFIG_X86_64
541static inline void cch_allocate_set_asids(
542 struct gru_context_configuration_handle *cch, int asidval)
543{
544 int i;
545
546 for (i = 0; i < 8; i++) {
547 cch->asid[i] = asidval++;
548 cch->sizeavail[i] = GRU_SIZEAVAIL(PAGE_SHIFT) |
549 GRU_SIZEAVAIL(21);
550 }
551}
552#endif
553
554static inline int cch_allocate(struct gru_context_configuration_handle *cch,
555 int asidval, unsigned long cbrmap,
556 unsigned long dsrmap)
557{
558 cch_allocate_set_asids(cch, asidval);
559 cch->dsr_allocation_map = dsrmap;
560 cch->cbr_allocation_map = cbrmap;
561 cch->opc = CCHOP_ALLOCATE;
562 start_instruction(cch);
563 return wait_instruction_complete(cch);
564}
565
566static inline int cch_start(struct gru_context_configuration_handle *cch)
567{
568 cch->opc = CCHOP_START;
569 start_instruction(cch);
570 return wait_instruction_complete(cch);
571}
572
573static inline int cch_interrupt(struct gru_context_configuration_handle *cch)
574{
575 cch->opc = CCHOP_INTERRUPT;
576 start_instruction(cch);
577 return wait_instruction_complete(cch);
578}
579
580static inline int cch_deallocate(struct gru_context_configuration_handle *cch)
581{
582 cch->opc = CCHOP_DEALLOCATE;
583 start_instruction(cch);
584 return wait_instruction_complete(cch);
585}
586
587static inline int cch_interrupt_sync(struct gru_context_configuration_handle
588 *cch)
589{
590 cch->opc = CCHOP_INTERRUPT_SYNC;
591 start_instruction(cch);
592 return wait_instruction_complete(cch);
593}
594
595static inline int tgh_invalidate(struct gru_tlb_global_handle *tgh,
596 unsigned long vaddr, unsigned long vaddrmask,
597 int asid, int pagesize, int global, int n,
598 unsigned short ctxbitmap)
599{
600 tgh->vaddr = vaddr;
601 tgh->asid = asid;
602 tgh->pagesize = pagesize;
603 tgh->n = n;
604 tgh->global = global;
605 tgh->vaddrmask = vaddrmask;
606 tgh->ctxbitmap = ctxbitmap;
607 tgh->opc = TGHOP_TLBINV;
608 start_instruction(tgh);
609 return wait_instruction_complete(tgh);
610}
611
612static inline void tfh_write_only(struct gru_tlb_fault_handle *tfh,
613 unsigned long pfn, unsigned long vaddr,
614 int asid, int dirty, int pagesize)
615{
616 tfh->fillasid = asid;
617 tfh->fillvaddr = vaddr;
618 tfh->pfn = pfn;
619 tfh->dirty = dirty;
620 tfh->pagesize = pagesize;
621 tfh->opc = TFHOP_WRITE_ONLY;
622 start_instruction(tfh);
623}
624
625static inline void tfh_write_restart(struct gru_tlb_fault_handle *tfh,
626 unsigned long paddr, int gaa,
627 unsigned long vaddr, int asid, int dirty,
628 int pagesize)
629{
630 tfh->fillasid = asid;
631 tfh->fillvaddr = vaddr;
632 tfh->pfn = paddr >> GRU_PADDR_SHIFT;
633 tfh->gaa = gaa;
634 tfh->dirty = dirty;
635 tfh->pagesize = pagesize;
636 tfh->opc = TFHOP_WRITE_RESTART;
637 start_instruction(tfh);
638}
639
640static inline void tfh_restart(struct gru_tlb_fault_handle *tfh)
641{
642 tfh->opc = TFHOP_RESTART;
643 start_instruction(tfh);
644}
645
646static inline void tfh_user_polling_mode(struct gru_tlb_fault_handle *tfh)
647{
648 tfh->opc = TFHOP_USER_POLLING_MODE;
649 start_instruction(tfh);
650}
651
652static inline void tfh_exception(struct gru_tlb_fault_handle *tfh)
653{
654 tfh->opc = TFHOP_EXCEPTION;
655 start_instruction(tfh);
656}
657 515
658#endif /* __GRUHANDLES_H__ */ 516#endif /* __GRUHANDLES_H__ */
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index 880c55dfb662..d8bd7d84a7cf 100644
--- a/drivers/misc/sgi-gru/grukservices.c
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -52,8 +52,10 @@
52 */ 52 */
53 53
54/* Blade percpu resources PERMANENTLY reserved for kernel use */ 54/* Blade percpu resources PERMANENTLY reserved for kernel use */
55#define GRU_NUM_KERNEL_CBR 1 55#define GRU_NUM_KERNEL_CBR 1
56#define GRU_NUM_KERNEL_DSR_BYTES 256 56#define GRU_NUM_KERNEL_DSR_BYTES 256
57#define GRU_NUM_KERNEL_DSR_CL (GRU_NUM_KERNEL_DSR_BYTES / \
58 GRU_CACHE_LINE_BYTES)
57#define KERNEL_CTXNUM 15 59#define KERNEL_CTXNUM 15
58 60
59/* GRU instruction attributes for all instructions */ 61/* GRU instruction attributes for all instructions */
@@ -94,7 +96,6 @@ struct message_header {
94 char fill; 96 char fill;
95}; 97};
96 98
97#define QLINES(mq) ((mq) + offsetof(struct message_queue, qlines))
98#define HSTATUS(mq, h) ((mq) + offsetof(struct message_queue, hstatus[h])) 99#define HSTATUS(mq, h) ((mq) + offsetof(struct message_queue, hstatus[h]))
99 100
100static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr) 101static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
@@ -122,7 +123,7 @@ int gru_get_cb_exception_detail(void *cb,
122 struct gru_control_block_extended *cbe; 123 struct gru_control_block_extended *cbe;
123 124
124 cbe = get_cbe(GRUBASE(cb), get_cb_number(cb)); 125 cbe = get_cbe(GRUBASE(cb), get_cb_number(cb));
125 prefetchw(cbe); /* Harmless on hardware, required for emulator */ 126 prefetchw(cbe); /* Harmless on hardware, required for emulator */
126 excdet->opc = cbe->opccpy; 127 excdet->opc = cbe->opccpy;
127 excdet->exopc = cbe->exopccpy; 128 excdet->exopc = cbe->exopccpy;
128 excdet->ecause = cbe->ecause; 129 excdet->ecause = cbe->ecause;
@@ -250,7 +251,8 @@ static inline void restore_present2(void *p, int val)
250 * Create a message queue. 251 * Create a message queue.
251 * qlines - message queue size in cache lines. Includes 2-line header. 252 * qlines - message queue size in cache lines. Includes 2-line header.
252 */ 253 */
253int gru_create_message_queue(void *p, unsigned int bytes) 254int gru_create_message_queue(struct gru_message_queue_desc *mqd,
255 void *p, unsigned int bytes, int nasid, int vector, int apicid)
254{ 256{
255 struct message_queue *mq = p; 257 struct message_queue *mq = p;
256 unsigned int qlines; 258 unsigned int qlines;
@@ -265,6 +267,12 @@ int gru_create_message_queue(void *p, unsigned int bytes)
265 mq->hstatus[0] = 0; 267 mq->hstatus[0] = 0;
266 mq->hstatus[1] = 1; 268 mq->hstatus[1] = 1;
267 mq->head = gru_mesq_head(2, qlines / 2 + 1); 269 mq->head = gru_mesq_head(2, qlines / 2 + 1);
270 mqd->mq = mq;
271 mqd->mq_gpa = uv_gpa(mq);
272 mqd->qlines = qlines;
273 mqd->interrupt_pnode = UV_NASID_TO_PNODE(nasid);
274 mqd->interrupt_vector = vector;
275 mqd->interrupt_apicid = apicid;
268 return 0; 276 return 0;
269} 277}
270EXPORT_SYMBOL_GPL(gru_create_message_queue); 278EXPORT_SYMBOL_GPL(gru_create_message_queue);
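A hedged sketch of a caller of the extended gru_create_message_queue() interface; the queue size and the nasid/vector/apicid triple are illustrative placeholders (a vector of zero requests a polled queue with no interrupt delivery):

	/* Illustrative only; not part of this patch. */
	struct gru_message_queue_desc mqd;
	void *qbuf;
	int ret;

	qbuf = kzalloc(8 * GRU_CACHE_LINE_BYTES, GFP_KERNEL);
	if (!qbuf)
		return -ENOMEM;
	ret = gru_create_message_queue(&mqd, qbuf, 8 * GRU_CACHE_LINE_BYTES,
				       nasid, vector, apicid);
	if (ret)
		return -EINVAL;	/* >0 means error, per grukservices.h */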
@@ -277,8 +285,8 @@ EXPORT_SYMBOL_GPL(gru_create_message_queue);
277 * -1 - if mesq sent successfully but queue not full 285 * -1 - if mesq sent successfully but queue not full
278 * >0 - unexpected error. MQE_xxx returned 286 * >0 - unexpected error. MQE_xxx returned
279 */ 287 */
280static int send_noop_message(void *cb, 288static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd,
281 unsigned long mq, void *mesg) 289 void *mesg)
282{ 290{
283 const struct message_header noop_header = { 291 const struct message_header noop_header = {
284 .present = MQS_NOOP, .lines = 1}; 292 .present = MQS_NOOP, .lines = 1};
@@ -289,7 +297,7 @@ static int send_noop_message(void *cb,
289 STAT(mesq_noop); 297 STAT(mesq_noop);
290 save_mhdr = *mhdr; 298 save_mhdr = *mhdr;
291 *mhdr = noop_header; 299 *mhdr = noop_header;
292 gru_mesq(cb, mq, gru_get_tri(mhdr), 1, IMA); 300 gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), 1, IMA);
293 ret = gru_wait(cb); 301 ret = gru_wait(cb);
294 302
295 if (ret) { 303 if (ret) {
@@ -313,7 +321,7 @@ static int send_noop_message(void *cb,
313 break; 321 break;
314 case CBSS_PUT_NACKED: 322 case CBSS_PUT_NACKED:
315 STAT(mesq_noop_put_nacked); 323 STAT(mesq_noop_put_nacked);
316 m = mq + (gru_get_amo_value_head(cb) << 6); 324 m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
317 gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, 1, 1, 325 gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, 1, 1,
318 IMA); 326 IMA);
319 if (gru_wait(cb) == CBS_IDLE) 327 if (gru_wait(cb) == CBS_IDLE)
@@ -333,30 +341,20 @@ static int send_noop_message(void *cb,
333/* 341/*
334 * Handle a gru_mesq full. 342 * Handle a gru_mesq full.
335 */ 343 */
336static int send_message_queue_full(void *cb, 344static int send_message_queue_full(void *cb, struct gru_message_queue_desc *mqd,
337 unsigned long mq, void *mesg, int lines) 345 void *mesg, int lines)
338{ 346{
339 union gru_mesqhead mqh; 347 union gru_mesqhead mqh;
340 unsigned int limit, head; 348 unsigned int limit, head;
341 unsigned long avalue; 349 unsigned long avalue;
342 int half, qlines, save; 350 int half, qlines;
343 351
344 /* Determine if switching to first/second half of q */ 352 /* Determine if switching to first/second half of q */
345 avalue = gru_get_amo_value(cb); 353 avalue = gru_get_amo_value(cb);
346 head = gru_get_amo_value_head(cb); 354 head = gru_get_amo_value_head(cb);
347 limit = gru_get_amo_value_limit(cb); 355 limit = gru_get_amo_value_limit(cb);
348 356
349 /* 357 qlines = mqd->qlines;
350 * Fetch "qlines" from the queue header. Since the queue may be
351 * in memory that can't be accessed using socket addresses, use
352 * the GRU to access the data. Use DSR space from the message.
353 */
354 save = *(int *)mesg;
355 gru_vload(cb, QLINES(mq), gru_get_tri(mesg), XTYPE_W, 1, 1, IMA);
356 if (gru_wait(cb) != CBS_IDLE)
357 goto cberr;
358 qlines = *(int *)mesg;
359 *(int *)mesg = save;
360 half = (limit != qlines); 358 half = (limit != qlines);
361 359
362 if (half) 360 if (half)
@@ -365,7 +363,7 @@ static int send_message_queue_full(void *cb,
365 mqh = gru_mesq_head(2, qlines / 2 + 1); 363 mqh = gru_mesq_head(2, qlines / 2 + 1);
366 364
367 /* Try to get lock for switching head pointer */ 365 /* Try to get lock for switching head pointer */
368 gru_gamir(cb, EOP_IR_CLR, HSTATUS(mq, half), XTYPE_DW, IMA); 366 gru_gamir(cb, EOP_IR_CLR, HSTATUS(mqd->mq_gpa, half), XTYPE_DW, IMA);
369 if (gru_wait(cb) != CBS_IDLE) 367 if (gru_wait(cb) != CBS_IDLE)
370 goto cberr; 368 goto cberr;
371 if (!gru_get_amo_value(cb)) { 369 if (!gru_get_amo_value(cb)) {
@@ -375,8 +373,8 @@ static int send_message_queue_full(void *cb,
375 373
376 /* Got the lock. Send optional NOP if queue not full. */ 374
377 if (head != limit) { 375 if (head != limit) {
378 if (send_noop_message(cb, mq, mesg)) { 376 if (send_noop_message(cb, mqd, mesg)) {
379 gru_gamir(cb, EOP_IR_INC, HSTATUS(mq, half), 377 gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half),
380 XTYPE_DW, IMA); 378 XTYPE_DW, IMA);
381 if (gru_wait(cb) != CBS_IDLE) 379 if (gru_wait(cb) != CBS_IDLE)
382 goto cberr; 380 goto cberr;
@@ -387,14 +385,16 @@ static int send_message_queue_full(void *cb,
387 } 385 }
388 386
389 /* Then flip queuehead to other half of queue. */ 387 /* Then flip queuehead to other half of queue. */
390 gru_gamer(cb, EOP_ERR_CSWAP, mq, XTYPE_DW, mqh.val, avalue, IMA); 388 gru_gamer(cb, EOP_ERR_CSWAP, mqd->mq_gpa, XTYPE_DW, mqh.val, avalue,
389 IMA);
391 if (gru_wait(cb) != CBS_IDLE) 390 if (gru_wait(cb) != CBS_IDLE)
392 goto cberr; 391 goto cberr;
393 392
394 /* If swapping the queue head was not successful, clear the hstatus lock */ 393
395 if (gru_get_amo_value(cb) != avalue) { 394 if (gru_get_amo_value(cb) != avalue) {
396 STAT(mesq_qf_switch_head_failed); 395 STAT(mesq_qf_switch_head_failed);
397 gru_gamir(cb, EOP_IR_INC, HSTATUS(mq, half), XTYPE_DW, IMA); 396 gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half), XTYPE_DW,
397 IMA);
398 if (gru_wait(cb) != CBS_IDLE) 398 if (gru_wait(cb) != CBS_IDLE)
399 goto cberr; 399 goto cberr;
400 } 400 }
@@ -404,15 +404,25 @@ cberr:
404 return MQE_UNEXPECTED_CB_ERR; 404 return MQE_UNEXPECTED_CB_ERR;
405} 405}
406 406
407/*
408 * Send a cross-partition interrupt to the SSI that contains the target
409 * message queue. Normally, the interrupt is automatically delivered by hardware
410 * but some error conditions require explicit delivery.
411 */
412static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd)
413{
414 if (mqd->interrupt_vector)
415 uv_hub_send_ipi(mqd->interrupt_pnode, mqd->interrupt_apicid,
416 mqd->interrupt_vector);
417}
418
407 419
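On the receiving side, the interrupt sent above (or a poll loop, when no vector was registered) typically drains the queue with the descriptor-based accessors; a minimal sketch, assuming mqd describes the local queue:

	/* Illustrative only; not part of this patch. */
	static void example_drain_queue(struct gru_message_queue_desc *mqd)
	{
		void *msg;

		while ((msg = gru_get_next_message(mqd)) != NULL) {
			/* consume the payload, then free slots in order */
			gru_free_message(mqd, msg);
		}
	}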
408/* 420/*
409 * Handle a gru_mesq failure. Some of these failures are software recoverable 421 * Handle a gru_mesq failure. Some of these failures are software recoverable
410 * or retryable. 422 * or retryable.
411 */ 423 */
412static int send_message_failure(void *cb, 424static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
413 unsigned long mq, 425 void *mesg, int lines)
414 void *mesg,
415 int lines)
416{ 426{
417 int substatus, ret = 0; 427 int substatus, ret = 0;
418 unsigned long m; 428 unsigned long m;
@@ -429,7 +439,7 @@ static int send_message_failure(void *cb,
429 break; 439 break;
430 case CBSS_QLIMIT_REACHED: 440 case CBSS_QLIMIT_REACHED:
431 STAT(mesq_send_qlimit_reached); 441 STAT(mesq_send_qlimit_reached);
432 ret = send_message_queue_full(cb, mq, mesg, lines); 442 ret = send_message_queue_full(cb, mqd, mesg, lines);
433 break; 443 break;
434 case CBSS_AMO_NACKED: 444 case CBSS_AMO_NACKED:
435 STAT(mesq_send_amo_nacked); 445 STAT(mesq_send_amo_nacked);
@@ -437,12 +447,14 @@ static int send_message_failure(void *cb,
437 break; 447 break;
438 case CBSS_PUT_NACKED: 448 case CBSS_PUT_NACKED:
439 STAT(mesq_send_put_nacked); 449 STAT(mesq_send_put_nacked);
440 m =mq + (gru_get_amo_value_head(cb) << 6); 450 m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
441 gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA); 451 gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
442 if (gru_wait(cb) == CBS_IDLE) 452 if (gru_wait(cb) == CBS_IDLE) {
443 ret = MQE_OK; 453 ret = MQE_OK;
444 else 454 send_message_queue_interrupt(mqd);
455 } else {
445 ret = MQE_UNEXPECTED_CB_ERR; 456 ret = MQE_UNEXPECTED_CB_ERR;
457 }
446 break; 458 break;
447 default: 459 default:
448 BUG(); 460 BUG();
@@ -452,12 +464,12 @@ static int send_message_failure(void *cb,
452 464
453/* 465/*
454 * Send a message to a message queue 466 * Send a message to a message queue
455 * cb GRU control block to use to send message 467 * mqd message queue descriptor
456 * mq message queue
457 * mesg message. Must be vaddr within a GSEG 468
458 * bytes message size (<= 2 CL) 469 * bytes message size (<= 2 CL)
459 */ 470 */
460int gru_send_message_gpa(unsigned long mq, void *mesg, unsigned int bytes) 471int gru_send_message_gpa(struct gru_message_queue_desc *mqd, void *mesg,
472 unsigned int bytes)
461{ 473{
462 struct message_header *mhdr; 474 struct message_header *mhdr;
463 void *cb; 475 void *cb;
@@ -481,10 +493,10 @@ int gru_send_message_gpa(unsigned long mq, void *mesg, unsigned int bytes)
481 493
482 do { 494 do {
483 ret = MQE_OK; 495 ret = MQE_OK;
484 gru_mesq(cb, mq, gru_get_tri(mhdr), clines, IMA); 496 gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), clines, IMA);
485 istatus = gru_wait(cb); 497 istatus = gru_wait(cb);
486 if (istatus != CBS_IDLE) 498 if (istatus != CBS_IDLE)
487 ret = send_message_failure(cb, mq, dsr, clines); 499 ret = send_message_failure(cb, mqd, dsr, clines);
488 } while (ret == MQIE_AGAIN); 500 } while (ret == MQIE_AGAIN);
489 gru_free_cpu_resources(cb, dsr); 501 gru_free_cpu_resources(cb, dsr);
490 502
@@ -497,9 +509,9 @@ EXPORT_SYMBOL_GPL(gru_send_message_gpa);
497/* 509/*
498 * Advance the receive pointer for the queue to the next message. 510 * Advance the receive pointer for the queue to the next message.
499 */ 511 */
500void gru_free_message(void *rmq, void *mesg) 512void gru_free_message(struct gru_message_queue_desc *mqd, void *mesg)
501{ 513{
502 struct message_queue *mq = rmq; 514 struct message_queue *mq = mqd->mq;
503 struct message_header *mhdr = mq->next; 515 struct message_header *mhdr = mq->next;
504 void *next, *pnext; 516 void *next, *pnext;
505 int half = -1; 517 int half = -1;
@@ -529,16 +541,16 @@ EXPORT_SYMBOL_GPL(gru_free_message);
529 * present. User must call next_message() to move to next message. 541 * present. User must call next_message() to move to next message.
530 * rmq message queue 542 * rmq message queue
531 */ 543 */
532void *gru_get_next_message(void *rmq) 544void *gru_get_next_message(struct gru_message_queue_desc *mqd)
533{ 545{
534 struct message_queue *mq = rmq; 546 struct message_queue *mq = mqd->mq;
535 struct message_header *mhdr = mq->next; 547 struct message_header *mhdr = mq->next;
536 int present = mhdr->present; 548 int present = mhdr->present;
537 549
538 /* skip NOOP messages */ 550 /* skip NOOP messages */
539 STAT(mesq_receive); 551 STAT(mesq_receive);
540 while (present == MQS_NOOP) { 552 while (present == MQS_NOOP) {
541 gru_free_message(rmq, mhdr); 553 gru_free_message(mqd, mhdr);
542 mhdr = mq->next; 554 mhdr = mq->next;
543 present = mhdr->present; 555 present = mhdr->present;
544 } 556 }
@@ -576,7 +588,7 @@ int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
576 if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr)) 588 if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
577 return MQE_BUG_NO_RESOURCES; 589 return MQE_BUG_NO_RESOURCES;
578 gru_bcopy(cb, src_gpa, dest_gpa, gru_get_tri(dsr), 590 gru_bcopy(cb, src_gpa, dest_gpa, gru_get_tri(dsr),
579 XTYPE_B, bytes, GRU_NUM_KERNEL_DSR_BYTES, IMA); 591 XTYPE_B, bytes, GRU_NUM_KERNEL_DSR_CL, IMA);
580 ret = gru_wait(cb); 592 ret = gru_wait(cb);
581 gru_free_cpu_resources(cb, dsr); 593 gru_free_cpu_resources(cb, dsr);
582 return ret; 594 return ret;
@@ -611,7 +623,7 @@ static int quicktest(struct gru_state *gru)
611 623
612 if (word0 != word1 || word0 != MAGIC) { 624 if (word0 != word1 || word0 != MAGIC) {
613 printk 625 printk
614 ("GRU quicktest err: gru %d, found 0x%lx, expected 0x%lx\n", 626 ("GRU quicktest err: gid %d, found 0x%lx, expected 0x%lx\n",
615 gru->gs_gid, word1, MAGIC); 627 gru->gs_gid, word1, MAGIC);
616 BUG(); /* ZZZ should not be fatal */ 628 BUG(); /* ZZZ should not be fatal */
617 } 629 }
@@ -660,15 +672,15 @@ int gru_kservices_init(struct gru_state *gru)
660 cch->tlb_int_enable = 0; 672 cch->tlb_int_enable = 0;
661 cch->tfm_done_bit_enable = 0; 673 cch->tfm_done_bit_enable = 0;
662 cch->unmap_enable = 1; 674 cch->unmap_enable = 1;
663 err = cch_allocate(cch, 0, cbr_map, dsr_map); 675 err = cch_allocate(cch, 0, 0, cbr_map, dsr_map);
664 if (err) { 676 if (err) {
665 gru_dbg(grudev, 677 gru_dbg(grudev,
666 "Unable to allocate kernel CCH: gru %d, err %d\n", 678 "Unable to allocate kernel CCH: gid %d, err %d\n",
667 gru->gs_gid, err); 679 gru->gs_gid, err);
668 BUG(); 680 BUG();
669 } 681 }
670 if (cch_start(cch)) { 682 if (cch_start(cch)) {
671 gru_dbg(grudev, "Unable to start kernel CCH: gru %d, err %d\n", 683 gru_dbg(grudev, "Unable to start kernel CCH: gid %d, err %d\n",
672 gru->gs_gid, err); 684 gru->gs_gid, err);
673 BUG(); 685 BUG();
674 } 686 }
@@ -678,3 +690,22 @@ int gru_kservices_init(struct gru_state *gru)
678 quicktest(gru); 690 quicktest(gru);
679 return 0; 691 return 0;
680} 692}
693
694void gru_kservices_exit(struct gru_state *gru)
695{
696 struct gru_context_configuration_handle *cch;
697 struct gru_blade_state *bs;
698
699 bs = gru->gs_blade;
700 if (gru != &bs->bs_grus[1])
701 return;
702
703 cch = get_cch(gru->gs_gru_base_vaddr, KERNEL_CTXNUM);
704 lock_cch_handle(cch);
705 if (cch_interrupt_sync(cch))
706 BUG();
707 if (cch_deallocate(cch))
708 BUG();
709 unlock_cch_handle(cch);
710}
711
diff --git a/drivers/misc/sgi-gru/grukservices.h b/drivers/misc/sgi-gru/grukservices.h
index eb17e0a3ac61..747ed315d56f 100644
--- a/drivers/misc/sgi-gru/grukservices.h
+++ b/drivers/misc/sgi-gru/grukservices.h
@@ -41,6 +41,15 @@
41 * - gru_create_message_queue() needs interrupt vector info 41 * - gru_create_message_queue() needs interrupt vector info
42 */ 42 */
43 43
44struct gru_message_queue_desc {
45 void *mq; /* message queue vaddress */
46 unsigned long mq_gpa; /* global address of mq */
47 int qlines; /* queue size in CL */
48 int interrupt_vector; /* interrupt vector */
49 int interrupt_pnode; /* pnode for interrupt */
50 int interrupt_apicid; /* lapicid for interrupt */
51};
52
44/* 53/*
45 * Initialize a user allocated chunk of memory to be used as 54 * Initialize a user allocated chunk of memory to be used as
46 * a message queue. The caller must ensure that the queue is 55 * a message queue. The caller must ensure that the queue is
@@ -51,14 +60,19 @@
51 * to manage the queue. 60 * to manage the queue.
52 * 61 *
53 * Input: 62 * Input:
54 * p pointer to user allocated memory. 63 * mqd pointer to message queue descriptor
64 * p pointer to user allocated mesq memory.
55 * bytes size of message queue in bytes 65 * bytes size of message queue in bytes
66 * vector interrupt vector (zero if no interrupts)
67 * nasid nasid of blade where interrupt is delivered
68 * apicid apicid of cpu for interrupt
56 * 69 *
57 * Errors: 70 * Errors:
58 * 0 OK 71 * 0 OK
59 * >0 error 72 * >0 error
60 */ 73 */
61extern int gru_create_message_queue(void *p, unsigned int bytes); 74extern int gru_create_message_queue(struct gru_message_queue_desc *mqd,
75 void *p, unsigned int bytes, int nasid, int vector, int apicid);
62 76
63/* 77/*
64 * Send a message to a message queue. 78 * Send a message to a message queue.
@@ -68,7 +82,7 @@ extern int gru_create_message_queue(void *p, unsigned int bytes);
68 * 82 *
69 * 83 *
70 * Input: 84 * Input:
71 * xmq message queue - must be a UV global physical address 85 * mqd pointer to message queue descriptor
72 * mesg pointer to message. Must be 64-bit aligned 86 * mesg pointer to message. Must be 64-bit aligned
73 * bytes size of message in bytes 87 * bytes size of message in bytes
74 * 88 *
@@ -77,8 +91,8 @@ extern int gru_create_message_queue(void *p, unsigned int bytes);
77 * >0 Send failure - see error codes below 91 * >0 Send failure - see error codes below
78 * 92 *
79 */ 93 */
80extern int gru_send_message_gpa(unsigned long mq_gpa, void *mesg, 94extern int gru_send_message_gpa(struct gru_message_queue_desc *mqd,
81 unsigned int bytes); 95 void *mesg, unsigned int bytes);
82 96
83/* Status values for gru_send_message() */ 97/* Status values for gru_send_message() */
84#define MQE_OK 0 /* message sent successfully */ 98#define MQE_OK 0 /* message sent successfully */
@@ -94,10 +108,11 @@ extern int gru_send_message_gpa(unsigned long mq_gpa, void *mesg,
94 * API extensions may allow for out-of-order freeing. 108 * API extensions may allow for out-of-order freeing.
95 * 109 *
96 * Input 110 * Input
97 * mq message queue 111 * mqd pointer to message queue descriptor
98 * mesq message being freed 112 * mesq message being freed
99 */ 113 */
100extern void gru_free_message(void *mq, void *mesq); 114extern void gru_free_message(struct gru_message_queue_desc *mqd,
115 void *mesq);
101 116
102/* 117/*
103 * Get next message from message queue. Returns pointer to 118 * Get next message from message queue. Returns pointer to
@@ -106,13 +121,13 @@ extern void gru_free_message(void *mq, void *mesq);
106 * in order to move the queue pointers to next message. 121 * in order to move the queue pointers to next message.
107 * 122 *
108 * Input 123 * Input
109 * mq message queue 124 * mqd pointer to message queue descriptor
110 * 125 *
111 * Output: 126 * Output:
112 * p pointer to message 127 * p pointer to message
113 * NULL no message available 128 * NULL no message available
114 */ 129 */
115extern void *gru_get_next_message(void *mq); 130extern void *gru_get_next_message(struct gru_message_queue_desc *mqd);
116 131
117 132
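Putting the pieces together, a hedged send-side sketch against this interface; mesg is assumed to already satisfy the GSEG and alignment rules documented above, and the mapping of MQE_xxx codes to errnos is the caller's choice:

	/* Illustrative only; not part of this patch. */
	static int example_send(struct gru_message_queue_desc *mqd,
				void *mesg, unsigned int bytes)
	{
		int ret = gru_send_message_gpa(mqd, mesg, bytes);

		if (ret == MQE_OK)
			return 0;
		if (ret == MQE_QUEUE_FULL)	/* receiver not keeping up */
			return -EAGAIN;
		return -EIO;			/* any other MQE_xxx code */
	}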
118/* 133/*
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index 3d2fc216bae5..ec3f7a17d221 100644
--- a/drivers/misc/sgi-gru/grumain.c
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -76,10 +76,9 @@ int gru_cpu_fault_map_id(void)
76/* Hit the asid limit. Start over */ 76/* Hit the asid limit. Start over */
77static int gru_wrap_asid(struct gru_state *gru) 77static int gru_wrap_asid(struct gru_state *gru)
78{ 78{
79 gru_dbg(grudev, "gru %p\n", gru); 79 gru_dbg(grudev, "gid %d\n", gru->gs_gid);
80 STAT(asid_wrap); 80 STAT(asid_wrap);
81 gru->gs_asid_gen++; 81 gru->gs_asid_gen++;
82 gru_flush_all_tlb(gru);
83 return MIN_ASID; 82 return MIN_ASID;
84} 83}
85 84
@@ -88,19 +87,21 @@ static int gru_reset_asid_limit(struct gru_state *gru, int asid)
88{ 87{
89 int i, gid, inuse_asid, limit; 88 int i, gid, inuse_asid, limit;
90 89
91 gru_dbg(grudev, "gru %p, asid 0x%x\n", gru, asid); 90 gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
92 STAT(asid_next); 91 STAT(asid_next);
93 limit = MAX_ASID; 92 limit = MAX_ASID;
94 if (asid >= limit) 93 if (asid >= limit)
95 asid = gru_wrap_asid(gru); 94 asid = gru_wrap_asid(gru);
95 gru_flush_all_tlb(gru);
96 gid = gru->gs_gid; 96 gid = gru->gs_gid;
97again: 97again:
98 for (i = 0; i < GRU_NUM_CCH; i++) { 98 for (i = 0; i < GRU_NUM_CCH; i++) {
99 if (!gru->gs_gts[i]) 99 if (!gru->gs_gts[i])
100 continue; 100 continue;
101 inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid; 101 inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid;
102 gru_dbg(grudev, "gru %p, inuse_asid 0x%x, cxtnum %d, gts %p\n", 102 gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n",
103 gru, inuse_asid, i, gru->gs_gts[i]); 103 gru->gs_gid, gru->gs_gts[i], gru->gs_gts[i]->ts_gms,
104 inuse_asid, i);
104 if (inuse_asid == asid) { 105 if (inuse_asid == asid) {
105 asid += ASID_INC; 106 asid += ASID_INC;
106 if (asid >= limit) { 107 if (asid >= limit) {
@@ -120,8 +121,8 @@ again:
120 } 121 }
121 gru->gs_asid_limit = limit; 122 gru->gs_asid_limit = limit;
122 gru->gs_asid = asid; 123 gru->gs_asid = asid;
123 gru_dbg(grudev, "gru %p, new asid 0x%x, new_limit 0x%x\n", gru, asid, 124 gru_dbg(grudev, "gid %d, new asid 0x%x, new_limit 0x%x\n", gru->gs_gid,
124 limit); 125 asid, limit);
125 return asid; 126 return asid;
126} 127}
127 128
@@ -130,14 +131,12 @@ static int gru_assign_asid(struct gru_state *gru)
130{ 131{
131 int asid; 132 int asid;
132 133
133 spin_lock(&gru->gs_asid_lock);
134 gru->gs_asid += ASID_INC; 134 gru->gs_asid += ASID_INC;
135 asid = gru->gs_asid; 135 asid = gru->gs_asid;
136 if (asid >= gru->gs_asid_limit) 136 if (asid >= gru->gs_asid_limit)
137 asid = gru_reset_asid_limit(gru, asid); 137 asid = gru_reset_asid_limit(gru, asid);
138 spin_unlock(&gru->gs_asid_lock);
139 138
140 gru_dbg(grudev, "gru %p, asid 0x%x\n", gru, asid); 139 gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
141 return asid; 140 return asid;
142} 141}
143 142
@@ -215,17 +214,20 @@ static int check_gru_resources(struct gru_state *gru, int cbr_au_count,
215 * TLB management requires tracking all GRU chiplets that have loaded a GSEG 214
216 * context. 215 * context.
217 */ 216 */
218static int gru_load_mm_tracker(struct gru_state *gru, struct gru_mm_struct *gms, 217static int gru_load_mm_tracker(struct gru_state *gru,
219 int ctxnum) 218 struct gru_thread_state *gts)
220{ 219{
220 struct gru_mm_struct *gms = gts->ts_gms;
221 struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid]; 221 struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid];
222 unsigned short ctxbitmap = (1 << ctxnum); 222 unsigned short ctxbitmap = (1 << gts->ts_ctxnum);
223 int asid; 223 int asid;
224 224
225 spin_lock(&gms->ms_asid_lock); 225 spin_lock(&gms->ms_asid_lock);
226 asid = asids->mt_asid; 226 asid = asids->mt_asid;
227 227
228 if (asid == 0 || asids->mt_asid_gen != gru->gs_asid_gen) { 228 spin_lock(&gru->gs_asid_lock);
229 if (asid == 0 || (asids->mt_ctxbitmap == 0 && asids->mt_asid_gen !=
230 gru->gs_asid_gen)) {
229 asid = gru_assign_asid(gru); 231 asid = gru_assign_asid(gru);
230 asids->mt_asid = asid; 232 asids->mt_asid = asid;
231 asids->mt_asid_gen = gru->gs_asid_gen; 233 asids->mt_asid_gen = gru->gs_asid_gen;
@@ -233,6 +235,7 @@ static int gru_load_mm_tracker(struct gru_state *gru, struct gru_mm_struct *gms,
233 } else { 235 } else {
234 STAT(asid_reuse); 236 STAT(asid_reuse);
235 } 237 }
238 spin_unlock(&gru->gs_asid_lock);
236 239
237 BUG_ON(asids->mt_ctxbitmap & ctxbitmap); 240 BUG_ON(asids->mt_ctxbitmap & ctxbitmap);
238 asids->mt_ctxbitmap |= ctxbitmap; 241 asids->mt_ctxbitmap |= ctxbitmap;
@@ -241,24 +244,28 @@ static int gru_load_mm_tracker(struct gru_state *gru, struct gru_mm_struct *gms,
241 spin_unlock(&gms->ms_asid_lock); 244 spin_unlock(&gms->ms_asid_lock);
242 245
243 gru_dbg(grudev, 246 gru_dbg(grudev,
244 "gru %x, gms %p, ctxnum 0x%d, asid 0x%x, asidmap 0x%lx\n", 247 "gid %d, gts %p, gms %p, ctxnum %d, asid 0x%x, asidmap 0x%lx\n",
245 gru->gs_gid, gms, ctxnum, asid, gms->ms_asidmap[0]); 248 gru->gs_gid, gts, gms, gts->ts_ctxnum, asid,
249 gms->ms_asidmap[0]);
246 return asid; 250 return asid;
247} 251}
248 252
249static void gru_unload_mm_tracker(struct gru_state *gru, 253static void gru_unload_mm_tracker(struct gru_state *gru,
250 struct gru_mm_struct *gms, int ctxnum) 254 struct gru_thread_state *gts)
251{ 255{
256 struct gru_mm_struct *gms = gts->ts_gms;
252 struct gru_mm_tracker *asids; 257 struct gru_mm_tracker *asids;
253 unsigned short ctxbitmap; 258 unsigned short ctxbitmap;
254 259
255 asids = &gms->ms_asids[gru->gs_gid]; 260 asids = &gms->ms_asids[gru->gs_gid];
256 ctxbitmap = (1 << ctxnum); 261 ctxbitmap = (1 << gts->ts_ctxnum);
257 spin_lock(&gms->ms_asid_lock); 262 spin_lock(&gms->ms_asid_lock);
263 spin_lock(&gru->gs_asid_lock);
258 BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap); 264 BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
259 asids->mt_ctxbitmap ^= ctxbitmap; 265 asids->mt_ctxbitmap ^= ctxbitmap;
260 gru_dbg(grudev, "gru %x, gms %p, ctxnum 0x%d, asidmap 0x%lx\n", 266 gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n",
261 gru->gs_gid, gms, ctxnum, gms->ms_asidmap[0]); 267 gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
268 spin_unlock(&gru->gs_asid_lock);
262 spin_unlock(&gms->ms_asid_lock); 269 spin_unlock(&gms->ms_asid_lock);
263} 270}
264 271
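Both tracker paths now nest the per-GRU gs_asid_lock inside the per-address-space ms_asid_lock; keeping the acquisition order identical in load and unload is what makes the nesting safe. Schematically:

	spin_lock(&gms->ms_asid_lock);		/* outer: per address space */
	spin_lock(&gru->gs_asid_lock);		/* inner: per GRU chiplet */
	/* ... assign or release the ASID / ctxbitmap ... */
	spin_unlock(&gru->gs_asid_lock);
	spin_unlock(&gms->ms_asid_lock);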
@@ -319,6 +326,7 @@ static struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
319 gts->ts_vma = vma; 326 gts->ts_vma = vma;
320 gts->ts_tlb_int_select = -1; 327 gts->ts_tlb_int_select = -1;
321 gts->ts_gms = gru_register_mmu_notifier(); 328 gts->ts_gms = gru_register_mmu_notifier();
329 gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT);
322 if (!gts->ts_gms) 330 if (!gts->ts_gms)
323 goto err; 331 goto err;
324 332
@@ -399,7 +407,7 @@ static void gru_free_gru_context(struct gru_thread_state *gts)
399 struct gru_state *gru; 407 struct gru_state *gru;
400 408
401 gru = gts->ts_gru; 409 gru = gts->ts_gru;
402 gru_dbg(grudev, "gts %p, gru %p\n", gts, gru); 410 gru_dbg(grudev, "gts %p, gid %d\n", gts, gru->gs_gid);
403 411
404 spin_lock(&gru->gs_lock); 412 spin_lock(&gru->gs_lock);
405 gru->gs_gts[gts->ts_ctxnum] = NULL; 413 gru->gs_gts[gts->ts_ctxnum] = NULL;
@@ -408,6 +416,7 @@ static void gru_free_gru_context(struct gru_thread_state *gts)
408 __clear_bit(gts->ts_ctxnum, &gru->gs_context_map); 416 __clear_bit(gts->ts_ctxnum, &gru->gs_context_map);
409 gts->ts_ctxnum = NULLCTX; 417 gts->ts_ctxnum = NULLCTX;
410 gts->ts_gru = NULL; 418 gts->ts_gru = NULL;
419 gts->ts_blade = -1;
411 spin_unlock(&gru->gs_lock); 420 spin_unlock(&gru->gs_lock);
412 421
413 gts_drop(gts); 422 gts_drop(gts);
@@ -432,8 +441,8 @@ static inline long gru_copy_handle(void *d, void *s)
432 return GRU_HANDLE_BYTES; 441 return GRU_HANDLE_BYTES;
433} 442}
434 443
435static void gru_prefetch_context(void *gseg, void *cb, void *cbe, unsigned long cbrmap, 444static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
436 unsigned long length) 445 unsigned long cbrmap, unsigned long length)
437{ 446{
438 int i, scr; 447 int i, scr;
439 448
@@ -500,12 +509,12 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
500 zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE); 509 zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
501 cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); 510 cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
502 511
512 gru_dbg(grudev, "gts %p\n", gts);
503 lock_cch_handle(cch); 513 lock_cch_handle(cch);
504 if (cch_interrupt_sync(cch)) 514 if (cch_interrupt_sync(cch))
505 BUG(); 515 BUG();
506 gru_dbg(grudev, "gts %p\n", gts);
507 516
508 gru_unload_mm_tracker(gru, gts->ts_gms, gts->ts_ctxnum); 517 gru_unload_mm_tracker(gru, gts);
509 if (savestate) 518 if (savestate)
510 gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, 519 gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
511 ctxnum, gts->ts_cbr_map, 520 ctxnum, gts->ts_cbr_map,
@@ -534,7 +543,7 @@ static void gru_load_context(struct gru_thread_state *gts)
534 cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); 543 cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
535 544
536 lock_cch_handle(cch); 545 lock_cch_handle(cch);
537 asid = gru_load_mm_tracker(gru, gts->ts_gms, gts->ts_ctxnum); 546 asid = gru_load_mm_tracker(gru, gts);
538 cch->tfm_fault_bit_enable = 547 cch->tfm_fault_bit_enable =
539 (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL 548 (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
540 || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR); 549 || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
@@ -544,7 +553,8 @@ static void gru_load_context(struct gru_thread_state *gts)
544 cch->tlb_int_select = gts->ts_tlb_int_select; 553 cch->tlb_int_select = gts->ts_tlb_int_select;
545 } 554 }
546 cch->tfm_done_bit_enable = 0; 555 cch->tfm_done_bit_enable = 0;
547 err = cch_allocate(cch, asid, gts->ts_cbr_map, gts->ts_dsr_map); 556 err = cch_allocate(cch, asid, gts->ts_sizeavail, gts->ts_cbr_map,
557 gts->ts_dsr_map);
548 if (err) { 558 if (err) {
549 gru_dbg(grudev, 559 gru_dbg(grudev,
550 "err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n", 560 "err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
@@ -565,11 +575,12 @@ static void gru_load_context(struct gru_thread_state *gts)
565/* 575/*
566 * Update fields in an active CCH: 576 * Update fields in an active CCH:
567 * - retarget interrupts on local blade 577 * - retarget interrupts on local blade
578 * - update sizeavail mask
568 * - force a delayed context unload by clearing the CCH asids. This 579 * - force a delayed context unload by clearing the CCH asids. This
569 * forces TLB misses for new GRU instructions. The context is unloaded 580 * forces TLB misses for new GRU instructions. The context is unloaded
570 * when the next TLB miss occurs. 581 * when the next TLB miss occurs.
571 */ 582 */
572static int gru_update_cch(struct gru_thread_state *gts, int int_select) 583int gru_update_cch(struct gru_thread_state *gts, int force_unload)
573{ 584{
574 struct gru_context_configuration_handle *cch; 585 struct gru_context_configuration_handle *cch;
575 struct gru_state *gru = gts->ts_gru; 586 struct gru_state *gru = gts->ts_gru;
@@ -583,9 +594,11 @@ static int gru_update_cch(struct gru_thread_state *gts, int int_select)
583 goto exit; 594 goto exit;
584 if (cch_interrupt(cch)) 595 if (cch_interrupt(cch))
585 BUG(); 596 BUG();
586 if (int_select >= 0) { 597 if (!force_unload) {
587 gts->ts_tlb_int_select = int_select; 598 for (i = 0; i < 8; i++)
588 cch->tlb_int_select = int_select; 599 cch->sizeavail[i] = gts->ts_sizeavail;
600 gts->ts_tlb_int_select = gru_cpu_fault_map_id();
601 cch->tlb_int_select = gru_cpu_fault_map_id();
589 } else { 602 } else {
590 for (i = 0; i < 8; i++) 603 for (i = 0; i < 8; i++)
591 cch->asid[i] = 0; 604 cch->asid[i] = 0;
@@ -617,7 +630,7 @@ static int gru_retarget_intr(struct gru_thread_state *gts)
617 630
618 gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select, 631 gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
619 gru_cpu_fault_map_id()); 632 gru_cpu_fault_map_id());
620 return gru_update_cch(gts, gru_cpu_fault_map_id()); 633 return gru_update_cch(gts, 0);
621} 634}
622 635
623 636
@@ -688,7 +701,7 @@ static void gru_steal_context(struct gru_thread_state *gts)
688 STAT(steal_context_failed); 701 STAT(steal_context_failed);
689 } 702 }
690 gru_dbg(grudev, 703 gru_dbg(grudev,
691 "stole gru %x, ctxnum %d from gts %p. Need cb %d, ds %d;" 704 "stole gid %d, ctxnum %d from gts %p. Need cb %d, ds %d;"
692 " avail cb %ld, ds %ld\n", 705 " avail cb %ld, ds %ld\n",
693 gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map), 706 gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map),
694 hweight64(gru->gs_dsr_map)); 707 hweight64(gru->gs_dsr_map));
@@ -727,6 +740,7 @@ again:
727 } 740 }
728 reserve_gru_resources(gru, gts); 741 reserve_gru_resources(gru, gts);
729 gts->ts_gru = gru; 742 gts->ts_gru = gru;
743 gts->ts_blade = gru->gs_blade_id;
730 gts->ts_ctxnum = 744 gts->ts_ctxnum =
731 find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH); 745 find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
732 BUG_ON(gts->ts_ctxnum == GRU_NUM_CCH); 746 BUG_ON(gts->ts_ctxnum == GRU_NUM_CCH);
@@ -737,7 +751,7 @@ again:
737 751
738 STAT(assign_context); 752 STAT(assign_context);
739 gru_dbg(grudev, 753 gru_dbg(grudev,
740 "gseg %p, gts %p, gru %x, ctx %d, cbr %d, dsr %d\n", 754 "gseg %p, gts %p, gid %d, ctx %d, cbr %d, dsr %d\n",
741 gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts, 755 gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts,
742 gts->ts_gru->gs_gid, gts->ts_ctxnum, 756 gts->ts_gru->gs_gid, gts->ts_ctxnum,
743 gts->ts_cbr_au_count, gts->ts_dsr_au_count); 757 gts->ts_cbr_au_count, gts->ts_dsr_au_count);
@@ -773,8 +787,8 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
773 return VM_FAULT_SIGBUS; 787 return VM_FAULT_SIGBUS;
774 788
775again: 789again:
776 preempt_disable();
777 mutex_lock(&gts->ts_ctxlock); 790 mutex_lock(&gts->ts_ctxlock);
791 preempt_disable();
778 if (gts->ts_gru) { 792 if (gts->ts_gru) {
779 if (gts->ts_gru->gs_blade_id != uv_numa_blade_id()) { 793 if (gts->ts_gru->gs_blade_id != uv_numa_blade_id()) {
780 STAT(migrated_nopfn_unload); 794 STAT(migrated_nopfn_unload);
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
index 73b0ca061bb5..ee74821b171c 100644
--- a/drivers/misc/sgi-gru/gruprocfs.c
+++ b/drivers/misc/sgi-gru/gruprocfs.c
@@ -62,7 +62,9 @@ static int statistics_show(struct seq_file *s, void *p)
62 printstat(s, asid_wrap); 62 printstat(s, asid_wrap);
63 printstat(s, asid_reuse); 63 printstat(s, asid_reuse);
64 printstat(s, intr); 64 printstat(s, intr);
65 printstat(s, intr_mm_lock_failed);
65 printstat(s, call_os); 66 printstat(s, call_os);
67 printstat(s, call_os_offnode_reference);
66 printstat(s, call_os_check_for_bug); 68 printstat(s, call_os_check_for_bug);
67 printstat(s, call_os_wait_queue); 69 printstat(s, call_os_wait_queue);
68 printstat(s, user_flush_tlb); 70 printstat(s, user_flush_tlb);
@@ -120,6 +122,30 @@ static ssize_t statistics_write(struct file *file, const char __user *userbuf,
120 return count; 122 return count;
121} 123}
122 124
125static int mcs_statistics_show(struct seq_file *s, void *p)
126{
127 int op;
128 unsigned long total, count, max;
129 static char *id[] = {"cch_allocate", "cch_start", "cch_interrupt",
130 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
131
132 for (op = 0; op < mcsop_last; op++) {
133 count = atomic_long_read(&mcs_op_statistics[op].count);
134 total = atomic_long_read(&mcs_op_statistics[op].total);
135 max = mcs_op_statistics[op].max;
136 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
137 count ? total / count : 0, max);
138 }
139 return 0;
140}
141
142static ssize_t mcs_statistics_write(struct file *file,
143 const char __user *userbuf, size_t count, loff_t *data)
144{
145 memset(mcs_op_statistics, 0, sizeof(mcs_op_statistics));
146 return count;
147}
148
123static int options_show(struct seq_file *s, void *p) 149static int options_show(struct seq_file *s, void *p)
124{ 150{
125 seq_printf(s, "0x%lx\n", gru_options); 151 seq_printf(s, "0x%lx\n", gru_options);
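For reference, each line of the new mcs_statistics file is rendered by the seq_printf() above as operation name, count, average, and max; illustrative output with invented values:

	cch_allocate                  42         150         970
	cch_interrupt_sync             3        2100        5200
	tgh_invalidate              1018          88         410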
@@ -135,6 +161,7 @@ static ssize_t options_write(struct file *file, const char __user *userbuf,
135 if (copy_from_user 161 if (copy_from_user
136 (buf, userbuf, count < sizeof(buf) ? count : sizeof(buf))) 162 (buf, userbuf, count < sizeof(buf) ? count : sizeof(buf)))
137 return -EFAULT; 163 return -EFAULT;
164 buf[count - 1] = '\0';
138 if (!strict_strtoul(buf, 10, &val)) 165 if (!strict_strtoul(buf, 10, &val))
139 gru_options = val; 166 gru_options = val;
140 167
@@ -199,7 +226,7 @@ static void seq_stop(struct seq_file *file, void *data)
199 226
200static void *seq_start(struct seq_file *file, loff_t *gid) 227static void *seq_start(struct seq_file *file, loff_t *gid)
201{ 228{
202 if (*gid < GRU_MAX_GRUS) 229 if (*gid < gru_max_gids)
203 return gid; 230 return gid;
204 return NULL; 231 return NULL;
205} 232}
@@ -207,7 +234,7 @@ static void *seq_start(struct seq_file *file, loff_t *gid)
207static void *seq_next(struct seq_file *file, void *data, loff_t *gid) 234static void *seq_next(struct seq_file *file, void *data, loff_t *gid)
208{ 235{
209 (*gid)++; 236 (*gid)++;
210 if (*gid < GRU_MAX_GRUS) 237 if (*gid < gru_max_gids)
211 return gid; 238 return gid;
212 return NULL; 239 return NULL;
213} 240}
@@ -231,6 +258,11 @@ static int statistics_open(struct inode *inode, struct file *file)
231 return single_open(file, statistics_show, NULL); 258 return single_open(file, statistics_show, NULL);
232} 259}
233 260
261static int mcs_statistics_open(struct inode *inode, struct file *file)
262{
263 return single_open(file, mcs_statistics_show, NULL);
264}
265
234static int options_open(struct inode *inode, struct file *file) 266static int options_open(struct inode *inode, struct file *file)
235{ 267{
236 return single_open(file, options_show, NULL); 268 return single_open(file, options_show, NULL);
@@ -255,6 +287,14 @@ static const struct file_operations statistics_fops = {
255 .release = single_release, 287 .release = single_release,
256}; 288};
257 289
290static const struct file_operations mcs_statistics_fops = {
291 .open = mcs_statistics_open,
292 .read = seq_read,
293 .write = mcs_statistics_write,
294 .llseek = seq_lseek,
295 .release = single_release,
296};
297
258static const struct file_operations options_fops = { 298static const struct file_operations options_fops = {
259 .open = options_open, 299 .open = options_open,
260 .read = seq_read, 300 .read = seq_read,
@@ -283,6 +323,7 @@ static struct proc_entry {
283 struct proc_dir_entry *entry; 323 struct proc_dir_entry *entry;
284} proc_files[] = { 324} proc_files[] = {
285 {"statistics", 0644, &statistics_fops}, 325 {"statistics", 0644, &statistics_fops},
326 {"mcs_statistics", 0644, &mcs_statistics_fops},
286 {"debug_options", 0644, &options_fops}, 327 {"debug_options", 0644, &options_fops},
287 {"cch_status", 0444, &cch_fops}, 328 {"cch_status", 0444, &cch_fops},
288 {"gru_status", 0444, &gru_fops}, 329 {"gru_status", 0444, &gru_fops},
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
index a78f70deeb59..bf1eeb7553ed 100644
--- a/drivers/misc/sgi-gru/grutables.h
+++ b/drivers/misc/sgi-gru/grutables.h
@@ -153,6 +153,7 @@
153extern struct gru_stats_s gru_stats; 153extern struct gru_stats_s gru_stats;
154extern struct gru_blade_state *gru_base[]; 154extern struct gru_blade_state *gru_base[];
155extern unsigned long gru_start_paddr, gru_end_paddr; 155extern unsigned long gru_start_paddr, gru_end_paddr;
156extern unsigned int gru_max_gids;
156 157
157#define GRU_MAX_BLADES MAX_NUMNODES 158#define GRU_MAX_BLADES MAX_NUMNODES
158#define GRU_MAX_GRUS (GRU_MAX_BLADES * GRU_CHIPLETS_PER_BLADE) 159#define GRU_MAX_GRUS (GRU_MAX_BLADES * GRU_CHIPLETS_PER_BLADE)
@@ -184,7 +185,9 @@ struct gru_stats_s {
184 atomic_long_t asid_wrap; 185 atomic_long_t asid_wrap;
185 atomic_long_t asid_reuse; 186 atomic_long_t asid_reuse;
186 atomic_long_t intr; 187 atomic_long_t intr;
188 atomic_long_t intr_mm_lock_failed;
187 atomic_long_t call_os; 189 atomic_long_t call_os;
190 atomic_long_t call_os_offnode_reference;
188 atomic_long_t call_os_check_for_bug; 191 atomic_long_t call_os_check_for_bug;
189 atomic_long_t call_os_wait_queue; 192 atomic_long_t call_os_wait_queue;
190 atomic_long_t user_flush_tlb; 193 atomic_long_t user_flush_tlb;
@@ -237,6 +240,17 @@ struct gru_stats_s {
237 240
238}; 241};
239 242
243enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
244 cchop_deallocate, tghop_invalidate, mcsop_last};
245
246struct mcs_op_statistic {
247 atomic_long_t count;
248 atomic_long_t total;
249 unsigned long max;
250};
251
252extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
253
240#define OPT_DPRINT 1 254#define OPT_DPRINT 1
241#define OPT_STATS 2 255#define OPT_STATS 2
242#define GRU_QUICKLOOK 4 256#define GRU_QUICKLOOK 4
@@ -278,13 +292,12 @@ struct gru_stats_s {
278/* Generate a GRU asid value from a GRU base asid & a virtual address. */ 292/* Generate a GRU asid value from a GRU base asid & a virtual address. */
279#if defined CONFIG_IA64 293#if defined CONFIG_IA64
280#define VADDR_HI_BIT 64 294#define VADDR_HI_BIT 64
281#define GRUREGION(addr) ((addr) >> (VADDR_HI_BIT - 3) & 3)
282#elif defined CONFIG_X86_64 295#elif defined CONFIG_X86_64
283#define VADDR_HI_BIT 48 296#define VADDR_HI_BIT 48
284#define GRUREGION(addr) (0) /* ZZZ could do better */
285#else 297#else
286#error "Unsupported architecture" 298#error "Unsupported architecture"
287#endif 299#endif
300#define GRUREGION(addr) ((addr) >> (VADDR_HI_BIT - 3) & 3)
288#define GRUASID(asid, addr) ((asid) + GRUREGION(addr)) 301#define GRUASID(asid, addr) ((asid) + GRUREGION(addr))
289 302
290/*------------------------------------------------------------------------------ 303/*------------------------------------------------------------------------------
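With the macro now identical on both architectures, a quick worked check using the x86_64 value VADDR_HI_BIT == 48 (so the shift amount is 45):

	/* Illustrative values only */
	GRUREGION(0x00007fffffffffffUL);	/* (addr >> 45) & 3 == 3 */
	GRUREGION(0x0000100000000000UL);	/* (addr >> 45) & 3 == 0 */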
@@ -297,12 +310,12 @@ struct gru_state;
297 * This structure is pointed to from the mmstruct via the notifier pointer. 310 * This structure is pointed to from the mmstruct via the notifier pointer.
298 * There is one of these per address space. 311 * There is one of these per address space.
299 */ 312 */
300struct gru_mm_tracker { 313struct gru_mm_tracker { /* pack to reduce size */
301 unsigned int mt_asid_gen; /* ASID wrap count */ 314 unsigned int mt_asid_gen:24; /* ASID wrap count */
302 int mt_asid; /* current base ASID for gru */ 315 unsigned int mt_asid:24; /* current base ASID for gru */
303 unsigned short mt_ctxbitmap; /* bitmap of contexts using 316 unsigned short mt_ctxbitmap:16;/* bitmap of contexts using
304 asid */ 317 asid */
305}; 318} __attribute__ ((packed));
306 319
307struct gru_mm_struct { 320struct gru_mm_struct {
308 struct mmu_notifier ms_notifier; 321 struct mmu_notifier ms_notifier;
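The packed bitfields shrink the tracker from 12 bytes (4 + 4 + 2, padded to int alignment) to exactly 8 (24 + 24 + 16 bits), and there is one tracker per GRU per address space, so the saving scales with GRU_MAX_GRUS. A compile-time check one could add, not part of the patch:

	BUILD_BUG_ON(sizeof(struct gru_mm_tracker) != 8);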
@@ -348,6 +361,7 @@ struct gru_thread_state {
348 long ts_user_options;/* misc user option flags */ 361 long ts_user_options;/* misc user option flags */
349 pid_t ts_tgid_owner; /* task that is using the 362 pid_t ts_tgid_owner; /* task that is using the
350 context - for migration */ 363 context - for migration */
364 unsigned short ts_sizeavail; /* Pagesizes in use */
351 int ts_tsid; /* thread that owns the 365 int ts_tsid; /* thread that owns the
352 structure */ 366 structure */
353 int ts_tlb_int_select;/* target cpu if interrupts 367 int ts_tlb_int_select;/* target cpu if interrupts
@@ -359,6 +373,9 @@ struct gru_thread_state {
359 required for context */ 373
360 unsigned char ts_cbr_au_count;/* Number of CBR resources 374 unsigned char ts_cbr_au_count;/* Number of CBR resources
361 required for context */ 375
376 char ts_blade; /* If >= 0, migrate context if
377 ref from different blade */
378 char ts_force_cch_reload;
362 char ts_force_unload;/* force context to be unloaded 379 char ts_force_unload;/* force context to be unloaded
363 after migration */ 380 after migration */
364 char ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each 381 char ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each
@@ -392,12 +409,12 @@ struct gru_state {
392 gru segments (64) */ 409 gru segments (64) */
393 void *gs_gru_base_vaddr; /* Virtual address of 410 void *gs_gru_base_vaddr; /* Virtual address of
394 gru segments (64) */ 411 gru segments (64) */
395 unsigned char gs_gid; /* unique GRU number */ 412 unsigned short gs_gid; /* unique GRU number */
413 unsigned short gs_blade_id; /* blade of GRU */
396 unsigned char gs_tgh_local_shift; /* used to pick TGH for 414 unsigned char gs_tgh_local_shift; /* used to pick TGH for
397 local flush */ 415 local flush */
398 unsigned char gs_tgh_first_remote; /* starting TGH# for 416 unsigned char gs_tgh_first_remote; /* starting TGH# for
399 remote flush */ 417 remote flush */
400 unsigned short gs_blade_id; /* blade of GRU */
401 spinlock_t gs_asid_lock; /* lock used for 418 spinlock_t gs_asid_lock; /* lock used for
402 assigning asids */ 419 assigning asids */
403 spinlock_t gs_lock; /* lock used for 420 spinlock_t gs_lock; /* lock used for
@@ -492,6 +509,10 @@ struct gru_blade_state {
492 (i) < GRU_CHIPLETS_PER_BLADE; \ 509 (i) < GRU_CHIPLETS_PER_BLADE; \
493 (i)++, (gru)++) 510 (i)++, (gru)++)
494 511
512/* Scan all GRUs */
513#define foreach_gid(gid) \
514 for ((gid) = 0; (gid) < gru_max_gids; (gid)++)
515
495/* Scan all active GTSs on a gru. Note: must hold ss_lock to use this macro. */ 516/* Scan all active GTSs on a gru. Note: must hold ss_lock to use this macro. */
496#define for_each_gts_on_gru(gts, gru, ctxnum) \ 517#define for_each_gts_on_gru(gts, gru, ctxnum) \
497 for ((ctxnum) = 0; (ctxnum) < GRU_NUM_CCH; (ctxnum)++) \ 518 for ((ctxnum) = 0; (ctxnum) < GRU_NUM_CCH; (ctxnum)++) \
@@ -578,9 +599,11 @@ extern struct gru_thread_state *gru_find_thread_state(struct vm_area_struct
578extern struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct 599extern struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct
579 *vma, int tsid); 600 *vma, int tsid);
580extern void gru_unload_context(struct gru_thread_state *gts, int savestate); 601extern void gru_unload_context(struct gru_thread_state *gts, int savestate);
602extern int gru_update_cch(struct gru_thread_state *gts, int force_unload);
581extern void gts_drop(struct gru_thread_state *gts); 603extern void gts_drop(struct gru_thread_state *gts);
582extern void gru_tgh_flush_init(struct gru_state *gru); 604extern void gru_tgh_flush_init(struct gru_state *gru);
583extern int gru_kservices_init(struct gru_state *gru); 605extern int gru_kservices_init(struct gru_state *gru);
606extern void gru_kservices_exit(struct gru_state *gru);
584extern irqreturn_t gru_intr(int irq, void *dev_id); 607extern irqreturn_t gru_intr(int irq, void *dev_id);
585extern int gru_handle_user_call_os(unsigned long address); 608extern int gru_handle_user_call_os(unsigned long address);
586extern int gru_user_flush_tlb(unsigned long arg); 609extern int gru_user_flush_tlb(unsigned long arg);
diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c
index c84496a77691..1d125091f5e7 100644
--- a/drivers/misc/sgi-gru/grutlbpurge.c
+++ b/drivers/misc/sgi-gru/grutlbpurge.c
@@ -187,7 +187,7 @@ void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start,
187 " FLUSH gruid %d, asid 0x%x, num %ld, cbmap 0x%x\n", 187 " FLUSH gruid %d, asid 0x%x, num %ld, cbmap 0x%x\n",
188 gid, asid, num, asids->mt_ctxbitmap); 188 gid, asid, num, asids->mt_ctxbitmap);
189 tgh = get_lock_tgh_handle(gru); 189 tgh = get_lock_tgh_handle(gru);
190 tgh_invalidate(tgh, start, 0, asid, grupagesize, 0, 190 tgh_invalidate(tgh, start, ~0, asid, grupagesize, 0,
191 num - 1, asids->mt_ctxbitmap); 191 num - 1, asids->mt_ctxbitmap);
192 get_unlock_tgh_handle(tgh); 192 get_unlock_tgh_handle(tgh);
193 } else { 193 } else {
@@ -210,11 +210,10 @@ void gru_flush_all_tlb(struct gru_state *gru)
210{ 210{
211 struct gru_tlb_global_handle *tgh; 211 struct gru_tlb_global_handle *tgh;
212 212
213 gru_dbg(grudev, "gru %p, gid %d\n", gru, gru->gs_gid); 213 gru_dbg(grudev, "gid %d\n", gru->gs_gid);
214 tgh = get_lock_tgh_handle(gru); 214 tgh = get_lock_tgh_handle(gru);
215 tgh_invalidate(tgh, 0, ~0, 0, 1, 1, GRUMAXINVAL - 1, 0); 215 tgh_invalidate(tgh, 0, ~0, 0, 1, 1, GRUMAXINVAL - 1, 0xffff);
216 get_unlock_tgh_handle(tgh); 216 get_unlock_tgh_handle(tgh);
217 preempt_enable();
218} 217}
219 218
220/* 219/*
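Spelled out against the tgh_invalidate() signature in gruhandles.h, the key fix in the full flush is the ctxbitmap: the old value of 0 selected no contexts at all, while 0xffff names all GRU_NUM_CCH (16) of them. An annotated restatement of the call above:

	/* Illustrative restatement of the corrected call */
	tgh_invalidate(tgh, 0 /* vaddr */, ~0 /* vaddrmask: all addresses */,
		       0 /* asid */, 1 /* pagesize */, 1 /* global */,
		       GRUMAXINVAL - 1 /* n */, 0xffff /* all contexts */);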
diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
index 275b78896a73..114444cfd496 100644
--- a/drivers/misc/sgi-xp/xpc.h
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -92,7 +92,9 @@ struct xpc_rsvd_page {
92 u8 pad1[3]; /* align to next u64 in 1st 64-byte cacheline */ 92 u8 pad1[3]; /* align to next u64 in 1st 64-byte cacheline */
93 union { 93 union {
94 unsigned long vars_pa; /* phys address of struct xpc_vars */ 94 unsigned long vars_pa; /* phys address of struct xpc_vars */
95 unsigned long activate_mq_gpa; /* gru phy addr of activate_mq */ 95 unsigned long activate_gru_mq_desc_gpa; /* phys addr of */
96 /* activate mq's */
97 /* gru mq descriptor */
96 } sn; 98 } sn;
97 unsigned long ts_jiffies; /* timestamp when rsvd pg was setup by XPC */ 99 unsigned long ts_jiffies; /* timestamp when rsvd pg was setup by XPC */
98 u64 pad2[10]; /* align to last u64 in 2nd 64-byte cacheline */ 100 u64 pad2[10]; /* align to last u64 in 2nd 64-byte cacheline */
@@ -189,7 +191,9 @@ struct xpc_gru_mq_uv {
189 int irq; /* irq raised when message is received in mq */ 191 int irq; /* irq raised when message is received in mq */
190 int mmr_blade; /* blade where watchlist was allocated from */ 192 int mmr_blade; /* blade where watchlist was allocated from */
191 unsigned long mmr_offset; /* offset of irq mmr located on mmr_blade */ 193 unsigned long mmr_offset; /* offset of irq mmr located on mmr_blade */
194 unsigned long mmr_value; /* value of irq mmr located on mmr_blade */
192 int watchlist_num; /* number of watchlist allocated by BIOS */ 195
196 void *gru_mq_desc; /* opaque structure used by the GRU driver */
193}; 197};
194 198
195/* 199/*
@@ -197,6 +201,7 @@ struct xpc_gru_mq_uv {
197 * heartbeat, partition active state, and channel state. This is UV only. 201 * heartbeat, partition active state, and channel state. This is UV only.
198 */ 202 */
199struct xpc_activate_mq_msghdr_uv { 203struct xpc_activate_mq_msghdr_uv {
204 unsigned int gru_msg_hdr; /* FOR GRU INTERNAL USE ONLY */
200 short partid; /* sender's partid */ 205 short partid; /* sender's partid */
201 u8 act_state; /* sender's act_state at time msg sent */ 206 u8 act_state; /* sender's act_state at time msg sent */
202 u8 type; /* message's type */ 207 u8 type; /* message's type */
@@ -232,7 +237,7 @@ struct xpc_activate_mq_msg_heartbeat_req_uv {
232struct xpc_activate_mq_msg_activate_req_uv { 237struct xpc_activate_mq_msg_activate_req_uv {
233 struct xpc_activate_mq_msghdr_uv hdr; 238 struct xpc_activate_mq_msghdr_uv hdr;
234 unsigned long rp_gpa; 239 unsigned long rp_gpa;
235 unsigned long activate_mq_gpa; 240 unsigned long activate_gru_mq_desc_gpa;
236}; 241};
237 242
238struct xpc_activate_mq_msg_deactivate_req_uv { 243struct xpc_activate_mq_msg_deactivate_req_uv {
@@ -263,7 +268,7 @@ struct xpc_activate_mq_msg_chctl_openreply_uv {
263 short ch_number; 268 short ch_number;
264 short remote_nentries; /* ??? Is this needed? What is? */ 269 short remote_nentries; /* ??? Is this needed? What is? */
265 short local_nentries; /* ??? Is this needed? What is? */ 270 short local_nentries; /* ??? Is this needed? What is? */
266 unsigned long local_notify_mq_gpa; 271 unsigned long notify_gru_mq_desc_gpa;
267}; 272};
268 273
269/* 274/*
@@ -510,8 +515,8 @@ struct xpc_channel_sn2 {
510}; 515};
511 516
512struct xpc_channel_uv { 517struct xpc_channel_uv {
513 unsigned long remote_notify_mq_gpa; /* gru phys address of remote */ 518 void *cached_notify_gru_mq_desc; /* remote partition's notify mq's */
514 /* partition's notify mq */ 519 /* gru mq descriptor */
515 520
516 struct xpc_send_msg_slot_uv *send_msg_slots; 521 struct xpc_send_msg_slot_uv *send_msg_slots;
517 void *recv_msg_slots; /* each slot will hold a xpc_notify_mq_msg_uv */ 522 void *recv_msg_slots; /* each slot will hold a xpc_notify_mq_msg_uv */
@@ -682,8 +687,12 @@ struct xpc_partition_sn2 {
682}; 687};
683 688
684struct xpc_partition_uv { 689struct xpc_partition_uv {
685 unsigned long remote_activate_mq_gpa; /* gru phys address of remote */ 690 unsigned long activate_gru_mq_desc_gpa; /* phys addr of partition's */
686 /* partition's activate mq */ 691 /* activate mq's gru mq */
692 /* descriptor */
693 void *cached_activate_gru_mq_desc; /* cached copy of partition's */
694 /* activate mq's gru mq descriptor */
695 struct mutex cached_activate_gru_mq_desc_mutex;
687 spinlock_t flags_lock; /* protect updating of flags */ 696 spinlock_t flags_lock; /* protect updating of flags */
688 unsigned int flags; /* general flags */ 697 unsigned int flags; /* general flags */
689 u8 remote_act_state; /* remote partition's act_state */ 698 u8 remote_act_state; /* remote partition's act_state */
@@ -694,8 +703,9 @@ struct xpc_partition_uv {
694 703
695/* struct xpc_partition_uv flags */ 704/* struct xpc_partition_uv flags */
696 705
697#define XPC_P_HEARTBEAT_OFFLINE_UV 0x00000001 706#define XPC_P_HEARTBEAT_OFFLINE_UV 0x00000001
698#define XPC_P_ENGAGED_UV 0x00000002 707#define XPC_P_ENGAGED_UV 0x00000002
708#define XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV 0x00000004
699 709
700/* struct xpc_partition_uv act_state change requests */ 710/* struct xpc_partition_uv act_state change requests */
701 711
@@ -804,6 +814,7 @@ extern void xpc_activate_kthreads(struct xpc_channel *, int);
804extern void xpc_create_kthreads(struct xpc_channel *, int, int); 814extern void xpc_create_kthreads(struct xpc_channel *, int, int);
805extern void xpc_disconnect_wait(int); 815extern void xpc_disconnect_wait(int);
806extern int (*xpc_setup_partitions_sn) (void); 816extern int (*xpc_setup_partitions_sn) (void);
817extern void (*xpc_teardown_partitions_sn) (void);
807extern enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *, u64 *, 818extern enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *, u64 *,
808 unsigned long *, 819 unsigned long *,
809 size_t *); 820 size_t *);
@@ -846,8 +857,8 @@ extern void (*xpc_send_chctl_openrequest) (struct xpc_channel *,
846 unsigned long *); 857 unsigned long *);
847extern void (*xpc_send_chctl_openreply) (struct xpc_channel *, unsigned long *); 858extern void (*xpc_send_chctl_openreply) (struct xpc_channel *, unsigned long *);
848 859
849extern void (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *, 860extern enum xp_retval (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *,
850 unsigned long); 861 unsigned long);
851 862
852extern enum xp_retval (*xpc_send_payload) (struct xpc_channel *, u32, void *, 863extern enum xp_retval (*xpc_send_payload) (struct xpc_channel *, u32, void *,
853 u16, u8, xpc_notify_func, void *); 864 u16, u8, xpc_notify_func, void *);
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index 45fd653dbe31..99a2534c38a1 100644
--- a/drivers/misc/sgi-xp/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -183,6 +183,7 @@ xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number,
183 &part->remote_openclose_args[ch_number]; 183 &part->remote_openclose_args[ch_number];
184 struct xpc_channel *ch = &part->channels[ch_number]; 184 struct xpc_channel *ch = &part->channels[ch_number];
185 enum xp_retval reason; 185 enum xp_retval reason;
186 enum xp_retval ret;
186 187
187 spin_lock_irqsave(&ch->lock, irq_flags); 188 spin_lock_irqsave(&ch->lock, irq_flags);
188 189
@@ -399,8 +400,13 @@ again:
399 DBUG_ON(args->local_nentries == 0); 400 DBUG_ON(args->local_nentries == 0);
400 DBUG_ON(args->remote_nentries == 0); 401 DBUG_ON(args->remote_nentries == 0);
401 402
403 ret = xpc_save_remote_msgqueue_pa(ch, args->local_msgqueue_pa);
404 if (ret != xpSuccess) {
405 XPC_DISCONNECT_CHANNEL(ch, ret, &irq_flags);
406 spin_unlock_irqrestore(&ch->lock, irq_flags);
407 return;
408 }
402 ch->flags |= XPC_C_ROPENREPLY; 409 ch->flags |= XPC_C_ROPENREPLY;
403 xpc_save_remote_msgqueue_pa(ch, args->local_msgqueue_pa);
404 410
405 if (args->local_nentries < ch->remote_nentries) { 411 if (args->local_nentries < ch->remote_nentries) {
406 dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new " 412 dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
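The hook can fail now because, on UV, "saving the remote address" really means caching the remote partition's whole GRU mq descriptor, which requires an allocation and a cross-partition copy. A hypothetical sketch of that shape (names and helpers are assumptions; the real implementation lives in xpc_uv.c):

	/* Hypothetical sketch; not the actual xpc_uv.c code. */
	static enum xp_retval
	example_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
					   unsigned long gru_mq_desc_gpa)
	{
		struct xpc_channel_uv *ch_uv = &ch->sn.uv;
		size_t len = sizeof(struct gru_message_queue_desc);

		if (ch_uv->cached_notify_gru_mq_desc == NULL) {
			ch_uv->cached_notify_gru_mq_desc =
			    kmalloc(len, GFP_KERNEL);
			if (ch_uv->cached_notify_gru_mq_desc == NULL)
				return xpNoMemory;
		}
		/* pull the remote descriptor across partitions */
		return xp_remote_memcpy(uv_gpa(ch_uv->cached_notify_gru_mq_desc),
					gru_mq_desc_gpa, len);
	}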
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 6576170de962..1ab9fda87fab 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -171,6 +171,7 @@ static struct notifier_block xpc_die_notifier = {
171}; 171};
172 172
173int (*xpc_setup_partitions_sn) (void); 173int (*xpc_setup_partitions_sn) (void);
174void (*xpc_teardown_partitions_sn) (void);
174enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *buf, u64 *cookie, 175enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *buf, u64 *cookie,
175 unsigned long *rp_pa, 176 unsigned long *rp_pa,
176 size_t *len); 177 size_t *len);
@@ -217,8 +218,8 @@ void (*xpc_send_chctl_openrequest) (struct xpc_channel *ch,
217void (*xpc_send_chctl_openreply) (struct xpc_channel *ch, 218void (*xpc_send_chctl_openreply) (struct xpc_channel *ch,
218 unsigned long *irq_flags); 219 unsigned long *irq_flags);
219 220
220void (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *ch, 221enum xp_retval (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *ch,
221 unsigned long msgqueue_pa); 222 unsigned long msgqueue_pa);
222 223
223enum xp_retval (*xpc_send_payload) (struct xpc_channel *ch, u32 flags, 224enum xp_retval (*xpc_send_payload) (struct xpc_channel *ch, u32 flags,
224 void *payload, u16 payload_size, 225 void *payload, u16 payload_size,
@@ -998,6 +999,7 @@ xpc_setup_partitions(void)
998static void 999static void
999xpc_teardown_partitions(void) 1000xpc_teardown_partitions(void)
1000{ 1001{
1002 xpc_teardown_partitions_sn();
1001 kfree(xpc_partitions); 1003 kfree(xpc_partitions);
1002} 1004}
1003 1005
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
index 2e975762c32b..eaaa964942de 100644
--- a/drivers/misc/sgi-xp/xpc_sn2.c
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -66,6 +66,12 @@ xpc_setup_partitions_sn_sn2(void)
66 return 0; 66 return 0;
67} 67}
68 68
69static void
70xpc_teardown_partitions_sn_sn2(void)
71{
72 /* nothing needs to be done */
73}
74
69/* SH_IPI_ACCESS shub register value on startup */ 75/* SH_IPI_ACCESS shub register value on startup */
70static u64 xpc_sh1_IPI_access_sn2; 76static u64 xpc_sh1_IPI_access_sn2;
71static u64 xpc_sh2_IPI_access0_sn2; 77static u64 xpc_sh2_IPI_access0_sn2;
@@ -436,11 +442,12 @@ xpc_send_chctl_local_msgrequest_sn2(struct xpc_channel *ch)
436 XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST); 442 XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST);
437} 443}
438 444
439static void 445static enum xp_retval
440xpc_save_remote_msgqueue_pa_sn2(struct xpc_channel *ch, 446xpc_save_remote_msgqueue_pa_sn2(struct xpc_channel *ch,
441 unsigned long msgqueue_pa) 447 unsigned long msgqueue_pa)
442{ 448{
443 ch->sn.sn2.remote_msgqueue_pa = msgqueue_pa; 449 ch->sn.sn2.remote_msgqueue_pa = msgqueue_pa;
450 return xpSuccess;
444} 451}
445 452
446/* 453/*
@@ -1737,20 +1744,20 @@ xpc_clear_remote_msgqueue_flags_sn2(struct xpc_channel *ch)
1737{ 1744{
1738 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; 1745 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1739 struct xpc_msg_sn2 *msg; 1746 struct xpc_msg_sn2 *msg;
1740 s64 put; 1747 s64 put, remote_nentries = ch->remote_nentries;
1741 1748
1742 /* flags are zeroed when the buffer is allocated */ 1749 /* flags are zeroed when the buffer is allocated */
1743 if (ch_sn2->remote_GP.put < ch->remote_nentries) 1750 if (ch_sn2->remote_GP.put < remote_nentries)
1744 return; 1751 return;
1745 1752
1746 put = max(ch_sn2->w_remote_GP.put, ch->remote_nentries); 1753 put = max(ch_sn2->w_remote_GP.put, remote_nentries);
1747 do { 1754 do {
1748 msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue + 1755 msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue +
1749 (put % ch->remote_nentries) * 1756 (put % remote_nentries) *
1750 ch->entry_size); 1757 ch->entry_size);
1751 DBUG_ON(!(msg->flags & XPC_M_SN2_READY)); 1758 DBUG_ON(!(msg->flags & XPC_M_SN2_READY));
1752 DBUG_ON(!(msg->flags & XPC_M_SN2_DONE)); 1759 DBUG_ON(!(msg->flags & XPC_M_SN2_DONE));
1753 DBUG_ON(msg->number != put - ch->remote_nentries); 1760 DBUG_ON(msg->number != put - remote_nentries);
1754 msg->flags = 0; 1761 msg->flags = 0;
1755 } while (++put < ch_sn2->remote_GP.put); 1762 } while (++put < ch_sn2->remote_GP.put);
1756} 1763}
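[editor's note] Caching ch->remote_nentries in a local s64 keeps every comparison and modulo in the loop above in one signed 64-bit type (the kernel's max() in particular wants matching operand types) and avoids rereading the channel field on each iteration. A userspace sketch of the ring walk the function performs, with hypothetical names and sizes:

        #include <stdio.h>

        #define NENTRIES 4

        static int flags[NENTRIES];     /* stand-in for per-slot message flags */

        /* clear flags for slots the remote side has consumed past one full lap */
        static void clear_consumed(long long w_put, long long remote_put)
        {
                long long nentries = NENTRIES; /* one signed local, as in the hunk */
                long long put;

                if (remote_put < nentries)
                        return;         /* first lap: flags are still zeroed */

                for (put = (w_put > nentries ? w_put : nentries);
                     put < remote_put; put++)
                        flags[put % nentries] = 0;
        }

        int main(void)
        {
                for (int i = 0; i < NENTRIES; i++)
                        flags[i] = 1;
                clear_consumed(2, 6);
                for (int i = 0; i < NENTRIES; i++)
                        printf("%d", flags[i]);         /* prints 0011 */
                putchar('\n');
                return 0;
        }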
@@ -2315,6 +2322,7 @@ xpc_init_sn2(void)
2315 size_t buf_size; 2322 size_t buf_size;
2316 2323
2317 xpc_setup_partitions_sn = xpc_setup_partitions_sn_sn2; 2324 xpc_setup_partitions_sn = xpc_setup_partitions_sn_sn2;
2325 xpc_teardown_partitions_sn = xpc_teardown_partitions_sn_sn2;
2318 xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_sn2; 2326 xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_sn2;
2319 xpc_setup_rsvd_page_sn = xpc_setup_rsvd_page_sn_sn2; 2327 xpc_setup_rsvd_page_sn = xpc_setup_rsvd_page_sn_sn2;
2320 xpc_increment_heartbeat = xpc_increment_heartbeat_sn2; 2328 xpc_increment_heartbeat = xpc_increment_heartbeat_sn2;
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 29c0502a96b2..f7fff4727edb 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -31,6 +31,21 @@
31#include "../sgi-gru/grukservices.h" 31#include "../sgi-gru/grukservices.h"
32#include "xpc.h" 32#include "xpc.h"
33 33
34#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
35struct uv_IO_APIC_route_entry {
36 __u64 vector : 8,
37 delivery_mode : 3,
38 dest_mode : 1,
39 delivery_status : 1,
40 polarity : 1,
41 __reserved_1 : 1,
42 trigger : 1,
43 mask : 1,
44 __reserved_2 : 15,
45 dest : 32;
46};
47#endif
48
34static atomic64_t xpc_heartbeat_uv; 49static atomic64_t xpc_heartbeat_uv;
35static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV); 50static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV);
36 51
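[editor's note] The struct added above gives the IA64 builds the same bit-field view of the 64-bit MMR value that the x86 headers provide, so xpc_create_gru_mq_uv() can pull the vector and dest fields out of mq->mmr_value. The overlay trick in miniature (field widths copied from the hunk; bit-field layout is compiler- and ABI-dependent, which the kernel can rely on and portable code cannot):

        #include <stdio.h>
        #include <string.h>

        struct io_apic_route_entry {    /* same widths as the struct above */
                unsigned long long vector:8, delivery_mode:3, dest_mode:1,
                        delivery_status:1, polarity:1, __reserved_1:1,
                        trigger:1, mask:1, __reserved_2:15, dest:32;
        };

        int main(void)
        {
                unsigned long long mmr = 0;
                struct io_apic_route_entry e;

                mmr |= 0x5dULL;         /* vector lives in bits 0..7 */
                mmr |= 42ULL << 32;     /* dest lives in bits 32..63 */

                /* overlay the bit-field view on the raw register value */
                memcpy(&e, &mmr, sizeof(e));
                printf("vector=0x%llx dest=%llu\n",
                       (unsigned long long)e.vector,
                       (unsigned long long)e.dest);
                return 0;
        }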
@@ -56,26 +71,52 @@ xpc_setup_partitions_sn_uv(void)
56 for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) { 71 for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
57 part_uv = &xpc_partitions[partid].sn.uv; 72 part_uv = &xpc_partitions[partid].sn.uv;
58 73
74 mutex_init(&part_uv->cached_activate_gru_mq_desc_mutex);
59 spin_lock_init(&part_uv->flags_lock); 75 spin_lock_init(&part_uv->flags_lock);
60 part_uv->remote_act_state = XPC_P_AS_INACTIVE; 76 part_uv->remote_act_state = XPC_P_AS_INACTIVE;
61 } 77 }
62 return 0; 78 return 0;
63} 79}
64 80
81static void
82xpc_teardown_partitions_sn_uv(void)
83{
84 short partid;
85 struct xpc_partition_uv *part_uv;
86 unsigned long irq_flags;
87
88 for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
89 part_uv = &xpc_partitions[partid].sn.uv;
90
91 if (part_uv->cached_activate_gru_mq_desc != NULL) {
92 mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
93 spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
94 part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
95 spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
96 kfree(part_uv->cached_activate_gru_mq_desc);
97 part_uv->cached_activate_gru_mq_desc = NULL;
98 mutex_unlock(&part_uv->
99 cached_activate_gru_mq_desc_mutex);
100 }
101 }
102}
103
65static int 104static int
66xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name) 105xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
67{ 106{
107 int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
108
68#if defined CONFIG_X86_64 109#if defined CONFIG_X86_64
69 mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset); 110 mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset);
70 if (mq->irq < 0) { 111 if (mq->irq < 0) {
71 dev_err(xpc_part, "uv_setup_irq() returned error=%d\n", 112 dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
72 mq->irq); 113 -mq->irq);
114 return mq->irq;
73 } 115 }
74 116
75#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV 117 mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);
76 int mmr_pnode;
77 unsigned long mmr_value;
78 118
119#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
79 if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0) 120 if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
80 mq->irq = SGI_XPC_ACTIVATE; 121 mq->irq = SGI_XPC_ACTIVATE;
81 else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0) 122 else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
@@ -83,10 +124,8 @@ xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
83 else 124 else
84 return -EINVAL; 125 return -EINVAL;
85 126
86 mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); 127 mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
87 mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq; 128 uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value);
88
89 uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
90#else 129#else
91 #error not a supported configuration 130 #error not a supported configuration
92#endif 131#endif
@@ -127,7 +166,7 @@ xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
127 return ret; 166 return ret;
128 } 167 }
129#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV 168#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
130 ret = sn_mq_watchlist_alloc(mq->mmr_blade, uv_gpa(mq->address), 169 ret = sn_mq_watchlist_alloc(mq->mmr_blade, (void *)uv_gpa(mq->address),
131 mq->order, &mq->mmr_offset); 170 mq->order, &mq->mmr_offset);
132 if (ret < 0) { 171 if (ret < 0) {
133 dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n", 172 dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
@@ -168,12 +207,22 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
168 int pg_order; 207 int pg_order;
169 struct page *page; 208 struct page *page;
170 struct xpc_gru_mq_uv *mq; 209 struct xpc_gru_mq_uv *mq;
210 struct uv_IO_APIC_route_entry *mmr_value;
171 211
172 mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL); 212 mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
173 if (mq == NULL) { 213 if (mq == NULL) {
174 dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() " 214 dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
175 "a xpc_gru_mq_uv structure\n"); 215 "a xpc_gru_mq_uv structure\n");
176 ret = -ENOMEM; 216 ret = -ENOMEM;
217 goto out_0;
218 }
219
220 mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
221 GFP_KERNEL);
222 if (mq->gru_mq_desc == NULL) {
223 dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
224 "a gru_message_queue_desc structure\n");
225 ret = -ENOMEM;
177 goto out_1; 226 goto out_1;
178 } 227 }
179 228
@@ -194,14 +243,6 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
194 } 243 }
195 mq->address = page_address(page); 244 mq->address = page_address(page);
196 245
197 ret = gru_create_message_queue(mq->address, mq_size);
198 if (ret != 0) {
199 dev_err(xpc_part, "gru_create_message_queue() returned "
200 "error=%d\n", ret);
201 ret = -EINVAL;
202 goto out_3;
203 }
204
205 /* enable generation of irq when GRU mq operation occurs to this mq */ 246 /* enable generation of irq when GRU mq operation occurs to this mq */
206 ret = xpc_gru_mq_watchlist_alloc_uv(mq); 247 ret = xpc_gru_mq_watchlist_alloc_uv(mq);
207 if (ret != 0) 248 if (ret != 0)
@@ -214,10 +255,20 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
214 ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL); 255 ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
215 if (ret != 0) { 256 if (ret != 0) {
216 dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n", 257 dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
217 mq->irq, ret); 258 mq->irq, -ret);
218 goto out_5; 259 goto out_5;
219 } 260 }
220 261
262 mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
263 ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
264 nid, mmr_value->vector, mmr_value->dest);
265 if (ret != 0) {
266 dev_err(xpc_part, "gru_create_message_queue() returned "
267 "error=%d\n", ret);
268 ret = -EINVAL;
269 goto out_6;
270 }
271
221 /* allow other partitions to access this GRU mq */ 272 /* allow other partitions to access this GRU mq */
222 xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size); 273 xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
223 if (xp_ret != xpSuccess) { 274 if (xp_ret != xpSuccess) {
@@ -237,8 +288,10 @@ out_4:
237out_3: 288out_3:
238 free_pages((unsigned long)mq->address, pg_order); 289 free_pages((unsigned long)mq->address, pg_order);
239out_2: 290out_2:
240 kfree(mq); 291 kfree(mq->gru_mq_desc);
241out_1: 292out_1:
293 kfree(mq);
294out_0:
242 return ERR_PTR(ret); 295 return ERR_PTR(ret);
243} 296}
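[editor's note] With gru_mq_desc allocated immediately after the mq struct, the labels were renumbered (out_0 .. out_6) so that each exit frees exactly what was allocated before the failure, in reverse order; note how kfree(mq->gru_mq_desc) now sits above kfree(mq). The canonical shape of that unwind as a standalone sketch:

        #include <stdio.h>
        #include <stdlib.h>

        struct mq {
                void *desc;
                void *pages;
        };

        static struct mq *create_mq(void)
        {
                struct mq *mq = malloc(sizeof(*mq));

                if (!mq)
                        goto out_0;
                mq->desc = malloc(64);
                if (!mq->desc)
                        goto out_1;     /* only mq itself exists */
                mq->pages = malloc(4096);
                if (!mq->pages)
                        goto out_2;     /* mq and desc exist */
                return mq;

        out_2:
                free(mq->desc);         /* undo in reverse allocation order */
        out_1:
                free(mq);
        out_0:
                return NULL;
        }

        int main(void)
        {
                struct mq *mq = create_mq();

                if (!mq)
                        return 1;
                puts("created");
                free(mq->pages);
                free(mq->desc);
                free(mq);
                return 0;
        }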
244 297
@@ -268,13 +321,14 @@ xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
268} 321}
269 322
270static enum xp_retval 323static enum xp_retval
271xpc_send_gru_msg(unsigned long mq_gpa, void *msg, size_t msg_size) 324xpc_send_gru_msg(struct gru_message_queue_desc *gru_mq_desc, void *msg,
325 size_t msg_size)
272{ 326{
273 enum xp_retval xp_ret; 327 enum xp_retval xp_ret;
274 int ret; 328 int ret;
275 329
276 while (1) { 330 while (1) {
277 ret = gru_send_message_gpa(mq_gpa, msg, msg_size); 331 ret = gru_send_message_gpa(gru_mq_desc, msg, msg_size);
278 if (ret == MQE_OK) { 332 if (ret == MQE_OK) {
279 xp_ret = xpSuccess; 333 xp_ret = xpSuccess;
280 break; 334 break;
@@ -421,7 +475,15 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
421 part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV; 475 part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
422 part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */ 476 part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
423 part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies; 477 part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
424 part_uv->remote_activate_mq_gpa = msg->activate_mq_gpa; 478
479 if (msg->activate_gru_mq_desc_gpa !=
480 part_uv->activate_gru_mq_desc_gpa) {
481 spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
482 part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
483 spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
484 part_uv->activate_gru_mq_desc_gpa =
485 msg->activate_gru_mq_desc_gpa;
486 }
425 spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); 487 spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
426 488
427 (*wakeup_hb_checker)++; 489 (*wakeup_hb_checker)++;
@@ -498,7 +560,7 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
498 args = &part->remote_openclose_args[msg->ch_number]; 560 args = &part->remote_openclose_args[msg->ch_number];
499 args->remote_nentries = msg->remote_nentries; 561 args->remote_nentries = msg->remote_nentries;
500 args->local_nentries = msg->local_nentries; 562 args->local_nentries = msg->local_nentries;
501 args->local_msgqueue_pa = msg->local_notify_mq_gpa; 563 args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa;
502 564
503 spin_lock_irqsave(&part->chctl_lock, irq_flags); 565 spin_lock_irqsave(&part->chctl_lock, irq_flags);
504 part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY; 566 part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY;
@@ -558,9 +620,10 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
558 short partid; 620 short partid;
559 struct xpc_partition *part; 621 struct xpc_partition *part;
560 int wakeup_hb_checker = 0; 622 int wakeup_hb_checker = 0;
623 int part_referenced;
561 624
562 while (1) { 625 while (1) {
563 msg_hdr = gru_get_next_message(xpc_activate_mq_uv->address); 626 msg_hdr = gru_get_next_message(xpc_activate_mq_uv->gru_mq_desc);
564 if (msg_hdr == NULL) 627 if (msg_hdr == NULL)
565 break; 628 break;
566 629
@@ -571,14 +634,15 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
571 partid); 634 partid);
572 } else { 635 } else {
573 part = &xpc_partitions[partid]; 636 part = &xpc_partitions[partid];
574 if (xpc_part_ref(part)) { 637
575 xpc_handle_activate_mq_msg_uv(part, msg_hdr, 638 part_referenced = xpc_part_ref(part);
576 &wakeup_hb_checker); 639 xpc_handle_activate_mq_msg_uv(part, msg_hdr,
640 &wakeup_hb_checker);
641 if (part_referenced)
577 xpc_part_deref(part); 642 xpc_part_deref(part);
578 }
579 } 643 }
580 644
581 gru_free_message(xpc_activate_mq_uv->address, msg_hdr); 645 gru_free_message(xpc_activate_mq_uv->gru_mq_desc, msg_hdr);
582 } 646 }
583 647
584 if (wakeup_hb_checker) 648 if (wakeup_hb_checker)
@@ -588,21 +652,73 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
588} 652}
589 653
590static enum xp_retval 654static enum xp_retval
655xpc_cache_remote_gru_mq_desc_uv(struct gru_message_queue_desc *gru_mq_desc,
656 unsigned long gru_mq_desc_gpa)
657{
658 enum xp_retval ret;
659
660 ret = xp_remote_memcpy(uv_gpa(gru_mq_desc), gru_mq_desc_gpa,
661 sizeof(struct gru_message_queue_desc));
662 if (ret == xpSuccess)
663 gru_mq_desc->mq = NULL;
664
665 return ret;
666}
667
668static enum xp_retval
591xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size, 669xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size,
592 int msg_type) 670 int msg_type)
593{ 671{
594 struct xpc_activate_mq_msghdr_uv *msg_hdr = msg; 672 struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;
673 struct xpc_partition_uv *part_uv = &part->sn.uv;
674 struct gru_message_queue_desc *gru_mq_desc;
675 unsigned long irq_flags;
676 enum xp_retval ret;
595 677
596 DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV); 678 DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);
597 679
598 msg_hdr->type = msg_type; 680 msg_hdr->type = msg_type;
599 msg_hdr->partid = XPC_PARTID(part); 681 msg_hdr->partid = xp_partition_id;
600 msg_hdr->act_state = part->act_state; 682 msg_hdr->act_state = part->act_state;
601 msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies; 683 msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;
602 684
685 mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
686again:
687 if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) {
688 gru_mq_desc = part_uv->cached_activate_gru_mq_desc;
689 if (gru_mq_desc == NULL) {
690 gru_mq_desc = kmalloc(sizeof(struct
691 gru_message_queue_desc),
692 GFP_KERNEL);
693 if (gru_mq_desc == NULL) {
694 ret = xpNoMemory;
695 goto done;
696 }
697 part_uv->cached_activate_gru_mq_desc = gru_mq_desc;
698 }
699
700 ret = xpc_cache_remote_gru_mq_desc_uv(gru_mq_desc,
701 part_uv->
702 activate_gru_mq_desc_gpa);
703 if (ret != xpSuccess)
704 goto done;
705
706 spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
707 part_uv->flags |= XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
708 spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
709 }
710
603 /* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */ 711 /* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
604 return xpc_send_gru_msg(part->sn.uv.remote_activate_mq_gpa, msg, 712 ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg,
605 msg_size); 713 msg_size);
714 if (ret != xpSuccess) {
715 smp_rmb(); /* ensure a fresh copy of part_uv->flags */
716 if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV))
717 goto again;
718 }
719done:
720 mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex);
721 return ret;
606} 722}
607 723
608static void 724static void
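[editor's note] The send path above caches the remote GRU descriptor under a mutex, and if the send fails it rechecks the CACHED flag: the activate handler clears that flag whenever the peer advertises a new descriptor gpa, so a cleared flag means "refill the cache and retry". A single-threaded sketch of that loop; the staleness detection is folded into send_msg() here, whereas the real driver learns about it from an incoming message (all names hypothetical):

        #include <stdio.h>
        #include <stdbool.h>

        static bool cached;             /* stand-in for the CACHED..._DESC flag */
        static int  cached_gen;         /* generation our local copy reflects */
        static int  remote_gen;         /* generation the peer currently uses */

        static void refresh_cache(void)
        {
                cached_gen = remote_gen;        /* re-copy the remote descriptor */
                cached = true;
        }

        static int send_msg(void)
        {
                if (cached_gen != remote_gen) {
                        cached = false; /* peer moved its queue: invalidate */
                        return -1;
                }
                return 0;
        }

        static int send_with_cache(void)
        {
                int ret;
        again:
                if (!cached)
                        refresh_cache();
                ret = send_msg();
                if (ret != 0 && !cached)
                        goto again;     /* cache was invalidated: refill, retry */
                return ret;
        }

        int main(void)
        {
                printf("fresh cache: %d\n", send_with_cache());
                remote_gen++;           /* peer republishes its descriptor */
                printf("stale cache: %d\n", send_with_cache());
                return 0;
        }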
@@ -620,7 +736,7 @@ static void
620xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags, 736xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
621 void *msg, size_t msg_size, int msg_type) 737 void *msg, size_t msg_size, int msg_type)
622{ 738{
623 struct xpc_partition *part = &xpc_partitions[ch->number]; 739 struct xpc_partition *part = &xpc_partitions[ch->partid];
624 enum xp_retval ret; 740 enum xp_retval ret;
625 741
626 ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type); 742 ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
@@ -692,7 +808,8 @@ xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
692static int 808static int
693xpc_setup_rsvd_page_sn_uv(struct xpc_rsvd_page *rp) 809xpc_setup_rsvd_page_sn_uv(struct xpc_rsvd_page *rp)
694{ 810{
695 rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv->address); 811 rp->sn.activate_gru_mq_desc_gpa =
812 uv_gpa(xpc_activate_mq_uv->gru_mq_desc);
696 return 0; 813 return 0;
697} 814}
698 815
@@ -787,7 +904,8 @@ xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
787 904
788 part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */ 905 part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
789 part->remote_rp_ts_jiffies = remote_rp->ts_jiffies; 906 part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
790 part->sn.uv.remote_activate_mq_gpa = remote_rp->sn.activate_mq_gpa; 907 part->sn.uv.activate_gru_mq_desc_gpa =
908 remote_rp->sn.activate_gru_mq_desc_gpa;
791 909
792 /* 910 /*
793 * ??? Is it a good idea to make this conditional on what is 911 * ??? Is it a good idea to make this conditional on what is
@@ -795,7 +913,8 @@ xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
795 */ 913 */
796 if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) { 914 if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
797 msg.rp_gpa = uv_gpa(xpc_rsvd_page); 915 msg.rp_gpa = uv_gpa(xpc_rsvd_page);
798 msg.activate_mq_gpa = xpc_rsvd_page->sn.activate_mq_gpa; 916 msg.activate_gru_mq_desc_gpa =
917 xpc_rsvd_page->sn.activate_gru_mq_desc_gpa;
799 xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), 918 xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
800 XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV); 919 XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
801 } 920 }
@@ -857,7 +976,8 @@ xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
857 if (head->first == NULL) 976 if (head->first == NULL)
858 head->last = NULL; 977 head->last = NULL;
859 } 978 }
860 head->n_entries++; 979 head->n_entries--;
980 BUG_ON(head->n_entries < 0);
861 spin_unlock_irqrestore(&head->lock, irq_flags); 981 spin_unlock_irqrestore(&head->lock, irq_flags);
862 first->next = NULL; 982 first->next = NULL;
863 return first; 983 return first;
@@ -876,8 +996,7 @@ xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head,
876 else 996 else
877 head->first = last; 997 head->first = last;
878 head->last = last; 998 head->last = last;
879 head->n_entries--; 999 head->n_entries++;
880 BUG_ON(head->n_entries < 0);
881 spin_unlock_irqrestore(&head->lock, irq_flags); 1000 spin_unlock_irqrestore(&head->lock, irq_flags);
882} 1001}
883 1002
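[editor's note] The two hunks above fix inverted bookkeeping: xpc_get_fifo_entry_uv() removes an entry so n_entries must go down, xpc_put_fifo_entry_uv() adds one so it must go up, and the underflow BUG_ON moves to the get side where underflow can actually occur. A tiny linked FIFO with the corrected accounting:

        #include <assert.h>
        #include <stdio.h>

        struct entry { struct entry *next; };

        struct fifo {
                struct entry *first, *last;
                int n_entries;
        };

        static struct entry *fifo_get(struct fifo *f)
        {
                struct entry *e = f->first;

                if (!e)
                        return NULL;
                f->first = e->next;
                if (!f->first)
                        f->last = NULL;
                f->n_entries--;                 /* was ++ before the fix */
                assert(f->n_entries >= 0);      /* the relocated BUG_ON */
                e->next = NULL;
                return e;
        }

        static void fifo_put(struct fifo *f, struct entry *e)
        {
                e->next = NULL;
                if (f->last)
                        f->last->next = e;
                else
                        f->first = e;
                f->last = e;
                f->n_entries++;                 /* was -- before the fix */
        }

        int main(void)
        {
                struct fifo f = { 0 };
                struct entry a, b;

                fifo_put(&f, &a);
                fifo_put(&f, &b);
                fifo_get(&f);
                printf("entries left: %d\n", f.n_entries);      /* 1 */
                return 0;
        }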
@@ -1037,6 +1156,12 @@ xpc_setup_msg_structures_uv(struct xpc_channel *ch)
1037 1156
1038 DBUG_ON(ch->flags & XPC_C_SETUP); 1157 DBUG_ON(ch->flags & XPC_C_SETUP);
1039 1158
1159 ch_uv->cached_notify_gru_mq_desc = kmalloc(sizeof(struct
1160 gru_message_queue_desc),
1161 GFP_KERNEL);
1162 if (ch_uv->cached_notify_gru_mq_desc == NULL)
1163 return xpNoMemory;
1164
1040 ret = xpc_allocate_send_msg_slot_uv(ch); 1165 ret = xpc_allocate_send_msg_slot_uv(ch);
1041 if (ret == xpSuccess) { 1166 if (ret == xpSuccess) {
1042 1167
@@ -1060,7 +1185,8 @@ xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
1060 1185
1061 DBUG_ON(!spin_is_locked(&ch->lock)); 1186 DBUG_ON(!spin_is_locked(&ch->lock));
1062 1187
1063 ch_uv->remote_notify_mq_gpa = 0; 1188 kfree(ch_uv->cached_notify_gru_mq_desc);
1189 ch_uv->cached_notify_gru_mq_desc = NULL;
1064 1190
1065 if (ch->flags & XPC_C_SETUP) { 1191 if (ch->flags & XPC_C_SETUP) {
1066 xpc_init_fifo_uv(&ch_uv->msg_slot_free_list); 1192 xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
@@ -1111,7 +1237,7 @@ xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
1111 msg.ch_number = ch->number; 1237 msg.ch_number = ch->number;
1112 msg.local_nentries = ch->local_nentries; 1238 msg.local_nentries = ch->local_nentries;
1113 msg.remote_nentries = ch->remote_nentries; 1239 msg.remote_nentries = ch->remote_nentries;
1114 msg.local_notify_mq_gpa = uv_gpa(xpc_notify_mq_uv); 1240 msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc);
1115 xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), 1241 xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
1116 XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV); 1242 XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);
1117} 1243}
@@ -1128,11 +1254,15 @@ xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)
1128 xpc_wakeup_channel_mgr(part); 1254 xpc_wakeup_channel_mgr(part);
1129} 1255}
1130 1256
1131static void 1257static enum xp_retval
1132xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch, 1258xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
1133 unsigned long msgqueue_pa) 1259 unsigned long gru_mq_desc_gpa)
1134{ 1260{
1135 ch->sn.uv.remote_notify_mq_gpa = msgqueue_pa; 1261 struct xpc_channel_uv *ch_uv = &ch->sn.uv;
1262
1263 DBUG_ON(ch_uv->cached_notify_gru_mq_desc == NULL);
1264 return xpc_cache_remote_gru_mq_desc_uv(ch_uv->cached_notify_gru_mq_desc,
1265 gru_mq_desc_gpa);
1136} 1266}
1137 1267
1138static void 1268static void
@@ -1339,7 +1469,8 @@ xpc_handle_notify_IRQ_uv(int irq, void *dev_id)
1339 short partid; 1469 short partid;
1340 struct xpc_partition *part; 1470 struct xpc_partition *part;
1341 1471
1342 while ((msg = gru_get_next_message(xpc_notify_mq_uv)) != NULL) { 1472 while ((msg = gru_get_next_message(xpc_notify_mq_uv->gru_mq_desc)) !=
1473 NULL) {
1343 1474
1344 partid = msg->hdr.partid; 1475 partid = msg->hdr.partid;
1345 if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) { 1476 if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
@@ -1354,7 +1485,7 @@ xpc_handle_notify_IRQ_uv(int irq, void *dev_id)
1354 } 1485 }
1355 } 1486 }
1356 1487
1357 gru_free_message(xpc_notify_mq_uv, msg); 1488 gru_free_message(xpc_notify_mq_uv->gru_mq_desc, msg);
1358 } 1489 }
1359 1490
1360 return IRQ_HANDLED; 1491 return IRQ_HANDLED;
@@ -1438,7 +1569,8 @@ xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload,
1438 msg->hdr.msg_slot_number = msg_slot->msg_slot_number; 1569 msg->hdr.msg_slot_number = msg_slot->msg_slot_number;
1439 memcpy(&msg->payload, payload, payload_size); 1570 memcpy(&msg->payload, payload, payload_size);
1440 1571
1441 ret = xpc_send_gru_msg(ch->sn.uv.remote_notify_mq_gpa, msg, msg_size); 1572 ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
1573 msg_size);
1442 if (ret == xpSuccess) 1574 if (ret == xpSuccess)
1443 goto out_1; 1575 goto out_1;
1444 1576
@@ -1529,7 +1661,7 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
1529 msg->hdr.partid = xp_partition_id; 1661 msg->hdr.partid = xp_partition_id;
1530 msg->hdr.size = 0; /* size of zero indicates this is an ACK */ 1662 msg->hdr.size = 0; /* size of zero indicates this is an ACK */
1531 1663
1532 ret = xpc_send_gru_msg(ch->sn.uv.remote_notify_mq_gpa, msg, 1664 ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
1533 sizeof(struct xpc_notify_mq_msghdr_uv)); 1665 sizeof(struct xpc_notify_mq_msghdr_uv));
1534 if (ret != xpSuccess) 1666 if (ret != xpSuccess)
1535 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret); 1667 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
@@ -1541,6 +1673,7 @@ int
1541xpc_init_uv(void) 1673xpc_init_uv(void)
1542{ 1674{
1543 xpc_setup_partitions_sn = xpc_setup_partitions_sn_uv; 1675 xpc_setup_partitions_sn = xpc_setup_partitions_sn_uv;
1676 xpc_teardown_partitions_sn = xpc_teardown_partitions_sn_uv;
1544 xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv; 1677 xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv;
1545 xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv; 1678 xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv;
1546 xpc_setup_rsvd_page_sn = xpc_setup_rsvd_page_sn_uv; 1679 xpc_setup_rsvd_page_sn = xpc_setup_rsvd_page_sn_uv;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f062b424704e..16899eee397e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -974,7 +974,7 @@ config ENC28J60_WRITEVERIFY
974 974
975config ETHOC 975config ETHOC
976 tristate "OpenCores 10/100 Mbps Ethernet MAC support" 976 tristate "OpenCores 10/100 Mbps Ethernet MAC support"
977 depends on NET_ETHERNET 977 depends on NET_ETHERNET && HAS_IOMEM
978 select MII 978 select MII
979 select PHYLIB 979 select PHYLIB
980 help 980 help
@@ -2547,6 +2547,23 @@ config S2IO
2547 More specific information on configuring the driver is in 2547 More specific information on configuring the driver is in
2548 <file:Documentation/networking/s2io.txt>. 2548 <file:Documentation/networking/s2io.txt>.
2549 2549
2550config VXGE
2551 tristate "Neterion X3100 Series 10GbE PCIe Server Adapter"
2552 depends on PCI && INET
2553 ---help---
2554 This driver supports Neterion Inc's X3100 Series 10 GbE PCIe
2555 I/O Virtualized Server Adapter.
2556 More specific information on configuring the driver is in
2557 <file:Documentation/networking/vxge.txt>.
2558
2559config VXGE_DEBUG_TRACE_ALL
2560 bool "Enabling All Debug trace statments in driver"
2561 default n
2562 depends on VXGE
2563 ---help---
2564 Say Y here if you want to enable all the debug trace statements in the
2565 driver. By default only a few debug trace statements are enabled.
2566
2550config MYRI10GE 2567config MYRI10GE
2551 tristate "Myricom Myri-10G Ethernet support" 2568 tristate "Myricom Myri-10G Ethernet support"
2552 depends on PCI && INET 2569 depends on PCI && INET
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 98409c9dd445..edc9a0d6171d 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -220,6 +220,7 @@ obj-$(CONFIG_R8169) += r8169.o
220obj-$(CONFIG_AMD8111_ETH) += amd8111e.o 220obj-$(CONFIG_AMD8111_ETH) += amd8111e.o
221obj-$(CONFIG_IBMVETH) += ibmveth.o 221obj-$(CONFIG_IBMVETH) += ibmveth.o
222obj-$(CONFIG_S2IO) += s2io.o 222obj-$(CONFIG_S2IO) += s2io.o
223obj-$(CONFIG_VXGE) += vxge/
223obj-$(CONFIG_MYRI10GE) += myri10ge/ 224obj-$(CONFIG_MYRI10GE) += myri10ge/
224obj-$(CONFIG_SMC91X) += smc91x.o 225obj-$(CONFIG_SMC91X) += smc91x.o
225obj-$(CONFIG_SMC911X) += smc911x.o 226obj-$(CONFIG_SMC911X) += smc911x.o
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 254ec62b5f58..d8350860c0f8 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -559,7 +559,7 @@ static void dm9000_show_carrier(board_info_t *db,
559static void 559static void
560dm9000_poll_work(struct work_struct *w) 560dm9000_poll_work(struct work_struct *w)
561{ 561{
562 struct delayed_work *dw = container_of(w, struct delayed_work, work); 562 struct delayed_work *dw = to_delayed_work(w);
563 board_info_t *db = container_of(dw, board_info_t, phy_poll); 563 board_info_t *db = container_of(dw, board_info_t, phy_poll);
564 struct net_device *ndev = db->ndev; 564 struct net_device *ndev = db->ndev;
565 565
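[editor's note] to_delayed_work() is simply container_of() over the embedded struct work_struct, and dm9000 chains a second container_of() to get from the delayed work back to its board_info_t. The double recovery in a self-contained sketch (generic stand-in types, not the kernel's):

        #include <stdio.h>
        #include <stddef.h>

        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        struct work { int pending; };
        struct delayed_work { struct work work; };

        struct board_info {
                int id;
                struct delayed_work phy_poll;
        };

        static void poll_work(struct work *w)
        {
                /* first hop: work -> delayed_work (what to_delayed_work() does) */
                struct delayed_work *dw =
                        container_of(w, struct delayed_work, work);
                /* second hop: delayed_work -> driver private data */
                struct board_info *db =
                        container_of(dw, struct board_info, phy_poll);

                printf("polling board %d\n", db->id);
        }

        int main(void)
        {
                struct board_info db = { .id = 7 };

                poll_work(&db.phy_poll.work);
                return 0;
        }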
diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c
index db1e31f95200..33fa9eee4cac 100644
--- a/drivers/net/dnet.c
+++ b/drivers/net/dnet.c
@@ -8,7 +8,6 @@
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11#include <linux/version.h>
12#include <linux/io.h> 11#include <linux/io.h>
13#include <linux/module.h> 12#include <linux/module.h>
14#include <linux/moduleparam.h> 13#include <linux/moduleparam.h>
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 049b0a7e01f3..8bbe7f617994 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -129,7 +129,8 @@ static void mpc52xx_fec_free_rx_buffers(struct net_device *dev, struct bcom_task
129 struct sk_buff *skb; 129 struct sk_buff *skb;
130 130
131 skb = bcom_retrieve_buffer(s, NULL, (struct bcom_bd **)&bd); 131 skb = bcom_retrieve_buffer(s, NULL, (struct bcom_bd **)&bd);
132 dma_unmap_single(&dev->dev, bd->skb_pa, skb->len, DMA_FROM_DEVICE); 132 dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len,
133 DMA_FROM_DEVICE);
133 kfree_skb(skb); 134 kfree_skb(skb);
134 } 135 }
135} 136}
@@ -150,7 +151,7 @@ static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task
150 bd = (struct bcom_fec_bd *)bcom_prepare_next_buffer(rxtsk); 151 bd = (struct bcom_fec_bd *)bcom_prepare_next_buffer(rxtsk);
151 152
152 bd->status = FEC_RX_BUFFER_SIZE; 153 bd->status = FEC_RX_BUFFER_SIZE;
153 bd->skb_pa = dma_map_single(&dev->dev, skb->data, 154 bd->skb_pa = dma_map_single(dev->dev.parent, skb->data,
154 FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE); 155 FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
155 156
156 bcom_submit_next_buffer(rxtsk, skb); 157 bcom_submit_next_buffer(rxtsk, skb);
@@ -270,15 +271,6 @@ static void mpc52xx_fec_phy_stop(struct net_device *dev)
270 phy_write(priv->phydev, MII_BMCR, BMCR_PDOWN); 271 phy_write(priv->phydev, MII_BMCR, BMCR_PDOWN);
271} 272}
272 273
273static int mpc52xx_fec_phy_mii_ioctl(struct mpc52xx_fec_priv *priv,
274 struct mii_ioctl_data *mii_data, int cmd)
275{
276 if (!priv->phydev)
277 return -ENOTSUPP;
278
279 return phy_mii_ioctl(priv->phydev, mii_data, cmd);
280}
281
282static void mpc52xx_fec_phy_hw_init(struct mpc52xx_fec_priv *priv) 274static void mpc52xx_fec_phy_hw_init(struct mpc52xx_fec_priv *priv)
283{ 275{
284 struct mpc52xx_fec __iomem *fec = priv->fec; 276 struct mpc52xx_fec __iomem *fec = priv->fec;
@@ -370,7 +362,7 @@ static int mpc52xx_fec_close(struct net_device *dev)
370 * invariant will hold if you make sure that the netif_*_queue() 362 * invariant will hold if you make sure that the netif_*_queue()
371 * calls are done at the proper times. 363 * calls are done at the proper times.
372 */ 364 */
373static int mpc52xx_fec_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) 365static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
374{ 366{
375 struct mpc52xx_fec_priv *priv = netdev_priv(dev); 367 struct mpc52xx_fec_priv *priv = netdev_priv(dev);
376 struct bcom_fec_bd *bd; 368 struct bcom_fec_bd *bd;
@@ -378,7 +370,7 @@ static int mpc52xx_fec_hard_start_xmit(struct sk_buff *skb, struct net_device *d
378 if (bcom_queue_full(priv->tx_dmatsk)) { 370 if (bcom_queue_full(priv->tx_dmatsk)) {
379 if (net_ratelimit()) 371 if (net_ratelimit())
380 dev_err(&dev->dev, "transmit queue overrun\n"); 372 dev_err(&dev->dev, "transmit queue overrun\n");
381 return 1; 373 return NETDEV_TX_BUSY;
382 } 374 }
383 375
384 spin_lock_irq(&priv->lock); 376 spin_lock_irq(&priv->lock);
@@ -388,7 +380,8 @@ static int mpc52xx_fec_hard_start_xmit(struct sk_buff *skb, struct net_device *d
388 bcom_prepare_next_buffer(priv->tx_dmatsk); 380 bcom_prepare_next_buffer(priv->tx_dmatsk);
389 381
390 bd->status = skb->len | BCOM_FEC_TX_BD_TFD | BCOM_FEC_TX_BD_TC; 382 bd->status = skb->len | BCOM_FEC_TX_BD_TFD | BCOM_FEC_TX_BD_TC;
391 bd->skb_pa = dma_map_single(&dev->dev, skb->data, skb->len, DMA_TO_DEVICE); 383 bd->skb_pa = dma_map_single(dev->dev.parent, skb->data, skb->len,
384 DMA_TO_DEVICE);
392 385
393 bcom_submit_next_buffer(priv->tx_dmatsk, skb); 386 bcom_submit_next_buffer(priv->tx_dmatsk, skb);
394 387
@@ -398,7 +391,7 @@ static int mpc52xx_fec_hard_start_xmit(struct sk_buff *skb, struct net_device *d
398 391
399 spin_unlock_irq(&priv->lock); 392 spin_unlock_irq(&priv->lock);
400 393
401 return 0; 394 return NETDEV_TX_OK;
402} 395}
403 396
404#ifdef CONFIG_NET_POLL_CONTROLLER 397#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -430,7 +423,8 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
430 struct bcom_fec_bd *bd; 423 struct bcom_fec_bd *bd;
431 skb = bcom_retrieve_buffer(priv->tx_dmatsk, NULL, 424 skb = bcom_retrieve_buffer(priv->tx_dmatsk, NULL,
432 (struct bcom_bd **)&bd); 425 (struct bcom_bd **)&bd);
433 dma_unmap_single(&dev->dev, bd->skb_pa, skb->len, DMA_TO_DEVICE); 426 dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len,
427 DMA_TO_DEVICE);
434 428
435 dev_kfree_skb_irq(skb); 429 dev_kfree_skb_irq(skb);
436 } 430 }
@@ -455,7 +449,8 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
455 449
456 rskb = bcom_retrieve_buffer(priv->rx_dmatsk, &status, 450 rskb = bcom_retrieve_buffer(priv->rx_dmatsk, &status,
457 (struct bcom_bd **)&bd); 451 (struct bcom_bd **)&bd);
458 dma_unmap_single(&dev->dev, bd->skb_pa, rskb->len, DMA_FROM_DEVICE); 452 dma_unmap_single(dev->dev.parent, bd->skb_pa, rskb->len,
453 DMA_FROM_DEVICE);
459 454
460 /* Test for errors in received frame */ 455 /* Test for errors in received frame */
461 if (status & BCOM_FEC_RX_BD_ERRORS) { 456 if (status & BCOM_FEC_RX_BD_ERRORS) {
@@ -464,7 +459,8 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
464 bcom_prepare_next_buffer(priv->rx_dmatsk); 459 bcom_prepare_next_buffer(priv->rx_dmatsk);
465 460
466 bd->status = FEC_RX_BUFFER_SIZE; 461 bd->status = FEC_RX_BUFFER_SIZE;
467 bd->skb_pa = dma_map_single(&dev->dev, rskb->data, 462 bd->skb_pa = dma_map_single(dev->dev.parent,
463 rskb->data,
468 FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE); 464 FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
469 465
470 bcom_submit_next_buffer(priv->rx_dmatsk, rskb); 466 bcom_submit_next_buffer(priv->rx_dmatsk, rskb);
@@ -499,7 +495,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
499 bcom_prepare_next_buffer(priv->rx_dmatsk); 495 bcom_prepare_next_buffer(priv->rx_dmatsk);
500 496
501 bd->status = FEC_RX_BUFFER_SIZE; 497 bd->status = FEC_RX_BUFFER_SIZE;
502 bd->skb_pa = dma_map_single(&dev->dev, skb->data, 498 bd->skb_pa = dma_map_single(dev->dev.parent, skb->data,
503 FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE); 499 FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
504 500
505 bcom_submit_next_buffer(priv->rx_dmatsk, skb); 501 bcom_submit_next_buffer(priv->rx_dmatsk, skb);
@@ -847,12 +843,20 @@ static void mpc52xx_fec_get_drvinfo(struct net_device *dev,
847static int mpc52xx_fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 843static int mpc52xx_fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
848{ 844{
849 struct mpc52xx_fec_priv *priv = netdev_priv(dev); 845 struct mpc52xx_fec_priv *priv = netdev_priv(dev);
846
847 if (!priv->phydev)
848 return -ENODEV;
849
850 return phy_ethtool_gset(priv->phydev, cmd); 850 return phy_ethtool_gset(priv->phydev, cmd);
851} 851}
852 852
853static int mpc52xx_fec_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 853static int mpc52xx_fec_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
854{ 854{
855 struct mpc52xx_fec_priv *priv = netdev_priv(dev); 855 struct mpc52xx_fec_priv *priv = netdev_priv(dev);
856
857 if (!priv->phydev)
858 return -ENODEV;
859
856 return phy_ethtool_sset(priv->phydev, cmd); 860 return phy_ethtool_sset(priv->phydev, cmd);
857} 861}
858 862
@@ -882,9 +886,28 @@ static int mpc52xx_fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
882{ 886{
883 struct mpc52xx_fec_priv *priv = netdev_priv(dev); 887 struct mpc52xx_fec_priv *priv = netdev_priv(dev);
884 888
885 return mpc52xx_fec_phy_mii_ioctl(priv, if_mii(rq), cmd); 889 if (!priv->phydev)
890 return -ENOTSUPP;
891
892 return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
886} 893}
887 894
895static const struct net_device_ops mpc52xx_fec_netdev_ops = {
896 .ndo_open = mpc52xx_fec_open,
897 .ndo_stop = mpc52xx_fec_close,
898 .ndo_start_xmit = mpc52xx_fec_start_xmit,
899 .ndo_set_multicast_list = mpc52xx_fec_set_multicast_list,
900 .ndo_set_mac_address = mpc52xx_fec_set_mac_address,
901 .ndo_validate_addr = eth_validate_addr,
902 .ndo_do_ioctl = mpc52xx_fec_ioctl,
903 .ndo_change_mtu = eth_change_mtu,
904 .ndo_tx_timeout = mpc52xx_fec_tx_timeout,
905 .ndo_get_stats = mpc52xx_fec_get_stats,
906#ifdef CONFIG_NET_POLL_CONTROLLER
907 .ndo_poll_controller = mpc52xx_fec_poll_controller,
908#endif
909};
910
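[editor's note] The net_device_ops conversion replaces many writable function pointers embedded in every struct net_device with one pointer to a shared const ops table, which lets the table live in read-only data and be shared across instances. The refactor in miniature, with hypothetical names:

        #include <stdio.h>

        struct device;

        struct device_ops {             /* one const table shared by all units */
                int  (*open)(struct device *);
                void (*stop)(struct device *);
        };

        struct device {
                const struct device_ops *ops;   /* one pointer instead of many */
        };

        static int  my_open(struct device *d) { puts("open"); return 0; }
        static void my_stop(struct device *d) { puts("stop"); }

        static const struct device_ops my_ops = {
                .open = my_open,
                .stop = my_stop,
        };

        int main(void)
        {
                struct device dev = { .ops = &my_ops };

                if (dev.ops->open(&dev) == 0)
                        dev.ops->stop(&dev);
                return 0;
        }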
888/* ======================================================================== */ 911/* ======================================================================== */
889/* OF Driver */ 912/* OF Driver */
890/* ======================================================================== */ 913/* ======================================================================== */
@@ -929,22 +952,10 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
929 return -EBUSY; 952 return -EBUSY;
930 953
931 /* Init ether ndev with what we have */ 954 /* Init ether ndev with what we have */
932 ndev->open = mpc52xx_fec_open; 955 ndev->netdev_ops = &mpc52xx_fec_netdev_ops;
933 ndev->stop = mpc52xx_fec_close;
934 ndev->hard_start_xmit = mpc52xx_fec_hard_start_xmit;
935 ndev->do_ioctl = mpc52xx_fec_ioctl;
936 ndev->ethtool_ops = &mpc52xx_fec_ethtool_ops; 956 ndev->ethtool_ops = &mpc52xx_fec_ethtool_ops;
937 ndev->get_stats = mpc52xx_fec_get_stats;
938 ndev->set_mac_address = mpc52xx_fec_set_mac_address;
939 ndev->set_multicast_list = mpc52xx_fec_set_multicast_list;
940 ndev->tx_timeout = mpc52xx_fec_tx_timeout;
941 ndev->watchdog_timeo = FEC_WATCHDOG_TIMEOUT; 957 ndev->watchdog_timeo = FEC_WATCHDOG_TIMEOUT;
942 ndev->base_addr = mem.start; 958 ndev->base_addr = mem.start;
943#ifdef CONFIG_NET_POLL_CONTROLLER
944 ndev->poll_controller = mpc52xx_fec_poll_controller;
945#endif
946
947 priv->t_irq = priv->r_irq = ndev->irq = NO_IRQ; /* IRQ are free for now */
948 959
949 spin_lock_init(&priv->lock); 960 spin_lock_init(&priv->lock);
950 961
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index b3079a5a7f2b..aa1eb88c21fc 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -204,6 +204,7 @@ void fsl_pq_mdio_bus_name(char *name, struct device_node *np)
204 snprintf(name, MII_BUS_ID_SIZE, "%s@%llx", np->name, 204 snprintf(name, MII_BUS_ID_SIZE, "%s@%llx", np->name,
205 (unsigned long long)taddr); 205 (unsigned long long)taddr);
206} 206}
207EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name);
207 208
208/* Scan the bus in reverse, looking for an empty spot */ 209/* Scan the bus in reverse, looking for an empty spot */
209static int fsl_pq_mdio_find_free(struct mii_bus *new_bus) 210static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
@@ -387,7 +388,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
387 * The TBIPHY-only buses will find PHYs at every address, 388 * The TBIPHY-only buses will find PHYs at every address,
388 * so we mask them all but the TBI 389 * so we mask them all but the TBI
389 */ 390 */
390 if (!of_device_is_compatible(np, "fsl,gianfar-mdio")) 391 if (of_device_is_compatible(np, "fsl,gianfar-tbi"))
391 new_bus->phy_mask = ~(1 << tbiaddr); 392 new_bus->phy_mask = ~(1 << tbiaddr);
392 393
393 err = mdiobus_register(new_bus); 394 err = mdiobus_register(new_bus);
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index dd499d7cde26..0642d52aef5c 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -45,7 +45,6 @@
45#include <linux/crc32.h> 45#include <linux/crc32.h>
46#include <linux/workqueue.h> 46#include <linux/workqueue.h>
47#include <linux/ethtool.h> 47#include <linux/ethtool.h>
48#include <linux/fsl_devices.h>
49 48
50/* The maximum number of packets to be handled in one call of gfar_poll */ 49/* The maximum number of packets to be handled in one call of gfar_poll */
51#define GFAR_DEV_WEIGHT 64 50#define GFAR_DEV_WEIGHT 64
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 500a40b2afe7..b06691937ce9 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -55,6 +55,8 @@
55#include <asm/system.h> 55#include <asm/system.h>
56#include <linux/interrupt.h> 56#include <linux/interrupt.h>
57#include <linux/ioport.h> 57#include <linux/ioport.h>
58#include <linux/firmware.h>
59#include <linux/platform_device.h>
58 60
59#include <linux/netdevice.h> 61#include <linux/netdevice.h>
60#include <linux/if_arp.h> 62#include <linux/if_arp.h>
@@ -71,8 +73,6 @@
71#include <linux/init.h> 73#include <linux/init.h>
72 74
73#include <linux/yam.h> 75#include <linux/yam.h>
74#include "yam9600.h"
75#include "yam1200.h"
76 76
77/* --------------------------------------------------------------------- */ 77/* --------------------------------------------------------------------- */
78 78
@@ -82,6 +82,9 @@ static const char yam_drvinfo[] __initdata = KERN_INFO \
82 82
83/* --------------------------------------------------------------------- */ 83/* --------------------------------------------------------------------- */
84 84
85#define FIRMWARE_9600 "yam/9600.bin"
86#define FIRMWARE_1200 "yam/1200.bin"
87
85#define YAM_9600 1 88#define YAM_9600 1
86#define YAM_1200 2 89#define YAM_1200 2
87 90
@@ -342,9 +345,51 @@ static int fpga_write(int iobase, unsigned char wrd)
342 return 0; 345 return 0;
343} 346}
344 347
345static unsigned char *add_mcs(unsigned char *bits, int bitrate) 348/*
349 * predef should be 0 for loading user defined mcs
350 * predef should be YAM_1200 for loading predef 1200 mcs
351 * predef should be YAM_9600 for loading predef 9600 mcs
352 */
353static unsigned char *add_mcs(unsigned char *bits, int bitrate,
354 unsigned int predef)
346{ 355{
356 const char *fw_name[2] = {FIRMWARE_9600, FIRMWARE_1200};
357 const struct firmware *fw;
358 struct platform_device *pdev;
347 struct yam_mcs *p; 359 struct yam_mcs *p;
360 int err;
361
362 switch (predef) {
363 case 0:
364 fw = NULL;
365 break;
366 case YAM_1200:
367 case YAM_9600:
368 predef--;
369 pdev = platform_device_register_simple("yam", 0, NULL, 0);
370 if (IS_ERR(pdev)) {
371 printk(KERN_ERR "yam: Failed to register firmware\n");
372 return NULL;
373 }
374 err = request_firmware(&fw, fw_name[predef], &pdev->dev);
375 platform_device_unregister(pdev);
376 if (err) {
377 printk(KERN_ERR "Failed to load firmware \"%s\"\n",
378 fw_name[predef]);
379 return NULL;
380 }
381 if (fw->size != YAM_FPGA_SIZE) {
382 printk(KERN_ERR "Bogus length %zu in firmware \"%s\"\n",
383 fw->size, fw_name[predef]);
384 release_firmware(fw);
385 return NULL;
386 }
387 bits = (unsigned char *)fw->data;
388 break;
389 default:
390 printk(KERN_ERR "yam: Invalid predef number %u\n", predef);
391 return NULL;
392 }
348 393
349 /* If it already exists, replace the bit data */ 394 /* If it already exists, replace the bit data */
350 p = yam_data; 395 p = yam_data;
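[editor's note] add_mcs() now obtains the FPGA bitstreams through request_firmware() against a temporary platform device, rejects any blob whose size is not YAM_FPGA_SIZE, and releases the firmware on every exit path. A userspace analog of that load, validate, release discipline (plain stdio standing in for the firmware API; the size constant is a placeholder):

        #include <stdio.h>
        #include <stdlib.h>

        #define FPGA_SIZE 2048          /* stand-in for YAM_FPGA_SIZE */

        static unsigned char *load_bitstream(const char *path)
        {
                unsigned char *buf = NULL;
                long size;
                FILE *f = fopen(path, "rb");

                if (!f) {
                        fprintf(stderr, "failed to load \"%s\"\n", path);
                        return NULL;
                }
                fseek(f, 0, SEEK_END);
                size = ftell(f);
                if (size != FPGA_SIZE) {        /* same check as fw->size above */
                        fprintf(stderr, "bogus length %ld in \"%s\"\n",
                                size, path);
                        goto out;               /* release on error paths too */
                }
                buf = malloc(size);
                if (buf) {
                        rewind(f);
                        if (fread(buf, 1, size, f) != (size_t)size) {
                                free(buf);
                                buf = NULL;
                        }
                }
        out:
                fclose(f);                      /* mirrors release_firmware() */
                return buf;
        }

        int main(int argc, char **argv)
        {
                unsigned char *bits =
                        load_bitstream(argc > 1 ? argv[1] : "fw.bin");

                free(bits);
                return bits ? 0 : 1;
        }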
@@ -359,6 +404,7 @@ static unsigned char *add_mcs(unsigned char *bits, int bitrate)
359 /* Allocate a new mcs */ 404 /* Allocate a new mcs */
360 if ((p = kmalloc(sizeof(struct yam_mcs), GFP_KERNEL)) == NULL) { 405 if ((p = kmalloc(sizeof(struct yam_mcs), GFP_KERNEL)) == NULL) {
361 printk(KERN_WARNING "YAM: no memory to allocate mcs\n"); 406 printk(KERN_WARNING "YAM: no memory to allocate mcs\n");
407 release_firmware(fw);
362 return NULL; 408 return NULL;
363 } 409 }
364 memcpy(p->bits, bits, YAM_FPGA_SIZE); 410 memcpy(p->bits, bits, YAM_FPGA_SIZE);
@@ -366,6 +412,7 @@ static unsigned char *add_mcs(unsigned char *bits, int bitrate)
366 p->next = yam_data; 412 p->next = yam_data;
367 yam_data = p; 413 yam_data = p;
368 414
415 release_firmware(fw);
369 return p->bits; 416 return p->bits;
370} 417}
371 418
@@ -383,9 +430,11 @@ static unsigned char *get_mcs(int bitrate)
383 /* Load predefined mcs data */ 430 /* Load predefined mcs data */
384 switch (bitrate) { 431 switch (bitrate) {
385 case 1200: 432 case 1200:
386 return add_mcs(bits_1200, bitrate); 433 /* setting predef as YAM_1200 for loading predef 1200 mcs */
434 return add_mcs(NULL, bitrate, YAM_1200);
387 default: 435 default:
388 return add_mcs(bits_9600, bitrate); 436 /* setting predef as YAM_9600 for loading predef 9600 mcs */
437 return add_mcs(NULL, bitrate, YAM_9600);
389 } 438 }
390} 439}
391 440
@@ -936,7 +985,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
936 kfree(ym); 985 kfree(ym);
937 return -EINVAL; 986 return -EINVAL;
938 } 987 }
939 add_mcs(ym->bits, ym->bitrate); 988 /* setting predef as 0 for loading userdefined mcs data */
989 add_mcs(ym->bits, ym->bitrate, 0);
940 kfree(ym); 990 kfree(ym);
941 break; 991 break;
942 992
@@ -1159,6 +1209,8 @@ static void __exit yam_cleanup_driver(void)
1159MODULE_AUTHOR("Frederic Rible F1OAT frible@teaser.fr"); 1209MODULE_AUTHOR("Frederic Rible F1OAT frible@teaser.fr");
1160MODULE_DESCRIPTION("Yam amateur radio modem driver"); 1210MODULE_DESCRIPTION("Yam amateur radio modem driver");
1161MODULE_LICENSE("GPL"); 1211MODULE_LICENSE("GPL");
1212MODULE_FIRMWARE(FIRMWARE_1200);
1213MODULE_FIRMWARE(FIRMWARE_9600);
1162 1214
1163module_init(yam_init_driver); 1215module_init(yam_init_driver);
1164module_exit(yam_cleanup_driver); 1216module_exit(yam_cleanup_driver);
diff --git a/drivers/net/hamradio/yam1200.h b/drivers/net/hamradio/yam1200.h
deleted file mode 100644
index 53ca8a3903a7..000000000000
--- a/drivers/net/hamradio/yam1200.h
+++ /dev/null
@@ -1,343 +0,0 @@
1/*
2 *
3 * File yam1k2b5.mcs converted to h format by mcs2h
4 *
5 * (C) F6FBB 1998
6 *
7 * Tue Aug 25 20:24:08 1998
8 *
9 */
10
11static unsigned char bits_1200[]= {
[deleted file body: raw FPGA bitstream bytes for bits_1200[], omitted]
1740xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
1750xff,0xff,0xff,0xff,0xf1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
1760xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0x2f,0xff,0xff,0xff,0xfe,0x7f,
1770xff,0xff,0xff,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xdf,0xff,0xff,0xff,
1780xf7,0xfb,0xff,0xf1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
1790xff,0xff,0x7f,0xff,0xff,0xff,0x7f,0xff,0xf0,0x1f,0xff,0xff,0xff,0xfe,0xdd,0xff,
1800xff,0xff,0xa5,0xff,0x6f,0x6b,0xe9,0x6f,0xda,0xca,0xfb,0xdd,0xee,0xf7,0xf6,0xb2,
1810xb3,0xa4,0xa1,0x5b,0x5b,0xf6,0xd7,0xf4,0xf7,0x7b,0xbd,0xbd,0xad,0xcf,0xef,0x7f,
1820x6b,0x7f,0x3b,0xdf,0xdb,0xff,0xff,0x30,0xcf,0xff,0xff,0xff,0xfe,0xbf,0xff,0xff,
1830xff,0xff,0xff,0xf6,0xfe,0x96,0xff,0xfd,0xb5,0xfd,0xbf,0xad,0x7f,0xff,0x6f,0xff,
1840xde,0xd1,0xad,0xad,0xe9,0xff,0xf1,0xec,0xef,0xde,0x3f,0xcb,0xff,0xf6,0xff,0x32,
1850xff,0xc5,0xbd,0xff,0xff,0xff,0xd0,0xbf,0xff,0xff,0xff,0xfe,0xfe,0xfb,0xff,0xf4,
1860x28,0xbf,0xff,0xfd,0xfb,0xd3,0xff,0xff,0x42,0xff,0xff,0xff,0xea,0xb3,0xfc,0xc3,
1870xc1,0xff,0x33,0xff,0xc0,0x15,0x6b,0x70,0xff,0xf0,0xf2,0x4f,0xff,0xfc,0x3e,0x97,
1880x3c,0xff,0xff,0xfd,0xef,0xf0,0xbf,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xfe,0x78,
1890xbf,0xff,0xfd,0xf3,0xef,0x55,0xff,0x7e,0xff,0xff,0xff,0xea,0xb3,0xfc,0xc3,0xc1,
1900xff,0x33,0xff,0xc0,0x15,0x6f,0xff,0x0f,0xf0,0xf0,0x0f,0xff,0xfc,0x3d,0x6b,0xc3,
1910xff,0xff,0xfe,0xf7,0xf0,0xcf,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xfc,0xff,
1920xff,0x23,0xf8,0x7f,0xff,0x4e,0xff,0xff,0xff,0xfb,0xf9,0x17,0xff,0xf6,0xf1,0xff,
1930xcf,0xef,0xff,0xff,0x13,0xdf,0xe6,0x2f,0xc7,0xff,0xff,0xe7,0xc1,0xfd,0xff,0xfe,
1940xff,0xff,0xff,0xf0,0x4f,0xff,0xff,0xff,0xfe,0xf5,0xff,0xff,0xff,0xfe,0xae,0xff,
1950xff,0x7f,0x3b,0x3f,0xfc,0x7f,0xfc,0xef,0xff,0xfc,0xe2,0x7b,0xff,0xf1,0xfd,0xed,
1960xef,0xff,0xff,0x35,0x73,0xff,0xff,0xfe,0xfa,0xff,0xff,0xff,0xfe,0xbf,0xff,0xff,
1970xff,0xfa,0xf0,0x8f,0xff,0xff,0xff,0xfe,0xf1,0xc0,0x00,0x00,0x00,0x00,0x00,0x00,
1980x00,0x00,0x00,0x80,0x00,0x00,0x40,0x00,0x00,0x00,0x0c,0x04,0x01,0x40,0x40,0x00,
1990x00,0x30,0x28,0x04,0x00,0x08,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0x00,0x00,0x00,
2000x38,0xf0,0x0f,0xff,0xff,0xff,0xfe,0xfd,0xbf,0xff,0xff,0xff,0xff,0xfb,0xff,0x7f,
2010xff,0xff,0x9f,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf1,0xff,0xdf,0xdf,0xff,
2020xff,0xff,0xff,0xed,0xff,0xfd,0xff,0xff,0xff,0xff,0xff,0xbf,0xbf,0xff,0xff,0xc3,
2030xf0,0x3f,0xff,0xff,0xff,0xfe,0xf0,0xbf,0xfd,0xff,0xbf,0xff,0xff,0xfd,0xff,0xff,
2040xff,0xff,0xff,0xfd,0x7b,0xff,0x7f,0xff,0xbd,0xff,0xf1,0xef,0xff,0xff,0xfd,0xdf,
2050xfd,0xfb,0xff,0xff,0xbf,0xbe,0xff,0xcd,0x7f,0xfc,0xf7,0xf7,0x6f,0xbf,0xd8,0xf0,
2060xef,0xff,0xff,0xff,0xfe,0xf8,0x30,0x00,0x00,0x00,0x04,0x00,0x00,0xa0,0x00,0x00,
2070xc0,0x00,0x00,0x20,0x34,0x00,0x00,0x00,0x0c,0x81,0x00,0x20,0xa4,0x20,0x00,0x10,
2080x08,0x04,0x48,0x08,0x00,0x40,0x93,0x00,0x10,0x00,0x38,0x18,0x20,0xc1,0xf0,0x3f,
2090xff,0xff,0xff,0xfe,0xff,0xfb,0xff,0xff,0xb9,0xdf,0xfe,0xb3,0xff,0xff,0xe7,0xfd,
2100xff,0xff,0x3b,0xff,0x7f,0xff,0xbf,0xff,0xc1,0xff,0xfc,0xff,0xff,0x3f,0x77,0xfe,
2110xfe,0xcf,0xff,0xbf,0xfd,0xbf,0xff,0xfe,0xed,0xf2,0xfd,0xf7,0xff,0xf0,0x2f,0xff,
2120xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
2130xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
2140xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0xbf,0xff,0xff,
2150xff,0xfe,0xff,0xff,0xff,0xf3,0xad,0xcf,0xef,0x70,0xc9,0x73,0x3b,0xdf,0x5b,0x4a,
2160xf6,0xb7,0xfe,0xd7,0xf5,0xbc,0xc1,0x33,0xca,0xd6,0xb7,0x6e,0xf7,0xfb,0xbd,0xc5,
2170x24,0xcf,0x6f,0x2f,0x4d,0x2b,0xba,0x5a,0xff,0xff,0xff,0xf0,0xaf,0xff,0xff,0xff,
2180xfe,0xbf,0xff,0xff,0xff,0xff,0xf6,0xf6,0xd7,0xff,0xff,0xad,0xbd,0xff,0xff,0xff,
2190xef,0xf7,0x7f,0xfc,0x5b,0xb1,0xfd,0xbd,0x75,0x6f,0xef,0x6a,0xfd,0x5b,0xfb,0xdb,
2200x3a,0xbf,0x8e,0x9f,0xff,0xbf,0xfd,0xff,0x6f,0xff,0xd0,0x6f,0xff,0xff,0xff,0xfe,
2210xff,0xbb,0xff,0xf0,0x3f,0xff,0xff,0xfd,0xfb,0x7f,0xde,0xff,0xff,0x5a,0xd6,0xbf,
2220xd8,0x2a,0xbf,0xbf,0xf1,0xe5,0xff,0xcc,0xc0,0xa9,0x70,0xff,0xf3,0x3c,0x3c,0xfd,
2230x57,0xfd,0x98,0x03,0x00,0xc3,0xff,0xff,0xff,0xf0,0xaf,0xff,0xff,0xff,0xfe,0xff,
2240xff,0xff,0xff,0x3d,0xbf,0xff,0xfd,0xfb,0xff,0xdb,0xff,0xff,0x0f,0xfc,0x3f,0xd8,
2250x2a,0xbf,0xbf,0xf1,0xef,0xff,0xcc,0xc0,0x96,0xbe,0xff,0xf3,0x3f,0xff,0xfd,0x57,
2260xfd,0x99,0x0f,0xff,0xc3,0xff,0xff,0xff,0xf0,0x4f,0xff,0xff,0xff,0xfe,0xff,0xff,
2270xff,0xf1,0xe7,0xff,0xff,0xf3,0x8e,0x7b,0xff,0xa8,0xff,0xdf,0x7f,0x8e,0x78,0x73,
2280xff,0xf1,0x51,0x62,0xff,0xfc,0x4b,0xff,0xf3,0xff,0x7e,0xcf,0xf9,0xff,0xfd,0xff,
2290xff,0x7f,0xff,0xe0,0xff,0xff,0xff,0xf0,0x4f,0xff,0xff,0xff,0xfe,0xf5,0xff,0xff,
2300xfb,0xfd,0xae,0xff,0xfc,0xfe,0x6f,0x3f,0xf8,0xfd,0x77,0xaf,0xfe,0x37,0xfe,0x7b,
2310xff,0xb1,0x8c,0xff,0xef,0xfd,0xf8,0xe7,0xbf,0xff,0xf1,0xfe,0x3e,0xf7,0xfe,0x95,
2320x3e,0xbf,0xff,0xff,0xff,0xfa,0xf0,0xbf,0xff,0xff,0xff,0xfe,0xf1,0xc0,0x00,0x00,
2330x01,0x04,0x00,0x00,0x00,0x00,0x80,0x02,0x00,0x00,0x10,0x00,0x10,0x00,0x10,0x08,
2340x41,0x80,0x10,0x00,0x00,0x08,0x10,0x84,0x00,0x0c,0x04,0x02,0x61,0x00,0x00,0x81,
2350x00,0x00,0x00,0x00,0x3d,0xf0,0x7f,0xff,0xff,0xff,0xfe,0xfd,0xbf,0xff,0xff,0xff,
2360xff,0xff,0x7f,0xff,0xfe,0xfd,0xbf,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf1,
2370x7f,0xbf,0xf7,0x7f,0xef,0xff,0xef,0xff,0xf7,0xfd,0xff,0xff,0xfd,0x7f,0xff,0xbe,
2380xdf,0xff,0xff,0xd9,0xf0,0xbf,0xff,0xff,0xff,0xfe,0xf0,0xbb,0xff,0x7f,0xfb,0xff,
2390xfb,0xff,0xbf,0xff,0xf3,0x7f,0xfb,0xfd,0xeb,0x7f,0xdf,0xfa,0xff,0xde,0xf0,0xed,
2400xff,0xb1,0xf7,0xf9,0x1f,0xb5,0x5b,0xfe,0x7e,0xf7,0xbe,0xfd,0x7f,0x5f,0xb5,0xf7,
2410xff,0xff,0xd0,0xf0,0x4f,0xff,0xff,0xff,0xfe,0xf8,0x30,0x01,0x00,0x07,0x42,0x01,
2420x00,0x6a,0x18,0x50,0x80,0x00,0x00,0x02,0x40,0x01,0x01,0x20,0x01,0x01,0x24,0x14,
2430x21,0x10,0x02,0x08,0x07,0x08,0x00,0x40,0x10,0x80,0x58,0x00,0x84,0x80,0x18,0x10,
2440x40,0xc1,0xf0,0xbf,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xf7,0xff,0xdb,0xb7,0xf3,
2450xdf,0x7c,0xf8,0x74,0xff,0xff,0x6f,0x7d,0x3f,0x7e,0xec,0x7f,0xc1,0xf5,0xff,0xcf,
2460x6f,0x9f,0xf9,0xdf,0xbe,0xe5,0xe7,0xff,0xd7,0xf3,0xdd,0xfb,0xff,0xfc,0xff,0xbf,
2470xff,0xf0,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
2480xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf1,0xff,0xff,0xff,0xff,
2490xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
2500xf0,0x2f,0xff,0xff,0xff,0xfe,0xd7,0xff,0xff,0xff,0xb4,0xcf,0xef,0x77,0x6f,0x73,
2510x3a,0x4a,0x3a,0xcb,0xd4,0xf7,0x2e,0xd6,0xbd,0xbd,0xa1,0x3b,0xdf,0xd6,0xf7,0xee,
2520xd3,0x35,0xbd,0xfb,0xbd,0xce,0xeb,0x2b,0x4d,0x2f,0xbb,0xda,0xff,0xff,0xfe,0xb0,
2530x5f,0xff,0xff,0xff,0xfe,0xbf,0xff,0xff,0xff,0xdf,0x5f,0x36,0xaf,0x3f,0xed,0xb7,
2540xf5,0xfd,0xf3,0x2b,0xef,0x77,0xff,0xfb,0xda,0xb1,0xbd,0xa3,0x77,0x69,0x7f,0x4f,
2550xff,0xdb,0xfa,0x5b,0xff,0xf2,0xfe,0xff,0x96,0xff,0xff,0xfe,0xdf,0xff,0xd0,0xaf,
2560xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xfd,0x8f,0xfd,0x40,0x6f,0x9e,0x83,0x5a,0x0f,
2570xfa,0xc3,0xff,0xff,0xfc,0xe9,0x7f,0xf3,0x01,0xd0,0x00,0xfe,0xbf,0xcd,0x3f,0xf0,
2580xef,0xfc,0xc5,0x0c,0x3f,0xfd,0x68,0x0b,0xff,0xff,0xff,0xfe,0xdf,0xf0,0xff,0xff,
2590xff,0xff,0xfe,0xff,0xbb,0xff,0xfd,0x85,0xff,0xd4,0x6f,0x9f,0xc3,0x5a,0x0f,0xff,
2600xff,0xff,0xff,0xfc,0xe9,0x7f,0xf3,0x01,0xf0,0xfb,0xc2,0xbf,0xfc,0x00,0x37,0xef,
2610xfc,0xcd,0xbc,0x3f,0xff,0x0c,0xbf,0xff,0xff,0xff,0xff,0xff,0xf0,0x5f,0xff,0xff,
2620xff,0xfe,0xff,0xff,0xff,0xff,0xd9,0xf7,0xd1,0xb7,0x7e,0x7f,0xf1,0xe4,0xfd,0xff,
2630xfb,0xfb,0xff,0x5f,0xff,0x7f,0xb1,0xbc,0x0f,0x67,0xeb,0xb8,0x3f,0xff,0xe2,0xff,
2640xe9,0xff,0xfd,0xe3,0xff,0x3f,0x9f,0xc2,0xff,0xff,0xff,0xf0,0x9f,0xff,0xff,0xff,
2650xfe,0xf5,0x7f,0xff,0xf0,0x3f,0xbc,0xff,0xd5,0xf5,0xce,0x3f,0xfe,0xff,0xfe,0x6d,
2660xff,0xf1,0xbf,0x7b,0xff,0xf1,0xfd,0xff,0x4f,0xff,0x87,0xff,0xae,0xff,0xb1,0xf8,
2670xfe,0xff,0xff,0x78,0x01,0xb9,0xff,0xff,0xff,0xfa,0xf0,0x2f,0xff,0xff,0xff,0xfe,
2680xf3,0xc0,0x00,0x00,0x00,0x04,0x02,0x13,0x02,0x00,0x80,0x40,0x00,0x90,0x10,0x00,
2690x10,0x00,0x02,0x00,0x01,0x20,0x80,0x12,0x10,0x00,0x40,0x08,0x00,0x04,0x00,0x00,
2700x02,0x00,0x01,0x40,0x00,0x80,0x00,0x00,0x3c,0xf0,0xef,0xff,0xff,0xff,0xfe,0xfd,
2710x1f,0xff,0xff,0xff,0x7f,0xff,0xff,0xff,0xff,0x7f,0xff,0x7f,0xf7,0xdf,0xf7,0xff,
2720xf7,0xfb,0xeb,0xd1,0xff,0xff,0xff,0xff,0xef,0xf7,0xff,0xff,0xfb,0xff,0xfe,0xff,
2730xff,0x7e,0xff,0xfb,0xff,0xff,0xff,0xdb,0xf0,0xff,0xff,0xff,0xff,0xfe,0xf0,0xff,
2740xff,0xb7,0xeb,0xf7,0xdf,0xff,0xfe,0xf5,0x6b,0xe7,0xed,0xf7,0x3e,0xec,0xff,0x54,
2750xef,0x6f,0xf1,0xf5,0xaf,0x6f,0xf6,0xfd,0xff,0xdd,0x7b,0xff,0xef,0xbf,0x7f,0xff,
2760xff,0xf7,0xff,0xf3,0x5f,0xf7,0xd0,0xf0,0xcf,0xff,0xff,0xff,0xfe,0xf8,0x30,0x00,
2770x80,0x40,0x04,0x00,0x81,0x2c,0x04,0x24,0x00,0x02,0x01,0xc8,0x02,0x00,0x02,0x24,
2780x00,0x01,0xb4,0x42,0xdc,0x44,0x02,0x15,0x90,0x02,0x03,0x48,0x39,0x10,0x02,0x24,
2790xa0,0xba,0x00,0x00,0x40,0xc1,0xf0,0xbf,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,
2800xfe,0xfc,0xf7,0xf0,0xee,0xb6,0x5d,0xfd,0xf5,0xff,0xdb,0xf7,0x7f,0x7f,0xbe,0xff,
2810xc1,0xfe,0xbf,0xfa,0xfa,0x5f,0xff,0xad,0xff,0xef,0xff,0x7f,0xdf,0x7f,0xfe,0xbf,
2820xb7,0x94,0xbf,0xff,0xff,0xf0,0x9f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,
2830xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf1,
2840xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
2850xff,0xff,0xff,0xff,0xf0,0x8f,0xff,0xff,0xff,0xfe,0xd7,0xff,0xff,0xfb,0xb5,0xff,
2860xef,0x7c,0xeb,0x2b,0x52,0x5b,0x3b,0xda,0xd4,0xf3,0x36,0x96,0xb5,0xbd,0xf1,0xfb,
2870xda,0xee,0xf6,0xfe,0xd3,0x35,0xbd,0xdf,0xad,0xcf,0xef,0x7e,0xcd,0x6b,0xbb,0xdf,
2880xff,0xff,0xfd,0xb0,0xef,0xff,0xff,0xff,0xfe,0xbf,0xff,0xff,0xff,0xd3,0x5f,0xf6,
2890xff,0xf6,0xff,0xfd,0xad,0xfd,0xff,0x7f,0xef,0xff,0x6f,0x7f,0xdb,0xf1,0xa5,0xa3,
2900x7f,0x6f,0x6b,0x4f,0xff,0xdb,0xfb,0xcb,0xff,0xf6,0xff,0xf4,0xd7,0xfd,0xbf,0xfe,
2910xdf,0xff,0xd0,0xcf,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xf7,0xdf,0xff,0xff,0xff,
2920x3f,0x7f,0xfc,0xe5,0xff,0x20,0xfe,0xff,0xff,0xdf,0x7f,0xff,0xf1,0x7f,0xff,0xfe,
2930xff,0xf0,0x7c,0x3d,0x4f,0xf3,0xc3,0x3f,0xff,0xff,0x6f,0xc3,0xff,0x0f,0xff,0xff,
2940xaf,0xf0,0x2f,0xff,0xff,0xff,0xfe,0xff,0xff,0xfb,0xb7,0xe0,0x0f,0xff,0xff,0x2b,
2950xff,0x7d,0xbf,0xff,0xdf,0xff,0xff,0xf8,0x9f,0x7f,0xff,0xf1,0x55,0xff,0xff,0xff,
2960xfd,0x7c,0x3c,0xff,0xf3,0xc3,0x3f,0xff,0xff,0xef,0xc3,0xff,0xdf,0xff,0xff,0xff,
2970xf0,0x9f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xef,0xff,0xff,0x9f,0xbf,0x7f,
2980xf9,0x19,0x47,0x8e,0xe7,0x9f,0x3f,0x17,0xff,0xfc,0x81,0xc1,0x7e,0xf3,0xd9,0xf9,
2990x73,0xdf,0xf4,0x7f,0xfa,0xff,0xff,0xff,0xfb,0x7f,0x77,0xc7,0xff,0xff,0xff,0xf0,
3000x2f,0xff,0xff,0xff,0xfe,0xf5,0xf7,0xff,0xfb,0xff,0xf7,0x3f,0xfc,0xbf,0x3e,0x3f,
3010xec,0xff,0x81,0xaf,0xfe,0x4f,0xf3,0xbb,0xff,0xf0,0x7e,0xff,0x6f,0xff,0x87,0xff,
3020xbb,0xff,0xd5,0xfc,0xff,0x7f,0xfc,0x6f,0xff,0xef,0xe7,0xff,0xff,0xfa,0xf0,0x3f,
3030xff,0xff,0xff,0xfe,0xf3,0xc0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,
3040x00,0x30,0x10,0x60,0x20,0x00,0x08,0x00,0x01,0x20,0x80,0x00,0x10,0x00,0x04,0x00,
3050x00,0x00,0x00,0x00,0x00,0x02,0x00,0x80,0x40,0x00,0x08,0x20,0x3c,0xf0,0x6f,0xff,
3060xff,0xff,0xfe,0xf5,0xbf,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0x7f,0xfe,0x3f,0xff,
3070xff,0xff,0xff,0xff,0xef,0xff,0xff,0xf1,0xdf,0xdf,0xff,0xff,0xff,0x7f,0xdf,0xff,
3080xfd,0xbd,0xff,0xff,0xff,0xfb,0xdf,0xff,0xff,0xff,0xff,0x5b,0xf0,0xff,0xff,0xff,
3090xff,0xfe,0xf0,0xbf,0xbf,0xbf,0xff,0xf7,0xfb,0xff,0xfe,0xee,0xfa,0xff,0xff,0xff,
3100x3d,0x3b,0xff,0xff,0xfe,0xfb,0xf1,0xff,0xbf,0x7b,0xff,0xff,0xef,0xff,0xbf,0xff,
3110xff,0xff,0xff,0xff,0xfe,0xff,0xf7,0xef,0xff,0xfb,0xd0,0xf0,0xdf,0xff,0xff,0xff,
3120xfe,0xf8,0x30,0x00,0x00,0x00,0x00,0x00,0x0b,0x10,0x05,0x01,0x00,0x08,0x00,0x02,
3130x01,0x01,0x00,0x00,0x10,0x01,0xc8,0x08,0x00,0x00,0x00,0x00,0x42,0x02,0x00,0x00,
3140x00,0x80,0x02,0x00,0x00,0x40,0x24,0x80,0x00,0xc1,0xf0,0x3f,0xff,0xff,0xff,0xfe,
3150xff,0xff,0xff,0xff,0xf7,0xfd,0xf7,0xfa,0xef,0xee,0xf9,0xfd,0xff,0xf7,0xfe,0xbf,
3160x1f,0xfd,0x9e,0xfd,0xd1,0xef,0xff,0xf7,0x7f,0x9f,0xff,0xef,0xff,0xf6,0xff,0xfe,
3170xfe,0x7b,0xff,0xbd,0xff,0x7e,0xff,0xff,0xff,0xf0,0x3f,0xff,0xff,0xff,0xfe,0xff,
3180xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
3190xff,0xff,0xff,0xf1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
3200xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0xaf,0xff,0xff,0xff,0xfe,0xff,0xff,
3210xff,0xf7,0xff,0xff,0xff,0xff,0x7f,0xff,0xff,0xff,0xdf,0xfd,0xff,0xff,0xdf,0xff,
3220xff,0x5f,0xf1,0xbf,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
3230xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0xdf,0xff,0xff,0xff,0xfe,0xff,0xef,0xff,
3240xf7,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x3f,0xfb,0xff,0xff,0xef,0xfb,0xfd,
3250xff,0xf1,0xff,0xff,0xfb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
3260xff,0xff,0xff,0xff,0xff,0xff,0xf0,0x2f,0xff,0xff,0xff,0xfe,0xf7,0xff,0xff,0xff,
3270xff,0xff,0xff,0xff,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0x7f,0xff,0xff,0xe7,0xff,
3280xf1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
3290xff,0xff,0xff,0xff,0xff,0xf0,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,
3300xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xcf,0xff,0xfb,0xff,0xfb,0xf1,
3310xff,0xff,0xfb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
3320xff,0xff,0xff,0xff,0xf0,0x2f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,
3330xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x7b,0xff,0xff,0xff,0x7f,0xff,0xf1,0xff,
3340xff,0xff,0xdf,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
3350xff,0xff,0xff,0xf0,0x7f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xef,0xff,0xff,0xff,
3360xff,0xff,0xff,0xff,0xff,0xff,0xff,0xdf,0x57,0xff,0xfe,0xbf,0xfb,0xf1,0xff,0xff,
3370xfd,0xf7,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
3380xd7,0xff,0xf0,0x7f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xf7,0xdb,0xff,0xdb,0xfd,
3390xf6,0xff,0xf6,0xff,0x3c,0xbc,0xbc,0xbf,0xdf,0x6f,0xef,0x2f,0xf1,0x3c,0xbf,0xbc,
3400xbf,0xdf,0x6f,0xff,0x6f,0xf7,0xdb,0xff,0xdb,0xfd,0xf6,0xff,0xf6,0xff,0xff,0xff,
3410x01,0xe2,0xef,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
3420xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
3430xff,0xff,0xff,0xff,0xff,0xff };
diff --git a/drivers/net/hamradio/yam9600.h b/drivers/net/hamradio/yam9600.h
deleted file mode 100644
index 5ed1fe6ff43e..000000000000
--- a/drivers/net/hamradio/yam9600.h
+++ /dev/null
@@ -1,343 +0,0 @@
-/*
- *
- * File yam111.mcs converted to h format by mcs2h
- *
- * (C) F6FBB 1998
- *
- * Tue Aug 25 20:23:03 1998
- *
- */
-
-static unsigned char bits_9600[]= {
-0xff,0xf2,0x00,0xa5,0xad,0xff,0xfe,0x9f,0xff,0xef,0xfb,0xcb,0xff,0xdb,0xfe,0xf2,
[... original lines 13-342 of the deleted yam9600.h: remainder of the bits_9600[] firmware image as raw hex bytes, line numbers stripped ...]
-0xff,0xff,0xff,0xff };
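
Both firmware header blobs above are removed outright rather than edited.
Dropping built-in arrays such as bits_9600[] normally goes hand in hand with
loading the image at runtime through the kernel firmware loader; a minimal
sketch of that pattern follows (the firmware file name and helper name are
illustrative assumptions, not taken from this patch):

#include <linux/firmware.h>
#include <linux/device.h>

static int yam_load_fw(struct device *dev)
{
	const struct firmware *fw;
	int err;

	/* Hypothetical image name; the real driver defines its own. */
	err = request_firmware(&fw, "yam/9600.bin", dev);
	if (err)
		return err;

	/* ... write fw->data (fw->size bytes) to the modem FPGA ... */

	release_firmware(fw);
	return 0;
}
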
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
index de2d48624683..f50fac25be40 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/igb/e1000_phy.c
@@ -448,8 +448,11 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
 		goto out;
 	}
 
-	/* Wait 15ms for MAC to configure PHY from NVM settings. */
-	msleep(15);
+	/*
+	 * Wait 100ms for MAC to configure PHY from NVM settings, to avoid
+	 * timeout issues when LFS is enabled.
+	 */
+	msleep(100);
 
 	/*
 	 * The NVM settings will configure LPLU in D3 for
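
For reference, msleep() (declared in <linux/delay.h>) puts the caller to
sleep for at least the requested number of milliseconds, so this one-time
bring-up wait simply becomes:

	/*
	 * Wait 100ms for MAC to configure PHY from NVM settings, to avoid
	 * timeout issues when LFS is enabled.
	 */
	msleep(100);
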
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index fb09c8ad9f0d..27eae49e79c2 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1419,7 +1419,6 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 ctrl_reg = 0;
-	u32 stat_reg = 0;
 
 	hw->mac.autoneg = false;
 
@@ -1443,18 +1442,11 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
 	ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
 		     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
 		     E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
-		     E1000_CTRL_FD);	 /* Force Duplex to FULL */
+		     E1000_CTRL_FD |	 /* Force Duplex to FULL */
+		     E1000_CTRL_SLU);	 /* Set link up enable bit */
 
-	if (hw->phy.media_type == e1000_media_type_copper &&
-	    hw->phy.type == e1000_phy_m88)
+	if (hw->phy.type == e1000_phy_m88)
 		ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
-	else {
-		/* Set the ILOS bit on the fiber Nic if half duplex link is
-		 * detected. */
-		stat_reg = rd32(E1000_STATUS);
-		if ((stat_reg & E1000_STATUS_FD) == 0)
-			ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
-	}
 
 	wr32(E1000_CTRL, ctrl_reg);
 
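
With this hunk applied, the loopback setup forces speed, duplex and link-up
in a single register write and no longer consults the media type; the
resulting block, assembled from the hunk above, reads:

	ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
		     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
		     E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
		     E1000_CTRL_FD |     /* Force Duplex to FULL */
		     E1000_CTRL_SLU);    /* Set link up enable bit */

	if (hw->phy.type == e1000_phy_m88)
		ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */

	wr32(E1000_CTRL, ctrl_reg);
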
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index ca842163dce4..03aa9593dd9e 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -135,8 +135,8 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *, int, int);
 static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *);
 static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
 
-static int igb_suspend(struct pci_dev *, pm_message_t);
 #ifdef CONFIG_PM
+static int igb_suspend(struct pci_dev *, pm_message_t);
 static int igb_resume(struct pci_dev *);
 #endif
 static void igb_shutdown(struct pci_dev *);
@@ -420,6 +420,9 @@ static void igb_free_queues(struct igb_adapter *adapter)
 	for (i = 0; i < adapter->num_rx_queues; i++)
 		netif_napi_del(&adapter->rx_ring[i].napi);
 
+	adapter->num_rx_queues = 0;
+	adapter->num_tx_queues = 0;
+
 	kfree(adapter->tx_ring);
 	kfree(adapter->rx_ring);
 }
@@ -1476,9 +1479,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 		 netdev->name,
 		 ((hw->bus.speed == e1000_bus_speed_2500)
 		  ? "2.5Gb/s" : "unknown"),
-		 ((hw->bus.width == e1000_bus_width_pcie_x4)
-		  ? "Width x4" : (hw->bus.width == e1000_bus_width_pcie_x1)
-		  ? "Width x1" : "unknown"),
+		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
+		  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
+		  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
+		   "unknown"),
 		 netdev->dev_addr);
 
 	igb_read_part_num(hw, &part_num);
@@ -5056,7 +5060,7 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
 	return 0;
 }
 
-static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct igb_adapter *adapter = netdev_priv(netdev);
@@ -5115,15 +5119,9 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
 		wr32(E1000_WUFC, 0);
 	}
 
-	/* make sure adapter isn't asleep if manageability/wol is enabled */
-	if (wufc || adapter->en_mng_pt) {
-		pci_enable_wake(pdev, PCI_D3hot, 1);
-		pci_enable_wake(pdev, PCI_D3cold, 1);
-	} else {
+	*enable_wake = wufc || adapter->en_mng_pt;
+	if (!*enable_wake)
 		igb_shutdown_fiber_serdes_link_82575(hw);
-		pci_enable_wake(pdev, PCI_D3hot, 0);
-		pci_enable_wake(pdev, PCI_D3cold, 0);
-	}
 
 	/* Release control of h/w to f/w. If f/w is AMT enabled, this
 	 * would have already happened in close and is redundant. */
@@ -5131,12 +5129,29 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
 
 	pci_disable_device(pdev);
 
-	pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
 	return 0;
 }
 
 #ifdef CONFIG_PM
+static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	int retval;
+	bool wake;
+
+	retval = __igb_shutdown(pdev, &wake);
+	if (retval)
+		return retval;
+
+	if (wake) {
+		pci_prepare_to_sleep(pdev);
+	} else {
+		pci_wake_from_d3(pdev, false);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+
+	return 0;
+}
+
 static int igb_resume(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
@@ -5189,7 +5204,14 @@ static int igb_resume(struct pci_dev *pdev)
 
 static void igb_shutdown(struct pci_dev *pdev)
 {
-	igb_suspend(pdev, PMSG_SUSPEND);
+	bool wake;
+
+	__igb_shutdown(pdev, &wake);
+
+	if (system_state == SYSTEM_POWER_OFF) {
+		pci_wake_from_d3(pdev, wake);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
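
The igb change above is an instance of a common PCI driver refactor: one
__foo_shutdown() helper quiesces the hardware and reports whether wake-up
should stay armed, and thin suspend/shutdown wrappers then decide the
D-state. A generic sketch of the shape (driver and callback names here are
hypothetical, not from this patch):

static int __mydev_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	/* stop the device, free resources, program wake-up filters ... */
	*enable_wake = device_may_wakeup(&pdev->dev);
	pci_disable_device(pdev);
	return 0;
}

static int mydev_suspend(struct pci_dev *pdev, pm_message_t state)
{
	bool wake;
	int err = __mydev_shutdown(pdev, &wake);

	if (err)
		return err;

	if (wake) {
		pci_prepare_to_sleep(pdev);	/* arm PME, enter deepest D-state */
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}
	return 0;
}
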
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index ed265a7a898f..de4db0dc7879 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -411,7 +411,8 @@ static s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
 
 	/* Decide whether to use autoneg or not. */
 	hw->mac.ops.check_link(hw, &speed, &link_up, false);
-	if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL))
+	if (!hw->fc.disable_fc_autoneg && hw->phy.multispeed_fiber &&
+	    (speed == IXGBE_LINK_SPEED_1GB_FULL))
 		ret_val = ixgbe_fc_autoneg(hw);
 
 	if (ret_val)
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 8cfd3fd309a0..63ab6671d08e 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1937,7 +1937,8 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
 
 	/* Decide whether to use autoneg or not. */
 	hw->mac.ops.check_link(hw, &speed, &link_up, false);
-	if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL))
+	if (!hw->fc.disable_fc_autoneg && hw->phy.multispeed_fiber &&
+	    (speed == IXGBE_LINK_SPEED_1GB_FULL))
 		ret_val = ixgbe_fc_autoneg(hw);
 
 	if (ret_val)
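
The two hunks above make the same change in the 82598-specific and generic
setup paths: flow-control autonegotiation still runs only on 1G
multispeed-fiber links, and now also only when the new
hw->fc.disable_fc_autoneg knob (set from ethtool, see below) is clear.
The shared guard, annotated:

	/* Autonegotiate flow control only on 1G multispeed-fiber links,
	 * and only if the user has not switched FC autoneg off. */
	if (!hw->fc.disable_fc_autoneg && hw->phy.multispeed_fiber &&
	    (speed == IXGBE_LINK_SPEED_1GB_FULL))
		ret_val = ixgbe_fc_autoneg(hw);
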
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 7e94d6d399ab..24f73e719c3f 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -96,14 +96,11 @@ s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val);
 #define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
 
 #ifdef DEBUG
+extern char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw);
 #define hw_dbg(hw, format, arg...) \
-	printk(KERN_DEBUG, "%s: " format, ixgbe_get_hw_dev_name(hw), ##arg);
+	printk(KERN_DEBUG "%s: " format, ixgbe_get_hw_dev_name(hw), ##arg)
 #else
-static inline int __attribute__ ((format (printf, 2, 3)))
-hw_dbg(struct ixgbe_hw *hw, const char *format, ...)
-{
-	return 0;
-}
+#define hw_dbg(hw, format, arg...) do {} while (0)
 #endif
 
 #endif /* IXGBE_COMMON */
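
Besides fixing the stray comma after KERN_DEBUG, this replaces the empty
inline with a no-op macro, the standard way to compile variadic debug
printks out entirely: the macro simply discards its arguments, and the
do {} while (0) body stays statement-safe as the sole branch of an if/else.
A generic version of the pattern (prefix and macro name are illustrative):

#ifdef DEBUG
#define drv_dbg(fmt, arg...) \
	printk(KERN_DEBUG "mydrv: " fmt, ##arg)
#else
#define drv_dbg(fmt, arg...) do {} while (0)
#endif
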
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index 0a8731f1f237..bd0a0c276952 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -90,6 +90,8 @@ int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
 			src_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc;
 	}
 
+	dst_dcb_cfg->pfc_mode_enable = src_dcb_cfg->pfc_mode_enable;
+
 	return 0;
 }
 
@@ -298,8 +300,10 @@ static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
 
 	adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting;
 	if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc !=
-	    adapter->dcb_cfg.tc_config[priority].dcb_pfc)
+	    adapter->dcb_cfg.tc_config[priority].dcb_pfc) {
 		adapter->dcb_set_bitmap |= BIT_PFC;
+		adapter->temp_dcb_cfg.pfc_mode_enable = true;
+	}
 }
 
 static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 18ecba7f6ecb..aafc120f164e 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -129,6 +129,15 @@ static int ixgbe_get_settings(struct net_device *netdev,
 			ecmd->advertising |= ADVERTISED_10000baseT_Full;
 		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
 			ecmd->advertising |= ADVERTISED_1000baseT_Full;
+		/*
+		 * It's possible that phy.autoneg_advertised may not be
+		 * set yet.  If so display what the default would be -
+		 * both 1G and 10G supported.
+		 */
+		if (!(ecmd->advertising & (ADVERTISED_1000baseT_Full |
+					   ADVERTISED_10000baseT_Full)))
+			ecmd->advertising |= (ADVERTISED_10000baseT_Full |
+					      ADVERTISED_1000baseT_Full);
 
 		ecmd->port = PORT_TP;
 	} else if (hw->phy.media_type == ixgbe_media_type_backplane) {
@@ -225,7 +234,16 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
 
-	pause->autoneg = (hw->fc.current_mode == ixgbe_fc_full ? 1 : 0);
+	/*
+	 * Flow Control Autoneg isn't on if
+	 * - we didn't ask for it OR
+	 * - it failed, we know this by tx & rx being off
+	 */
+	if (hw->fc.disable_fc_autoneg ||
+	    (hw->fc.current_mode == ixgbe_fc_none))
+		pause->autoneg = 0;
+	else
+		pause->autoneg = 1;
 
 	if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
 		pause->rx_pause = 1;
@@ -243,8 +261,12 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
 
-	if ((pause->autoneg == AUTONEG_ENABLE) ||
-	    (pause->rx_pause && pause->tx_pause))
+	if (pause->autoneg != AUTONEG_ENABLE)
+		hw->fc.disable_fc_autoneg = true;
+	else
+		hw->fc.disable_fc_autoneg = false;
+
+	if (pause->rx_pause && pause->tx_pause)
 		hw->fc.requested_mode = ixgbe_fc_full;
 	else if (pause->rx_pause && !pause->tx_pause)
 		hw->fc.requested_mode = ixgbe_fc_rx_pause;
@@ -712,9 +734,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 			      struct ethtool_ringparam *ring)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	struct ixgbe_ring *temp_ring;
+	struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
 	int i, err;
 	u32 new_rx_count, new_tx_count;
+	bool need_update = false;
 
 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 		return -EINVAL;
@@ -733,80 +756,94 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 		return 0;
 	}
 
-	temp_ring = kcalloc(adapter->num_tx_queues,
-			    sizeof(struct ixgbe_ring), GFP_KERNEL);
-	if (!temp_ring)
-		return -ENOMEM;
-
 	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
 		msleep(1);
 
-	if (new_tx_count != adapter->tx_ring->count) {
+	temp_tx_ring = kcalloc(adapter->num_tx_queues,
+			       sizeof(struct ixgbe_ring), GFP_KERNEL);
+	if (!temp_tx_ring) {
+		err = -ENOMEM;
+		goto err_setup;
+	}
+
+	if (new_tx_count != adapter->tx_ring_count) {
+		memcpy(temp_tx_ring, adapter->tx_ring,
+		       adapter->num_tx_queues * sizeof(struct ixgbe_ring));
 		for (i = 0; i < adapter->num_tx_queues; i++) {
-			temp_ring[i].count = new_tx_count;
-			err = ixgbe_setup_tx_resources(adapter, &temp_ring[i]);
+			temp_tx_ring[i].count = new_tx_count;
+			err = ixgbe_setup_tx_resources(adapter,
+						       &temp_tx_ring[i]);
 			if (err) {
 				while (i) {
 					i--;
 					ixgbe_free_tx_resources(adapter,
-								&temp_ring[i]);
+							&temp_tx_ring[i]);
 				}
 				goto err_setup;
 			}
-			temp_ring[i].v_idx = adapter->tx_ring[i].v_idx;
+			temp_tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
 		}
-		if (netif_running(netdev))
-			netdev->netdev_ops->ndo_stop(netdev);
-		ixgbe_reset_interrupt_capability(adapter);
-		ixgbe_napi_del_all(adapter);
-		INIT_LIST_HEAD(&netdev->napi_list);
-		kfree(adapter->tx_ring);
-		adapter->tx_ring = temp_ring;
-		temp_ring = NULL;
-		adapter->tx_ring_count = new_tx_count;
+		need_update = true;
 	}
 
-	temp_ring = kcalloc(adapter->num_rx_queues,
-			    sizeof(struct ixgbe_ring), GFP_KERNEL);
-	if (!temp_ring) {
-		if (netif_running(netdev))
-			netdev->netdev_ops->ndo_open(netdev);
-		return -ENOMEM;
+	temp_rx_ring = kcalloc(adapter->num_rx_queues,
+			       sizeof(struct ixgbe_ring), GFP_KERNEL);
+	if ((!temp_rx_ring) && (need_update)) {
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			ixgbe_free_tx_resources(adapter, &temp_tx_ring[i]);
+		kfree(temp_tx_ring);
+		err = -ENOMEM;
+		goto err_setup;
 	}
 
-	if (new_rx_count != adapter->rx_ring->count) {
+	if (new_rx_count != adapter->rx_ring_count) {
+		memcpy(temp_rx_ring, adapter->rx_ring,
+		       adapter->num_rx_queues * sizeof(struct ixgbe_ring));
 		for (i = 0; i < adapter->num_rx_queues; i++) {
-			temp_ring[i].count = new_rx_count;
-			err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
+			temp_rx_ring[i].count = new_rx_count;
+			err = ixgbe_setup_rx_resources(adapter,
+						       &temp_rx_ring[i]);
 			if (err) {
 				while (i) {
 					i--;
 					ixgbe_free_rx_resources(adapter,
-								&temp_ring[i]);
+							&temp_rx_ring[i]);
 				}
 				goto err_setup;
 			}
-			temp_ring[i].v_idx = adapter->rx_ring[i].v_idx;
+			temp_rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
 		}
+		need_update = true;
+	}
+
+	/* if rings need to be updated, here's the place to do it in one shot */
+	if (need_update) {
 		if (netif_running(netdev))
-			netdev->netdev_ops->ndo_stop(netdev);
-		ixgbe_reset_interrupt_capability(adapter);
-		ixgbe_napi_del_all(adapter);
-		INIT_LIST_HEAD(&netdev->napi_list);
-		kfree(adapter->rx_ring);
-		adapter->rx_ring = temp_ring;
-		temp_ring = NULL;
-
-		adapter->rx_ring_count = new_rx_count;
+			ixgbe_down(adapter);
+
+		/* tx */
+		if (new_tx_count != adapter->tx_ring_count) {
+			kfree(adapter->tx_ring);
+			adapter->tx_ring = temp_tx_ring;
+			temp_tx_ring = NULL;
+			adapter->tx_ring_count = new_tx_count;
+		}
+
+		/* rx */
+		if (new_rx_count != adapter->rx_ring_count) {
+			kfree(adapter->rx_ring);
+			adapter->rx_ring = temp_rx_ring;
+			temp_rx_ring = NULL;
+			adapter->rx_ring_count = new_rx_count;
+		}
 	}
 
 	/* success! */
 	err = 0;
-err_setup:
-	ixgbe_init_interrupt_scheme(adapter);
 	if (netif_running(netdev))
-		netdev->netdev_ops->ndo_open(netdev);
+		ixgbe_up(adapter);
 
+err_setup:
 	clear_bit(__IXGBE_RESETTING, &adapter->state);
 	return err;
 }
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 79aa811c403c..286ecc0e6ab7 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -187,15 +187,14 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
 						struct ixgbe_tx_buffer
 						*tx_buffer_info)
 {
-	if (tx_buffer_info->dma) {
-		pci_unmap_page(adapter->pdev, tx_buffer_info->dma,
-			       tx_buffer_info->length, PCI_DMA_TODEVICE);
-		tx_buffer_info->dma = 0;
-	}
+	tx_buffer_info->dma = 0;
 	if (tx_buffer_info->skb) {
+		skb_dma_unmap(&adapter->pdev->dev, tx_buffer_info->skb,
+			      DMA_TO_DEVICE);
 		dev_kfree_skb_any(tx_buffer_info->skb);
 		tx_buffer_info->skb = NULL;
 	}
+	tx_buffer_info->time_stamp = 0;
 	/* tx_buffer_info must be completely set up in the transmit path */
 }
 
@@ -204,15 +203,11 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
 					unsigned int eop)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 head, tail;
 
 	/* Detect a transmit hang in hardware, this serializes the
 	 * check with the clearing of time_stamp and movement of eop */
-	head = IXGBE_READ_REG(hw, tx_ring->head);
-	tail = IXGBE_READ_REG(hw, tx_ring->tail);
 	adapter->detect_tx_hung = false;
-	if ((head != tail) &&
-	    tx_ring->tx_buffer_info[eop].time_stamp &&
+	if (tx_ring->tx_buffer_info[eop].time_stamp &&
 	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
 	    !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
 		/* detected Tx unit hang */
@@ -227,7 +222,8 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
 			"  time_stamp           <%lx>\n"
 			"  jiffies              <%lx>\n",
 			tx_ring->queue_index,
-			head, tail,
+			IXGBE_READ_REG(hw, tx_ring->head),
+			IXGBE_READ_REG(hw, tx_ring->tail),
 			tx_ring->next_to_use, eop,
 			tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
 		return true;
@@ -2934,6 +2930,7 @@ err_tx_ring_allocation:
  **/
 static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 {
+	struct ixgbe_hw *hw = &adapter->hw;
 	int err = 0;
 	int vector, v_budget;
 
@@ -2948,12 +2945,12 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 
 	/*
 	 * At the same time, hardware can only support a maximum of
-	 * MAX_MSIX_COUNT vectors.  With features such as RSS and VMDq,
-	 * we can easily reach upwards of 64 Rx descriptor queues and
-	 * 32 Tx queues.  Thus, we cap it off in those rare cases where
-	 * the cpu count also exceeds our vector limit.
+	 * hw.mac->max_msix_vectors vectors.  With features
+	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
+	 * descriptor queues supported by our device.  Thus, we cap it off in
+	 * those rare cases where the cpu count also exceeds our vector limit.
 	 */
-	v_budget = min(v_budget, MAX_MSIX_COUNT);
+	v_budget = min(v_budget, (int)hw->mac.max_msix_vectors);
 
 	/* A failure in MSI-X entry allocation isn't fatal, but it does
 	 * mean we disable MSI-X capabilities of the adapter. */
@@ -3169,11 +3166,13 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 #endif
 
 	/* default flow control settings */
-	hw->fc.requested_mode = ixgbe_fc_none;
+	hw->fc.requested_mode = ixgbe_fc_full;
+	hw->fc.current_mode = ixgbe_fc_full;	/* init for ethtool output */
 	hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
 	hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
 	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
 	hw->fc.send_xon = true;
+	hw->fc.disable_fc_autoneg = false;
 
 	/* enable itr by default in dynamic mode */
 	adapter->itr_setting = 1;
@@ -3489,10 +3488,10 @@ err_up:
 	ixgbe_release_hw_control(adapter);
 	ixgbe_free_irq(adapter);
 err_req_irq:
-	ixgbe_free_all_rx_resources(adapter);
 err_setup_rx:
-	ixgbe_free_all_tx_resources(adapter);
+	ixgbe_free_all_rx_resources(adapter);
 err_setup_tx:
+	ixgbe_free_all_tx_resources(adapter);
 	ixgbe_reset(adapter);
 
 	return err;
@@ -4163,32 +4162,39 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 			struct sk_buff *skb, unsigned int first)
 {
 	struct ixgbe_tx_buffer *tx_buffer_info;
-	unsigned int len = skb->len;
+	unsigned int len = skb_headlen(skb);
 	unsigned int offset = 0, size, count = 0, i;
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	unsigned int f;
-
-	len -= skb->data_len;
+	dma_addr_t *map;
 
 	i = tx_ring->next_to_use;
 
+	if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
+		dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
+		return 0;
+	}
+
+	map = skb_shinfo(skb)->dma_maps;
+
 	while (len) {
 		tx_buffer_info = &tx_ring->tx_buffer_info[i];
 		size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
 
 		tx_buffer_info->length = size;
-		tx_buffer_info->dma = pci_map_single(adapter->pdev,
-						     skb->data + offset,
-						     size, PCI_DMA_TODEVICE);
+		tx_buffer_info->dma = map[0] + offset;
 		tx_buffer_info->time_stamp = jiffies;
 		tx_buffer_info->next_to_watch = i;
 
 		len -= size;
 		offset += size;
 		count++;
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
+
+		if (len) {
+			i++;
+			if (i == tx_ring->count)
+				i = 0;
+		}
 	}
 
 	for (f = 0; f < nr_frags; f++) {
@@ -4196,33 +4202,27 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 
 		frag = &skb_shinfo(skb)->frags[f];
 		len = frag->size;
-		offset = frag->page_offset;
+		offset = 0;
 
 		while (len) {
+			i++;
+			if (i == tx_ring->count)
+				i = 0;
+
 			tx_buffer_info = &tx_ring->tx_buffer_info[i];
 			size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
 
 			tx_buffer_info->length = size;
-			tx_buffer_info->dma = pci_map_page(adapter->pdev,
-							   frag->page,
-							   offset,
-							   size,
-							   PCI_DMA_TODEVICE);
+			tx_buffer_info->dma = map[f + 1] + offset;
 			tx_buffer_info->time_stamp = jiffies;
 			tx_buffer_info->next_to_watch = i;
 
 			len -= size;
 			offset += size;
 			count++;
-			i++;
-			if (i == tx_ring->count)
-				i = 0;
 		}
 	}
-	if (i == 0)
-		i = tx_ring->count - 1;
-	else
-		i = i - 1;
+
 	tx_ring->tx_buffer_info[i].skb = skb;
 	tx_ring->tx_buffer_info[first].next_to_watch = i;
 
@@ -4388,13 +4388,19 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	    (skb->ip_summed == CHECKSUM_PARTIAL))
 		tx_flags |= IXGBE_TX_FLAGS_CSUM;
 
-	ixgbe_tx_queue(adapter, tx_ring, tx_flags,
-		       ixgbe_tx_map(adapter, tx_ring, skb, first),
-		       skb->len, hdr_len);
+	count = ixgbe_tx_map(adapter, tx_ring, skb, first);
 
-	netdev->trans_start = jiffies;
+	if (count) {
+		ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
+			       hdr_len);
+		netdev->trans_start = jiffies;
+		ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
 
-	ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
+	} else {
+		dev_kfree_skb_any(skb);
+		tx_ring->tx_buffer_info[first].time_stamp = 0;
+		tx_ring->next_to_use = first;
+	}
 
 	return NETDEV_TX_OK;
 }
@@ -4987,8 +4993,20 @@ static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
 
 	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
 }
+
 #endif /* CONFIG_IXGBE_DCA */
+#ifdef DEBUG
+/**
+ * ixgbe_get_hw_dev_name - return device name string
+ * used by hardware layer to print debugging information
+ **/
+char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
+{
+	struct ixgbe_adapter *adapter = hw->back;
+	return adapter->netdev->name;
+}
 
+#endif
 module_exit(ixgbe_exit_module);
 
 /* ixgbe_main.c */
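Note on the ixgbe_tx_map() rework above: the driver moves from per-buffer pci_map_single()/pci_map_page() calls to the skb_dma_map() helper this kernel generation provided, which maps the linear head plus every page fragment in one call and stores the addresses in skb_shinfo(skb)->dma_maps, head first. A minimal consumer sketch with hypothetical local names; skb_dma_map() was specific to this era and was later removed from mainline:

	/* map[0] covers skb->data, map[f + 1] covers page fragment f */
	dma_addr_t *map;

	if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE))
		return 0;	/* mapping failed: caller must drop the skb */
	map = skb_shinfo(skb)->dma_maps;

	head_dma = map[0] + offset_into_head;	/* linear part */
	frag_dma = map[f + 1] + offset_into_frag;	/* fragment f */

This single-map layout is also why the unmap path above collapses to one skb_dma_unmap() call.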
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 2b2ecba7b609..030ff0a9ea67 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -2005,6 +2005,7 @@ struct ixgbe_fc_info {
 	u16 pause_time; /* Flow Control Pause timer */
 	bool send_xon; /* Flow control send XON */
 	bool strict_ieee; /* Strict IEEE mode */
+	bool disable_fc_autoneg; /* Turn off autoneg FC mode */
 	enum ixgbe_fc_mode current_mode; /* FC mode in effect */
 	enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */
 };
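The new disable_fc_autoneg flag is what ties the ixgbe ethtool changes together: pause-parameter autoneg now has an explicit off switch instead of being inferred from ixgbe_fc_full. A compressed restatement of the resulting ixgbe_set_pauseparam() policy; this is the hunk's own logic, not new behavior:

	/* "ethtool -A ethX autoneg off" pins the requested mode */
	hw->fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);

	if (pause->rx_pause && pause->tx_pause)
		hw->fc.requested_mode = ixgbe_fc_full;
	else if (pause->rx_pause)
		hw->fc.requested_mode = ixgbe_fc_rx_pause;

With the flag set, the setup_fc paths shown earlier skip ixgbe_fc_autoneg() even on 1G multispeed-fiber links.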
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 9f6644a44030..303c23de6cac 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -505,7 +505,7 @@ out:
 
 static void mlx4_en_do_get_stats(struct work_struct *work)
 {
-	struct delayed_work *delay = container_of(work, struct delayed_work, work);
+	struct delayed_work *delay = to_delayed_work(work);
 	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
 						 stats_task);
 	struct mlx4_en_dev *mdev = priv->mdev;
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index a4130e764991..7e40741fb7d8 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -298,7 +298,7 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
 
 void mlx4_en_rx_refill(struct work_struct *work)
 {
-	struct delayed_work *delay = container_of(work, struct delayed_work, work);
+	struct delayed_work *delay = to_delayed_work(work);
 	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
 						 refill_task);
 	struct mlx4_en_dev *mdev = priv->mdev;
diff --git a/drivers/net/mlx4/sense.c b/drivers/net/mlx4/sense.c
index 6d5089ecb5af..f36ae691cab3 100644
--- a/drivers/net/mlx4/sense.c
+++ b/drivers/net/mlx4/sense.c
@@ -103,7 +103,7 @@ void mlx4_do_sense_ports(struct mlx4_dev *dev,
 
 static void mlx4_sense_port(struct work_struct *work)
 {
-	struct delayed_work *delay = container_of(work, struct delayed_work, work);
+	struct delayed_work *delay = to_delayed_work(work);
 	struct mlx4_sense *sense = container_of(delay, struct mlx4_sense,
 						sense_poll);
 	struct mlx4_dev *dev = sense->dev;
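The three mlx4 hunks above (and the phy.c hunk further down) are the same mechanical conversion: the open-coded container_of() is replaced by the to_delayed_work() helper from <linux/workqueue.h>, which is just:

	static inline struct delayed_work *to_delayed_work(struct work_struct *work)
	{
		return container_of(work, struct delayed_work, work);
	}

No behavior changes; the helper only names the common idiom of recovering a delayed_work from its embedded work_struct.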
diff --git a/drivers/net/pcmcia/ositech.h b/drivers/net/pcmcia/ositech.h
deleted file mode 100644
index 4126efc355bd..000000000000
--- a/drivers/net/pcmcia/ositech.h
+++ /dev/null
@@ -1,358 +0,0 @@
1/*
2 This file contains the firmware of Seven of Diamonds from OSITECH.
3 (Special thanks to Kevin MacPherson of OSITECH)
4
5 This software may be used and distributed according to the terms of
6 the GNU General Public License, incorporated herein by reference.
7*/
8
9 static const u_char __Xilinx7OD[] = {
10 0xFF, 0x04, 0xA0, 0x36, 0xF3, 0xEC, 0xFF, 0xFF, 0xFF, 0xDF, 0xFB, 0xFF,
11 0xF3, 0xFF, 0xFF, 0xFF,
12 0xEF, 0x3F, 0xFF, 0xF7, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0x7F, 0xFE, 0xFF,
13 0xCE, 0xFE, 0xFE, 0xFE,
14 0xFE, 0xDE, 0xBD, 0xDD, 0xFD, 0xFF, 0xFD, 0xCF, 0xF7, 0xBF, 0x7F, 0xFF,
15 0x7F, 0x3F, 0xFE, 0xBF,
16 0xFF, 0xFF, 0xFF, 0xBC, 0xFF, 0xFF, 0xBD, 0xB5, 0x7F, 0x7F, 0xBF, 0xBF,
17 0x7F, 0xFF, 0xEF, 0xFF,
18 0xFF, 0xFF, 0xFB, 0xFF, 0xF7, 0xF7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xDE,
19 0xFE, 0xFE, 0xFA, 0xDE,
20 0xBD, 0xFD, 0xED, 0xFD, 0xFD, 0xCF, 0xEF, 0xEF, 0xEF, 0xEF, 0xC7, 0xDF,
21 0xDF, 0xDF, 0xDF, 0xDF,
22 0xFF, 0x7E, 0xFE, 0xFD, 0x7D, 0x6D, 0xEE, 0xFE, 0x7C, 0xFB, 0xF4, 0xFB,
23 0xCF, 0xDB, 0xDF, 0xFF,
24 0xFF, 0xBB, 0x7F, 0xFF, 0x7F, 0xFF, 0xF7, 0xFF, 0x9E, 0xBF, 0x3B, 0xBF,
25 0xBF, 0x7F, 0x7F, 0x7F,
26 0x7E, 0x6F, 0xDF, 0xEF, 0xF5, 0xF6, 0xFD, 0xF6, 0xF5, 0xED, 0xEB, 0xFF,
27 0xEF, 0xEF, 0xEF, 0x7E,
28 0x7F, 0x7F, 0x6F, 0x7F, 0xFF, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xEF, 0xBF,
29 0xFF, 0xFF, 0xFF, 0xFF,
30 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBC, 0x1F, 0x1F, 0xEE, 0xFF, 0xBC,
31 0xB7, 0xFF, 0xDF, 0xFF,
32 0xDF, 0xEF, 0x3B, 0xE3, 0xD3, 0xFF, 0xFB, 0xFF, 0xFF, 0xDF, 0xFF, 0xFF,
33 0xFF, 0xBA, 0xBF, 0x2D,
34 0xDB, 0xBD, 0xFD, 0xDB, 0xDF, 0xFA, 0xFB, 0xFF, 0xEF, 0xFB, 0xDB, 0xF3,
35 0xFF, 0xDF, 0xFD, 0x7F,
36 0xEF, 0xFB, 0xFF, 0xFF, 0xBE, 0xBF, 0x27, 0xBA, 0xFE, 0xFB, 0xDF, 0xFF,
37 0xF6, 0xFF, 0xFF, 0xEF,
38 0xFB, 0xDB, 0xF3, 0xD9, 0x9A, 0x3F, 0xFF, 0xAF, 0xBF, 0xFF, 0xFF, 0xBE,
39 0x3F, 0x37, 0xBD, 0x96,
40 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xAE, 0xFB, 0xF3, 0xF3, 0xEB, 0xFF,
41 0xFF, 0xFF, 0xFF, 0xFF,
42 0xFF, 0xF7, 0xFA, 0xBC, 0xAE, 0xFE, 0xBE, 0xFE, 0xBB, 0x7F, 0xFD, 0xFF,
43 0x7F, 0xEF, 0xF7, 0xFB,
44 0xBB, 0xD7, 0xF7, 0x7F, 0xFF, 0xF7, 0xFF, 0xFF, 0xF7, 0xBC, 0xED, 0xFD,
45 0xBD, 0x9D, 0x7D, 0x7B,
46 0xFB, 0x7B, 0x7B, 0xFB, 0xAF, 0xFF, 0xFE, 0xFD, 0xFD, 0xFE, 0xFE, 0xFF,
47 0xFF, 0xFF, 0xFF, 0xF7,
48 0xAA, 0xB9, 0xBF, 0x8F, 0xBF, 0xDF, 0xFF, 0x7F, 0xFF, 0xFF, 0x7F, 0xCF,
49 0xFB, 0xEB, 0xCB, 0xEB,
50 0xEE, 0xFF, 0xFF, 0xD7, 0xFF, 0xFF, 0xFF, 0x3E, 0x33, 0x3F, 0x1C, 0x7C,
51 0xFC, 0xFF, 0xFF, 0xFF,
52 0xFF, 0xFF, 0xCF, 0xD3, 0xF3, 0xE3, 0xF3, 0xFB, 0xFF, 0xFF, 0xFF, 0xFF,
53 0xFF, 0xEB, 0xFE, 0x35,
54 0x3F, 0x3D, 0xFD, 0xFD, 0xFF, 0xFF, 0xFF, 0xBF, 0xFF, 0xEF, 0x6F, 0xE3,
55 0xE3, 0xE3, 0xEF, 0xFF,
56 0xFF, 0xDF, 0xFF, 0xFF, 0xF7, 0xFE, 0x3E, 0x5E, 0xFE, 0xFF, 0xFF, 0xFF,
57 0xFF, 0xFD, 0xFF, 0xFF,
58 0xAF, 0xCF, 0xF2, 0xCB, 0xCF, 0x8E, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFD,
59 0xFC, 0x3E, 0x1F, 0x9E,
60 0xAD, 0xFD, 0xFF, 0xFF, 0xBF, 0xFF, 0xFF, 0xEF, 0xFF, 0xB3, 0xF7, 0xE7,
61 0xF7, 0xFA, 0xFF, 0xFF,
62 0xFF, 0xFF, 0xFF, 0xEE, 0xEB, 0xAB, 0xAF, 0x9F, 0xE3, 0x7F, 0xFF, 0xDE,
63 0xFF, 0x7F, 0xEE, 0xFF,
64 0xFF, 0xFB, 0x3A, 0xFA, 0xFF, 0xF2, 0x77, 0xFF, 0xFF, 0xF7, 0xFE, 0xFF,
65 0xFE, 0xBD, 0xAE, 0xDE,
66 0x7D, 0x7D, 0xFD, 0xFF, 0xBF, 0xEE, 0xFF, 0xFD, 0xFF, 0xDB, 0xFB, 0xFF,
67 0xF7, 0xEF, 0xFB, 0xFF,
68 0xFF, 0xFE, 0xFF, 0x2D, 0xAF, 0xB9, 0xFD, 0x79, 0xFB, 0xFA, 0xFF, 0xBF,
69 0xEF, 0xFF, 0xFF, 0x91,
70 0xFA, 0xFB, 0xDF, 0xF7, 0xF7, 0xFF, 0xFF, 0xFF, 0xFC, 0xCF, 0x37, 0xBF,
71 0xBF, 0xFF, 0x7F, 0x7F,
72 0xFF, 0xFF, 0xFF, 0xAF, 0xFF, 0xFF, 0xF3, 0xFB, 0xFB, 0xFF, 0xF5, 0xEF,
73 0xFF, 0xFF, 0xF7, 0xFA,
74 0xFF, 0xFF, 0xEE, 0xFA, 0xFE, 0xFB, 0x55, 0xDD, 0xFF, 0x7F, 0xAF, 0xFE,
75 0xFF, 0xFB, 0xFB, 0xF5,
76 0xFF, 0xF7, 0xEF, 0xFF, 0xFF, 0xFF, 0xBE, 0xBD, 0xBD, 0xBD, 0xBD, 0x7D,
77 0x7B, 0x7B, 0x7B, 0x7B,
78 0xFB, 0xAE, 0xFF, 0xFD, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
79 0xF7, 0xDA, 0xB7, 0x61,
80 0xFF, 0xB9, 0x59, 0xF3, 0x73, 0xF3, 0xDF, 0x7F, 0x6F, 0xDF, 0xEF, 0xF7,
81 0xEB, 0xEB, 0xD7, 0xFF,
82 0xD7, 0xFF, 0xFF, 0xF7, 0xFE, 0x7F, 0xFB, 0x3E, 0x38, 0x73, 0xF6, 0x7F,
83 0xFC, 0xFF, 0xFF, 0xCF,
84 0xFF, 0xB7, 0xFB, 0xB3, 0xB3, 0x67, 0xFF, 0xE7, 0xFD, 0xFF, 0xEF, 0xF6,
85 0x7F, 0xB7, 0xBC, 0xF5,
86 0x7B, 0xF6, 0xF7, 0xF5, 0xFF, 0xFF, 0xEF, 0xFF, 0xF7, 0xFF, 0xF7, 0xCE,
87 0xE7, 0xFF, 0x9F, 0xFF,
88 0xFF, 0xF5, 0xFE, 0x7D, 0xFF, 0x5F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
89 0xFF, 0xEF, 0xFF, 0xF6,
90 0xCB, 0xDB, 0xEE, 0xFE, 0xFF, 0xDF, 0xFF, 0xFF, 0xFF, 0xFE, 0x7F, 0xBE,
91 0x1E, 0x3E, 0xFE, 0xFF,
92 0x7D, 0xFE, 0xFF, 0xFF, 0xEF, 0xBF, 0xE7, 0xFF, 0xE3, 0xE3, 0xFF, 0xDF,
93 0xE7, 0xFF, 0xFF, 0xFF,
94 0xB8, 0xEF, 0xB7, 0x2F, 0xEE, 0xFF, 0xDF, 0xFF, 0xBF, 0xFF, 0x7F, 0xEF,
95 0xEB, 0xBF, 0xA3, 0xD3,
96 0xFF, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0xBE, 0xFD, 0x3F, 0xCF, 0xFD,
97 0xFB, 0xFF, 0xFF, 0xFF,
98 0xFF, 0xFF, 0xAF, 0xFB, 0xBF, 0xBB, 0xBF, 0xDB, 0xFD, 0xFB, 0xFF, 0xFF,
99 0xFF, 0xFF, 0x3E, 0xFE,
100 0x3F, 0xBA, 0xBA, 0xFE, 0xFF, 0xFF, 0xFF, 0xEF, 0xFF, 0xEF, 0xC3, 0x7F,
101 0xB2, 0x9B, 0xFF, 0xFF,
102 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0x3C, 0xFF, 0x3F, 0x3C, 0xFF, 0xFE, 0xFF,
103 0xFF, 0xFF, 0xFF, 0xFF,
104 0xAF, 0xF3, 0xFE, 0xF3, 0xE3, 0xEB, 0xFF, 0xFF, 0xFF, 0xFB, 0xFF, 0xF7,
105 0x9A, 0xFE, 0xAF, 0x9E,
106 0xBE, 0xFE, 0xFF, 0xDF, 0xFF, 0xFF, 0x7B, 0xEF, 0xF7, 0xBF, 0xFB, 0xFB,
107 0xFB, 0xFF, 0xFF, 0x7F,
108 0xFF, 0xFF, 0xFF, 0xBC, 0xBD, 0xFD, 0xBD, 0xDD, 0x7D, 0x7B, 0x7B, 0x7B,
109 0x7B, 0xFB, 0xAE, 0xFF,
110 0xFF, 0xFF, 0xFE, 0xFE, 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xF7, 0x9A, 0xFF,
111 0x9F, 0xFF, 0xAF, 0xEF,
112 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xCF, 0xF3, 0xFF, 0xEB, 0xFF, 0xEB, 0xFF,
113 0xFF, 0xBF, 0xFF, 0xFF,
114 0xEF, 0xFE, 0xFF, 0x37, 0xFC, 0xBF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
115 0xCF, 0xEF, 0xFD, 0xF3,
116 0xFF, 0xEE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x6E, 0xFD, 0x2F, 0xFD,
117 0xFF, 0xFD, 0xFF, 0xFF,
118 0xFF, 0xFF, 0xFF, 0xEF, 0xCF, 0xFF, 0xF3, 0xBF, 0x69, 0xFF, 0xFF, 0xFF,
119 0xFF, 0xFF, 0xFF, 0xFE,
120 0xFB, 0x9F, 0xFF, 0xBF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0x87,
121 0xFE, 0xDA, 0xEF, 0xCF,
122 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xEF, 0xBF, 0xEF, 0xEF, 0xFD,
123 0xFF, 0xFF, 0xFF, 0xFF,
124 0xFF, 0xEF, 0xFD, 0xFF, 0x7B, 0xFF, 0xEB, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
125 0xEB, 0xF8, 0xFF, 0xEF,
126 0xAF, 0xFF, 0xFF, 0xBD, 0xFF, 0xFF, 0xFF, 0x7F, 0xEE, 0x7F, 0xEF, 0xFF,
127 0xBB, 0xFF, 0xBF, 0xFB,
128 0xFF, 0xFF, 0xFF, 0xF7, 0xF6, 0xFB, 0xBD, 0xFD, 0xDD, 0xF5, 0xFF, 0xFF,
129 0xFF, 0xFF, 0xFF, 0xAF,
130 0xFF, 0x5F, 0xF5, 0xDF, 0xFF, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF6,
131 0xF3, 0xFF, 0xDE, 0xFE,
132 0xEF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xFF, 0xDE, 0xDF, 0x5F, 0xDF,
133 0xFD, 0xFF, 0xFF, 0xFF,
134 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFE, 0xFE, 0xFF, 0xFD, 0xFF, 0xFF, 0xFF,
135 0xFF, 0xAF, 0xFF, 0xFF,
136 0xEF, 0xED, 0xFF, 0xDF, 0xFF, 0xFF, 0xFB, 0xFF, 0xFF, 0xDA, 0xBD, 0xBE,
137 0xAE, 0xFE, 0x7F, 0xFD,
138 0xDF, 0xFF, 0xFF, 0x7F, 0xEF, 0xFF, 0xFB, 0xFB, 0xFB, 0x7F, 0xF7, 0xFF,
139 0xFF, 0xFF, 0xFF, 0xF7,
140 0xBC, 0xFD, 0xBD, 0xBD, 0xBD, 0xFD, 0x7B, 0x7B, 0x7B, 0x7B, 0xFB, 0xAE,
141 0xFF, 0xFF, 0xFD, 0xFF,
142 0xFF, 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFA, 0x9F, 0xBF, 0xBF, 0xCF,
143 0x7F, 0xFF, 0xFF, 0xFF,
144 0xFF, 0xFF, 0xAF, 0xFF, 0xEB, 0xEB, 0xEB, 0xFF, 0xD7, 0xFE, 0xFF, 0xFF,
145 0xBF, 0xE7, 0xFE, 0xBF,
146 0x7F, 0xFC, 0xFF, 0xFF, 0xED, 0xFF, 0xFF, 0xFF, 0xFF, 0x4F, 0xFF, 0xFB,
147 0xFB, 0xFF, 0xFF, 0xDD,
148 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xBD, 0xDF, 0x9D, 0xFD, 0xDF, 0xB9,
149 0xFF, 0xFF, 0xFF, 0xFF,
150 0xEF, 0xFF, 0xFB, 0xEF, 0xEB, 0xFF, 0xDE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
151 0xF6, 0x9F, 0xFF, 0xFC,
152 0xFE, 0xFB, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xDF, 0xFA, 0xCD, 0xCF,
153 0xBF, 0x9F, 0xFF, 0xFF,
154 0xFF, 0xFF, 0xF7, 0xFE, 0xBF, 0xFF, 0xDF, 0xEF, 0x5F, 0xFF, 0xFF, 0xFF,
155 0xFF, 0x7F, 0x6F, 0xFF,
156 0xBB, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7E, 0xFF,
157 0x5F, 0xFF, 0xBF, 0xBF,
158 0xF9, 0xFF, 0xFF, 0xFF, 0x7F, 0x6E, 0x7B, 0xFF, 0xEF, 0xFD, 0xEB, 0xDF,
159 0xFF, 0xFF, 0xFF, 0xFF,
160 0xF7, 0xB6, 0x3E, 0xFC, 0xFD, 0xBF, 0x7E, 0xFB, 0xFF, 0xFF, 0xFF, 0xF7,
161 0xEF, 0xF7, 0xF3, 0xF7,
162 0xFF, 0xFB, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x6E, 0x35, 0x79, 0xFF,
163 0xBF, 0xFC, 0xFF, 0xFF,
164 0xFF, 0xFF, 0xFF, 0xEF, 0xFB, 0x53, 0xDF, 0xFF, 0xEB, 0xBF, 0xFF, 0xFF,
165 0xFF, 0xFF, 0xFF, 0xBC,
166 0xFF, 0xFF, 0xFF, 0xBF, 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xAF, 0xF5,
167 0xFF, 0xF7, 0xFF, 0xFB,
168 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBA, 0xAA, 0xEE, 0xFE, 0x3F, 0x7D,
169 0xFD, 0xFF, 0xFF, 0xFF,
170 0x7F, 0xAF, 0x77, 0xFB, 0xFB, 0xFF, 0xFB, 0xF7, 0xFF, 0xFF, 0xFF, 0xFF,
171 0xF7, 0xBE, 0xBD, 0xBD,
172 0xBD, 0xBD, 0xFD, 0x7B, 0x7B, 0x7B, 0x7B, 0xFB, 0xAE, 0xFF, 0xEF, 0xFF,
173 0xFF, 0xFF, 0xFF, 0xFC,
174 0xFF, 0xFF, 0xFF, 0xFF, 0x9A, 0xD9, 0xB8, 0xFF, 0xFF, 0x79, 0xFF, 0xFF,
175 0xFF, 0xFF, 0xFF, 0xCF,
176 0xFB, 0xFF, 0xEB, 0xFF, 0xEB, 0xD7, 0xFF, 0xFF, 0xFF, 0xFF, 0xE7, 0xDE,
177 0xF8, 0xFB, 0xFE, 0x3F,
178 0xFB, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xCF, 0xAD, 0xBF, 0xFA, 0xFF, 0x73,
179 0xDF, 0xFF, 0xFF, 0xFF,
180 0xFF, 0xFF, 0x3A, 0xF5, 0xB7, 0xFC, 0x3F, 0xF9, 0xFD, 0xFF, 0xFF, 0xFF,
181 0x7F, 0xEF, 0xF3, 0xFF,
182 0xBF, 0xFE, 0xF3, 0x9F, 0xFE, 0xFF, 0xFF, 0xFF, 0xF7, 0x3E, 0xFF, 0xFF,
183 0xFF, 0xBF, 0xFF, 0xFF,
184 0xFF, 0xFF, 0xFF, 0xFF, 0xAF, 0xD3, 0xFE, 0xDB, 0xFF, 0xDB, 0xDF, 0xFF,
185 0xFF, 0xFF, 0xFF, 0xFF,
186 0x3E, 0xFF, 0xBF, 0xFF, 0x7F, 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0x8F,
187 0xF3, 0xFF, 0xED, 0xFF,
188 0xF7, 0xFB, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xF6, 0x3C, 0xFE, 0xFF, 0xFF,
189 0xFF, 0xFF, 0xFF, 0xFF,
190 0xFF, 0x9F, 0xEF, 0xEF, 0xD1, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
191 0xFF, 0xFF, 0x7E, 0xBF,
192 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBB, 0xEF, 0xDF, 0xF1,
193 0xFF, 0xFF, 0xFF, 0xFF,
194 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEE, 0x3E, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
195 0xFF, 0xFF, 0xFF, 0xBF,
196 0xEF, 0xFD, 0xC3, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBF, 0xFF,
197 0xFC, 0x3E, 0xFE, 0xFF,
198 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x2E, 0xEF, 0xF3, 0xFF, 0xFF,
199 0xFF, 0xFF, 0xFF, 0xFF,
200 0xFF, 0xFF, 0xF7, 0xBA, 0xBE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
201 0xFF, 0x7F, 0xAF, 0xFB,
202 0xFB, 0xFD, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xF2, 0xD6, 0xED,
203 0xBD, 0xBD, 0xBD, 0x7D,
204 0x7B, 0x7B, 0x7B, 0x7B, 0xFB, 0xAF, 0xDF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
205 0xFF, 0xFF, 0xFF, 0xFF,
206 0xFF, 0x92, 0xBF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F,
207 0xAF, 0xEB, 0xEB, 0xFF,
208 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE7, 0xFE, 0x2E, 0xFE, 0xFF,
209 0xFF, 0xFF, 0xFF, 0xFF,
210 0xFF, 0xFF, 0xFF, 0x4F, 0xEF, 0xF3, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
211 0xFF, 0xFF, 0xFF, 0xFE,
212 0x3C, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xCE,
213 0xC3, 0xFD, 0xFF, 0xFF,
214 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0x5D, 0xFF, 0xFF, 0xFF, 0xFF,
215 0xFF, 0xFF, 0xFF, 0xFF,
216 0xFF, 0xEF, 0xCF, 0xEB, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
217 0xF7, 0xEE, 0x3E, 0xFF,
218 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xEF, 0xDF, 0xE2, 0xFF,
219 0xFF, 0xFF, 0xFB, 0xFF,
220 0xFF, 0xFF, 0xFF, 0xFF, 0xF6, 0xBE, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
221 0xFF, 0xFF, 0x7F, 0xEE,
222 0x5F, 0xE6, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3E,
223 0x7D, 0xFF, 0xFF, 0xFF,
224 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xF3, 0xFB, 0xFF, 0xFF, 0xFF,
225 0xFF, 0xFF, 0xFF, 0xFF,
226 0xBF, 0xF7, 0x36, 0xBE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
227 0xFF, 0xEF, 0xD3, 0xF6,
228 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x7F, 0xEE,
229 0xFF, 0xFF, 0xFF, 0xFF,
230 0xFF, 0xFF, 0xFF, 0xFF, 0xAF, 0xEF, 0xEB, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
231 0xFF, 0xFF, 0xFF, 0xFF,
232 0xBA, 0xBE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEE,
233 0xFB, 0xFA, 0xFF, 0xFF,
234 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0xD6, 0xFD, 0xBD, 0xBD, 0xBD,
235 0x7D, 0x7B, 0x7B, 0x7B,
236 0x7B, 0xFB, 0xAE, 0xFF, 0x7E, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
237 0xFF, 0xF7, 0xBA, 0xBF,
238 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xEF, 0xEB, 0x6B,
239 0xFF, 0xFF, 0xFF, 0xFF,
240 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0xFE, 0xBE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
241 0xFF, 0xFF, 0xFF, 0xFF,
242 0x4F, 0xEF, 0xF7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF,
243 0x3E, 0x6E, 0xFC, 0xFF,
244 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xC3, 0xC9, 0xFF, 0xFF,
245 0xFF, 0xFF, 0xFF, 0xFF,
246 0xFF, 0xFF, 0xFF, 0x3E, 0xBF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
247 0xFF, 0xFF, 0xEF, 0xFB,
248 0xD5, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFE,
249 0xFE, 0xFF, 0xFF, 0xFF,
250 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x6F, 0xEF, 0xFB, 0xFF, 0xFF, 0xFF, 0xFB,
251 0xFF, 0xFF, 0xFF, 0xFF,
252 0xFF, 0xF6, 0xDF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xFE,
253 0xEF, 0xFF, 0xFF, 0xFF,
254 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE7, 0xFF, 0xFE, 0xFF, 0xF7, 0xFF,
255 0xFF, 0xFF, 0xFF, 0xFF,
256 0xFF, 0x7F, 0xFA, 0xEF, 0xBF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
257 0xFF, 0xE7, 0xFF, 0xFE,
258 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xFE, 0xEF, 0xBF,
259 0xFF, 0xFF, 0xFF, 0xFF,
260 0xFF, 0xFF, 0xFF, 0xFF, 0xA7, 0xFF, 0xFC, 0xF7, 0xFF, 0xFF, 0xFF, 0xFF,
261 0xFF, 0xFF, 0xFF, 0x7F,
262 0xFE, 0xAE, 0xFF, 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE7,
263 0xF7, 0xFA, 0xFF, 0xFD,
264 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xAF, 0xFF, 0xFF, 0xFF,
265 0xFF, 0xFF, 0xFF, 0xFF,
266 0xFF, 0xFF, 0xFF, 0xF7, 0xBE, 0xBD, 0xBD, 0xBD, 0xBD, 0x7D, 0x7B, 0x7B,
267 0x7B, 0x7B, 0xFB, 0xAF,
268 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xCA,
269 0xFF, 0xFF, 0xFF, 0xFF,
270 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0x6F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
271 0xFF, 0xFF, 0xFF, 0xFF,
272 0xFF, 0xE7, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
273 0xFF, 0xCF, 0xFE, 0xFF,
274 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xDF, 0xFF,
275 0xFF, 0xFF, 0xFF, 0xFF,
276 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
277 0xFF, 0xFF, 0xFF, 0xFF,
278 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF,
279 0xFF, 0xFF, 0xFF, 0xFF,
280 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
281 0xFF, 0xFF, 0xFF, 0xFF,
282 0xFF, 0xFF, 0xEF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFB, 0xFF, 0xFF, 0xFF,
283 0xFF, 0xE7, 0xF2, 0xFC,
284 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xAE, 0xEF, 0xFF,
285 0xFF, 0xFF, 0xFF, 0xFF,
286 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0x7E, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
287 0xFF, 0xFF, 0xFF, 0xFF,
288 0xEF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xBF, 0xFF, 0xFF, 0xFF, 0xBF, 0xFF,
289 0xFE, 0xFE, 0xFF, 0xFF,
290 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xDF, 0xEF, 0xDD, 0xFE, 0xFF, 0xFF,
291 0xFF, 0xFF, 0xFF, 0xFF,
292 0xFF, 0xFF, 0xFF, 0xFE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
293 0xFF, 0xFF, 0xAF, 0xEF,
294 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBA, 0xFE,
295 0xFF, 0xFF, 0xFF, 0xFF,
296 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xFA, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
297 0xFF, 0xFF, 0xFF, 0xFF,
298 0xF6, 0x9C, 0xBD, 0xBD, 0xBD, 0xBD, 0x7D, 0x7B, 0x7B, 0x7B, 0x7B, 0xFB,
299 0xAE, 0xFF, 0xFF, 0xFF,
300 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0x7A, 0xFF, 0xFF, 0xFF,
301 0xFF, 0xDF, 0xFF, 0xFF,
302 0xFF, 0xFF, 0x6F, 0xEF, 0xF7, 0xFF, 0xFF, 0xFF, 0xDF, 0xFF, 0xFF, 0xFF,
303 0xFF, 0xFF, 0xF7, 0xFE,
304 0xFE, 0xFF, 0xFF, 0xFF, 0xDF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xCF, 0xEB,
305 0xFF, 0xFF, 0xFF, 0xFF,
306 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0x9E, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF,
307 0xFF, 0xFF, 0xFF, 0xFF,
308 0xFF, 0xEF, 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
309 0xFF, 0xFE, 0xFF, 0xFF,
310 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xEF, 0xCB, 0xFF, 0xFF,
311 0xFF, 0xFF, 0xFF, 0xFD,
312 0xFF, 0xFF, 0xFF, 0xFF, 0xBE, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
313 0xFF, 0xFF, 0xFF, 0xEF,
314 0xEF, 0xFF, 0xFF, 0xFF, 0xDF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8,
315 0xFF, 0xFF, 0xFF, 0xFF,
316 0xBF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
317 0xFF, 0xFF, 0xFF, 0xFF,
318 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
319 0xFB, 0xAF, 0x7F, 0xFF,
320 0xFF, 0xFF, 0xDF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xEF, 0xFF,
321 0xFF, 0xFF, 0xFF, 0xFF,
322 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
323 0xFF, 0xFF, 0xBF, 0xFF,
324 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xAE,
325 0xFF, 0xFF, 0xFF, 0xFF,
326 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0xFA, 0xFF, 0xFF, 0xFF, 0xFF,
327 0xFF, 0xFF, 0xFF, 0xFF,
328 0xFF, 0x7F, 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
329 0xFF, 0xF7, 0xBC, 0xBD,
330 0xBD, 0xBD, 0xBD, 0x7D, 0x7B, 0x7B, 0x7B, 0x7B, 0xFB, 0xAF, 0xFF, 0xFF,
331 0xFF, 0xFF, 0xFF, 0xFF,
332 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0xFA, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
333 0xFF, 0xFF, 0xFF, 0x7F,
334 0xAF, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF,
335 0xFE, 0xFF, 0xFF, 0xFF,
336 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xCF, 0xFF, 0xFF, 0xFF, 0xFF,
337 0xFF, 0xFF, 0xFF, 0xFF,
338 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFB, 0xFF,
339 0xFF, 0xFF, 0xEF, 0xFF,
340 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF,
341 0xFF, 0xFF, 0xFF, 0xFF,
342 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
343 0xBF, 0xFF, 0xFF, 0xFF,
344 0xFF, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
345 0xEF, 0xFF, 0xFF, 0xFF,
346 0xFF, 0xFF, 0xFB, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xFE, 0xFF, 0x9F, 0x9F,
347 0x9F, 0x3F, 0x3F, 0x3F,
348 0x3F, 0x3F, 0xFF, 0xEF, 0xDF, 0xDF, 0xDF, 0xDF, 0xCF, 0xB7, 0xBF, 0xBF,
349 0xBF, 0xBF, 0xFF, 0xBC,
350 0xB9, 0x9D, 0xBD, 0xBD, 0x7D, 0x7B, 0x7B, 0x7B, 0x7B, 0xFB, 0xEF, 0xD7,
351 0xF5, 0xF3, 0xF1, 0xD1,
352 0x65, 0xE3, 0xE3, 0xE3, 0xA3, 0xFF, 0xFE, 0x7F, 0xFE, 0xDE, 0xDE, 0xFF,
353 0xBD, 0xBD, 0xBD, 0xBD,
354 0xDF, 0xEF, 0xFB, 0xF7, 0xF3, 0xF3, 0xF3, 0xE7, 0xE7, 0xE7, 0xE7, 0xE7,
355 0xFB, 0xFE, 0xFF, 0xFF,
356 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
357
358 };
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 774232c13b31..48dbb35747d8 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -42,6 +42,7 @@
 #include <linux/ethtool.h>
 #include <linux/mii.h>
 #include <linux/jiffies.h>
+#include <linux/firmware.h>
 
 #include <pcmcia/cs_types.h>
 #include <pcmcia/cs.h>
@@ -55,17 +56,18 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>
 
-/* Ositech Seven of Diamonds firmware */
-#include "ositech.h"
-
 /*====================================================================*/
 
 static const char *if_names[] = { "auto", "10baseT", "10base2"};
 
+/* Firmware name */
+#define FIRMWARE_NAME		"ositech/Xilinx7OD.bin"
+
 /* Module parameters */
 
 MODULE_DESCRIPTION("SMC 91c92 series PCMCIA ethernet driver");
 MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(FIRMWARE_NAME);
 
 #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
 
@@ -771,6 +773,26 @@ static int osi_config(struct pcmcia_device *link)
 	return i;
 }
 
+static int osi_load_firmware(struct pcmcia_device *link)
+{
+	const struct firmware *fw;
+	int i, err;
+
+	err = request_firmware(&fw, FIRMWARE_NAME, &link->dev);
+	if (err) {
+		pr_err("Failed to load firmware \"%s\"\n", FIRMWARE_NAME);
+		return err;
+	}
+
+	/* Download the Seven of Diamonds firmware */
+	for (i = 0; i < fw->size; i++) {
+		outb(fw->data[i], link->io.BasePort1 + 2);
+		udelay(50);
+	}
+	release_firmware(fw);
+	return err;
+}
+
 static int osi_setup(struct pcmcia_device *link, u_short manfid, u_short cardid)
 {
 	struct net_device *dev = link->priv;
@@ -811,11 +833,9 @@ static int osi_setup(struct pcmcia_device *link, u_short manfid, u_short cardid)
 	    (cardid == PRODID_OSITECH_SEVEN)) ||
 	    ((manfid == MANFID_PSION) &&
 	    (cardid == PRODID_PSION_NET100))) {
-		/* Download the Seven of Diamonds firmware */
-		for (i = 0; i < sizeof(__Xilinx7OD); i++) {
-			outb(__Xilinx7OD[i], link->io.BasePort1+2);
-			udelay(50);
-		}
+		rc = osi_load_firmware(link);
+		if (rc)
+			goto free_cfg_mem;
 	} else if (manfid == MANFID_OSITECH) {
 		/* Make sure both functions are powered up */
 		set_bits(0x300, link->io.BasePort1 + OSITECH_AUI_PWR);
@@ -862,10 +882,10 @@ static int smc91c92_resume(struct pcmcia_device *link)
 	    (smc->cardid == PRODID_OSITECH_SEVEN)) ||
 	    ((smc->manfid == MANFID_PSION) &&
 	    (smc->cardid == PRODID_PSION_NET100))) {
-		/* Download the Seven of Diamonds firmware */
-		for (i = 0; i < sizeof(__Xilinx7OD); i++) {
-			outb(__Xilinx7OD[i], link->io.BasePort1+2);
-			udelay(50);
-		}
+		i = osi_load_firmware(link);
+		if (i) {
+			pr_err("smc91c92_cs: Failed to load firmware\n");
+			return i;
+		}
 	}
 	if (link->open) {
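Both firmware conversions in this series (here, and in 3c359 below) follow the standard request_firmware() contract: ask for a named image on behalf of a device, receive a const struct firmware whose ->data and ->size describe the blob, and release it when done. The skeleton, as used by osi_load_firmware() above:

	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, FIRMWARE_NAME, &link->dev);
	if (err)
		return err;	/* no image available from userspace */

	/* ... push fw->data[0 .. fw->size - 1] to the device ... */

	release_firmware(fw);

The MODULE_FIRMWARE() annotation advertises the image name so packaging tools can ship it alongside the module.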
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 58b73b08dde0..3ff1f425f1bb 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -757,8 +757,7 @@ EXPORT_SYMBOL(phy_start);
  */
 static void phy_state_machine(struct work_struct *work)
 {
-	struct delayed_work *dwork =
-			container_of(work, struct delayed_work, work);
+	struct delayed_work *dwork = to_delayed_work(work);
 	struct phy_device *phydev =
 			container_of(dwork, struct phy_device, state_queue);
 	int needs_aneg = 0;
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index a50078627fb6..913b2a5fafc9 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -33,7 +33,6 @@
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
 
-#include <linux/version.h>
 
 #include "qlge.h"
 
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 06c535222666..e1a638a05f86 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2075,8 +2075,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (!tp->pcie_cap && netif_msg_probe(tp))
 		dev_info(&pdev->dev, "no PCI Express capability\n");
 
-	/* Unneeded ? Don't mess with Mrs. Murphy. */
-	rtl8169_irq_mask_and_ack(ioaddr);
+	RTL_W16(IntrMask, 0x0000);
 
 	/* Soft reset the chip. */
 	RTL_W8(ChipCmd, CmdReset);
@@ -2088,6 +2087,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		msleep_interruptible(1);
 	}
 
+	RTL_W16(IntrStatus, 0xffff);
+
 	/* Identify chip attached to board */
 	rtl8169_get_mac_version(tp, ioaddr);
 
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 00c23b1babca..dee23b159df2 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -448,6 +448,9 @@ static void efx_init_channels(struct efx_nic *efx)
 
 		WARN_ON(channel->rx_pkt != NULL);
 		efx_rx_strategy(channel);
+
+		netif_napi_add(channel->napi_dev, &channel->napi_str,
+			       efx_poll, napi_weight);
 	}
 }
 
@@ -462,10 +465,6 @@ static void efx_start_channel(struct efx_channel *channel)
 
 	EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);
 
-	if (!(channel->efx->net_dev->flags & IFF_UP))
-		netif_napi_add(channel->napi_dev, &channel->napi_str,
-			       efx_poll, napi_weight);
-
 	/* The interrupt handler for this channel may set work_pending
 	 * as soon as we enable it.  Make sure it's cleared before
 	 * then.  Similarly, make sure it sees the enabled flag set. */
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index b52a1c088f37..d91e95b237b7 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -1908,7 +1908,7 @@ static int tc35815_poll(struct napi_struct *napi, int budget)
 	do {
 		tc_writel(status, &tr->Int_Src);	/* write to clear */
 
-		handled = tc35815_do_interrupt(dev, status, limit);
+		handled = tc35815_do_interrupt(dev, status, budget - received);
 		if (handled >= 0) {
 			received += handled;
 			if (received >= budget)
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 4a65fc2dd928..534c0f38483c 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -62,6 +62,7 @@
 #include <linux/pci.h>
 #include <linux/spinlock.h>
 #include <linux/bitops.h>
+#include <linux/firmware.h>
 
 #include <net/checksum.h>
 
@@ -73,8 +74,10 @@
 static char version[] __devinitdata  =
 "3c359.c v1.2.0 2/17/01 - Mike Phillips (mikep@linuxtr.net)" ;
 
+#define FW_NAME "3com/3C359.bin"
 MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
 MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver \n") ;
+MODULE_FIRMWARE(FW_NAME);
 
 /* Module paramters */
 
@@ -114,8 +117,6 @@ MODULE_PARM_DESC(message_level, "3c359: Level of reported messages") ;
  *	will be stuck with 1555 lines of hex #'s in the code.
  */
 
-#include "3c359_microcode.h"
-
 static struct pci_device_id xl_pci_tbl[] =
 {
 	{PCI_VENDOR_ID_3COM,PCI_DEVICE_ID_3COM_3C359, PCI_ANY_ID, PCI_ANY_ID, },
@@ -364,10 +365,30 @@ static int __devinit xl_probe(struct pci_dev *pdev,
 	return 0;
 }
 
+static int xl_init_firmware(struct xl_private *xl_priv)
+{
+	int err;
+
+	err = request_firmware(&xl_priv->fw, FW_NAME, &xl_priv->pdev->dev);
+	if (err) {
+		printk(KERN_ERR "Failed to load firmware \"%s\"\n", FW_NAME);
+		return err;
+	}
+
+	if (xl_priv->fw->size < 16) {
+		printk(KERN_ERR "Bogus length %zu in \"%s\"\n",
+		       xl_priv->fw->size, FW_NAME);
+		release_firmware(xl_priv->fw);
+		err = -EINVAL;
+	}
+
+	return err;
+}
 
 static int __devinit xl_init(struct net_device *dev)
 {
 	struct xl_private *xl_priv = netdev_priv(dev);
+	int err;
 
 	printk(KERN_INFO "%s \n", version);
 	printk(KERN_INFO "%s: I/O at %hx, MMIO at %p, using irq %d\n",
@@ -375,8 +396,11 @@ static int __devinit xl_init(struct net_device *dev)
 
 	spin_lock_init(&xl_priv->xl_lock) ;
 
-	return xl_hw_reset(dev) ;
+	err = xl_init_firmware(xl_priv);
+	if (err == 0)
+		err = xl_hw_reset(dev);
 
+	return err;
 }
 
 
@@ -386,7 +410,7 @@ static int __devinit xl_init(struct net_device *dev)
  */
 
-static int xl_hw_reset(struct net_device *dev) 
+static int xl_hw_reset(struct net_device *dev)
 {
 	struct xl_private *xl_priv = netdev_priv(dev);
 	u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
 	unsigned long t ;
@@ -396,6 +420,9 @@ static int xl_hw_reset(struct net_device *dev)
 	u16 start ;
 	int j ;
 
+	if (xl_priv->fw == NULL)
+		return -EINVAL;
+
 	/*
 	 *  Reset the card. If the card has got the microcode on board, we have
 	 *  missed the initialization interrupt, so we must always do this.
@@ -458,25 +485,30 @@ static int xl_hw_reset(struct net_device *dev)
 
 	/*
 	 * Now to write the microcode into the shared ram
-	 * The microcode must finish at position 0xFFFF, so we must subtract
-	 * to get the start position for the code
+	 * The microcode must finish at position 0xFFFF,
+	 * so we must subtract to get the start position for the code
+	 *
+	 * Looks strange but ensures compiler only uses
+	 * 16 bit unsigned int
 	 */
+	start = (0xFFFF - (xl_priv->fw->size) + 1) ;
 
-	start = (0xFFFF - (mc_size) + 1 ) ; /* Looks strange but ensures compiler only uses 16 bit unsigned int for this */
-
 	printk(KERN_INFO "3C359: Uploading Microcode: ");
 
-	for (i = start, j = 0; j < mc_size; i++, j++) {
-		writel(MEM_BYTE_WRITE | 0XD0000 | i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
-		writeb(microcode[j],xl_mmio + MMIO_MACDATA) ;
+	for (i = start, j = 0; j < xl_priv->fw->size; i++, j++) {
+		writel(MEM_BYTE_WRITE | 0XD0000 | i,
+		       xl_mmio + MMIO_MAC_ACCESS_CMD);
+		writeb(xl_priv->fw->data[j], xl_mmio + MMIO_MACDATA);
 		if (j % 1024 == 0)
 			printk(".");
 	}
 	printk("\n") ;
 
-	for (i=0;i < 16; i++) {
-		writel( (MEM_BYTE_WRITE | 0xDFFF0) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
-		writeb(microcode[mc_size - 16 + i], xl_mmio + MMIO_MACDATA) ;
+	for (i = 0; i < 16; i++) {
+		writel((MEM_BYTE_WRITE | 0xDFFF0) + i,
+		       xl_mmio + MMIO_MAC_ACCESS_CMD);
+		writeb(xl_priv->fw->data[xl_priv->fw->size - 16 + i],
+		       xl_mmio + MMIO_MACDATA);
 	}
 
 	/*
@@ -1782,6 +1814,7 @@ static void __devexit xl_remove_one (struct pci_dev *pdev)
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct xl_private *xl_priv=netdev_priv(dev);
 
+	release_firmware(xl_priv->fw);
 	unregister_netdev(dev);
 	iounmap(xl_priv->xl_mmio) ;
 	pci_release_regions(pdev) ;
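The placement arithmetic in the upload loop above is easy to sanity-check: the image must end at shared-RAM offset 0xFFFF, so start = 0xFFFF - size + 1, computed in 16-bit unsigned arithmetic. For the 24880-byte (0x6130) image that previously lived in 3c359_microcode.h this gives start = 0x9ED0, and the final byte lands at 0x9ED0 + 24880 - 1 = 0xFFFF, exactly as the comment promises.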
diff --git a/drivers/net/tokenring/3c359.h b/drivers/net/tokenring/3c359.h
index 66b1ff603234..bcb1a6b4a4c7 100644
--- a/drivers/net/tokenring/3c359.h
+++ b/drivers/net/tokenring/3c359.h
@@ -284,5 +284,8 @@ struct xl_private {
 	u8 xl_laa[6] ;
 	u32 rx_ring_dma_addr ;
 	u32 tx_ring_dma_addr ;
+
+	/* firmware section */
+	const struct firmware *fw;
 };
 
diff --git a/drivers/net/tokenring/3c359_microcode.h b/drivers/net/tokenring/3c359_microcode.h
deleted file mode 100644
index 0400c029c077..000000000000
--- a/drivers/net/tokenring/3c359_microcode.h
+++ /dev/null
@@ -1,1581 +0,0 @@
1
2/*
3 * The firmware this driver downloads into the tokenring card is a
4 * separate program and is not GPL'd source code, even though the Linux
5 * side driver and the routine that loads this data into the card are.
6 *
7 * This firmware is licensed to you strictly for use in conjunction
8 * with the use of 3Com 3C359 TokenRing adapters. There is no
9 * waranty expressed or implied about its fitness for any purpose.
10 */
11
12/* 3c359_microcode.mac: 3Com 3C359 Tokenring microcode.
13 *
14 * Notes:
15 * - Loaded from xl_init upon adapter initialization.
16 *
17 * Available from 3Com as part of their standard 3C359 driver.
18 *
19 * mc_size *must* must match the microcode being used, each version is a
20 * different length.
21 */
22
23static int mc_size = 24880 ;
24
25static const u8 microcode[] = {
26 0xfe,0x3a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
27,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
28,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
29,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
30,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x33,0x2f,0x30,0x32,0x2f,0x39,0x39,0x20,0x31
31,0x37,0x3a,0x31,0x33,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
32,0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37,0x38,0x39,0x41,0x42,0x43,0x44,0x45,0x46
33,0x00,0x00,0x07,0xff,0x02,0x00,0xfe,0x9f,0x06,0x00,0x00,0x7c,0x48,0x00,0x00,0x70
34,0x82,0x00,0xff,0xff,0x86,0x00,0xff,0xff,0x88,0x00,0xff,0xff,0x9a,0x00,0xff,0xff
35,0xff,0xff,0x11,0x00,0xc0,0x00,0xff,0xff,0xff,0xff,0x11,0x22,0x33,0x44,0x55,0x66
36,0x33,0x43,0x4f,0x4d,0x20,0x42,0x41,0x42,0x45,0x11,0x40,0xc0,0x00,0xff,0xff,0xff
37,0xff,0x11,0x22,0x33,0x44,0x55,0x66,0x53,0x74,0x61,0x72,0x74,0x20,0x6f,0x66,0x20
38,0x4c,0x4c,0x43,0x20,0x66,0x72,0x61,0x6d,0x65,0x2e,0x20,0x20,0x54,0x6f,0x74,0x61
39,0x6c,0x20,0x64,0x61,0x74,0x61,0x20,0x73,0x69,0x7a,0x65,0x20,0x69,0x73,0x20,0x78
40,0x78,0x78,0x20,0x20,0x20,0x42,0x41,0x42,0x45,0xe8,0xd2,0x01,0x83,0x3e,0xf7,0x34
41,0x00,0x75,0x21,0xe8,0x41,0x00,0x83,0x3e,0xf7,0x34,0x00,0x75,0x17,0xe8,0x82,0x00
42,0x83,0x3e,0xf7,0x34,0x00,0x75,0x0d,0xe8,0xbf,0x00,0x83,0x3e,0xf7,0x34,0x00,0x75
43,0x03,0xe8,0x41,0x02,0xc3,0x1e,0xb8,0x00,0xf0,0x8e,0xd8,0x33,0xf6,0xb9,0x00,0x80
44,0x33,0xdb,0xad,0x03,0xd8,0xe2,0xfb,0x1f,0xb8,0x00,0x00,0x83,0xfb,0x00,0x74,0x03
45,0xb8,0x22,0x00,0xa3,0xf7,0x34,0xc3,0xfa,0xba,0x56,0x00,0xb0,0xff,0xee,0x33,0xc0
46,0x8e,0xc0,0x33,0xf6,0xb9,0xff,0x7f,0x83,0x3e,0xff,0x34,0x00,0x74,0x08,0x8d,0x3e
47,0x30,0x61,0xd1,0xef,0x2b,0xcf,0x26,0x8b,0x1c,0x26,0xc7,0x04,0xff,0xff,0x26,0x83
48,0x3c,0xff,0x75,0x17,0x26,0xc7,0x04,0x00,0x00,0x26,0x83,0x3c,0x00,0x75,0x0c,0x26
49,0x89,0x1c,0x46,0x46,0xe2,0xe0,0xb8,0x00,0x00,0xeb,0x03,0xb8,0x24,0x00,0xa3,0xf7
50,0x34,0xc3,0xfa,0xb4,0xd7,0x9e,0x73,0x3a,0x75,0x38,0x79,0x36,0x7b,0x34,0x9f,0xb1
51,0x05,0xd2,0xec,0x73,0x2d,0xb0,0x40,0xd0,0xe0,0x71,0x27,0x79,0x25,0xd0,0xe0,0x73
52,0x21,0x7b,0x1f,0x32,0xc0,0x75,0x1b,0x32,0xe4,0x9e,0x72,0x16,0x74,0x14,0x78,0x12
53,0x7a,0x10,0x9f,0xd2,0xec,0x72,0x0b,0xd0,0xe4,0x70,0x07,0x75,0x05,0xb8,0x00,0x00
54,0xeb,0x03,0xb8,0x26,0x00,0xa3,0xf7,0x34,0xc3,0xfa,0xba,0x5a,0x00,0x33,0xc0,0xef
55,0xef,0xef,0xef,0xb0,0x00,0xe6,0x56,0xb0,0x00,0xe6,0x54,0xba,0x52,0x00,0xb8,0x01
56,0x01,0xef,0xe8,0xca,0x00,0x3c,0x01,0x75,0x7f,0xe8,0x83,0x00,0xba,0x52,0x00,0xb8
57,0x02,0x02,0xef,0xe8,0xb9,0x00,0x3c,0x02,0x75,0x6e,0xe8,0x7a,0x00,0xba,0x52,0x00
58,0xb8,0x04,0x04,0xef,0xe8,0xa8,0x00,0x3c,0x04,0x75,0x5d,0xe8,0x71,0x00,0xba,0x52
59,0x00,0xb8,0x08,0x08,0xef,0xe8,0x97,0x00,0x3c,0x08,0x75,0x4c,0xe8,0x68,0x00,0xba
60,0x52,0x00,0xb8,0x10,0x10,0xef,0xe8,0x86,0x00,0x3c,0x10,0x75,0x3b,0xe8,0x5f,0x00
61,0xba,0x52,0x00,0xb8,0x20,0x20,0xef,0xe8,0x75,0x00,0x3c,0x20,0x75,0x2a,0xe8,0x56
62,0x00,0xba,0x52,0x00,0xb8,0x40,0x40,0xef,0xe8,0x64,0x00,0x3c,0x40,0x75,0x19,0xe8
63,0x4d,0x00,0xba,0x52,0x00,0xb8,0x80,0x80,0xef,0xe8,0x53,0x00,0x3c,0x80,0x75,0x08
64,0xe8,0x44,0x00,0xb8,0x00,0x00,0xeb,0x03,0xb8,0x28,0x00,0xa3,0xf7,0x34,0xc3,0xba
65,0x5a,0x00,0xb8,0x00,0x80,0xef,0xc3,0xba,0x5a,0x00,0xb8,0x01,0x80,0xef,0xc3,0xba
66,0x5a,0x00,0xb8,0x02,0x80,0xef,0xc3,0xba,0x5a,0x00,0xb8,0x03,0x80,0xef,0xc3,0xba
67,0x5a,0x00,0xb8,0x04,0x80,0xef,0xc3,0xba,0x5a,0x00,0xb8,0x05,0x80,0xef,0xc3,0xba
68,0x5a,0x00,0xb8,0x06,0x80,0xef,0xc3,0xba,0x5a,0x00,0xb8,0x07,0x80,0xef,0xc3,0xb9
69,0xff,0xff,0xe4,0x58,0xe4,0x54,0x3c,0x00,0x75,0x03,0x49,0x75,0xf7,0xc3,0xfa,0x32
70,0xc0,0xe6,0x56,0xe4,0x56,0x3c,0x00,0x74,0x03,0xe9,0x82,0x00,0xb0,0xff,0xe6,0x56
71,0xe4,0x56,0x3c,0xff,0x75,0x78,0xba,0x52,0x00,0xb8,0xff,0xff,0xef,0xed,0x3c,0xff
72,0x75,0x6c,0xb8,0x00,0xff,0xef,0xed,0x3c,0x00,0x75,0x63,0xb0,0xff,0xe6,0x54,0xe4
73,0x54,0x3c,0xff,0x75,0x59,0x32,0xc0,0xe6,0x54,0xe4,0x54,0x3c,0x00,0x75,0x4f,0xb0
74,0x0f,0xe6,0x50,0xe4,0x50,0x24,0x0f,0x3c,0x0f,0x75,0x43,0xb0,0x00,0xe6,0x50,0xe4
75,0x50,0x24,0x0f,0x3c,0x00,0x75,0x37,0x8c,0xc8,0x8e,0xc0,0xbe,0x70,0x00,0x26,0x8b
76,0x14,0x26,0x8b,0x5c,0x02,0xb8,0x00,0x00,0xef,0xed,0x23,0xc3,0x3d,0x00,0x00,0x75
77,0x1d,0xb8,0xff,0xff,0x23,0xc3,0xef,0x8b,0xc8,0xed,0x23,0xc3,0x3b,0xc1,0x75,0x0e
78,0x83,0xc6,0x04,0x26,0x83,0x3c,0xff,0x75,0xd5,0xb8,0x00,0x00,0xeb,0x03,0xb8,0x2a
79,0x00,0xa3,0xf7,0x34,0xc3,0xfa,0x33,0xc0,0xbf,0x00,0x20,0xb9,0x17,0x00,0xf3,0xab
80,0xbf,0x00,0x30,0xb9,0x17,0x00,0xf3,0xab,0xbf,0x00,0x22,0xb9,0x40,0x00,0xf3,0xab
81,0xbf,0x00,0x32,0xb9,0x40,0x00,0xf3,0xab,0xfc,0x1e,0x8c,0xc8,0x8e,0xd8,0x33,0xc0
82,0x8e,0xc0,0xbe,0x92,0x00,0xbf,0x00,0x20,0xb9,0x17,0x00,0xf3,0xa4,0xbe,0xa9,0x00
83,0xbf,0x00,0x22,0xb9,0x40,0x00,0xf3,0xa4,0x1f,0xc7,0x06,0xfb,0x34,0x64,0x00,0xba
84,0x08,0x00,0xb8,0x0f,0x00,0xef,0xe8,0x82,0x01,0xe8,0x9b,0x01,0x72,0x0d,0xc7,0x06
85,0xf7,0x34,0x2c,0x00,0xc7,0x06,0xf9,0x34,0x04,0x00,0xc3,0xba,0x0a,0x00,0x33,0xc0
86,0xef,0xe8,0x98,0x01,0xe8,0xb5,0x01,0xb8,0x17,0x00,0xba,0x9c,0x00,0xef,0xb8,0x00
87,0x10,0xba,0x9a,0x00,0xef,0xb8,0x17,0x00,0xa9,0x01,0x00,0x74,0x01,0x40,0xba,0x8c
88,0x00,0xef,0xb8,0x00,0x18,0xba,0x86,0x00,0xef,0xb8,0x0c,0x00,0xba,0x82,0x00,0xef
89,0xba,0x02,0x00,0xed,0x25,0xf9,0xff,0x0d,0x02,0x00,0xef,0xba,0x06,0x00,0x33,0xc0
90,0xef,0xba,0x04,0x00,0xb8,0x60,0x00,0xef,0xba,0x00,0x00,0xb8,0x18,0x00,0xef,0xba
91,0x80,0x00,0xb9,0xff,0xff,0xed,0xa9,0x01,0x00,0x75,0x04,0xe2,0xf8,0xeb,0x3e,0xba
92,0x0a,0x00,0xed,0xa9,0x00,0x40,0x74,0x35,0xa9,0x00,0x20,0x74,0x30,0x33,0xc0,0xef
93,0x51,0xb9,0xc8,0x00,0xe2,0xfe,0x59,0x1e,0x06,0x1f,0x26,0x8b,0x0e,0x02,0x30,0x83
94,0xf9,0x17,0x75,0x18,0x49,0x49,0xbe,0x02,0x20,0xbf,0x06,0x30,0xf3,0xa6,0x1f,0x23
95,0xc9,0x75,0x0a,0xff,0x0e,0xfb,0x34,0x74,0x12,0xe9,0x4d,0xff,0x1f,0xb8,0x2c,0x00
96,0xbb,0x00,0x00,0xa3,0xf7,0x34,0x89,0x1e,0xf9,0x34,0xc3,0xc7,0x06,0xfb,0x34,0x64
97,0x00,0xe8,0xd3,0x00,0x72,0x0d,0xc7,0x06,0xf7,0x34,0x2c,0x00,0xc7,0x06,0xf9,0x34
98,0x04,0x00,0xc3,0xe8,0xd6,0x00,0xe8,0xf3,0x00,0xb8,0x03,0x00,0xba,0x82,0x00,0xef
99,0xb8,0x40,0x80,0xba,0x98,0x00,0xef,0xb8,0x00,0x11,0xba,0x96,0x00,0xef,0xb8,0x40
100,0x00,0xa9,0x01,0x00,0x74,0x01,0x40,0xba,0x92,0x00,0xef,0xb8,0x00,0x19,0xba,0x8e
101,0x00,0xef,0xba,0x02,0x00,0xed,0x25,0xf9,0xff,0x0d,0x06,0x00,0xef,0xba,0x06,0x00
102,0x33,0xc0,0xef,0xba,0x00,0x00,0xb8,0x18,0x00,0xef,0xba,0x80,0x00,0xb9,0xff,0xff
103,0xed,0xa9,0x20,0x00,0x75,0x04,0xe2,0xf8,0xeb,0x43,0xba,0x0a,0x00,0xed,0xa9,0x00
104,0x40,0x74,0x3a,0xa9,0x00,0x20,0x74,0x35,0x33,0xc0,0xef,0x51,0xb9,0xc8,0x00,0xe2
105,0xfe,0x59,0x1e,0x06,0x1f,0x26,0x8b,0x0e,0x02,0x32,0x83,0xf9,0x40,0x75,0x1d,0x49
106,0x49,0xbe,0x02,0x22,0xbf,0x06,0x32,0xf3,0xa6,0x1f,0x23,0xc9,0x75,0x0f,0xff,0x0e
107,0xfb,0x34,0x74,0x03,0xe9,0x5a,0xff,0xb8,0x00,0x00,0xeb,0x0b,0x1f,0xb8,0x2c,0x00
108,0xbb,0x02,0x00,0x89,0x1e,0xf9,0x34,0xa3,0xf7,0x34,0xc3,0xba,0x02,0x00,0xb8,0x00
109,0x9c,0xef,0xba,0x00,0x00,0xb8,0x00,0x84,0xef,0x33,0xc0,0xef,0xba,0x0a,0x00,0xef
110,0xba,0x0e,0x00,0x33,0xc0,0xef,0xc3,0xba,0x0a,0x00,0xb9,0xff,0xff,0xed,0x25,0x00
111,0x60,0x3d,0x00,0x60,0x74,0x04,0xe2,0xf5,0xf8,0xc3,0xf9,0xc3,0xb0,0x00,0xe6,0x56
112,0xb8,0x00,0xff,0xba,0x52,0x00,0xef,0xb9,0xff,0xff,0xba,0x58,0x00,0xed,0x25,0xef
113,0x00,0x74,0x08,0xba,0x5a,0x00,0x33,0xc0,0xef,0xe2,0xef,0xc3,0xba,0x80,0x00,0xed
114,0xba,0x84,0x00,0xef,0xba,0x80,0x00,0xed,0xc3,0x00,0x00,0x00,0x00,0x00,0x00,0x00
115,0xc6,0x06,0xec,0x34,0x15,0x33,0xc0,0x8e,0xd8,0x8e,0xc0,0x1e,0x8c,0xc8,0xbe,0x40
116,0x54,0xbf,0x60,0xfe,0x8e,0xd8,0xb9,0x10,0x00,0xf3,0xa4,0x1f,0xc7,0x06,0x80,0x36
117,0x10,0x35,0xc7,0x06,0x8c,0x36,0x30,0x35,0x8d,0x06,0x38,0x35,0xa3,0x30,0x35,0xa3
118,0x32,0x35,0x05,0x33,0x01,0xa3,0x34,0x35,0xc7,0x06,0x36,0x35,0x50,0x01,0xc7,0x06
119,0x84,0x36,0x80,0xfe,0xc7,0x06,0x88,0x36,0xc0,0xfe,0xc6,0x06,0xc2,0xfe,0xff,0xc6
120,0x06,0x93,0x36,0x80,0xc6,0x06,0x92,0x36,0x00,0xc6,0x06,0x80,0xfe,0x80,0xc7,0x06
121,0x82,0xfe,0x54,0x50,0xc7,0x06,0x84,0xfe,0x2b,0x4d,0xe5,0xce,0xa9,0x02,0x00,0x75
122,0x08,0xc6,0x06,0x81,0xfe,0x23,0xe9,0x05,0x00,0xc6,0x06,0x81,0xfe,0x22,0xa1,0xf7
123,0x34,0xa3,0x86,0xfe,0xb8,0x48,0x34,0x86,0xe0,0xa3,0x88,0xfe,0x8d,0x06,0x4e,0x34
124,0x86,0xe0,0xa3,0x8a,0xfe,0xb8,0x58,0x34,0x86,0xe0,0xa3,0x8c,0xfe,0xb8,0x9c,0x34
125,0x86,0xe0,0xa3,0x8e,0xfe,0x8d,0x06,0x20,0x03,0x86,0xe0,0xa3,0x90,0xfe,0x33,0xc0
126,0xba,0x72,0x00,0xef,0x33,0xc0,0xba,0x74,0x00,0xef,0xba,0x76,0x00,0xef,0xb8,0x80
127,0xfe,0x86,0xe0,0xba,0x72,0x00,0xef,0xe8,0xbf,0x07,0xba,0x0c,0x01,0xb8,0x40,0x40
128,0xef,0xed,0xba,0x6a,0x00,0xb8,0x03,0x00,0xc1,0xe0,0x08,0x0d,0x03,0x00,0xef,0xb9
129,0x0a,0x00,0xe8,0x94,0x00,0xba,0x6a,0x00,0xb8,0x03,0x00,0xc1,0xe0,0x08,0xef,0xa1
130,0x32,0x34,0xa3,0xa2,0x33,0xc7,0x06,0xa6,0x33,0x04,0x00,0x8d,0x06,0xa0,0x33,0xc1
131,0xe8,0x04,0xcd,0x39,0xc7,0x06,0x90,0x36,0xff,0xff,0xe9,0xe3,0x00,0x63,0x0d,0x66
132,0x0d,0x66,0x0d,0x8a,0x0d,0xe6,0x0e,0x75,0x12,0x2e,0x0f,0x03,0x0f,0x50,0x0f,0x60
133,0x0d,0x60,0x0d,0x60,0x0d,0xed,0x0f,0xe9,0x12,0x60,0x0d,0x60,0x0d,0x60,0x0d,0x60
134,0x0d,0x60,0x0d,0x22,0x10,0x60,0x0d,0x60,0x0d,0x60,0x0d,0x60,0x0d,0xfe,0x10,0x60
135,0x0d,0x60,0x0d,0x60,0x0d,0x60,0x0d,0x60,0x0d,0x60,0x0d,0xaf,0x0f,0x32,0x10,0x37
136,0x0d,0x60,0x0d,0x60,0x0d,0x60,0x0d,0x60,0x0d,0x60,0x0d,0x60,0x0d,0x60,0x0d,0x60
137,0x0d,0x60,0x0d,0x60,0x0d,0x60,0x0d,0x60,0x0d,0x60,0x0d,0x60,0x0d,0x60,0x0d,0x60
138,0x0d,0x64,0x0e,0x00,0x0f,0x95,0x09,0x60,0x0a,0x49,0xbb,0xff,0xff,0xba,0x6a,0x00
139,0xed,0xa9,0x00,0x20,0x74,0x38,0x80,0x3e,0x80,0xfe,0x12,0x75,0x31,0xe8,0x4a,0x00
140,0xa1,0x32,0x34,0xa3,0xa2,0x33,0xc7,0x06,0xa6,0x33,0x04,0x00,0x8d,0x06,0xa0,0x33
141,0xc1,0xe8,0x04,0xcd,0x39,0xe8,0x22,0x00,0xc7,0x06,0xf3,0x34,0x46,0x00,0xc7,0x06
142,0xf5,0x34,0xff,0xff,0xc7,0x06,0x90,0x36,0xff,0xff,0x58,0xe9,0x32,0x00,0x4b,0x83
143,0xfb,0x00,0x75,0xb9,0x83,0xf9,0x00,0x75,0xb0,0xc3,0x52,0xba,0x6a,0x00,0xb8,0x03
144,0x00,0xc1,0xe0,0x08,0x0d,0x03,0x00,0xef,0x5a,0xc3,0x52,0xba,0x6a,0x00,0xb8,0x03
145,0x00,0xc1,0xe0,0x08,0xef,0x5a,0xc3,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
146,0x68,0x80,0x07,0xa1,0x90,0x36,0xcd,0x35,0x8b,0x36,0x24,0x02,0x2e,0xff,0xa4,0x35
147,0x0a,0xfa,0x8a,0x26,0x94,0x36,0x88,0x26,0xe8,0x34,0xc6,0x06,0x94,0x36,0x00,0xfb
148,0x22,0xe4,0x75,0x01,0xc3,0xf6,0xc4,0x20,0x74,0x7d,0xf6,0xc4,0x08,0x74,0x05,0x80
149,0x0e,0x92,0x36,0x04,0x80,0x26,0xe8,0x34,0xd7,0xc4,0x1e,0x84,0x36,0x26,0x8b,0x37
150,0x81,0xe6,0xff,0x00,0x83,0xfe,0x20,0x76,0x05,0xb0,0x01,0xe9,0x28,0x00,0x53,0x06
151,0xd1,0xe6,0x2e,0xff,0x94,0x9d,0x06,0x07,0x5b,0x26,0x88,0x47,0x02,0x3c,0xff,0x74
152,0x07,0x3c,0xfe,0x75,0x11,0xe9,0x3b,0x00,0xf6,0x06,0x92,0x36,0x08,0x75,0x34,0xf6
153,0x06,0x92,0x36,0x04,0x74,0x2d,0x80,0x26,0x92,0x36,0xf3,0x80,0x3e,0x95,0x36,0x00
154,0x75,0x21,0x26,0x80,0x3f,0x05,0x75,0x13,0xc6,0x06,0x95,0x36,0x00,0x26,0x80,0x7f
155,0x06,0x00,0x74,0x07,0x26,0x8b,0x47,0x04,0xa2,0x95,0x36,0xba,0x0c,0x01,0xb8,0x40
156,0x40,0xef,0xed,0x8a,0x26,0xe8,0x34,0xf6,0xc4,0x10,0x75,0x03,0xe9,0x5b,0x00,0xf6
157,0xc4,0x04,0x74,0x05,0x80,0x0e,0x92,0x36,0x01,0x80,0x26,0xe8,0x34,0xeb,0xc4,0x3e
158,0x88,0x36,0x26,0x8b,0x35,0x83,0xe6,0x7f,0x83,0xfe,0x12,0x72,0x08,0x26,0xc6,0x45
159,0x02,0x01,0xe9,0x24,0x00,0x83,0xc6,0x20,0xd1,0xe6,0x2e,0xff,0x94,0x9d,0x06,0xc4
160,0x3e,0x88,0x36,0x26,0x88,0x45,0x02,0x3c,0xff,0x75,0x0e,0xf6,0x06,0x92,0x36,0x01
161,0x74,0x14,0xf6,0x06,0x92,0x36,0x02,0x75,0x0d,0x80,0x26,0x92,0x36,0xfc,0xba,0x0c
162,0x01,0xb8,0x20,0x20,0xef,0xed,0x8a,0x26,0xe8,0x34,0xf6,0xc4,0x08,0x74,0x22,0x80
163,0x26,0xe8,0x34,0xf7,0x80,0x0e,0x92,0x36,0x04,0xf6,0x06,0x92,0x36,0x08,0x74,0x11
164,0x80,0x26,0x92,0x36,0xf3,0xba,0x0c,0x01,0xb8,0x40,0x40,0xef,0xed,0x8a,0x26,0xe8
165,0x34,0xf6,0xc4,0x04,0x74,0x22,0x80,0x26,0xe8,0x34,0xfb,0x80,0x0e,0x92,0x36,0x01
166,0xf6,0x06,0x92,0x36,0x02,0x75,0x11,0x80,0x26,0x92,0x36,0xfe,0xba,0x0c,0x01,0xb8
167,0x20,0x20,0xef,0xed,0x8a,0x26,0xe8,0x34,0xf6,0xc4,0x01,0x74,0x67,0x80,0x26,0xe8
168,0x34,0xfe,0x80,0x3e,0xe8,0xff,0x00,0x74,0x39,0x80,0x3e,0xe8,0xff,0x04,0x74,0x32
169,0x80,0x3e,0xe8,0xff,0x01,0x75,0x21,0xe5,0x80,0xa9,0x00,0x07,0x74,0x0a,0xba,0x9e
170,0x00,0xb8,0x00,0x02,0xef,0xe9,0xef,0xff,0xc6,0x06,0xe8,0xff,0x03,0xba,0x0c,0x01
171,0xb8,0x08,0x08,0xef,0xed,0xe9,0x28,0x00,0x80,0x3e,0xe8,0xff,0x03,0x74,0x06,0xe9
172,0x1e,0x00,0xe9,0x00,0x00,0xba,0x10,0x01,0xb8,0x02,0x02,0xef,0xed,0xe5,0x00,0x0d
173,0x18,0x00,0xe7,0x00,0xe5,0x82,0x0d,0x02,0x00,0xe7,0x82,0xc6,0x06,0xe8,0xff,0x04
174,0x8a,0x26,0xe8,0x34,0xf6,0xc4,0x02,0x74,0x0d,0x80,0x26,0xe8,0x34,0xfd,0x80,0x26
175,0x92,0x36,0xbf,0xe8,0x4f,0x0b,0xfa,0xa0,0xe8,0x34,0x08,0x06,0x94,0x36,0xc6,0x06
176,0xe8,0x34,0x00,0xfb,0xc3,0xe8,0xe7,0x0f,0xc4,0x1e,0x84,0x36,0x2e,0xff,0x16,0x01
177,0x07,0x26,0x88,0x47,0x02,0xe9,0x7e,0xfe,0xe8,0x2d,0x10,0xc4,0x1e,0x84,0x36,0x2e
178,0xff,0x16,0x03,0x07,0x26,0x88,0x47,0x02,0xe9,0x6b,0xfe,0x8e,0x06,0x26,0x02,0x2e
179,0xff,0x16,0x07,0x07,0xc3,0xc3,0x83,0x3e,0xf5,0x34,0x00,0x74,0x0f,0xff,0x0e,0xf3
180,0x34,0x75,0x09,0xe8,0xc4,0xfd,0xc7,0x06,0xf5,0x34,0x00,0x00,0xf6,0x06,0x93,0x36
181,0x20,0x74,0x30,0xa1,0xc2,0x34,0x3b,0x06,0xe9,0x34,0xa3,0xe9,0x34,0x74,0x24,0x80
182,0x3e,0x95,0x36,0x00,0x75,0x1d,0xf7,0x06,0xe6,0x34,0x20,0x00,0x74,0x12,0xa9,0x20
183,0x00,0x74,0x0d,0x83,0x26,0xc2,0x34,0xdf,0x83,0x26,0xe9,0x34,0xdf,0xe9,0x03,0x00
184,0xe8,0xdd,0x09,0xba,0x06,0x01,0xed,0x8b,0xd0,0x81,0xe2,0x00,0xc0,0xc1,0xea,0x0e
185,0x03,0x16,0x74,0x34,0xc1,0xe0,0x02,0x11,0x06,0x72,0x34,0x73,0x04,0xff,0x06,0x74
186,0x34,0xba,0x02,0x01,0xed,0x8b,0xd0,0x81,0xe2,0x00,0xc0,0xc1,0xea,0x0e,0x03,0x16
187,0x70,0x34,0xc1,0xe0,0x02,0x11,0x06,0x6e,0x34,0x73,0x04,0xff,0x06,0x70,0x34,0xc7
188,0x06,0xa6,0x33,0x04,0x00,0xc7,0x06,0xaa,0x33,0x00,0x00,0x8d,0x06,0xa0,0x33,0xc1
189,0xe8,0x04,0xcd,0x39,0xc3,0x95,0x09,0x95,0x09,0x65,0x09,0x78,0x09,0x95,0x09,0x95
190,0x09,0x91,0x07,0x95,0x09,0x96,0x09,0x8b,0x09,0x95,0x09,0x95,0x09,0x95,0x09,0x95
191,0x09,0x95,0x09,0x95,0x09,0x8b,0xc0,0x8b,0xc0,0x8b,0xc0,0x8b,0xc0,0x8b,0xc0,0x90
192,0xf6,0x06,0x93,0x36,0x20,0x75,0x03,0xe9,0xcc,0x00,0x8c,0xc0,0x40,0x8e,0xc0,0x26
193,0x8b,0x0e,0x06,0x00,0x86,0xe9,0x26,0x89,0x0e,0x06,0x00,0x8c,0xc2,0xc1,0xe2,0x04
194,0xbe,0x0e,0x00,0x26,0xa1,0x04,0x00,0xd0,0xe0,0x24,0xc0,0x8a,0xe0,0xc0,0xec,0x04
195,0x0a,0xc4,0x26,0xa2,0x05,0x00,0x26,0xa1,0x08,0x00,0xa9,0x00,0xc0,0x74,0x03,0xe9
196,0x9e,0x00,0x26,0xf6,0x06,0x10,0x00,0x80,0x75,0x03,0xe9,0x0a,0x00,0x26,0xa0,0x16
197,0x00,0x24,0x1f,0x32,0xe4,0x03,0xf0,0x80,0x3e,0xec,0x34,0x06,0x72,0x5c,0x80,0x3e
198,0x95,0x36,0x00,0x75,0x66,0x8b,0xfa,0x33,0xdb,0x8e,0xc3,0x26,0x89,0x1d,0x26,0x88
199,0x5d,0x04,0x51,0x50,0xc4,0x1e,0x8c,0x36,0xb9,0x0f,0x00,0x33,0xc0,0xe8,0x21,0x09
200,0x58,0x59,0x0b,0xdb,0x74,0x34,0xfe,0x0e,0xe6,0x3a,0x26,0xc6,0x07,0x81,0x26,0xc6
201,0x47,0x01,0x00,0x26,0xc6,0x47,0x02,0xff,0x26,0xc7,0x47,0x04,0x00,0x00,0x26,0x89
202,0x4f,0x0a,0x86,0xf2,0x26,0x89,0x57,0x06,0x26,0x89,0x77,0x08,0x26,0xc6,0x47,0x09
203,0x00,0x26,0xc6,0x47,0x0c,0x02,0xe8,0x8c,0x09,0xc3,0xff,0x06,0xec,0x33,0x8c,0xc0
204,0x48,0x8e,0xc0,0xfa,0xe8,0x97,0x10,0xfb,0xe9,0xeb,0xff,0x8c,0xc0,0x48,0x8e,0xc0
205,0xfa,0xe8,0x8a,0x10,0xfb,0xc3,0x8c,0xc0,0x8e,0xc0,0xfa,0xe8,0x80,0x10,0xfb,0xc3
206,0x80,0x3e,0x95,0x36,0x00,0x75,0x03,0xe9,0xc2,0x00,0xbf,0x08,0x00,0x26,0xf6,0x06
207,0x10,0x00,0x80,0x75,0x05,0x03,0xfe,0xe9,0x0c,0x00,0x26,0xa0,0x16,0x00,0x24,0x1f
208,0x32,0xe4,0x03,0xf0,0x03,0xfe,0xa0,0x95,0x36,0x3c,0x00,0x75,0x03,0xe9,0x9c,0x00
209,0x3c,0x01,0x74,0x0b,0x3c,0x02,0x74,0x14,0x3c,0x03,0x74,0x1d,0xe9,0x8d,0x00,0xc6
210,0x06,0x96,0x36,0x01,0xe8,0x3c,0x01,0x72,0x27,0xe9,0x80,0x00,0xc6,0x06,0x96,0x36
211,0x02,0xe8,0x83,0x00,0x72,0x1a,0xe9,0x73,0x00,0xc6,0x06,0x96,0x36,0x01,0xe8,0x22
212,0x01,0x72,0x0d,0xc6,0x06,0x96,0x36,0x02,0xe8,0x6c,0x00,0x72,0x03,0xe9,0x5c,0x00
213,0x53,0x06,0x50,0xc4,0x1e,0x8c,0x36,0xb9,0x0b,0x00,0x33,0xc0,0xe8,0x42,0x08,0x58
214,0x26,0xc6,0x07,0x82,0x26,0xc6,0x47,0x02,0xff,0x8d,0x06,0xe0,0xfe,0x86,0xc4,0x26
215,0x89,0x47,0x06,0xa0,0x96,0x36,0x26,0x88,0x47,0x08,0xe8,0xc8,0x08,0x07,0x5b,0x83
216,0x26,0xad,0x36,0xfe,0xa1,0xad,0x36,0xe7,0x04,0xba,0x10,0x01,0xb8,0x80,0x80,0xef
217,0xed,0xba,0x10,0x01,0xb8,0x02,0x02,0xef,0xed,0x52,0xba,0xe0,0x00,0xb8,0x41,0x10
218,0xef,0x5a,0xb8,0x9c,0x03,0xcd,0x39,0xc6,0x06,0x95,0x36,0x00,0x8c,0xc0,0x48,0x8e
219,0xc0,0xfa,0xe8,0xa9,0x0f,0xfb,0xc3,0x1e,0x06,0x1f,0x06,0x33,0xc0,0x8e,0xc0,0x8b
220,0xf0,0x8d,0x3e,0x20,0xf3,0x51,0xb1,0x0a,0x26,0x83,0x7d,0x0c,0x01,0x75,0x2a,0x57
221,0x26,0x83,0x7d,0x0e,0x00,0x74,0x06,0xe8,0x2f,0x00,0xe9,0x03,0x00,0xe8,0x66,0x07
222,0x5f,0x73,0x16,0x33,0xc0,0x8e,0xd8,0x26,0x8b,0x4d,0x12,0x8d,0x75,0x20,0x8d,0x3e
223,0xe0,0xfe,0xf3,0xa4,0x59,0x07,0x1f,0xf9,0xc3,0xfe,0xc9,0x74,0x07,0x81,0xc7,0x20
224,0x01,0xe9,0xc4,0xff,0x59,0x07,0x1f,0xf8,0xc3,0x51,0x50,0x53,0x56,0x52,0x57,0x33
225,0xdb,0x26,0x8a,0x5d,0x0e,0x26,0x8b,0x4d,0x12,0x8d,0x7d,0x20,0x5a,0x87,0xd7,0x26
226,0x8a,0x45,0x14,0x87,0xd7,0x42,0x32,0xff,0x80,0xff,0x08,0x75,0x08,0xfe,0xcb,0x22
227,0xdb,0x75,0xea,0x33,0xdb,0x23,0xdb,0x74,0x06,0xfe,0xc7,0xd0,0xc8,0x73,0x0c,0x50
228,0x26,0x8a,0x05,0x38,0x04,0x58,0x74,0x03,0xe9,0x0a,0x00,0x49,0x46,0x47,0x23,0xc9
229,0x74,0x0a,0xe9,0xd3,0xff,0x5a,0x5e,0x5b,0x58,0x59,0xf8,0xc3,0x5a,0x5e,0x5b,0x58
230,0x59,0xf9,0xc3,0x1e,0x06,0x1f,0x06,0x33,0xc0,0x8e,0xc0,0x86,0xcd,0x2b,0xce,0x8b
231,0xf7,0x8b,0xc1,0x33,0xc9,0x80,0x3c,0xff,0x74,0x16,0x80,0xf9,0x06,0x73,0x09,0x32
232,0xc9,0x46,0x48,0x74,0x2e,0xe9,0xed,0xff,0x3d,0x60,0x00,0x73,0x0c,0xe9,0x23,0x00
233,0xfe,0xc1,0x46,0x48,0x74,0x1d,0xe9,0xdc,0xff,0xb8,0x10,0x00,0x8d,0x3e,0x18,0x34
234,0x32,0xed,0xb1,0x06,0xf3,0xa6,0x74,0x03,0xe9,0x08,0x00,0x48,0x23,0xc0,0x74,0x07
235,0xe9,0xe9,0xff,0x07,0x1f,0xf8,0xc3,0x8d,0x36,0x18,0x34,0x33,0xc0,0x8e,0xd8,0x8d
236,0x3e,0xe0,0xfe,0xb8,0x10,0x00,0xb9,0x06,0x00,0x56,0xf3,0xa4,0x5e,0x48,0x3d,0x00
237,0x00,0x75,0xf3,0x07,0x1f,0xf9,0xc3,0xff,0x06,0xe4,0x33,0xc6,0x06,0xeb,0x34,0x00
238,0x26,0x8b,0x45,0x06,0x86,0xe0,0xc1,0xe8,0x04,0x48,0x06,0x8e,0xc0,0xfe,0x06,0xe6
239,0x3a,0xfa,0xe8,0x69,0x0e,0xfb,0x07,0xb0,0xff,0xc3,0x00,0x00,0x00,0x00,0x00,0x00
240,0xb0,0x01,0xc3,0xb0,0x00,0xc3,0xf6,0x06,0x93,0x36,0x20,0x75,0x03,0xb0,0x04,0xc3
241,0x8b,0x0e,0x97,0x36,0x81,0xe1,0x80,0x30,0x26,0x8b,0x47,0x04,0x25,0x7f,0xcf,0x0b
242,0xc1,0xa3,0x97,0x36,0xa3,0xe6,0x34,0xb0,0x00,0xc3,0xf6,0x06,0x93,0x36,0x20,0x74
243,0x03,0xb0,0x03,0xc3,0x26,0x8b,0x47,0x08,0xa3,0x97,0x36,0xa3,0xe6,0x34,0x26,0x8a
244,0x47,0x20,0xa2,0xfd,0x34,0x3c,0x01,0x75,0x06,0xc7,0x06,0xa1,0x36,0x00,0x00,0x26
245,0x8a,0x47,0x21,0xa2,0xfe,0x34,0x26,0x8b,0x47,0x0a,0xa3,0x18,0x34,0xa3,0x58,0x34
246,0x26,0x8b,0x47,0x0c,0xa3,0x1a,0x34,0xa3,0x5a,0x34,0x26,0x8b,0x47,0x0e,0xa3,0x1c
247,0x34,0xa3,0x5c,0x34,0xc6,0x06,0x2a,0x34,0xc0,0x26,0x8b,0x47,0x14,0x25,0x7f,0xff
248,0x09,0x06,0x2c,0x34,0x26,0x8b,0x47,0x16,0x25,0xff,0xfe,0x25,0xff,0xfc,0x09,0x06
249,0x2e,0x34,0xc6,0x06,0x00,0x34,0xc0,0x26,0x8b,0x47,0x10,0xa3,0x02,0x34,0x26,0x8b
250,0x47,0x12,0xa3,0x04,0x34,0x06,0x53,0xe8,0x84,0x0a,0x5b,0x07,0x3d,0x00,0x00,0x75
251,0x07,0x80,0x0e,0x92,0x36,0x08,0xb0,0xfe,0xc3,0xb9,0x00,0x01,0xa1,0xac,0x33,0x33
252,0xd2,0xf7,0xf9,0xa3,0xae,0x33,0x91,0x49,0x33,0xd2,0xf7,0xe9,0x05,0x00,0x3b,0xa3
253,0x46,0x34,0xbf,0x00,0x3b,0x89,0x3e,0x44,0x34,0xba,0x68,0x00,0xb8,0xe0,0xe0,0xef
254,0xa1,0xae,0x33,0xe7,0x62,0xa1,0xae,0x33,0xba,0x08,0x01,0xef,0xa1,0x44,0x34,0xe7
255,0x64,0xa1,0x44,0x34,0xba,0x0a,0x01,0xef,0xb8,0x00,0x01,0x2d,0x04,0x00,0x0d,0x00
256,0x10,0xe7,0x92,0xc3,0x3d,0x00,0x00,0x74,0x0a,0x26,0x89,0x47,0x07,0xe8,0x83,0x3a
257,0xb0,0x07,0xc3,0xa1,0xae,0x33,0x26,0x89,0x47,0x2b,0xa1,0x44,0x34,0x26,0x89,0x47
258,0x2d,0xa1,0x46,0x34,0x26,0x89,0x47,0x2f,0x80,0x0e,0x93,0x36,0x20,0xa1,0x88,0x36
259,0x86,0xe0,0x26,0x89,0x47,0x08,0xa1,0x84,0x36,0x86,0xe0,0x26,0x89,0x47,0x0a,0xa1
260,0x80,0x36,0x86,0xe0,0x26,0x89,0x47,0x0c,0xb8,0x60,0xfe,0x86,0xe0,0x26,0x89,0x47
261,0x0e,0xa0,0xa1,0x36,0x26,0x88,0x47,0x10,0x8b,0x36,0x88,0x36,0x26,0xc6,0x44,0x02
262,0xff,0xe5,0x9e,0xa9,0x00,0x08,0x74,0x0c,0xba,0x84,0x00,0xed,0x0d,0x08,0x00,0xef
263,0xba,0x8e,0x00,0xef,0xe5,0x02,0x25,0xf9,0xff,0xe7,0x02,0xba,0x10,0x01,0xb8,0x02
264,0x02,0xef,0xed,0xb0,0x00,0xc3,0xf6,0x06,0x93,0x36,0x20,0x75,0x03,0xb0,0x01,0xc3
265,0x80,0x26,0x93,0x36,0x9f,0xe8,0x8d,0x0a,0x80,0x0e,0x92,0x36,0x08,0xb0,0xfe,0xc3
266,0xb0,0x00,0xc3,0xf6,0x06,0x93,0x36,0x20,0x75,0x03,0xb0,0x04,0xc3,0xc6,0x06,0x2a
267,0x34,0xc0,0x26,0x8b,0x47,0x06,0x25,0x7f,0xff,0xa3,0x2c,0x34,0x26,0x8b,0x47,0x08
268,0x25,0xff,0xfe,0x25,0xff,0xfc,0xa3,0x2e,0x34,0xcd,0x52,0xb0,0x00,0xc3,0xf6,0x06
269,0x93,0x36,0x20,0x75,0x03,0xb0,0x04,0xc3,0xc6,0x06,0x00,0x34,0xc0,0x26,0x8b,0x47
270,0x06,0xa3,0x02,0x34,0x26,0x8b,0x47,0x08,0xa3,0x04,0x34,0xcd,0x52,0xb0,0x00,0xc3
271,0xf6,0x06,0x93,0x36,0x20,0x75,0x03,0xb0,0x04,0xc3,0x57,0x8d,0x7f,0x06,0x51,0xb9
272,0x07,0x00,0x33,0xc0,0xf3,0xab,0x59,0x8d,0x7f,0x06,0xa1,0x7a,0x34,0x03,0x06,0x39
273,0x37,0x26,0x88,0x05,0xa1,0x95,0x37,0x26,0x88,0x45,0x02,0xa1,0x80,0x34,0x03,0x06
274,0x76,0x34,0x26,0x88,0x45,0x07,0xa1,0xc6,0x34,0x26,0x88,0x45,0x09,0xa1,0xd8,0x33
275,0x26,0x88,0x45,0x0a,0x33,0xc0,0xa3,0x7a,0x34,0xa3,0x39,0x37,0xa3,0x95,0x37,0xa3
276,0x80,0x34,0xa3,0x76,0x34,0xa3,0xc6,0x34,0xa3,0xd8,0x33,0x5f,0xb0,0x00,0xc3,0xf6
277,0x06,0x93,0x36,0x20,0x75,0x03,0xb0,0x04,0xc3,0x26,0x8b,0x4f,0x04,0x83,0xf9,0x06
278,0x74,0x12,0x83,0xf9,0x04,0x74,0x0d,0x83,0xf9,0x00,0x74,0x08,0x83,0xf9,0x02,0x74
279,0x03,0xb0,0x01,0xc3,0x89,0x0e,0xe8,0x3a,0x83,0x26,0xab,0x36,0xf9,0x09,0x0e,0xab
280,0x36,0xe5,0x02,0x25,0xf9,0xff,0x0b,0xc1,0xe7,0x02,0xb0,0x00,0xc3,0xf6,0x06,0x93
281,0x36,0x20,0x75,0x03,0xb0,0x04,0xc3,0x26,0x8b,0x4f,0x04,0x80,0xf9,0xff,0x74,0x08
282,0x80,0xf9,0x00,0x74,0x10,0xb0,0x01,0xc3,0x83,0x0e,0xad,0x36,0x02,0xa1,0xad,0x36
283,0xe7,0x04,0xe9,0x0a,0x00,0x83,0x26,0xad,0x36,0xfd,0xa1,0xad,0x36,0xe7,0x04,0xb0
284,0x00,0xc3,0xf6,0x06,0x93,0x36,0x20,0x75,0x03,0xb0,0x04,0xc3,0xe8,0xd5,0x04,0xb0
285,0x00,0xc3,0xf6,0x06,0x93,0x36,0x80,0x75,0x03,0xb0,0x01,0xc3,0x26,0x83,0x7f,0x06
286,0x05,0x75,0x03,0xe9,0x9d,0x00,0x26,0x8b,0x57,0x04,0x26,0x8b,0x47,0x08,0x26,0x81
287,0x7f,0x06,0x00,0x80,0x75,0x08,0xed,0x26,0x89,0x47,0x0a,0xe9,0x9d,0x00,0x26,0x83
288,0x7f,0x06,0x01,0x75,0x04,0xef,0xe9,0x92,0x00,0x26,0x81,0x7f,0x06,0x01,0x80,0x75
289,0x09,0xef,0xed,0x26,0x89,0x47,0x0a,0xe9,0x81,0x00,0x26,0x83,0x7f,0x06,0x02,0x75
290,0x07,0x26,0x21,0x47,0x04,0xe9,0x73,0x00,0x26,0x81,0x7f,0x06,0x02,0x80,0x75,0x0c
291,0x26,0x21,0x47,0x04,0xed,0x26,0x89,0x47,0x0a,0xe9,0x5f,0x00,0x26,0x83,0x7f,0x06
292,0x03,0x75,0x07,0x26,0x09,0x47,0x04,0xe9,0x51,0x00,0x26,0x81,0x7f,0x06,0x03,0x80
293,0x75,0x0c,0x26,0x09,0x47,0x04,0xed,0x26,0x89,0x47,0x0a,0xe9,0x3d,0x00,0x26,0x83
294,0x7f,0x06,0x04,0x75,0x07,0x26,0x31,0x47,0x04,0xe9,0x2f,0x00,0x26,0x81,0x7f,0x06
295,0x04,0x80,0x75,0x0c,0x26,0x31,0x47,0x04,0xed,0x26,0x89,0x47,0x0a,0xe9,0x1b,0x00
296,0xb0,0x01,0xc3,0xfa,0x53,0x26,0x8b,0x4f,0x08,0x0b,0xc9,0x74,0x0c,0x8d,0x1e,0xe0
297,0xfe,0xe8,0x52,0xff,0x83,0xc3,0x08,0xe2,0xf8,0x5b,0xfb,0xb0,0x00,0xc3,0xf6,0x06
298,0x93,0x36,0x80,0x75,0x0a,0xf6,0x06,0x93,0x36,0x20,0x75,0x03,0xb0,0x01,0xc3,0x8d
299,0x3e,0xe0,0xfe,0xe5,0x00,0x26,0x89,0x05,0xe5,0x02,0x26,0x89,0x45,0x02,0xa1,0xad
300,0x36,0x26,0x89,0x45,0x04,0xe5,0x06,0x26,0x89,0x45,0x06,0xe5,0x08,0x26,0x89,0x45
301,0x08,0xe5,0x0a,0x26,0x89,0x45,0x0a,0xe5,0x0e,0x26,0x89,0x45,0x0c,0xe5,0x48,0x26
302,0x89,0x45,0x0e,0xe5,0x4a,0x26,0x89,0x45,0x10,0xe5,0x4c,0x26,0x89,0x45,0x12,0xa1
303,0xb7,0x36,0x26,0x89,0x45,0x14,0xe5,0x50,0x26,0x89,0x45,0x16,0xe5,0x52,0x26,0x89
304,0x45,0x18,0xe5,0x54,0x26,0x89,0x45,0x1a,0xe5,0x56,0x26,0x89,0x45,0x1c,0xe5,0x58
305,0x26,0x89,0x45,0x1e,0xe5,0x62,0x26,0x89,0x45,0x20,0xe5,0x64,0x26,0x89,0x45,0x22
306,0xe5,0x66,0x26,0x89,0x45,0x24,0xe5,0x68,0x26,0x89,0x45,0x26,0xe5,0x6a,0x26,0x89
307,0x45,0x28,0xe5,0x6c,0x26,0x89,0x45,0x2a,0xe5,0x70,0x26,0x89,0x45,0x2c,0xe5,0x72
308,0x26,0x89,0x45,0x2e,0xe5,0x74,0x26,0x89,0x45,0x30,0xe5,0x76,0x26,0x89,0x45,0x32
309,0xe5,0x7c,0x26,0x89,0x45,0x34,0xe5,0x7e,0x26,0x89,0x45,0x36,0xe5,0x80,0x26,0x89
310,0x45,0x38,0xe5,0x82,0x26,0x89,0x45,0x3a,0xe5,0x86,0x26,0x89,0x45,0x3c,0xe5,0x88
311,0x26,0x89,0x45,0x3e,0xe5,0x9a,0x26,0x89,0x45,0x40,0xe5,0x9e,0x26,0x89,0x45,0x42
312,0xe5,0xcc,0x26,0x89,0x45,0x44,0xe5,0xce,0x26,0x89,0x45,0x46,0xe5,0xd0,0x26,0x89
313,0x45,0x48,0xe5,0xd2,0x26,0x89,0x45,0x4a,0xba,0x00,0x01,0xed,0x11,0x06,0x66,0x34
314,0x73,0x04,0xff,0x06,0x68,0x34,0x26,0x89,0x45,0x4c,0xba,0x02,0x01,0xed,0xc1,0xe0
315,0x02,0x11,0x06,0x6e,0x34,0x73,0x04,0xff,0x06,0x70,0x34,0x26,0x89,0x45,0x4e,0xba
316,0x04,0x01,0xed,0x11,0x06,0x6a,0x34,0x73,0x04,0xff,0x06,0x6c,0x34,0x26,0x89,0x45
317,0x50,0xba,0x06,0x01,0xed,0xc1,0xe0,0x02,0x11,0x06,0x72,0x34,0x73,0x04,0xff,0x06
318,0x74,0x34,0x26,0x89,0x45,0x52,0xba,0x08,0x01,0xed,0x26,0x89,0x45,0x54,0xba,0x0a
319,0x01,0xed,0x26,0x89,0x45,0x56,0xba,0x0c,0x01,0xed,0x26,0x89,0x45,0x58,0xba,0x0e
320,0x01,0xed,0x01,0x06,0x7a,0x34,0x26,0x89,0x45,0x5e,0xba,0x10,0x01,0xed,0x26,0x89
321,0x45,0x5c,0xb0,0x00,0xc3,0xf6,0x06,0x93,0x36,0x80,0x74,0x07,0xf6,0x06,0x93,0x36
322,0x20,0x75,0x03,0xb0,0x01,0xc3,0x26,0x80,0x7f,0x06,0x00,0x75,0x30,0x80,0x3e,0x95
323,0x36,0x00,0x74,0x52,0xc6,0x06,0x95,0x36,0x00,0x83,0x26,0xad,0x36,0xfe,0xa1,0xad
324,0x36,0xe7,0x04,0xba,0x10,0x01,0xb8,0x80,0x80,0xef,0xed,0xba,0x10,0x01,0xb8,0x02
325,0x02,0xef,0xed,0xba,0xe0,0x00,0xb8,0x00,0x10,0xef,0xb0,0x00,0xc3,0x26,0x8b,0x47
326,0x04,0x3d,0x00,0x00,0x74,0x20,0x3d,0x03,0x00,0x77,0x1b,0xba,0x10,0x01,0xb8,0x02
327,0x00,0xef,0xba,0xe0,0x00,0xb8,0x01,0x10,0xef,0x83,0x0e,0xad,0x36,0x01,0xa1,0xad
328,0x36,0xe7,0x04,0xb0,0x00,0xc3,0xb0,0x06,0xc3,0xf6,0x06,0x93,0x36,0x80,0x75,0x03
329,0xb0,0x01,0xc3,0x26,0x83,0x7f,0x04,0x01,0x74,0x0a,0x26,0x83,0x7f,0x04,0x02,0x74
330,0x19,0xb0,0x06,0xc3,0x26,0x83,0x7f,0x06,0x0c,0x77,0xf6,0x26,0x83,0x7f,0x0a,0x60
331,0x77,0xef,0xe8,0x10,0x00,0x72,0x0b,0xb0,0x46,0xc3,0xe8,0x4e,0x00,0x72,0x03,0xb0
332,0x46,0xc3,0xb0,0x00,0xc3,0x51,0xb1,0x0a,0x8b,0x3e,0x20,0xf3,0x26,0x83,0x7d,0x0c
333,0x02,0x75,0x03,0xe9,0x0e,0x00,0xfe,0xc9,0x74,0x07,0x81,0xc7,0x20,0x01,0xe9,0xeb
334,0xff,0x59,0xf8,0xc3,0x57,0x8d,0x7d,0x0e,0x8d,0x77,0x06,0xb9,0x12,0x00,0xf3,0xa4
335,0x8d,0x7d,0x20,0x8d,0x36,0xe0,0xfe,0x26,0x8b,0x4d,0x12,0xf3,0xa4,0xff,0x06,0x01
336,0x35,0x5f,0x26,0xc7,0x45,0x0c,0x01,0x00,0x59,0xf9,0xc3,0x51,0xb1,0x0a,0x8d,0x3e
337,0x20,0xf3,0x8d,0x36,0xe0,0xfe,0x26,0x83,0x7d,0x0c,0x01,0x75,0x1b,0x57,0xe8,0x25
338,0x00,0x5f,0x73,0x14,0x33,0xc0,0xb9,0x20,0x01,0xf3,0xaa,0x26,0xc7,0x45,0x0c,0x02
339,0x00,0xff,0x0e,0x01,0x35,0x59,0xf9,0xc3,0xfe,0xc9,0x74,0x07,0x81,0xc7,0x20,0x01
340,0xe9,0xd3,0xff,0x59,0xf8,0xc3,0x51,0x26,0x8b,0x4d,0x12,0x8d,0x7d,0x20,0xf3,0xa6
341,0x74,0x03,0x59,0xf8,0xc3,0x59,0xf9,0xc3,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
342,0x80,0x3e,0xec,0x34,0x06,0x72,0x33,0xff,0x06,0xf0,0x33,0x50,0xc4,0x1e,0x8c,0x36
343,0xb9,0x0f,0x00,0x33,0xc0,0xe8,0x29,0x00,0x58,0x81,0x26,0xc2,0x34,0xdf,0x7f,0x81
344,0x26,0xe9,0x34,0xdf,0x7f,0x0b,0xdb,0x74,0x11,0x26,0xc6,0x07,0x84,0x26,0xc6,0x47
345,0x02,0xff,0x26,0x89,0x47,0x06,0xe8,0xac,0x00,0xc3,0xff,0x06,0xea,0x33,0xe9,0xf5
346,0xff,0x57,0x26,0x8b,0x3f,0x03,0xf9,0x26,0x3b,0x7f,0x02,0x74,0x16,0x26,0x3b,0x7f
347,0x04,0x7c,0x2a,0x3d,0x00,0x00,0x75,0x13,0x8d,0x7f,0x08,0x03,0xf9,0x26,0x3b,0x7f
348,0x02,0x7c,0x14,0xff,0x06,0xde,0x33,0x33,0xdb,0x5f,0xc3,0x26,0x8b,0x7f,0x02,0x26
349,0x89,0x3f,0x03,0xf9,0xe9,0x06,0x00,0x26,0x89,0x3f,0x26,0x29,0x0f,0x26,0xc7,0x05
350,0xff,0xff,0x26,0x87,0x3f,0x26,0x89,0x0d,0x8d,0x5d,0x02,0x50,0x8b,0xfb,0x83,0xe9
351,0x02,0x33,0xc0,0xf3,0xaa,0x58,0xfe,0x0e,0xec,0x34,0x5f,0xc3,0x8b,0x7c,0x02,0x3b
352,0x3c,0x74,0x2f,0x83,0x3d,0xff,0x75,0x0b,0x8d,0x7c,0x08,0x89,0x7c,0x02,0x83,0x3d
353,0xff,0x74,0x1e,0x8a,0x45,0x02,0x3c,0x81,0x75,0x0c,0x80,0x3e,0xeb,0x34,0x00,0x74
354,0x05,0x33,0xc0,0xe9,0x0b,0x00,0x8b,0x0d,0x01,0x4c,0x02,0x8d,0x75,0x02,0x83,0xe9
355,0x02,0xc3,0x80,0x3e,0xec,0x34,0x06,0x72,0x05,0x33,0xc0,0xe9,0xf3,0xff,0xff,0x06
356,0xee,0x33,0xe9,0xbe,0xff,0xf6,0x06,0x92,0x36,0x40,0x74,0x01,0xc3,0x57,0x56,0x51
357,0x52,0x8b,0x36,0x8c,0x36,0xe8,0xa4,0xff,0x75,0x03,0xe9,0x1a,0x00,0xe9,0x1c,0x00
358,0xfe,0x06,0xec,0x34,0xc4,0x3e,0x80,0x36,0xf3,0xa4,0x80,0x0e,0x92,0x36,0x40,0xba
359,0x0c,0x01,0xb8,0x80,0x80,0xef,0xed,0x5a,0x59,0x5e,0x5f,0xc3,0xff,0x06,0xe0,0x33
360,0x80,0x3c,0x81,0x75,0x0c,0xff,0x06,0xe2,0x33,0xc6,0x06,0xeb,0x34,0x01,0xe9,0xcf
361,0xff,0x80,0x3c,0x84,0x75,0x07,0xff,0x06,0xe6,0x33,0xe9,0xc3,0xff,0xff,0x06,0xe8
362,0x33,0xe9,0xbc,0xff,0x8d,0x3e,0xe0,0xfe,0xa1,0x72,0x34,0xc7,0x06,0x72,0x34,0x00
363,0x00,0x89,0x05,0xa1,0x74,0x34,0xc7,0x06,0x74,0x34,0x00,0x00,0x89,0x45,0x02,0xba
364,0x04,0x01,0xed,0x89,0x45,0x04,0xc7,0x45,0x06,0x00,0x00,0xa1,0x6e,0x34,0xc7,0x06
365,0x6e,0x34,0x00,0x00,0x89,0x45,0x08,0xa1,0x70,0x34,0xc7,0x06,0x70,0x34,0x00,0x00
366,0x89,0x45,0x0a,0xba,0x00,0x01,0xed,0x89,0x45,0x0c,0xc7,0x45,0x0e,0x00,0x00,0x32
367,0xe4,0xba,0x0e,0x01,0xec,0x89,0x45,0x10,0xa1,0x7e,0x34,0xc7,0x06,0x7e,0x34,0x00
368,0x00,0x89,0x45,0x12,0xa1,0x8c,0x34,0xc7,0x06,0x8c,0x34,0x00,0x00,0x89,0x45,0x14
369,0xa1,0x8a,0x34,0xc7,0x06,0x8a,0x34,0x00,0x00,0x89,0x45,0x16,0xa1,0x7c,0x34,0xc7
370,0x06,0x7c,0x34,0x00,0x00,0x89,0x45,0x18,0xa1,0x88,0x34,0xc7,0x06,0x88,0x34,0x00
371,0x00,0x89,0x45,0x1a,0xa1,0xca,0x33,0xc7,0x06,0xca,0x33,0x00,0x00,0x89,0x45,0x1c
372,0xa1,0x78,0x34,0xc7,0x06,0x78,0x34,0x00,0x00,0x89,0x45,0x1e,0xa1,0xc6,0x34,0xc7
373,0x06,0xc6,0x34,0x00,0x00,0x89,0x45,0x20,0xc3,0x00,0x00,0x00,0x00,0x00,0x00,0x00
374,0xfa,0x33,0xc0,0x8e,0xd8,0x8e,0xc0,0xb8,0xa0,0x01,0xc1,0xe8,0x04,0x8e,0xd0,0x8d
375,0x26,0x80,0x00,0xe8,0x00,0x01,0xe8,0x10,0xeb,0x8b,0x1e,0xf7,0x34,0x8b,0x16,0xf9
376,0x34,0x8b,0x36,0xff,0x34,0x33,0xc0,0xb9,0xef,0xff,0x8d,0x3e,0x14,0x00,0x2b,0xcf
377,0x2b,0xce,0xd1,0xe9,0xf3,0xab,0x89,0x1e,0xf7,0x34,0x89,0x16,0xf9,0x34,0x83,0xfe
378,0x00,0x74,0x0c,0xb9,0xef,0xff,0xbf,0x80,0xfe,0x2b,0xcf,0xd1,0xe9,0xf3,0xab,0xb9
379,0xff,0xff,0x81,0xe9,0x00,0x3b,0x83,0xfe,0x00,0x74,0x03,0xe9,0x1b,0x00,0x51,0x1e
380,0xb8,0x00,0xe0,0x8e,0xd8,0x33,0xf6,0x8d,0x3e,0x00,0xd8,0xb9,0x00,0x0c,0xf3,0xa5
381,0x1f,0x59,0xbe,0xff,0xff,0x81,0xee,0x00,0xd8,0x2b,0xce,0x81,0xe1,0x00,0xff,0x89
382,0x0e,0xac,0x33,0x8d,0x06,0x20,0x02,0xc1,0xe8,0x04,0xa3,0x32,0x34,0x8e,0xd0,0x36
383,0xc7,0x06,0x1e,0x00,0x80,0x18,0x36,0xc7,0x06,0x22,0x00,0xff,0x7f,0x36,0xc7,0x06
384,0x0a,0x00,0xff,0xff,0x36,0xc7,0x06,0x1c,0x00,0x80,0x00,0x8d,0x06,0xa0,0x02,0xc1
385,0xe8,0x04,0xa3,0x30,0x34,0x8e,0xd0,0x36,0xc7,0x06,0x1e,0x00,0x50,0x28,0x36,0xc7
386,0x06,0x0a,0x00,0xff,0xff,0x36,0xc7,0x06,0x1c,0x00,0x80,0x00,0xb8,0xa0,0x01,0xc1
387,0xe8,0x04,0xa3,0x34,0x34,0xa3,0xf2,0x33,0x8e,0xd0,0x8d,0x26,0x80,0x00,0xb8,0x00
388,0x90,0xe7,0x02,0x8d,0x3e,0x70,0x01,0x8b,0xc7,0xc1,0xe8,0x04,0xb9,0x03,0x00,0x89
389,0x45,0x0e,0x89,0x45,0x02,0xc7,0x05,0xff,0xff,0x83,0xc7,0x10,0x05,0x01,0x00,0xe2
390,0xee,0xe8,0x5b,0x01,0xe5,0xce,0xa3,0xb5,0x36,0xe8,0x21,0x00,0xe8,0x45,0x01,0xa1
391,0x32,0x34,0x8c,0xcb,0xcd,0x37,0x0e,0x58,0xa9,0x00,0xf0,0x74,0x07,0x33,0xf6,0x89
392,0x36,0xff,0x34,0xc3,0x8d,0x36,0x30,0x61,0x89,0x36,0xff,0x34,0xc3,0x33,0xc0,0x8b
393,0xd0,0x8b,0xf2,0xb9,0x68,0x00,0x2e,0x80,0xbc,0xac,0x17,0x80,0x75,0x01,0xef,0x83
394,0xc2,0x02,0x46,0xe2,0xf1,0xb8,0x02,0x00,0xe7,0x50,0xb9,0x5a,0x00,0x33,0xff,0xc7
395,0x05,0x65,0x18,0x8c,0x4d,0x02,0x83,0xc7,0x04,0xe2,0xf4,0x33,0xc0,0x8e,0xc0,0x8c
396,0xc8,0x8e,0xd8,0x8d,0x3e,0x80,0x00,0x8d,0x36,0x9c,0x17,0xb9,0x08,0x00,0xe8,0x37
397,0x00,0x8d,0x36,0x20,0x21,0x8d,0x3e,0xc0,0x00,0xb9,0x0d,0x00,0xe8,0x29,0x00,0x8d
398,0x3e,0x40,0x01,0xb9,0x0a,0x00,0xe8,0x1f,0x00,0xe8,0x4b,0x0e,0x33,0xc0,0x8e,0xd8
399,0xc7,0x06,0x4e,0x37,0x6f,0x17,0xe7,0x48,0xe7,0x4c,0xb8,0x40,0x9c,0xe7,0x4a,0xe5
400,0x48,0x90,0xb8,0x00,0x70,0xe7,0x48,0xc3,0xa5,0x83,0xc7,0x02,0xe2,0xfa,0xc3,0xe5
401,0x4c,0xc3,0x50,0x51,0x56,0x57,0x52,0x06,0x1e,0x33,0xc0,0x8e,0xd8,0xe5,0x58,0xd1
402,0xe0,0x73,0x11,0x8b,0xf0,0xd1,0xe6,0x33,0xc0,0x8e,0xd8,0x8b,0xb4,0x80,0x00,0x83
403,0xc6,0x0b,0xff,0xe6,0x1f,0x07,0x5a,0x5f,0x5e,0x59,0x58,0xcf,0x58,0x1c,0xe4,0x1c
404,0x6c,0x1c,0x8e,0x1a,0xc0,0x1f,0x40,0x1a,0x44,0x1c,0x65,0x18,0x80,0x80,0x80,0xff
405,0x80,0x03,0x02,0x80,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff
406,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff
407,0x80,0x03,0x03,0x43,0x80,0x80,0x02,0x80,0x42,0x03,0x02,0xff,0x03,0x01,0x03,0x01
408,0x01,0x03,0x02,0x03,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x02,0x03,0x01,0x03
409,0x03,0xff,0x01,0x01,0xff,0x01,0xff,0x01,0x01,0x03,0x03,0x03,0xff,0xff,0xff,0xff
410,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff
411,0xff,0xff,0xff,0x02,0xb8,0x0f,0x00,0xe7,0x84,0xb8,0x0f,0xf8,0xe7,0x82,0xc3,0xb9
412,0x08,0x00,0x89,0x0e,0xe6,0x3a,0x8d,0x06,0x20,0x03,0x8b,0xd0,0xc1,0xe8,0x04,0xa3
413,0x90,0x01,0x8b,0xc2,0x8b,0xd8,0xc1,0xe8,0x04,0x8e,0xc0,0x05,0x61,0x00,0x26,0xa3
414,0x00,0x00,0xa1,0x30,0x34,0x26,0xa3,0x02,0x00,0x83,0xc3,0x14,0xd1,0xeb,0x26,0x89
415,0x1e,0x08,0x00,0x81,0xc2,0x10,0x06,0xe2,0xd9,0x26,0xc7,0x06,0x00,0x00,0xff,0xff
416,0x8c,0x06,0x92,0x01,0xc3,0x50,0x51,0x56,0x57,0x52,0x06,0x1e,0x33,0xc0,0x8e,0xd8
417,0xe7,0x5a,0xff,0x06,0xbe,0x33,0xba,0xd2,0x00,0xed,0xcf,0x00,0x00,0x00,0x00,0x00
418,0x8c,0xcb,0xa1,0x30,0x34,0xcd,0x37,0xe9,0x06,0xed,0xb8,0x32,0x00,0xc3,0xe8,0x8c
419,0x01,0xfe,0x06,0xe2,0x34,0xe8,0x21,0x01,0x75,0xf0,0xe8,0x53,0x0e,0x81,0x0e,0xaf
420,0x36,0x00,0xc0,0xc7,0x06,0xad,0x36,0x60,0x00,0xf7,0x06,0xe6,0x34,0x80,0x00,0x75
421,0x1a,0xf7,0x06,0xe6,0x34,0x00,0x08,0x74,0x09,0xc7,0x06,0xab,0x36,0x0b,0x00,0xe9
422,0x0f,0x00,0xc7,0x06,0xab,0x36,0x03,0x00,0xe9,0x06,0x00,0xc7,0x06,0xab,0x36,0x11
423,0x9c,0xc7,0x06,0xa9,0x36,0x18,0x00,0xf7,0x06,0xe6,0x34,0x80,0x00,0x75,0x0d,0xf7
424,0x06,0xb5,0x36,0x02,0x00,0x74,0x05,0x83,0x0e,0xa9,0x36,0x20,0xa1,0xa9,0x36,0xe7
425,0x00,0xa1,0xab,0x36,0xe7,0x02,0xf7,0x06,0xe6,0x34,0x80,0x00,0x74,0x2e,0xe8,0xf2
426,0x2f,0x33,0xc0,0x0d,0x41,0x00,0xe7,0x56,0xa1,0xb1,0x36,0x0d,0x00,0x10,0xe7,0x08
427,0xa1,0xb3,0x36,0xe7,0x0a,0xa1,0xaf,0x36,0xe7,0x06,0xb8,0x40,0x00,0xe7,0x4e,0x33
428,0xc0,0xe7,0x0e,0xc7,0x06,0x26,0x02,0x00,0x00,0xe9,0x23,0x00,0xc7,0x06,0x4e,0x37
429,0x3f,0x20,0x8e,0x06,0x30,0x34,0x26,0xf7,0x06,0x0a,0x00,0x00,0x80,0x74,0x07,0x26
430,0x81,0x0e,0x08,0x00,0x00,0x80,0xc6,0x06,0xe0,0x34,0x01,0xb8,0x00,0x00,0xc3,0xfe
431,0x06,0xe1,0x34,0xc6,0x06,0xe0,0x34,0x00,0xa1,0x26,0x02,0x0b,0xc0,0x74,0x01,0xc3
432,0xe8,0x04,0x00,0xb8,0x00,0x00,0xc3,0xa1,0xa9,0x36,0xe7,0x00,0x8b,0x1e,0xab,0x36
433,0x83,0xe3,0x06,0xe5,0x02,0x25,0xf9,0xff,0x0b,0xc3,0x0d,0x10,0x00,0xe7,0x02,0xa1
434,0xad,0x36,0xe7,0x04,0xc3,0xb8,0x0a,0x00,0xe7,0x84,0xfe,0x06,0xe5,0x34,0xc6,0x06
435,0xe3,0x34,0x01,0x8e,0x06,0x30,0x34,0x26,0xf7,0x06,0x0a,0x00,0x00,0x40,0x74,0x07
436,0x26,0x81,0x0e,0x08,0x00,0x00,0x40,0xc3,0xc7,0x06,0x4e,0x37,0x6f,0x17,0xfe,0x06
437,0xe4,0x34,0xc6,0x06,0xe3,0x34,0x00,0xc3,0xc3,0xf6,0x06,0x18,0x34,0x80,0x75,0x0d
438,0xa1,0x18,0x34,0x0b,0x06,0x1a,0x34,0x0b,0x06,0x1c,0x34,0x75,0x01,0xc3,0xa1,0x2e
439,0x34,0x25,0xff,0xfe,0x8b,0x16,0xe7,0x36,0x81,0xe2,0x00,0x01,0x0b,0xc2,0xa3,0x2e
440,0x34,0x8d,0x16,0x10,0x00,0xbf,0x00,0x00,0xb9,0x08,0x00,0x8b,0x85,0x00,0x34,0xef
441,0x83,0xc2,0x10,0x8b,0x85,0x02,0x34,0xef,0x83,0xc2,0x10,0x8b,0x85,0x04,0x34,0xef
442,0x83,0xc2,0xe2,0x83,0xc7,0x06,0x49,0x75,0xe2,0xb8,0x00,0x00,0x8e,0xc0,0xbe,0x00
443,0x34,0xbf,0xb9,0x36,0xb9,0x18,0x00,0xf3,0xa5,0xb8,0x00,0x00,0xc3,0x33,0xc0,0x8e
444,0xc0,0x8d,0x3e,0xb0,0x33,0xb9,0x08,0x00,0xf3,0xab,0x8d,0x3e,0x3e,0x34,0xb9,0x03
445,0x00,0xf3,0xab,0xc3,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
446,0x50,0x51,0x56,0x57,0x52,0x06,0x1e,0x33,0xc0,0x8e,0xd8,0xe7,0x5a,0xff,0x06,0xba
447,0x33,0xe5,0x56,0x0d,0x20,0x00,0xe7,0x56,0xba,0x7a,0x00,0xed,0x08,0x26,0x94,0x36
448,0x33,0xc0,0xb1,0x08,0x32,0xed,0x06,0x8e,0xc0,0x8d,0x3e,0xe0,0xff,0xf3,0xaa,0x8e
449,0x06,0x32,0x34,0x26,0x81,0x0e,0x08,0x00,0x00,0x02,0x07,0xe5,0x56,0x25,0xdf,0xff
450,0xe7,0x56,0xe9,0xf8,0xfc,0x00,0xbd,0x1b,0x10,0x1b,0xd9,0x1a,0xf3,0x1a,0x50,0x51
451,0x56,0x57,0x52,0x06,0x1e,0x33,0xc0,0x8e,0xd8,0xe7,0x5a,0xff,0x06,0xb6,0x33,0x53
452,0x06,0x51,0xe5,0x80,0xa3,0xb4,0x33,0x8b,0xd8,0x8b,0xc8,0x25,0x10,0x00,0xa3,0xed
453,0x34,0x0b,0xc0,0x74,0x14,0xff,0x06,0x80,0x34,0x80,0x3e,0xfe,0x34,0x00,0x74,0x03
454,0xe9,0x06,0x00,0xb8,0x80,0x00,0xe8,0x9d,0x04,0x83,0xe3,0x03,0xd1,0xe3,0x2e,0xff
455,0x97,0x86,0x1a,0x59,0x07,0x5b,0xe9,0xa4,0xfc,0xba,0x20,0x00,0x8e,0x06,0x3c,0x34
456,0x83,0x3e,0x3c,0x34,0x00,0x75,0x03,0xe9,0xf0,0x00,0xc7,0x06,0x3c,0x34,0x00,0x00
457,0xe9,0x2a,0x00,0xba,0x10,0x00,0x8e,0x06,0x3a,0x34,0x83,0x3e,0x3a,0x34,0x00,0x75
458,0x03,0xe9,0xd5,0xff,0xc7,0x06,0x3a,0x34,0x00,0x00,0xe8,0x10,0x00,0xe9,0xc9,0xff
459,0xba,0x10,0x00,0x8e,0x06,0x3a,0x34,0xc7,0x06,0x3a,0x34,0x00,0x00,0x26,0xa1,0x14
460,0x00,0x26,0xa3,0x0c,0x00,0x26,0xa1,0x16,0x00,0x26,0xa3,0x0e,0x00,0x26,0xc6,0x06
461,0x0a,0x00,0x00,0xc1,0xea,0x02,0x23,0xd1,0x74,0x1c,0xba,0x20,0x00,0x26,0xc7,0x06
462,0x0e,0x00,0xea,0x05,0x26,0x0b,0x16,0x0c,0x00,0x26,0x89,0x16,0x0c,0x00,0xff,0x06
463,0x86,0x34,0xff,0x06,0xdc,0x33,0x26,0xa1,0x0c,0x00,0xa9,0x00,0x37,0x74,0x16,0x26
464,0xc6,0x06,0x0a,0x00,0x02,0xa9,0x00,0x30,0x74,0x04,0xff,0x06,0x7a,0x34,0xff,0x06
465,0xda,0x33,0xe9,0x49,0x00,0xc0,0xec,0x07,0x83,0x16,0x8a,0x34,0x00,0x24,0x07,0x3c
466,0x07,0x75,0x04,0xff,0x06,0x8c,0x34,0xff,0x06,0x7e,0x34,0xa1,0x30,0x34,0x8c,0xc3
467,0x8e,0xc0,0x8e,0xdb,0x26,0x83,0x0e,0x08,0x00,0x40,0x8c,0xd8,0x26,0x87,0x06,0x16
468,0x00,0x26,0x83,0x3e,0x14,0x00,0xff,0x74,0x0a,0x8e,0xc0,0x26,0x8c,0x1e,0x00,0x00
469,0xe9,0x05,0x00,0x26,0x8c,0x1e,0x14,0x00,0x33,0xc0,0x8e,0xd8,0xc3,0xc3,0x8c,0xc0
470,0x87,0x06,0x92,0x01,0x3d,0xff,0xff,0x74,0x0d,0x8e,0xd8,0x8c,0x06,0x00,0x00,0x33
471,0xc0,0x8e,0xd8,0xe9,0x04,0x00,0x8c,0x06,0x90,0x01,0xe8,0x01,0x00,0xc3,0x06,0x83
472,0x3e,0x90,0x01,0xff,0x74,0x29,0x83,0x3e,0x3a,0x34,0x00,0x75,0x11,0xba,0x86,0x00
473,0xe8,0x1e,0x00,0x8c,0x06,0x3a,0x34,0x83,0x3e,0x90,0x01,0xff,0x74,0x11,0x83,0x3e
474,0x3c,0x34,0x00,0x75,0x0a,0xba,0x88,0x00,0xe8,0x06,0x00,0x8c,0x06,0x3c,0x34,0x07
475,0xc3,0xa1,0x90,0x01,0x8e,0xc0,0x26,0xa1,0x08,0x00,0xef,0x26,0xa1,0x00,0x00,0x26
476,0xc7,0x06,0x00,0x00,0xff,0xff,0xa3,0x90,0x01,0x3d,0xff,0xff,0x75,0x03,0xa3,0x92
477,0x01,0x83,0x3e,0xed,0x34,0x00,0x74,0x0b,0xb8,0x10,0x00,0xe7,0x84,0xc7,0x06,0xed
478,0x34,0x00,0x00,0xc3,0x50,0x51,0x56,0x57,0x52,0x06,0x1e,0x33,0xc0,0x8e,0xd8,0xe7
479,0x5a,0xff,0x06,0xbc,0x33,0xe9,0x25,0xfb,0x50,0x51,0x56,0x57,0x52,0x06,0x1e,0x33
480,0xc0,0x8e,0xd8,0xe7,0x5a,0xff,0x06,0xb0,0x33,0xe9,0x11,0xfb,0x50,0x51,0x56,0x57
481,0x52,0x06,0x1e,0x33,0xc0,0x8e,0xd8,0xe7,0x5a,0xff,0x06,0xb4,0x33,0x06,0xff,0x06
482,0x76,0x34,0x80,0x3e,0xfe,0x34,0x00,0x74,0x04,0x07,0xe9,0xf0,0xfa,0xb8,0x80,0x00
483,0xe8,0xd3,0x02,0x07,0xe9,0xe6,0xfa,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
484,0xc6,0x1d,0x08,0x1d,0x91,0x1e,0x5d,0x1e,0x73,0x1e,0x89,0x1e,0x91,0x1e,0xa8,0x1d
485,0x91,0x1e,0x91,0x1e,0xaf,0x1e,0xaf,0x1e,0x15,0x1d,0x15,0x1d,0x91,0x1e,0x99,0x1f
486,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x02,0x00,0x00
487,0x00,0x01,0x00,0x10,0x00,0x01,0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00
488,0x07,0xe9,0x99,0xfa,0x50,0x51,0x56,0x57,0x52,0x06,0x1e,0x33,0xc0,0x8e,0xd8,0xe7
489,0x5a,0xff,0x06,0xb2,0x33,0x06,0x68,0xf6,0x1c,0xe5,0x06,0xa3,0xb2,0x33,0x8b,0xf0
490,0x83,0xe6,0x1e,0x2e,0xff,0xa4,0xa0,0x1c,0xe5,0x0c,0xa9,0x80,0x00,0x74,0x06,0xe8
491,0xa4,0x01,0xe5,0x06,0xc3,0x53,0xe5,0x0c,0x8b,0xd8,0xa9,0x01,0x00,0x74,0x14,0x83
492,0x3e,0xe0,0x3a,0x00,0x74,0x0d,0x8e,0x06,0x38,0x34,0xe8,0xbf,0x06,0xc7,0x06,0xe0
493,0x3a,0x00,0x00,0xe5,0x00,0x0d,0x18,0x00,0xe7,0x00,0xe5,0x02,0x0d,0x11,0x00,0xe7
494,0x02,0x8b,0xc3,0x5b,0xa9,0x01,0x00,0x74,0x01,0xc3,0x8b,0xd0,0xb8,0x00,0x08,0xe7
495,0x84,0x8b,0xc2,0x8e,0x06,0x38,0x34,0x26,0xa3,0x0c,0x00,0x8b,0xd0,0xc1,0xe0,0x03
496,0x83,0x16,0x88,0x34,0x00,0xff,0x06,0x7c,0x34,0x26,0x83,0x3e,0x06,0x00,0x0a,0x75
497,0x21,0x8b,0xc2,0x25,0x40,0x18,0x3d,0x40,0x00,0x74,0x0c,0x3d,0x00,0x10,0x75,0x12
498,0x26,0xfe,0x0e,0x0a,0x00,0x74,0x0b,0xf7,0x06,0xef,0x34,0x20,0x00,0x75,0x03,0xe9
499,0x5a,0x06,0x8c,0xc0,0x26,0x8e,0x06,0x02,0x00,0x26,0x83,0x0e,0x08,0x00,0x20,0x26
500,0xa3,0x12,0x00,0x26,0xa3,0x10,0x00,0xc3,0xff,0x06,0xc4,0x33,0xe5,0x0c,0xa9,0x01
501,0x00,0x75,0x01,0xc3,0xa9,0xf0,0x07,0x74,0x01,0xc3,0xff,0x06,0xd4,0x33,0xe5,0x00
502,0x0d,0x18,0x00,0xe7,0x00,0xc3,0xff,0x06,0xca,0x33,0x80,0x3e,0xa0,0x36,0x08,0x75
503,0x14,0x8e,0x06,0x30,0x34,0x26,0xf7,0x06,0x0a,0x00,0x00,0x08,0x74,0x07,0x26,0x81
504,0x0e,0x08,0x00,0x00,0x08,0xe5,0x82,0x25,0xfd,0xff,0xe7,0x82,0xe5,0x0c,0x50,0xe5
505,0x80,0x25,0x00,0x07,0xa3,0xe4,0x3a,0xe5,0x8c,0x25,0x00,0x80,0xa3,0xe2,0x3a,0x58
506,0xa9,0x02,0x00,0x75,0x25,0x83,0x3e,0xe2,0x3a,0x00,0x75,0x1e,0x83,0x3e,0xe4,0x3a
507,0x00,0x75,0x17,0xe5,0x08,0x0d,0x00,0x04,0x25,0xff,0x04,0xe7,0x08,0xe8,0x6a,0x01
508,0xe5,0x82,0x0d,0x02,0x00,0xe7,0x82,0xe9,0x21,0x00,0xe8,0x1a,0x06,0x80,0x3e,0xe8
509,0xff,0x00,0x74,0x0a,0x80,0x3e,0xe8,0xff,0x04,0x74,0x03,0xe9,0x0d,0x00,0xc6,0x06
510,0xe8,0xff,0x01,0xba,0x0c,0x01,0xb8,0x08,0x08,0xef,0xed,0x80,0x3e,0x9f,0x36,0x06
511,0x75,0x05,0x83,0x0e,0x99,0x36,0x40,0xb8,0x00,0x01,0xe9,0x09,0x01,0xff,0x06,0xcc
512,0x33,0x81,0x26,0xaf,0x36,0xff,0xf7,0xa1,0xaf,0x36,0xe7,0x06,0xff,0x06,0xc6,0x34
513,0xe9,0x1e,0x00,0xff,0x06,0xce,0x33,0xff,0x06,0x95,0x37,0x81,0x26,0xaf,0x36,0xff
514,0xef,0xa1,0xaf,0x36,0xe7,0x06,0xe9,0x08,0x00,0xff,0x06,0xd0,0x33,0xff,0x06,0x7a
515,0x34,0xff,0x06,0xd2,0x33,0xd1,0xe6,0x8e,0x06,0x30,0x34,0x2e,0x8b,0x84,0xc0,0x1c
516,0x26,0x09,0x06,0x08,0x00,0x2e,0x8b,0x84,0xc2,0x1c,0x09,0x06,0x66,0x37,0xc3,0xe5
517,0x0c,0xa9,0x80,0x00,0x74,0x56,0x50,0xe8,0xf0,0x00,0x58,0xa9,0x00,0x01,0x75,0x07
518,0xff,0x06,0xc6,0x33,0xe9,0x08,0x00,0xff,0x06,0x78,0x34,0xff,0x06,0xc8,0x33,0xe5
519,0x82,0x25,0xfd,0xff,0xe7,0x82,0xe8,0x6e,0x05,0xba,0x10,0x01,0xed,0x80,0x3e,0xe8
520,0xff,0x00,0x74,0x0a,0x80,0x3e,0xe8,0xff,0x04,0x74,0x03,0xe9,0x1d,0x00,0xc6,0x06
521,0xe8,0xff,0x01,0xba,0x0c,0x01,0xb8,0x08,0x08,0xef,0xed,0xe9,0x0d,0x00,0xc6,0x06
522,0xe8,0xff,0x03,0xba,0x0c,0x01,0xb8,0x08,0x08,0xef,0xed,0xc3,0xa9,0x01,0x00,0x74
523,0x1c,0xe8,0x2c,0x00,0x83,0x3e,0xe0,0x3a,0x00,0x74,0x0f,0x06,0x8e,0x06,0x38,0x34
524,0xe8,0xc9,0x04,0xc7,0x06,0xe0,0x3a,0x00,0x00,0x07,0xe9,0x5d,0x00,0x8b,0xd0,0x8e
525,0x06,0x38,0x34,0x26,0xa3,0x0c,0x00,0xe8,0x06,0x00,0x68,0x69,0x1d,0xe9,0x4a,0x00
526,0xa9,0x00,0x04,0x74,0x0a,0xb8,0x00,0x04,0xff,0x06,0xd8,0x33,0xe9,0x17,0x00,0xa9
527,0x00,0x01,0x74,0x0a,0xff,0x06,0x39,0x37,0xb8,0x00,0x01,0xe9,0x08,0x00,0xa9,0x10
528,0x00,0xb8,0x10,0x00,0x74,0x1d,0x09,0x06,0x66,0x37,0x8c,0xc0,0x8e,0x06,0x30,0x34
529,0x26,0xf7,0x06,0x0a,0x00,0x00,0x01,0x74,0x07,0x26,0x81,0x0e,0x08,0x00,0x00,0x01
530,0x8e,0xc0,0xc3,0xff,0x06,0xc2,0x33,0xe9,0xf8,0xff,0xe5,0x00,0x0d,0x18,0x00,0xe7
531,0x00,0xe5,0x02,0x0d,0x11,0x00,0xe7,0x02,0xc3,0x58,0xe9,0x43,0xfd,0xe5,0x08,0x0d
532,0x00,0x04,0x25,0xff,0x04,0xe7,0x08,0xe9,0xe0,0xff,0xe5,0x0e,0xa9,0x00,0x08,0x75
533,0x01,0xc3,0xe9,0xf5,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
534,0x50,0x51,0x56,0x57,0x52,0x06,0x1e,0x33,0xc0,0x8e,0xd8,0xe7,0x5a,0xff,0x06,0xb8
535,0x33,0xe5,0x48,0x06,0x53,0x57,0xff,0x16,0x4e,0x37,0x5f,0x5b,0x83,0x3e,0x80,0x01
536,0xff,0x74,0x58,0x8e,0x06,0x80,0x01,0x26,0xff,0x0e,0x08,0x00,0x75,0x4d,0x26,0xa1
537,0x00,0x00,0xa3,0x80,0x01,0x26,0xc7,0x06,0x00,0x00,0xff,0xff,0x8c,0xc0,0x26,0x8e
538,0x06,0x02,0x00,0x26,0x81,0x0e,0x08,0x00,0x80,0x00,0x8b,0xd0,0x26,0x87,0x06,0x1a
539,0x00,0x26,0x83,0x3e,0x18,0x00,0xff,0x74,0x0a,0x8e,0xc0,0x26,0x89,0x16,0x00,0x00
540,0xe9,0x05,0x00,0x26,0x89,0x16,0x18,0x00,0x83,0x3e,0x80,0x01,0xff,0x74,0x0c,0x8e
541,0x06,0x80,0x01,0x26,0x83,0x3e,0x08,0x00,0x00,0x74,0xb3,0x07,0xe9,0x3e,0xf7,0xe5
542,0x4c,0x90,0xe5,0x02,0xa9,0x00,0x20,0x74,0x0d,0x25,0xff,0xdf,0x0d,0x01,0x00,0xe7
543,0x02,0x0d,0x00,0x20,0xe7,0x02,0xe5,0x0a,0x8b,0xd8,0xa3,0xf4,0x33,0x25,0xc3,0x57
544,0x0d,0x00,0x10,0xe7,0x0a,0xf7,0x06,0x9b,0x36,0x00,0x80,0x74,0x37,0xf7,0xc3,0x00
545,0x80,0x74,0x06,0xf7,0xc3,0x00,0x08,0x74,0x5d,0x81,0x26,0xc2,0x34,0x7f,0xff,0xc7
546,0x06,0x35,0x37,0x05,0x00,0xb8,0x80,0x03,0xcd,0x39,0x81,0x26,0x9b,0x36,0xff,0x7f
547,0xc7,0x06,0x0f,0x37,0x04,0x00,0xf7,0x06,0x9b,0x36,0x40,0x00,0x75,0x06,0xc7,0x06
548,0x0f,0x37,0x03,0x00,0xf7,0x06,0x9b,0x36,0x00,0x20,0x74,0x2a,0xf7,0xc3,0x00,0x08
549,0x74,0x24,0x80,0x3e,0x9d,0x36,0x06,0x7c,0x1d,0xff,0x06,0x94,0x34,0x83,0x0e,0x66
550,0x37,0x20,0x8e,0x06,0x30,0x34,0x26,0xf7,0x06,0x0a,0x00,0x00,0x01,0x74,0x07,0x26
551,0x81,0x0e,0x08,0x00,0x00,0x01,0xf7,0xc3,0x00,0x20,0x75,0x3b,0xf7,0x06,0x9a,0x37
552,0x80,0x00,0x74,0x0b,0xff,0x06,0x89,0x37,0x33,0xc0,0xe7,0x0e,0xe9,0x04,0x00,0xff
553,0x06,0x3b,0x37,0xf7,0x06,0x9b,0x36,0x00,0x20,0x74,0x1c,0x80,0x26,0x9e,0x36,0xff
554,0x75,0x15,0x8e,0x06,0x30,0x34,0x26,0xf7,0x06,0x0a,0x00,0x00,0x08,0x74,0x07,0x26
555,0x81,0x0e,0x08,0x00,0x00,0x08,0xc3,0xc3,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
556,0x02,0x23,0x02,0x23,0x02,0x23,0x02,0x23,0x03,0x23,0xdd,0x22,0x02,0x23,0xfd,0x21
557,0x02,0x23,0xa4,0x24,0xf3,0x24,0x02,0x23,0x8d,0x22,0x7a,0x23,0x02,0x23,0x97,0x24
558,0x1b,0x24,0x75,0x24,0x02,0x23,0x02,0x23,0x8e,0x25,0xfb,0x8e,0x06,0x7e,0x01,0xfb
559,0x26,0x83,0x3e,0x00,0x00,0xff,0x74,0xf2,0x26,0x8e,0x06,0x00,0x00,0xfa,0x26,0x8b
560,0x1e,0x08,0x00,0x26,0x23,0x1e,0x0a,0x00,0x74,0xe5,0x8c,0xc0,0x8e,0xd0,0x26,0x8b
561,0x26,0x02,0x00,0x8c,0x16,0xf2,0x33,0x22,0xff,0x75,0x6a,0x26,0xa1,0x1c,0x00,0x8a
562,0xe3,0x8a,0xdc,0x22,0xd8,0x75,0x0d,0xd0,0xe8,0x24,0xf8,0x0a,0xc0,0x75,0xf2,0xb0
563,0x80,0xe9,0xed,0xff,0xd0,0xe8,0x24,0xf8,0x0a,0xc0,0x75,0x02,0xb0,0x80,0x32,0xe4
564,0x26,0xa3,0x1c,0x00,0xf7,0xc3,0x08,0x00,0x75,0x47,0x2e,0x8a,0x9f,0xc5,0x25,0x2e
565,0x8b,0xbf,0xc5,0x26,0x80,0xc3,0x10,0x26,0x8e,0x1d,0x26,0x8c,0x1e,0x06,0x00,0x8b
566,0x16,0x00,0x00,0xc7,0x06,0x00,0x00,0xff,0xff,0x26,0x89,0x15,0x83,0xfa,0xff,0x75
567,0x0a,0x2e,0x8b,0x97,0xcd,0x26,0x26,0x21,0x16,0x08,0x00,0x33,0xc0,0x8e,0xd8,0x26
568,0x89,0x1e,0x04,0x00,0xc3,0x8a,0xdf,0xb7,0x00,0x2e,0x8a,0x9f,0xc5,0x25,0xe9,0xe0
569,0xff,0x26,0x83,0x26,0x08,0x00,0xf7,0x83,0xc3,0x10,0xe9,0xde,0xff,0x60,0x06,0x1e
570,0x68,0x87,0x25,0x6a,0x00,0x1f,0x8e,0x06,0xf2,0x33,0x8b,0x0e,0x34,0x34,0x39,0x0e
571,0xf2,0x33,0x74,0x0e,0x26,0x81,0x0e,0x0a,0x00,0x00,0x02,0x26,0x81,0x0e,0x08,0x00
572,0x00,0x02,0x26,0x89,0x26,0x02,0x00,0xa3,0xf2,0x33,0x8e,0xd0,0x8d,0x26,0x80,0x00
573,0x36,0x89,0x26,0x02,0x00,0x36,0x89,0x1e,0x20,0x00,0x36,0xc7,0x06,0x08,0x00,0x00
574,0x00,0xb9,0x04,0x00,0xbe,0x00,0x00,0x2e,0x8b,0xbc,0xc5,0x26,0x36,0xc7,0x05,0xff
575,0xff,0x36,0xc7,0x45,0x02,0xff,0xff,0x83,0xc6,0x02,0xe2,0xeb,0x8e,0x06,0x7e,0x01
576,0x36,0x8b,0x0e,0x22,0x00,0x8c,0xc0,0x26,0x83,0x3e,0x00,0x00,0xff,0x26,0x8e,0x06
577,0x00,0x00,0x74,0x07,0x26,0x3b,0x0e,0x22,0x00,0x7d,0xea,0x36,0x8c,0x06,0x00,0x00
578,0x8e,0xc0,0x26,0x8c,0x16,0x00,0x00,0xfb,0x36,0xff,0x2e,0x1e,0x00,0x06,0x1e,0x68
579,0x8b,0x25,0x6a,0x00,0x1f,0x26,0x09,0x36,0x08,0x00,0xf7,0xc6,0x00,0xff,0x74,0x01
580,0xc3,0x56,0x52,0x2e,0x8b,0xb4,0xc5,0x25,0x81,0xe6,0xff,0x00,0x2e,0x8b,0xb4,0xc5
581,0x26,0x8c,0xc2,0x8e,0xc0,0x26,0xc7,0x06,0x00,0x00,0xff,0xff,0x8e,0xc2,0x26,0x83
582,0x3c,0xff,0x74,0x0f,0x8b,0xd0,0x26,0x87,0x54,0x02,0x8e,0xc2,0x26,0xa3,0x00,0x00
583,0xe9,0x07,0x00,0x26,0x89,0x44,0x02,0x26,0x89,0x04,0x5a,0x5e,0xc3,0x06,0x1e,0x68
584,0x8b,0x25,0x6a,0x00,0x1f,0x8e,0x06,0xf2,0x33,0x26,0xa3,0x0a,0x00,0x26,0x89,0x26
585,0x02,0x00,0xa1,0x34,0x34,0x8e,0xd0,0x8d,0x26,0x80,0x00,0x8c,0x16,0xf2,0x33,0xe9
586,0x4d,0xfe,0xcf,0x50,0x1e,0x52,0x53,0x33,0xc0,0x8e,0xd8,0x26,0x83,0x3e,0x04,0x00
587,0xff,0x26,0xc7,0x06,0x04,0x00,0x00,0x00,0x74,0x03,0xe9,0x1a,0x00,0x83,0x3e,0xe6
588,0x3a,0x02,0x76,0x13,0xff,0x06,0xd6,0x33,0x8c,0xc0,0x8e,0x06,0x32,0x34,0xbe,0x40
589,0x00,0x68,0x3a,0x23,0xe9,0x5e,0xff,0xe8,0x84,0xf8,0x5b,0x5a,0x1f,0x58,0xcf,0xe8
590,0xe1,0x00,0x26,0xc6,0x06,0x18,0x00,0x10,0x26,0x8a,0x1e,0x29,0x00,0x88,0x1e,0x1b
591,0x37,0x26,0xc7,0x06,0x0c,0x00,0xff,0x7f,0x26,0xa1,0x0e,0x00,0xe7,0x9c,0x26,0xa1
592,0x08,0x00,0xe7,0x9a,0xe5,0x00,0x80,0xfb,0x08,0x74,0x09,0x0d,0x18,0xac,0xe7,0x00
593,0x07,0x1f,0x58,0xcf,0x0d,0x18,0x00,0xe9,0xf4,0xff,0x50,0x1e,0x06,0x33,0xc0,0x8e
594,0xd8,0x83,0x3e,0xa1,0x36,0x00,0x75,0xb7,0x26,0x8b,0x36,0x06,0x00,0x2e,0xff,0x94
595,0xdc,0x23,0x07,0x1f,0x58,0xcf,0xe8,0x8a,0x00,0xe5,0x00,0x0d,0x18,0x00,0xe7,0x00
596,0xe8,0x49,0x00,0xc3,0x53,0xf7,0x06,0xef,0x34,0x20,0x00,0x75,0x2d,0xe5,0x8c,0x25
597,0x00,0x70,0x8b,0xd8,0xe5,0x8c,0x25,0x00,0x70,0x3b,0xc3,0x74,0x05,0x8b,0xd8,0xe9
598,0xf2,0xff,0x3d,0x00,0x30,0x75,0x10,0xe5,0x02,0x25,0xef,0xff,0xe7,0x02,0xc7,0x06
599,0xe0,0x3a,0xff,0xff,0xe9,0x03,0x00,0xe8,0x12,0x00,0x5b,0xc3,0xa3,0x23,0x96,0x23
600,0xa4,0x23,0xa4,0x23,0x96,0x23,0xa4,0x23,0x96,0x23,0x96,0x23,0x26,0xa0,0x29,0x00
601,0xa2,0x1b,0x37,0x26,0xc7,0x06,0x0c,0x00,0xff,0x7f,0x26,0xa1,0x0e,0x00,0xe7,0x9c
602,0x26,0xa1,0x08,0x00,0xe7,0x9a,0xe5,0x00,0x25,0xff,0x53,0x26,0x8b,0x36,0x06,0x00
603,0x83,0xe6,0x0e,0x2e,0x0b,0x84,0xad,0x25,0xe7,0x00,0xc3,0x06,0x1e,0x68,0x8b,0x25
604,0x6a,0x00,0x1f,0x83,0x0e,0xef,0x34,0x20,0x83,0x0e,0x9b,0x36,0x08,0xe5,0x00,0x25
605,0xef,0xff,0x0d,0x08,0x00,0xe7,0x00,0xe5,0x00,0xa9,0x10,0x00,0x75,0x01,0xc3,0xe5
606,0x00,0xa9,0x10,0x00,0x75,0xf9,0xc3,0x50,0x53,0x51,0x56,0x06,0x1e,0x33,0xc0,0x8e
607,0xd8,0xb8,0x05,0x00,0xe7,0x84,0xe5,0x08,0x0d,0x00,0x04,0x25,0xff,0x04,0xe7,0x08
608,0xe5,0x00,0x0d,0x18,0x00,0xe7,0x00,0xe5,0x02,0x0d,0x11,0x00,0xe7,0x02,0x1f,0x07
609,0x5e,0x59,0x5b,0x58,0xc3,0x50,0x1e,0x33,0xc0,0x8e,0xd8,0xc7,0x06,0xef,0x34,0x00
610,0x00,0x83,0x26,0x9b,0x36,0xf7,0xe5,0x00,0x0d,0x18,0x00,0xe7,0x00,0xe5,0x02,0x0d
611,0x11,0x00,0xe7,0x02,0x1f,0x58,0xcf,0x60,0x06,0x1e,0x68,0x87,0x25,0x6a,0x00,0x1f
612,0xe8,0x16,0xf5,0xc3,0x06,0x1e,0x68,0x8b,0x25,0x6a,0x00,0x1f,0x8e,0xc0,0x26,0x83
613,0x3e,0x0a,0x00,0x00,0x74,0x03,0xe8,0x43,0x00,0x26,0xc7,0x06,0x0a,0x00,0xff,0xff
614,0x26,0x8b,0x16,0x06,0x00,0x8e,0x1e,0x8e,0x01,0x8c,0xd8,0x8b,0xca,0x83,0x3e,0x00
615,0x00,0xff,0x8e,0x1e,0x00,0x00,0x74,0x0a,0x2b,0x16,0x08,0x00,0x73,0xeb,0x29,0x0e
616,0x08,0x00,0x26,0x89,0x0e,0x08,0x00,0x26,0x8c,0x1e,0x00,0x00,0x8e,0xd8,0x8c,0x06
617,0x00,0x00,0xc3,0x60,0x06,0x1e,0x68,0x87,0x25,0x6a,0x00,0x1f,0x8e,0xc0,0x8b,0xc8
618,0x8e,0x1e,0x8e,0x01,0x26,0xc7,0x06,0x0a,0x00,0x00,0x00,0x8c,0xd8,0x83,0x3e,0x00
619,0x00,0xff,0x74,0x25,0x3b,0x0e,0x00,0x00,0x8e,0x1e,0x00,0x00,0x75,0xed,0x8e,0xd8
620,0x26,0xa1,0x00,0x00,0xa3,0x00,0x00,0x3d,0xff,0xff,0x74,0x56,0x8e,0xd8,0x26,0xa1
621,0x08,0x00,0x01,0x06,0x08,0x00,0xe9,0x49,0x00,0x26,0x8e,0x1e,0x02,0x00,0xbe,0x18
622,0x00,0x83,0x3c,0xff,0x74,0x3c,0x39,0x0c,0x74,0x19,0x8e,0x1c,0xbe,0x00,0x00,0x83
623,0x3e,0x00,0x00,0xff,0x74,0x2c,0x39,0x0e,0x00,0x00,0x74,0x07,0x8e,0x1e,0x00,0x00
624,0xe9,0xec,0xff,0x26,0xa1,0x00,0x00,0x89,0x04,0x33,0xc9,0x8e,0xd9,0x3d,0xff,0xff
625,0x75,0x10,0x83,0xfe,0x18,0x75,0x0b,0x26,0x8e,0x1e,0x02,0x00,0x81,0x26,0x08,0x00
626,0x7f,0xff,0x33,0xc0,0x8e,0xd8,0xc3,0x1f,0x07,0x61,0xcf,0x1f,0x07,0xcf,0x60,0x06
627,0x1e,0x68,0x87,0x25,0x6a,0x00,0x1f,0xe5,0x06,0x25,0x1e,0x00,0x3d,0x1e,0x00,0x75
628,0xf6,0xb9,0x08,0x00,0xe5,0x58,0xe7,0x5a,0x23,0xc0,0xe0,0xf8,0xc3,0x00,0x00,0x00
629,0x00,0x00,0x00,0x00,0x00,0x00,0xac,0x00,0x00,0x00,0xa8,0x00,0x8c,0x02,0x04,0x00
630,0x00,0x08,0x10,0x20,0x00,0xff,0x0e,0x0c,0x0c,0x0a,0x0a,0x0a,0x0a,0x08,0x08,0x08
631,0x08,0x08,0x08,0x08,0x08,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06
632,0x06,0x06,0x06,0x06,0x06,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04
633,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04
634,0x04,0x04,0x04,0x04,0x04,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02
635,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02
636,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02
637,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02
638,0x02,0x02,0x02,0x02,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
639,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
640,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
641,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
642,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
643,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
644,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
645,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
646,0x00,0x00,0x00,0x00,0x00,0x18,0x00,0x14,0x00,0x10,0x00,0x0c,0x00,0xff,0x7f,0xff
647,0xbf,0xff,0xdf,0xff,0xef,0xff,0xf7,0xff,0xfb,0xff,0xfd,0xff,0xfe,0x7f,0xff,0xbf
648,0xff,0xdf,0xff,0xef,0xff,0xf7,0xff,0xfb,0xff,0xfd,0xff,0xfe,0xff,0x00,0x00,0x00
649,0x80,0x3e,0xe2,0x34,0x01,0x76,0x03,0xe9,0xa5,0x00,0xb8,0x00,0x00,0xe7,0x4e,0xb9
650,0x28,0x00,0xe2,0xfe,0xc6,0x06,0x45,0x37,0x02,0xbf,0x3f,0x28,0x2e,0x8b,0x45,0x08
651,0xe7,0x4e,0xb9,0x28,0x00,0xe2,0xfe,0x2e,0x8b,0x1d,0xc7,0x06,0xb3,0x36,0x40,0x11
652,0xc7,0x06,0xb1,0x36,0x27,0x00,0xc7,0x06,0x46,0x37,0x02,0x00,0xc7,0x06,0x48,0x37
653,0x64,0x00,0xf7,0x06,0xb5,0x36,0x02,0x00,0x75,0x1c,0x2e,0x0b,0x5d,0x02,0x81,0x26
654,0xb3,0x36,0xff,0xfe,0xc7,0x06,0xb1,0x36,0x9c,0x00,0xc7,0x06,0x46,0x37,0x08,0x00
655,0xc7,0x06,0x48,0x37,0x90,0x01,0x89,0x1e,0xb7,0x36,0x89,0x1e,0xfe,0x33,0xbe,0x20
656,0x00,0x8b,0xc3,0xe7,0x4e,0xb9,0x28,0x00,0xe2,0xfe,0x2e,0x8b,0x45,0x04,0xe7,0x4e
657,0xb9,0x28,0x00,0xe2,0xfe,0xe5,0x4e,0x8b,0xcb,0x2e,0x23,0x45,0x06,0x2e,0x23,0x4d
658,0x06,0x3a,0xc1,0x74,0x36,0x4e,0x75,0xd9,0x80,0x3e,0x45,0x37,0x00,0x74,0x0b,0xc6
659,0x06,0x45,0x37,0x00,0xbf,0x2f,0x28,0xe9,0x72,0xff,0xc6,0x06,0x45,0x37,0x01,0xf7
660,0x06,0xb5,0x36,0x02,0x00,0x74,0x14,0xe5,0xce,0x25,0xfd,0xff,0xe7,0xce,0xe8,0x43
661,0x00,0xe5,0xce,0x0d,0x02,0x00,0xe7,0xce,0xe8,0x39,0x00,0x80,0x3e,0xe2,0x34,0x01
662,0x76,0x01,0xc3,0xb8,0xea,0x05,0xe7,0x8c,0xfa,0xe8,0x12,0xf4,0xfb,0x8d,0x06,0xd0
663,0x39,0x8b,0xd8,0xc1,0xe8,0x04,0xa3,0x38,0x34,0x8e,0xc0,0xa1,0x30,0x34,0x26,0xa3
0x02,0x00,0x26,0xc7,0x06,0x00,0x00,0xff,0xff,0x83,0xc3,0x18,0xd1,0xeb,0x26,0x89
0x1e,0x08,0x00,0xc3,0xe5,0x02,0x0d,0x00,0x40,0xe7,0x02,0xe5,0x00,0x0d,0x04,0x00
0xe7,0x00,0xb8,0x00,0x00,0xe7,0x0a,0xe5,0x0a,0xa9,0x00,0x80,0x75,0x14,0xe5,0x08
0x0d,0x00,0x10,0xe7,0x08,0xe5,0x0a,0x0d,0x00,0x08,0xb9,0x05,0x00,0xe7,0x0a,0xe2
0xfc,0xc3,0xe5,0x08,0x0d,0x00,0x10,0xb9,0x05,0x00,0xe7,0x08,0xe2,0xfc,0xc3,0x04
0x0c,0x20,0x00,0x01,0x0c,0x7e,0xff,0x00,0x0c,0x02,0x00,0x10,0x00,0x40,0x00,0x0c
0xc6,0x01,0x00,0x00,0xc0,0xf7,0xff,0x00,0xc0,0x02,0x00,0x10,0x00,0x40,0x00,0x00
0x33,0xc0,0x8e,0xd8,0x8d,0x3e,0x72,0x49,0x8d,0x36,0xb0,0x37,0xb9,0x14,0x00,0x8b
0x1e,0x30,0x34,0x89,0x5c,0x02,0x2e,0x8b,0x45,0x02,0x89,0x44,0x06,0x2e,0x8b,0x05
0x89,0x44,0x04,0x83,0xc7,0x04,0x83,0xc6,0x10,0xe2,0xe8,0xc6,0x06,0x9e,0x36,0x0e
0xe8,0xfd,0x26,0x68,0x83,0x28,0xa1,0xaa,0x02,0xcd,0x35,0x83,0x3e,0xa1,0x36,0x00
0x74,0x03,0xe9,0x3b,0x27,0x33,0xff,0x8e,0x06,0xa6,0x02,0x8b,0x36,0xa4,0x02,0x2e
0xff,0xa4,0x2e,0x30,0x83,0x0e,0x99,0x36,0x04,0xc7,0x06,0x37,0x37,0x01,0x00,0xc6
0x06,0xca,0x34,0x01,0xe9,0x7d,0x19,0x80,0x3e,0xa0,0x36,0x08,0x74,0xe6,0x80,0x26
0x9e,0x36,0xff,0x75,0x1a,0xf7,0x06,0x9b,0x36,0x00,0x20,0x74,0x12,0xf7,0x06,0x9b
0x36,0x03,0x00,0x75,0x0a,0x83,0x0e,0x66,0x37,0x10,0xc6,0x06,0xa0,0x36,0x08,0xe9
0xfb,0x01,0x80,0x3e,0x9e,0x36,0x02,0x75,0xce,0xc6,0x06,0xa0,0x36,0x06,0xe9,0xec
0x01,0xc3,0xe9,0xe8,0x01,0x26,0xc7,0x06,0x0a,0x00,0x00,0x00,0x26,0xff,0x26,0x04
0x00,0xa1,0xd1,0x36,0x26,0x39,0x06,0x1a,0x00,0x75,0x22,0xa1,0xd3,0x36,0x26,0x39
0x06,0x1c,0x00,0x75,0x18,0xa1,0xd5,0x36,0x26,0x39,0x06,0x1e,0x00,0x75,0x0e,0x26
0xf7,0x06,0x0c,0x00,0x40,0x00,0x74,0x05,0x83,0x0e,0x66,0x37,0x40,0x81,0x0e,0xaf
0x36,0x00,0x10,0xa1,0xaf,0x36,0xe7,0x06,0x80,0x3e,0x9d,0x36,0x02,0x75,0x06,0xcd
0x34,0xe9,0xa2,0x1a,0xc3,0xf7,0x06,0x9b,0x36,0x10,0x00,0x75,0x54,0x26,0xf6,0x06
0x0a,0x00,0xff,0x75,0x4c,0x26,0xa0,0x19,0x00,0x24,0xc0,0x3c,0x40,0x75,0x11,0x80
0x3e,0x95,0x36,0x00,0x74,0x3b,0x26,0xc7,0x06,0x04,0x00,0xff,0xff,0xe9,0x31,0x00
0xe8,0xf1,0x04,0xf7,0x06,0x9b,0x36,0x03,0x00,0x74,0x2f,0x8b,0xd8,0xb8,0x7d,0x03
0xcd,0x3a,0x8b,0xc3,0xc6,0x06,0xa0,0x36,0x06,0xf7,0x06,0x9b,0x36,0x02,0x00,0x75
0x05,0xc6,0x06,0xa0,0x36,0x04,0x81,0x0e,0x9b,0x36,0x80,0x00,0x83,0x26,0x9b,0x36
0xfc,0xe9,0x23,0x01,0xe8,0x87,0x1d,0xe9,0x33,0x01,0x50,0x26,0xa1,0x0c,0x00,0x25
0x07,0x00,0x3d,0x07,0x00,0x75,0x03,0xe9,0x84,0x00,0x3d,0x05,0x00,0x75,0x03,0xe9
0x7c,0x00,0x83,0x3e,0xe8,0x3a,0x04,0x74,0x75,0x83,0x3e,0xe8,0x3a,0x02,0x74,0x6e
0xf7,0x06,0xe6,0x34,0x18,0x80,0x75,0x03,0xe9,0x6a,0x00,0xf7,0x06,0xe6,0x34,0x00
0x80,0x74,0x35,0x26,0x80,0x3e,0x29,0x00,0x02,0x75,0x2d,0x51,0x56,0x57,0x8d,0x36
0x3e,0x34,0x8d,0x3e,0x20,0x00,0xb9,0x06,0x00,0xf3,0xa6,0x5f,0x5e,0x59,0x74,0x45
0x26,0xa1,0x20,0x00,0xa3,0x3e,0x34,0x26,0xa1,0x22,0x00,0xa3,0x40,0x34,0x26,0xa1
0x24,0x00,0xa3,0x42,0x34,0xe9,0x26,0x00,0xf7,0x06,0xe6,0x34,0x08,0x00,0x74,0x0b
0x26,0x80,0x3e,0x19,0x00,0x00,0x74,0x03,0xe9,0x13,0x00,0xf7,0x06,0xe6,0x34,0x10
0x00,0x74,0x12,0x26,0xa0,0x28,0x00,0xc0,0xe8,0x04,0x22,0xc0,0x74,0x07,0x26,0xc7
0x06,0x04,0x00,0xff,0xff,0x58,0x23,0xc0,0x74,0x03,0xe9,0x57,0xff,0x81,0x26,0x9b
0x36,0xff,0xfe,0x83,0xfe,0x06,0x7f,0x24,0x26,0xa1,0x20,0x00,0x3b,0x06,0xd1,0x36
0x75,0x1a,0x26,0xa1,0x22,0x00,0x3b,0x06,0xd3,0x36,0x75,0x10,0x26,0xa1,0x24,0x00
0x3b,0x06,0xd5,0x36,0x75,0x06,0x81,0x0e,0x9b,0x36,0x00,0x01,0x26,0xa1,0x20,0x00
0x25,0x7f,0xff,0xa3,0xb8,0x34,0x26,0xa1,0x22,0x00,0xa3,0xba,0x34,0x26,0xa1,0x24
0x00,0xa3,0xbc,0x34,0x8b,0xc6,0x86,0xc4,0xa3,0xc0,0x34,0xd1,0xe6,0x80,0xfc,0x09
0x74,0x03,0xe8,0xaa,0x1c,0x8b,0xc6,0x2e,0xff,0xa4,0x30,0x49,0x26,0xa1,0x0c,0x00
0x3d,0xff,0x7f,0x74,0x0f,0x26,0xff,0x26,0x04,0x00,0x8e,0x06,0x38,0x34,0xe8,0x36
0x06,0xcd,0x50,0xc3,0xe9,0x16,0x00,0xcd,0x34,0xe9,0x11,0x00,0xcd,0x34,0x89,0x36
0x3d,0x37,0xa1,0x9d,0x36,0xa3,0x3f,0x37,0xc6,0x06,0xa0,0x36,0x0c,0xe8,0x8e,0x00
0xa1,0x9f,0x36,0x22,0xe4,0x75,0x32,0xf7,0x06,0x4c,0x37,0x01,0x00,0x75,0x2a,0xf6
0x06,0x9d,0x36,0x80,0x74,0x07,0x88,0x26,0x9e,0x36,0xe9,0x31,0x00,0x3a,0x06,0x9d
0x36,0xa3,0x9d,0x36,0x74,0x28,0x8b,0xf0,0x2e,0xff,0xa4,0x0d,0x2b,0x44,0x29,0xee
0x42,0x19,0x44,0xcd,0x44,0x2f,0x45,0x5a,0x45,0x3a,0x26,0x9e,0x36,0x75,0x01,0xc3
0x32,0xc0,0x86,0xc4,0x8b,0xf0,0xa2,0x9e,0x36,0x2e,0xff,0xa4,0x20,0x49,0x8b,0x2e
0x99,0x36,0x23,0xed,0x75,0x01,0xc3,0xbf,0x01,0x00,0xbe,0x00,0x00,0x85,0xfd,0x75
0x1a,0x46,0xd1,0xe7,0xe9,0xf6,0xff,0x2a,0x00,0x29,0x00,0x28,0x00,0x27,0x00,0x25
0x00,0x05,0x00,0x07,0x00,0x26,0x00,0x06,0x00,0x20,0x00,0xf7,0xd7,0x21,0x3e,0x99
0x36,0xd1,0xe6,0x2e,0x8b,0xb4,0x47,0x2b,0xe9,0x4f,0xff,0xe9,0x56,0xff,0x80,0x26
0x9e,0x36,0xff,0x75,0x17,0xf7,0x06,0x4c,0x37,0x01,0x00,0x75,0x0f,0xf6,0x06,0x9d
0x36,0x80,0x74,0x08,0xf7,0x06,0x66,0x37,0xff,0xff,0x75,0x07,0xc7,0x06,0x66,0x37
0x00,0x00,0xc3,0xf7,0x06,0x41,0x37,0x01,0x00,0x75,0x0b,0xb8,0x7f,0x03,0xcd,0x39
0xc7,0x06,0x41,0x37,0x01,0x00,0x33,0xf6,0xb8,0x00,0x40,0x85,0x06,0x66,0x37,0x74
0x21,0x80,0xbc,0x54,0x37,0xff,0x74,0x04,0xfe,0x84,0x54,0x37,0x80,0xbc,0x96,0x34
0xff,0x74,0x04,0xfe,0x84,0x96,0x34,0x31,0x06,0x66,0x37,0x83,0x3e,0x66,0x37,0x00
0x74,0x05,0x46,0xd1,0xe8,0x73,0xd4,0xc3,0xa1,0xf4,0x33,0xa9,0x00,0x88,0x74,0x0b
0xa9,0x00,0x10,0x75,0x09,0x8b,0x1e,0x43,0x37,0xff,0xe3,0xe9,0xd7,0x00,0xc7,0x06
0x35,0x37,0x05,0x00,0xc7,0x06,0x43,0x37,0x1e,0x2c,0xf7,0x06,0xf4,0x33,0x00,0x08
0x74,0x06,0xc7,0x06,0x43,0x37,0x10,0x2c,0xb8,0x80,0x03,0xcd,0x39,0xe9,0xcd,0xfe
0xa9,0x00,0x08,0x74,0xd9,0xff,0x0e,0x35,0x37,0x75,0xed,0xe9,0x66,0x00,0xa9,0x00
0x08,0x75,0xcb,0xff,0x0e,0x35,0x37,0x75,0xdf,0x81,0x0e,0xc2,0x34,0xc0,0x00,0xf6
0x06,0x9d,0x36,0x80,0x74,0x48,0x81,0x0e,0x9b,0x36,0x00,0x80,0xf7,0x06,0x9b,0x36
0x01,0x00,0x74,0x1e,0xb8,0x7d,0x03,0xcd,0x3a,0x81,0x0e,0x9b,0x36,0x80,0x00,0x83
0x26,0x9b,0x36,0xfe,0xc7,0x06,0x0f,0x37,0x02,0x00,0xc6,0x06,0xa0,0x36,0x04,0xe9
0x7b,0xfe,0x80,0x3e,0xa0,0x36,0x04,0x75,0x07,0x83,0x3e,0x0f,0x37,0x01,0x75,0x05
0xc6,0x06,0xa0,0x36,0x06,0xc7,0x06,0x0f,0x37,0x02,0x00,0xe9,0x5f,0xfe,0xbe,0x02
0x00,0xe9,0x4a,0xfe,0x80,0x26,0x9e,0x36,0xff,0x75,0x3a,0xf6,0x06,0x9d,0x36,0x80
0x74,0x2d,0xf7,0x06,0x9b,0x36,0x00,0x20,0x75,0x2b,0xc6,0x06,0xa0,0x36,0x06,0xff
0x06,0x94,0x34,0x83,0x0e,0x66,0x37,0x20,0x8e,0x06,0x30,0x34,0x26,0xf7,0x06,0x0a
0x00,0x00,0x01,0x74,0x07,0x26,0x81,0x0e,0x08,0x00,0x00,0x01,0xe9,0x06,0x00,0xbe
0x04,0x00,0xe9,0x09,0xfe,0x81,0x0e,0xaf,0x36,0x00,0x08,0xa1,0xaf,0x36,0xe7,0x06
0xe5,0x0a,0xa9,0x00,0x80,0x74,0x0e,0x81,0x26,0xaf,0x36,0xff,0xf7,0xa1,0xaf,0x36
0xe7,0x06,0xe9,0x09,0xff,0xe9,0xf5,0xfd,0xc7,0x06,0x41,0x37,0x00,0x00,0x83,0x0e
0x99,0x36,0x02,0xe9,0xe7,0xfd,0x80,0x26,0x9e,0x36,0xff,0x75,0x1d,0xf7,0x06,0x9b
0x36,0x00,0x40,0x75,0x05,0x83,0x0e,0x99,0x36,0x08,0x83,0x0e,0x99,0x36,0x20,0x81
0x26,0x9b,0x36,0xff,0xbf,0xb8,0x85,0x03,0xcd,0x39,0xe9,0xc0,0xfd,0x80,0x3e,0x9e
0x36,0x06,0x74,0x07,0x80,0x3e,0x9e,0x36,0x0a,0x75,0x34,0xf6,0x06,0x9d,0x36,0x80
0x75,0x06,0xbe,0x07,0x00,0xe9,0x96,0xfd,0xc6,0x06,0xa0,0x36,0x04,0x83,0x3e,0x0f
0x37,0x02,0x74,0x1b,0xc7,0x06,0x0f,0x37,0x04,0x00,0x80,0x3e,0x9e,0x36,0x06,0x75
0x0e,0xf7,0x06,0x9b,0x36,0x40,0x00,0x75,0x06,0xc7,0x06,0x0f,0x37,0x03,0x00,0xe9
0x7b,0xfd,0x80,0x3e,0x9d,0x36,0x04,0x75,0x12,0x81,0x0e,0xc2,0x34,0x00,0x40,0xff
0x06,0x92,0x34,0xc6,0x06,0xa0,0x36,0x06,0xe9,0x62,0xfd,0xbe,0x05,0x00,0xe9,0x4d
0xfd,0xf6,0x06,0x9d,0x36,0x80,0x75,0x19,0x83,0x0e,0xc2,0x34,0x04,0xbe,0x06,0x00
0xe9,0x3b,0xfd,0x80,0x26,0x9e,0x36,0xff,0x75,0xc5,0xff,0x06,0x31,0x37,0xe9,0x00
0x00,0x83,0x26,0xc2,0x34,0xbf,0xc6,0x06,0xa0,0x36,0x06,0xe9,0x2f,0xfd,0xe5,0x0a
0x50,0x25,0xc3,0xbf,0xe7,0x0a,0x58,0x80,0x26,0x9e,0x36,0xff,0x75,0x0d,0xa9,0x00
0x40,0x75,0x08,0xc6,0x06,0xa0,0x36,0x06,0xe9,0x12,0xfd,0xb8,0x83,0x03,0xcd,0x39
0xc3,0xb8,0x7c,0x03,0xcd,0x39,0xf7,0x06,0xf4,0x33,0x00,0x10,0x75,0x09,0xc7,0x06
0x33,0x37,0x02,0x00,0xe9,0xf6,0xfc,0xff,0x0e,0x33,0x37,0x74,0x03,0xe9,0xed,0xfc
0xff,0x06,0x8e,0x34,0xe8,0xf7,0x19,0x83,0x0e,0xc2,0x34,0x08,0xbe,0x03,0x00,0xe9
0xcc,0xfc,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0x00,0x04,0x04,0x05
0x04,0x04,0x04,0x00,0x03,0x00,0x03,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x04,0x00,0x08,0x08,0x05,0x08,0x08,0x08,0x00,0x03,0x00,0x03,0x03,0x00,0x00
0x02,0x04,0x04,0x04,0x04,0x00,0x00,0x08,0x00,0x00,0x0a,0x14,0x00,0x00,0x1a,0x00
0x1c,0x00,0x1e,0x20,0x00,0x00,0x04,0x41,0x06,0x0b,0x08,0xc2,0xff,0xe7,0x04,0x03
0x06,0x04,0x04,0x05,0x04,0x06,0x04,0x87,0x04,0x03,0x06,0x04,0x04,0x85,0x4e,0xa2
0x04,0xcf,0x04,0xcd,0xc7,0x06,0xa2,0x37,0x00,0x00,0xc7,0x06,0xa6,0x37,0x00,0x00
0x26,0xa1,0x20,0x00,0x25,0x7f,0xff,0xa3,0xf5,0x36,0x26,0xa1,0x22,0x00,0xa3,0xf7
0x36,0x26,0xa1,0x24,0x00,0xa3,0xf9,0x36,0xe8,0x3b,0x19,0x8b,0xf0,0x26,0x8b,0x0e
0x0e,0x00,0x2b,0xc8,0x83,0xe9,0x0e,0xb8,0x01,0x80,0x83,0xf9,0x04,0x7c,0x51,0x26
0x8a,0x54,0x28,0x88,0x16,0x1c,0x37,0x40,0x26,0x8b,0x6c,0x26,0x86,0xcd,0x3b,0xcd
0x86,0xcd,0x89,0x0e,0xa4,0x37,0x75,0x38,0x40,0x32,0xff,0x26,0x8a,0x5c,0x29,0x80
0xfb,0x15,0x77,0x25,0x80,0xfb,0x0a,0x74,0x20,0x80,0xfb,0x01,0x74,0x1b,0xb8,0x04
0x80,0x2e,0x3a,0x97,0x02,0x2e,0x74,0x07,0x2e,0x3a,0x97,0x18,0x2e,0x75,0x11,0x33
0xc0,0x80,0xfb,0x09,0x75,0x4f,0x8b,0xf3,0xc3,0x26,0xc7,0x06,0x04,0x00,0xff,0xff
0x50,0x52,0xa1,0xa4,0x37,0x86,0xc4,0x26,0x3b,0x06,0x26,0x00,0x7c,0x32,0x26,0x81
0x3e,0x26,0x00,0x00,0x04,0x7e,0x29,0x8d,0x74,0x2a,0x26,0x8b,0x14,0x22,0xd2,0x74
0x1f,0x80,0xe6,0xbf,0x80,0xfe,0x09,0x75,0x17,0xc7,0x06,0xa2,0x37,0x01,0x00,0x80
0xfa,0x04,0x75,0x0c,0x26,0x8b,0x44,0x02,0xa3,0x03,0x37,0x86,0xc4,0xa3,0xd0,0x34
0x5a,0x58,0xe9,0xb1,0xff,0xbd,0x72,0x37,0x2e,0x8a,0x87,0x2e,0x2e,0x22,0xc0,0x74
0x16,0x05,0x44,0x2e,0x8b,0xf8,0x2e,0x8b,0x05,0x3e,0x89,0x46,0x00,0x83,0xc5,0x02
0x83,0xc7,0x02,0x22,0xe4,0x7d,0xef,0x8d,0x74,0x2a,0x83,0xe9,0x04,0x75,0x03,0xe9
0xa1,0x00,0x26,0x8b,0x14,0x22,0xd2,0x75,0x03,0xe9,0x7c,0x00,0xc7,0x06,0xa6,0x37
0x01,0x00,0xbf,0x72,0x37,0x8b,0x05,0x83,0xc7,0x02,0x80,0xe6,0xbf,0x80,0xe4,0x3f
0x80,0xfe,0x09,0x75,0x22,0x80,0xfa,0x04,0x75,0x5e,0xc7,0x06,0xa2,0x37,0x01,0x00
0x26,0x8b,0x44,0x02,0xa3,0x03,0x37,0x86,0xc4,0xa3,0xd0,0x34,0x86,0xc4,0xc7,0x06
0xa6,0x37,0x00,0x00,0xe9,0x47,0x00,0x3b,0xfd,0x7e,0x15,0x26,0x8b,0x04,0xa8,0x40
0x74,0x06,0xb8,0x07,0x80,0xe9,0x38,0xff,0x32,0xc0,0x26,0x8b,0x04,0xe9,0x2e,0x00
0x3a,0xf4,0x75,0xb1,0xc7,0x45,0xfe,0x00,0x00,0x80,0xfe,0x22,0x75,0x0d,0x3a,0xd0
0x77,0x16,0xc7,0x06,0xa6,0x37,0x00,0x00,0xe9,0x13,0x00,0x3a,0xd0,0x75,0x09,0xc7
0x06,0xa6,0x37,0x00,0x00,0xe9,0x06,0x00,0xb8,0x05,0x80,0xe9,0x02,0xff,0x32,0xf6
0x03,0xf2,0x2b,0xca,0xb8,0x05,0x80,0x23,0xc9,0x76,0x03,0xe9,0x64,0xff,0x74,0x03
0xe9,0xed,0xfe,0x33,0xc0,0xbf,0x72,0x37,0x8b,0x15,0x47,0x47,0x3b,0xfd,0x7f,0x1b
0xf6,0xc6,0x80,0x74,0x16,0xf7,0x06,0xa6,0x37,0x01,0x00,0x74,0x06,0xb8,0x08,0x80
0xe9,0xc3,0xfe,0xf6,0xc6,0x40,0x74,0xe0,0xb8,0x07,0x80,0xe9,0xb8,0xfe,0x7d,0x42
0xa3,0x45,0x44,0x29,0x44,0x29,0xb7,0x28,0xe2,0x28,0xee,0x2b,0xf2,0x28,0xf5,0x28
0x01,0x29,0xac,0x2a,0x44,0x29,0x44,0x29,0x44,0x29,0x44,0x29,0x44,0x29,0x00,0x00
0x73,0x36,0x00,0x00,0x03,0x36,0xc5,0x35,0x83,0x35,0x45,0x35,0x07,0x35,0xd2,0x34
0x45,0x34,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0xa6,0x38,0x00,0x00,0xe0,0x38,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0xf2,0x33,0x00,0x00,0xa6,0x33,0x60,0x33,0xfd,0x32,0xbc,0x32,0x77,0x32,0x3c,0x32
0xfb,0x31,0x6a,0x31,0x0a,0x31,0xe0,0xe0,0x10,0x10,0x10,0xe0,0xe0,0xe0,0xe0,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0xe0,0x00,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0
0xe0,0x33,0xff,0x26,0xf6,0x06,0x1a,0x00,0x80,0x74,0x1b,0x26,0x80,0x26,0x1a,0x00
0x7f,0x26,0x8b,0x3e,0x26,0x00,0x83,0xe7,0x1f,0x74,0x0b,0x26,0x80,0x0e,0x20,0x00
0x80,0x26,0x01,0x3e,0x0e,0x00,0xc3,0x60,0x2e,0x8b,0x84,0xa6,0x30,0x26,0xa3,0x18
0x00,0xd1,0xe6,0x2e,0xff,0x94,0x50,0x30,0x61,0xc3,0x26,0xc7,0x06,0x04,0x00,0xc4
0x2a,0x26,0xc7,0x06,0x0e,0x00,0x16,0x00,0x26,0xc7,0x06,0x06,0x00,0x06,0x00,0x26
0xc6,0x06,0x19,0x00,0x00,0xe8,0xbf,0x05,0xe8,0x98,0x05,0x26,0xc7,0x06,0x26,0x00
0x00,0x08,0x26,0xc6,0x06,0x28,0x00,0x40,0x26,0xc6,0x06,0x29,0x00,0x2a,0xbf,0x2a
0x00,0x26,0xc6,0x05,0x04,0x26,0xc6,0x45,0x01,0x2a,0xa1,0x93,0x37,0x33,0xdb,0xa9
0x40,0x00,0x75,0x02,0xb3,0x01,0xa9,0x00,0x10,0x74,0x02,0xb7,0x88,0xa9,0x00,0x08
0x74,0x03,0x80,0xcf,0x44,0x26,0x89,0x5d,0x02,0xc3,0x83,0x0e,0xc2,0x34,0x20,0x26
0xc7,0x06,0x04,0x00,0x6b,0x2b,0x26,0xc7,0x06,0x0e,0x00,0x30,0x00,0x26,0xc7,0x06
0x06,0x00,0x0a,0x00,0x26,0xc7,0x06,0x0a,0x00,0x04,0x00,0x26,0xc6,0x06,0x19,0x00
0x00,0xe8,0x69,0x05,0xe8,0x2c,0x05,0x26,0xc7,0x06,0x26,0x00,0x00,0x22,0x26,0xc6
0x06,0x28,0x00,0x60,0x26,0xc6,0x06,0x29,0x00,0x29,0xbf,0x2a,0x00,0x26,0xc6,0x05
0x08,0x26,0xc6,0x45,0x01,0x2d,0x8d,0x7d,0x02,0xbe,0x54,0x37,0xb9,0x03,0x00,0xf3
0xa5,0x26,0xc6,0x05,0x08,0x26,0xc6,0x45,0x01,0x2e,0x8d,0x7d,0x02,0xbe,0x5a,0x37
0xb9,0x03,0x00,0xf3,0xa5,0xe8,0xd4,0x05,0xe8,0x64,0x05,0xb9,0x06,0x00,0xbe,0x54
0x37,0x8d,0x2e,0x2c,0x00,0x26,0x8b,0x46,0x00,0x29,0x04,0x83,0xc6,0x02,0x83,0xc5
0x02,0x83,0xf9,0x04,0x75,0x02,0x45,0x45,0xe2,0xeb,0xc3,0x26,0xc7,0x06,0x04,0x00
0xc4,0x2a,0x26,0xc7,0x06,0x0e,0x00,0x24,0x00,0x26,0xc7,0x06,0x06,0x00,0x06,0x00
0x26,0xc6,0x06,0x19,0x00,0x00,0xe8,0xe4,0x04,0xe8,0xa7,0x04,0x26,0xc7,0x06,0x26
0x00,0x00,0x16,0x26,0xc6,0x06,0x28,0x00,0x60,0x26,0xc6,0x06,0x29,0x00,0x28,0xbf
0x2a,0x00,0xe8,0x5b,0x06,0xe8,0x74,0x05,0xe8,0x04,0x05,0xc3,0x26,0xc7,0x06,0x04
0x00,0xc4,0x2a,0x26,0xc7,0x06,0x0e,0x00,0x1a,0x00,0x26,0xc7,0x06,0x06,0x00,0x06
0x00,0x26,0xc6,0x06,0x19,0x00,0x00,0xe8,0xa3,0x04,0xe8,0x66,0x04,0x26,0xc7,0x06
0x26,0x00,0x00,0x0c,0x26,0xc6,0x06,0x28,0x00,0x60,0x26,0xc6,0x06,0x29,0x00,0x27
0xbf,0x2a,0x00,0xe8,0x21,0x05,0xc3,0x26,0xc7,0x06,0x04,0x00,0xc4,0x2a,0x26,0xc7
0x06,0x0e,0x00,0x20,0x00,0x26,0xc7,0x06,0x06,0x00,0x0a,0x00,0x26,0xc7,0x06,0x0a
0x00,0x04,0x00,0x26,0xc6,0x06,0x19,0x00,0x00,0xe8,0x4b,0x04,0xe8,0x24,0x04,0x26
0xc7,0x06,0x26,0x00,0x00,0x12,0x26,0xc6,0x06,0x28,0x00,0x40,0x26,0xc6,0x06,0x29
0x00,0x26,0xbf,0x2a,0x00,0xe8,0xf4,0x04,0xe8,0x84,0x04,0xc3,0x26,0xc7,0x06,0x04
0x00,0xc4,0x2a,0x26,0xc7,0x06,0x0e,0x00,0x34,0x00,0x26,0xc7,0x06,0x06,0x00,0x06
0x00,0x26,0xc6,0x06,0x19,0x00,0x00,0xe8,0x0d,0x04,0xe8,0xe6,0x03,0x26,0xc7,0x06
0x26,0x00,0x00,0x26,0x26,0xc6,0x06,0x28,0x00,0x40,0x26,0xc6,0x06,0x29,0x00,0x25
0xbf,0x2a,0x00,0xe8,0xb6,0x04,0xe8,0x46,0x04,0xe8,0xfa,0x04,0xc3,0x26,0xc7,0x06
0x04,0x00,0xc4,0x2a,0x26,0xc7,0x06,0x0e,0x00,0x38,0x00,0xa1,0xa2,0x37,0x50,0x0b
0xc0,0x75,0x07,0x26,0xc7,0x06,0x0e,0x00,0x34,0x00,0x26,0xc7,0x06,0x06,0x00,0x06
0x00,0x26,0xc6,0x06,0x19,0x00,0x00,0xe8,0x99,0x03,0xe8,0xa4,0xfd,0x26,0xc7,0x45
0x26,0x00,0x2a,0x58,0x0b,0xc0,0x75,0x06,0x26,0xc7,0x45,0x26,0x00,0x26,0xa1,0x1c
0x37,0xc1,0xe0,0x04,0x26,0x88,0x45,0x28,0x26,0xc6,0x45,0x29,0x24,0x83,0xc7,0x2a
0xe8,0x29,0x04,0xe8,0xa0,0x04,0xe8,0x22,0x05,0xe8,0xf8,0x03,0xe8,0x09,0x04,0xc3
0x26,0xc7,0x06,0x04,0x00,0xc4,0x2a,0x26,0xc7,0x06,0x0e,0x00,0x32,0x00,0x26,0xc7
0x06,0x06,0x00,0x06,0x00,0x26,0xc6,0x06,0x19,0x00,0x00,0xe8,0x45,0x03,0xe8,0x50
0xfd,0x26,0xc7,0x45,0x26,0x00,0x24,0xa1,0x1c,0x37,0xc1,0xe0,0x04,0x26,0x88,0x45
0x28,0x26,0xc6,0x45,0x29,0x23,0x83,0xc7,0x2a,0xe8,0xe0,0x03,0xe8,0x6c,0x04,0xe8
0x8a,0x04,0xe8,0x9c,0x04,0xc3,0x26,0xc7,0x06,0x04,0x00,0xc4,0x2a,0x26,0xc7,0x06
0x0e,0x00,0x34,0x00,0x26,0xc7,0x06,0x06,0x00,0x06,0x00,0x26,0xc6,0x06,0x19,0x00
0x00,0xe8,0xff,0x02,0xe8,0x0a,0xfd,0x26,0xc7,0x45,0x26,0x00,0x26,0xa1,0x1c,0x37
0xc1,0xe0,0x04,0x26,0x88,0x45,0x28,0x26,0xc6,0x45,0x29,0x22,0x83,0xc7,0x2a,0xe8
0x9a,0x03,0xe8,0xc7,0x03,0xe8,0x57,0x03,0xe8,0xf8,0x03,0xe8,0x78,0x04,0xe8,0x8a
0x04,0xc3,0x26,0xc7,0x06,0x04,0x00,0x74,0x45,0x26,0xc7,0x06,0x0e,0x00,0x3e,0x00
0x26,0xc7,0x06,0x06,0x00,0x06,0x00,0x26,0xc7,0x06,0x0a,0x00,0x04,0x00,0x26,0xc6
0x06,0x19,0x00,0x00,0xe8,0xfc,0x02,0xe8,0xa9,0x02,0x83,0x3e,0x8d,0x37,0x03,0x75
0x01,0x90,0x26,0xc7,0x06,0x26,0x00,0x00,0x30,0x26,0xc6,0x06,0x28,0x00,0x50,0x26
0xc6,0x06,0x29,0x00,0x20,0xbf,0x2a,0x00,0xe8,0xd0,0x03,0xe8,0x01,0x03,0xe8,0xb5
0x03,0xe8,0x9f,0x03,0xc3,0x26,0xc7,0x06,0x04,0x00,0x61,0x43,0xb9,0xf0,0x00,0x83
0xe9,0x02,0x26,0x89,0x0e,0x0e,0x00,0x26,0xc7,0x06,0x06,0x00,0x02,0x00,0x26,0xc6
0x06,0x19,0x00,0x00,0x26,0xc7,0x06,0x1a,0x00,0x00,0x00,0x26,0xc7,0x06,0x1c,0x00
0x00,0x00,0x26,0xc7,0x06,0x1e,0x00,0x00,0x00,0xe8,0x47,0x02,0x83,0xe9,0x0e,0x86
0xcd,0x26,0x89,0x0e,0x26,0x00,0x86,0xcd,0x26,0xc6,0x06,0x28,0x00,0x00,0x26,0xc6
0x06,0x29,0x00,0x08,0xbf,0x2a,0x00,0x83,0xe9,0x04,0x26,0x89,0x0d,0x26,0xc6,0x45
0x01,0x26,0x8d,0x7d,0x02,0x83,0xe9,0x02,0xbb,0x01,0x00,0xb8,0x30,0x30,0x4b,0x75
0x17,0xbb,0x0a,0x00,0x8a,0xc4,0x26,0x88,0x05,0xb0,0x31,0x80,0xc4,0x01,0x80,0xfc
0x3a,0x75,0x0a,0xb4,0x61,0xe9,0x05,0x00,0x26,0x88,0x05,0x04,0x01,0x47,0x49,0x75
0xdd,0xc3,0x26,0xc7,0x06,0x04,0x00,0x04,0x45,0x26,0xc7,0x06,0x0e,0x00,0x12,0x00
0x26,0xc7,0x06,0x06,0x00,0x06,0x00,0x26,0xc6,0x06,0x19,0x00,0x01,0xe8,0xe5,0x01
0xe8,0xd0,0x01,0x26,0xc7,0x06,0x26,0x00,0x00,0x04,0x26,0xc6,0x06,0x28,0x00,0x00
0x26,0xc6,0x06,0x29,0x00,0x07,0xc3,0x26,0xc7,0x06,0x04,0x00,0xc4,0x2a,0x26,0xc7
0x06,0x0e,0x00,0x20,0x00,0x26,0xc7,0x06,0x06,0x00,0x06,0x00,0x26,0xc6,0x06,0x19
0x00,0x06,0xe8,0x04,0x02,0xe8,0x9b,0x01,0x26,0xc7,0x06,0x26,0x00,0x00,0x12,0x26
0xc6,0x06,0x28,0x00,0x00,0x26,0xc6,0x06,0x29,0x00,0x06,0xbf,0x2a,0x00,0xe8,0x6b
0x02,0xe8,0xfb,0x01,0xc3,0x26,0xc7,0x06,0x04,0x00,0xc4,0x2a,0x26,0xc7,0x06,0x0e
0x00,0x20,0x00,0x26,0xc7,0x06,0x06,0x00,0x06,0x00,0x26,0xc6,0x06,0x19,0x00,0x05
0xe8,0xc6,0x01,0xe8,0x5d,0x01,0x26,0xc7,0x06,0x26,0x00,0x00,0x12,0x26,0xc6,0x06
0x28,0x00,0x00,0x26,0xc6,0x06,0x29,0x00,0x05,0xbf,0x2a,0x00,0xe8,0x2d,0x02,0xe8
0xbd,0x01,0xc3,0xff,0x06,0x82,0x34,0x26,0xc7,0x06,0x04,0x00,0x3d,0x41,0x26,0xc7
0x06,0x0e,0x00,0x20,0x00,0x26,0xc7,0x06,0x06,0x00,0x0e,0x00,0x26,0xc6,0x06,0x19
0x00,0x04,0xe8,0x84,0x01,0xe8,0x1b,0x01,0x26,0xc7,0x06,0x26,0x00,0x00,0x12,0x26
0xc6,0x06,0x28,0x00,0x00,0x26,0xc6,0x06,0x29,0x00,0x04,0xbf,0x2a,0x00,0xe8,0xeb
0x01,0xe8,0x7b,0x01,0xc3,0x26,0xc7,0x06,0x04,0x00,0x67,0x42,0x26,0xc7,0x06,0x0e
0x00,0x20,0x00,0x26,0xc7,0x06,0x06,0x00,0x08,0x00,0x26,0xc6,0x06,0x19,0x00,0x03
0xe8,0x46,0x01,0xe8,0xdd,0x00,0x26,0xc7,0x06,0x26,0x00,0x00,0x12,0x26,0xc6,0x06
0x28,0x00,0x00,0x26,0xc6,0x06,0x29,0x00,0x03,0xbf,0x2a,0x00,0xe8,0xad,0x01,0xe8
0x3d,0x01,0xc3,0xff,0x06,0x84,0x34,0x26,0xc7,0x06,0x04,0x00,0x67,0x42,0x26,0xc7
0x06,0x0e,0x00,0x24,0x00,0x26,0xc7,0x06,0x06,0x00,0x08,0x00,0x26,0xc6,0x06,0x19
0x00,0x02,0xe8,0x04,0x01,0xe8,0x9b,0x00,0x26,0xc7,0x06,0x26,0x00,0x00,0x16,0x26
0xc6,0x06,0x28,0x00,0x00,0x26,0xc6,0x06,0x29,0x00,0x02,0xbf,0x2a,0x00,0x26,0xc6
0x05,0x04,0x26,0xc6,0x45,0x01,0x01,0xa1,0x0f,0x37,0x86,0xe0,0xf6,0x06,0x6f,0x37
0x01,0x75,0x0f,0x39,0x06,0xcc,0x34,0x74,0x09,0x8b,0xd8,0xb8,0x89,0x03,0xcd,0x39
0x8b,0xc3,0xa3,0xcc,0x34,0x26,0x89,0x45,0x02,0x8d,0x7d,0x04,0xe8,0x3d,0x01,0xe8
0xcd,0x00,0xc3,0x26,0xc7,0x06,0x04,0x00,0xc4,0x2a,0x26,0xc7,0x06,0x0e,0x00,0x1c
0x00,0xa1,0xa2,0x37,0x50,0x0b,0xc0,0x75,0x07,0x26,0xc7,0x06,0x0e,0x00,0x18,0x00
0x26,0xc7,0x06,0x06,0x00,0x06,0x00,0x26,0xc6,0x06,0x19,0x00,0x00,0xe8,0x23,0x00
0xe8,0x2e,0xfa,0x26,0xc7,0x45,0x26,0x00,0x0e,0x58,0x0b,0xc0,0x75,0x06,0x26,0xc7
0x45,0x26,0x00,0x0a,0x26,0xc6,0x45,0x29,0x00,0x83,0xc7,0x2a,0xe8,0xbd,0x00,0xe8
0xff,0x00,0xc3,0x56,0x57,0x51,0xb9,0x03,0x00,0xbe,0xd1,0x36,0xbf,0x20,0x00,0xf3
0xa5,0x59,0x5f,0x5e,0xc3,0x56,0x57,0x51,0xb9,0x03,0x00,0xbe,0xd1,0x36,0xbf,0x1a
0x00,0xf3,0xa5,0x59,0x5f,0x5e,0xc3,0x26,0xc7,0x06,0x1a,0x00,0xc0,0x00,0x26,0xc7
0x06,0x1c,0x00,0x00,0x00,0x26,0xc7,0x06,0x1e,0x00,0x00,0x10,0xc3,0x26,0xc7,0x06
0x1a,0x00,0xc0,0x00,0x26,0xc7,0x06,0x1c,0x00,0x00,0x00,0x26,0xc7,0x06,0x1e,0x00
0x00,0x08,0xc3,0x26,0xc7,0x06,0x1a,0x00,0xc0,0x00,0x26,0xc7,0x06,0x1c,0x00,0x00
0x00,0x26,0xc7,0x06,0x1e,0x00,0x00,0x02,0xc3,0x26,0xc7,0x06,0x1a,0x00,0xc0,0x00
0x26,0xc7,0x06,0x1c,0x00,0xff,0xff,0x26,0xc7,0x06,0x1e,0x00,0xff,0xff,0xc3,0x26
0xc6,0x05,0x08,0x26,0xc6,0x45,0x01,0x02,0x8d,0x7d,0x02,0xbe,0x05,0x37,0xb9,0x03
0x00,0xf3,0xa5,0xc3,0x26,0xc6,0x05,0x04,0x26,0xc6,0x45,0x01,0x06,0xa1,0x0d,0x37
0x26,0x89,0x45,0x02,0x8d,0x7d,0x04,0xc3,0x26,0xc6,0x05,0x04,0x26,0xc6,0x45,0x01
0x07,0xa1,0x0b,0x37,0x26,0x89,0x45,0x02,0x83,0xc7,0x04,0xc3,0xa1,0xa2,0x37,0x0b
0xc0,0x74,0x13,0x26,0xc6,0x05,0x04,0x26,0xc6,0x45,0x01,0x09,0xa1,0x03,0x37,0x26
0x89,0x45,0x02,0x83,0xc7,0x04,0xc3,0x26,0xc6,0x05,0x08,0x26,0xc6,0x45,0x01,0x02
0x8d,0x7d,0x02,0xbe,0x05,0x37,0xb9,0x03,0x00,0xf3,0xa5,0xc3,0x26,0xc6,0x05,0x06
0x26,0xc6,0x45,0x01,0x0b,0x8d,0x7d,0x02,0xbe,0xef,0x36,0xb9,0x02,0x00,0xf3,0xa5
0xc3,0x26,0xc6,0x05,0x06,0x26,0xc6,0x45,0x01,0x20,0xa1,0x68,0x37,0x26,0x89,0x45
0x02,0xa1,0x6a,0x37,0x26,0x88,0x65,0x05,0xc1,0xe0,0x04,0x26,0x88,0x45,0x04,0x83
0xc7,0x06,0xc3,0x26,0xc6,0x05,0x04,0x26,0xc6,0x45,0x01,0x21,0x26,0xc7,0x45,0x02
0x00,0x00,0x83,0xc7,0x04,0xc3,0x26,0xc6,0x05,0x14,0x26,0xc6,0x45,0x01,0x22,0x8d
0x7d,0x02,0xbe,0x1f,0x37,0xb9,0x09,0x00,0xf3,0xa5,0xc3,0x26,0xc6,0x05,0x0c,0x26
0xc6,0x45,0x01,0x23,0x8d,0x7d,0x02,0x1e,0x0e,0x1f,0x8d,0x36,0x40,0x54,0xb9,0x03
0x00,0xf3,0xa5,0x33,0xc0,0xb9,0x02,0x00,0xf3,0xab,0x1f,0xc3,0x26,0xc6,0x05,0x08
0x26,0xc6,0x45,0x01,0x28,0x8d,0x7d,0x02,0xbe,0xd1,0x36,0xb9,0x03,0x00,0xf3,0xa5
0xc3,0x26,0xc6,0x05,0x08,0x26,0xc6,0x45,0x01,0x29,0xa1,0xc2,0x34,0x86,0xe0,0x26
0x89,0x45,0x02,0xa1,0x9b,0x36,0x26,0x89,0x45,0x04,0x26,0x88,0x45,0x06,0x26,0x88
0x45,0x07,0x8d,0x7d,0x08,0xc3,0x26,0xc6,0x05,0x06,0x26,0xc6,0x45,0x01,0x2b,0x8d
0x7d,0x02,0xbe,0xbb,0x36,0xb9,0x02,0x00,0xf3,0xa5,0xc3,0x26,0xc6,0x05,0x06,0x26
0xc6,0x45,0x01,0x2c,0x8d,0x7d,0x02,0xbe,0xe5,0x36,0xb9,0x02,0x00,0xf3,0xa5,0xc3
0x26,0xc6,0x05,0x04,0x26,0xc6,0x45,0x01,0x30,0xa1,0x37,0x37,0x86,0xe0,0x26,0x89
0x45,0x02,0x8d,0x7d,0x04,0xc3,0x26,0xc7,0x06,0x0e,0x00,0x1e,0x00,0x26,0xc7,0x06
0x06,0x00,0x02,0x00,0x26,0xc6,0x06,0x19,0x00,0x00,0xe8,0x6c,0xfe,0xe8,0x03,0xfe
0x26,0xc7,0x06,0x26,0x00,0x00,0x10,0x26,0xc6,0x06,0x28,0x00,0x30,0x26,0xc6,0x06
0x29,0x00,0x11,0xbf,0x2a,0x00,0xe8,0x35,0x00,0xe8,0x45,0x00,0xe8,0x55,0x00,0xc3
0x26,0xc7,0x06,0x0e,0x00,0x12,0x00,0x26,0xc7,0x06,0x06,0x00,0x02,0x00,0x26,0xc6
0x06,0x19,0x00,0x00,0xe8,0x32,0xfe,0xe8,0xc9,0xfd,0x26,0xc7,0x06,0x26,0x00,0x00
0x04,0x26,0xc6,0x06,0x28,0x00,0x30,0x26,0xc6,0x06,0x29,0x00,0x13,0xc3,0x26,0xc6
0x05,0x04,0x26,0xc6,0x45,0x01,0x0c,0x26,0xc7,0x45,0x02,0x00,0x01,0x83,0xc7,0x04
0xc3,0x26,0xc6,0x05,0x04,0x26,0xc6,0x45,0x01,0x0e,0x26,0xc7,0x45,0x02,0x00,0x02
0x83,0xc7,0x04,0xc3,0x26,0xc6,0x05,0x04,0x26,0xc6,0x45,0x01,0x21,0x26,0xc7,0x45
0x02,0x00,0x00,0x83,0xc7,0x04,0xc3,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0xb3,0x39,0xc9,0x39,0x83,0x3a,0xb3,0x39,0xb3,0x39,0xb3,0x39,0x1c,0x3a,0x1c,0x3a
0xa3,0xb6,0x34,0xa1,0xe9,0x36,0xa3,0x11,0x37,0xa3,0xd2,0x34,0xa1,0xeb,0x36,0xa3
0x13,0x37,0xa3,0xd4,0x34,0xa1,0xed,0x36,0xa3,0x15,0x37,0xa3,0xd6,0x34,0xa1,0x01
0x37,0xa3,0xce,0x34,0xa1,0xf7,0x36,0xa3,0x17,0x37,0xa3,0xdc,0x34,0xa1,0xf9,0x36
0xa3,0x19,0x37,0xa3,0xde,0x34,0xf7,0x06,0x9b,0x36,0x02,0x00,0x75,0x0c,0x33,0xc0
0xa0,0x9e,0x36,0x8b,0xf0,0x2e,0xff,0xa4,0x50,0x39,0xe9,0x0f,0x01,0xbe,0x07,0x00
0xe9,0x19,0xf1,0xf6,0x06,0x9d,0x36,0x80,0x74,0xf3,0xc6,0x06,0xa0,0x36,0x02,0xc6
0x06,0x6e,0x37,0x08,0xc6,0x06,0x70,0x37,0x02,0xb8,0x88,0x03,0xcd,0x39,0xf6,0x06
0x6f,0x37,0x01,0x75,0x4a,0xa1,0xd1,0x36,0x3a,0x06,0xe9,0x36,0x75,0x41,0x3a,0x26
0xea,0x36,0x75,0x3b,0xa1,0xd3,0x36,0x3a,0x06,0xeb,0x36,0x75,0x32,0x3a,0x26,0xec
0x36,0x75,0x2c,0xa1,0xd5,0x36,0x3a,0x06,0xed,0x36,0x75,0x23,0x3a,0x26,0xee,0x36
0x75,0x1d,0xc6,0x06,0x70,0x37,0x02,0xfe,0x0e,0x6e,0x37,0x75,0x0f,0xb8,0x88,0x03
0xcd,0x3a,0x83,0x0e,0x9b,0x36,0x12,0xc6,0x06,0xa0,0x36,0x0c,0xe9,0xa8,0xf0,0xa1
0x05,0x37,0x26,0x3b,0x06,0x20,0x00,0x75,0x40,0xa1,0x07,0x37,0x26,0x3b,0x06,0x22
0x00,0x75,0x36,0xa1,0x09,0x37,0x26,0x3b,0x06,0x24,0x00,0x75,0x2c,0xa0,0x9e,0x36
0x3c,0x02,0x75,0x08,0x26,0xf6,0x06,0x18,0x00,0x08,0x75,0x47,0xc6,0x06,0x6e,0x37
0x08,0xfe,0x0e,0x70,0x37,0x75,0x1c,0xc6,0x06,0x70,0x37,0x02,0xe5,0x02,0x0d,0x01
0x04,0x25,0xef,0xff,0xe7,0x02,0xe9,0x5e,0xf0,0xc6,0x06,0x70,0x37,0x02,0xc6,0x06
0x6e,0x37,0x08,0xe5,0x02,0x25,0xff,0xfb,0x0d,0x01,0x00,0x25,0xef,0xff,0xe7,0x02
0xe9,0x44,0xf0,0xf7,0x06,0x9b,0x36,0x00,0x01,0x74,0x25,0x26,0xf6,0x06,0x18,0x00
0x08,0x75,0xed,0x81,0x26,0x9b,0x36,0x7f,0xff,0xb8,0x89,0x03,0xcd,0x3a,0xb8,0x84
0x03,0xcd,0x3a,0xc6,0x06,0xa0,0x36,0x06,0x83,0x26,0xc2,0x34,0xaf,0xe9,0x17,0xf0
0xa1,0x01,0x37,0x3a,0x26,0x0f,0x37,0x7f,0xc7,0xe9,0xf7,0xfe,0x83,0x26,0x9b,0x36
0xec,0xe8,0x2a,0x0d,0x81,0x0e,0x9b,0x36,0x80,0x00,0xbb,0xff,0x7f,0xcd,0x53,0xc6
0x06,0xa0,0x36,0x02,0xe9,0xf0,0xef,0x83,0x0e,0x9b,0x36,0x11,0xc6,0x06,0xa0,0x36
0x0c,0xe9,0xf9,0xef,0x44,0x3b,0x2c,0x3b,0xc7,0x2a,0x6b,0x3b,0x44,0x3b,0xc7,0x2a
0xc7,0x2a,0xc7,0x2a,0xa3,0xb6,0x34,0x81,0x0e,0xc2,0x34,0x00,0x20,0xf7,0x06,0x41
0x37,0x01,0x00,0x74,0x1b,0x8c,0xc3,0xc7,0x06,0x41,0x37,0x00,0x00,0xb8,0x7f,0x03
0xcd,0x3a,0x33,0xc0,0x8e,0xc0,0xbf,0x54,0x37,0xb9,0x06,0x00,0xf3,0xab,0x8e,0xc3
0x33,0xc0,0xa0,0x9e,0x36,0x8b,0xf0,0x2e,0xff,0xa4,0xe4,0x3a,0xf7,0x06,0x9b,0x36
0x00,0x01,0x75,0x21,0x83,0x26,0xc2,0x34,0xbf,0xa1,0xa9,0x36,0xe7,0x00,0xa1,0x9b
0x36,0xe9,0x09,0x00,0xa1,0x9b,0x36,0x81,0x26,0x9b,0x36,0xff,0xdf,0xa9,0x00,0x20
0x75,0x06,0xe9,0x6e,0x00,0xe9,0x6f,0xef,0x83,0x0e,0x99,0x36,0x04,0xc7,0x06,0x37
0x37,0x01,0x00,0xc6,0x06,0xca,0x34,0x01,0xe9,0x58,0x00,0x83,0x0e,0x9b,0x36,0x40
0xe8,0x58,0x00,0xa1,0x05,0x37,0x3b,0x06,0xe9,0x36,0x75,0x37,0xa1,0x07,0x37,0x3b
0x06,0xeb,0x36,0x75,0x2e,0xa1,0x09,0x37,0x3b,0x06,0xed,0x36,0x75,0x25,0xfe,0x0e
0x71,0x37,0x75,0x1c,0xb8,0x87,0x03,0xcd,0x3a,0x83,0x0e,0x99,0x36,0x10,0xa1,0x50
0x37,0xc7,0x06,0x50,0x37,0x00,0x00,0x09,0x06,0x99,0x36,0xc6,0x06,0xa0,0x36,0x08
0xe9,0x14,0xef,0x83,0x0e,0x99,0x36,0x04,0xc7,0x06,0x37,0x37,0x03,0x00,0xc6,0x06
0xca,0x34,0x03,0xc6,0x06,0xa0,0x36,0x0a,0xe9,0xfc,0xee,0xa1,0xd1,0x36,0x26,0x3b
0x06,0x20,0x00,0x75,0x15,0xa1,0xd3,0x36,0x26,0x3b,0x06,0x22,0x00,0x75,0x12,0xa1
0xd5,0x36,0x26,0x3b,0x06,0x24,0x00,0x75,0x0f,0xc3,0x8d,0x36,0x20,0x00,0xe9,0x0b
0x00,0x8d,0x36,0x22,0x00,0xe9,0x04,0x00,0x8d,0x36,0x24,0x00,0x83,0xc4,0x02,0xf7
0x06,0xe6,0x34,0x01,0x00,0x74,0x15,0x26,0x3a,0x04,0x77,0x08,0x72,0x0e,0x26,0x3a
0x64,0x01,0x72,0x08,0xc6,0x06,0xa0,0x36,0x06,0xe9,0xab,0xee,0xe8,0x7c,0x0a,0x8c
0xc0,0x3d,0xff,0xff,0x74,0x1b,0x26,0xc6,0x06,0x18,0x00,0x10,0x26,0xc7,0x06,0x04
0x00,0x49,0x3c,0x26,0xc7,0x06,0x06,0x00,0x0c,0x00,0xcd,0x50,0xb9,0x4e,0x00,0xe2
0xfe,0xc6,0x06,0xa0,0x36,0x0a,0xe9,0x94,0xee,0xe9,0x7b,0xee,0x8f,0x3c,0x06,0x3d
0x06,0x3d,0x06,0x3d,0xd2,0x3c,0xea,0x3c,0x06,0x3d,0x06,0x3d,0xa3,0xb6,0x34,0x81
0x26,0xc2,0x34,0xaf,0xdf,0xc7,0x06,0x4c,0x37,0x00,0x00,0xb8,0x8a,0x03,0xcd,0x3a
0x80,0x3e,0x9d,0x36,0x04,0x75,0x0c,0x80,0x3e,0x9e,0x36,0x06,0x74,0x05,0xc6,0x06
0x9f,0x36,0x06,0x33,0xc0,0xa0,0x9e,0x36,0x8b,0xf0,0x2e,0xff,0xa4,0x4c,0x3c,0xf7
0x06,0x9b,0x36,0x00,0x20,0x75,0x0e,0x81,0x26,0x9b,0x36,0xff,0xbf,0xb8,0x8b,0x03
0xcd,0x3a,0xe9,0x54,0x00,0xf7,0x06,0x9b,0x36,0x00,0x01,0x74,0x03,0xe9,0x17,0xee
0xc7,0x06,0x37,0x37,0x02,0x00,0xc6,0x06,0xca,0x34,0x02,0x83,0x0e,0x99,0x36,0x04
0x83,0x0e,0x50,0x37,0x04,0xf6,0x06,0x9d,0x36,0x80,0x75,0x2a,0xe8,0x1f,0x0b,0xe9
0x27,0x00,0xf7,0x06,0x9b,0x36,0x00,0x01,0x75,0xd3,0xc7,0x06,0x37,0x37,0x02,0x00
0xc6,0x06,0xca,0x34,0x02,0x83,0x0e,0x99,0x36,0x04,0xc6,0x06,0xa0,0x36,0x00,0xf6
0x06,0x9d,0x36,0x80,0x74,0x03,0xe8,0xde,0x0a,0x81,0x26,0x9b,0x36,0x7c,0xff,0xbb
0xff,0xff,0xcd,0x53,0xcd,0x54,0xe9,0xbe,0xed,0xa3,0xb6,0x34,0xe8,0xad,0x01,0xb8
0x86,0x03,0xcd,0x39,0xc7,0x06,0x4c,0x37,0x00,0x00,0x81,0x26,0xc2,0x34,0xaf,0xdf
0xf6,0x06,0x9d,0x36,0x80,0x74,0x34,0xf7,0x06,0x9b,0x36,0x00,0x20,0x74,0x56,0xf7
0x06,0x9b,0x36,0x00,0x01,0x74,0x27,0xe8,0x35,0x01,0x72,0x1c,0xbe,0x00,0x40,0x85
0x36,0xc2,0x34,0x75,0x08,0x09,0x36,0xc2,0x34,0xff,0x06,0x92,0x34,0xe8,0x8b,0x01
0x73,0x06,0x81,0x0e,0x99,0x36,0x80,0x00,0xe9,0x6c,0xed,0xe9,0xb5,0x00,0xc7,0x06
0x37,0x37,0x02,0x00,0xc6,0x06,0xca,0x34,0x02,0x83,0x0e,0x99,0x36,0x04,0x83,0x0e
0x50,0x37,0x04,0x80,0x3e,0x9e,0x36,0x08,0x74,0x03,0xe8,0x5a,0x0a,0xe8,0xef,0x00
0x72,0xd6,0xe9,0xc8,0xff,0x80,0x3e,0x9e,0x36,0x0a,0x75,0x12,0xc6,0x06,0xa0,0x36
0x00,0xf7,0x06,0x9b,0x36,0x08,0x00,0x74,0x02,0xcd,0x54,0xe8,0x39,0x0a,0x81,0x26
0x9b,0x36,0xff,0xbf,0xe8,0xc8,0x00,0x72,0xaf,0xb8,0x8b,0x03,0xcd,0x39,0xe9,0x9c
0xff,0xf6,0x06,0x9e,0x36,0xff,0x75,0x58,0xa3,0xb6,0x34,0xe8,0xfe,0x00,0x81,0x26
0xc2,0x34,0xff,0xbf,0xf6,0x06,0x9d,0x36,0x80,0x74,0x48,0xf7,0x06,0x9b,0x36,0x00
0x20,0x74,0x22,0xf7,0x06,0x9b,0x36,0x00,0x40,0x75,0x08,0xe8,0x91,0x00,0x72,0x30
0xe9,0x22,0x00,0x26,0xa1,0x0c,0x00,0xa9,0x60,0x00,0x75,0x24,0x81,0x0e,0x66,0x37
0x00,0x08,0xe9,0xd2,0xec,0xc7,0x06,0x4c,0x37,0x00,0x00,0xe8,0x71,0x00,0x72,0x10
0xb8,0x8b,0x03,0xcd,0x39,0xe8,0xd3,0x00,0x73,0x06,0x81,0x0e,0x99,0x36,0x80,0x00
0xe9,0xb4,0xec,0x80,0x3e,0x9d,0x36,0x04,0x75,0x0c,0x80,0x3e,0x9e,0x36,0x06,0x74
0x46,0xc6,0x06,0x9f,0x36,0x06,0xf7,0x06,0x9b,0x36,0x00,0x01,0x74,0x0c,0x80,0x3e
0x9d,0x36,0x08,0x75,0x05,0xc6,0x06,0x9f,0x36,0x0a,0xe8,0x32,0x00,0x72,0xd1,0xe8
0x99,0x00,0x80,0x3e,0x9d,0x36,0x08,0x75,0x13,0x81,0x0e,0x99,0x36,0x80,0x00,0xf7
0x06,0x9b,0x36,0x00,0x20,0x75,0x08,0xb8,0x8b,0x03,0xcd,0x39,0xe9,0x68,0xec,0xc6
0x06,0x9f,0x36,0x0a,0xe9,0x60,0xec,0xb8,0x86,0x03,0xcd,0x3a,0xe9,0x58,0xec,0x26
0xa1,0x0c,0x00,0xa9,0x60,0x00,0x74,0x08,0x81,0x26,0xc2,0x34,0xff,0xbf,0xf9,0xc3
0xf7,0x06,0x9b,0x36,0x00,0x40,0x74,0x13,0x81,0x0e,0x66,0x37,0x00,0x08,0xe8,0x4a
0x00,0x73,0x06,0x81,0x0e,0x99,0x36,0x80,0x00,0xf9,0xc3,0x81,0x0e,0x9b,0x36,0x00
0x40,0x80,0x26,0x6f,0x37,0xfe,0x81,0x26,0x9b,0x36,0x7f,0xff,0xc6,0x06,0xa0,0x36
0x00,0xf8,0xc3,0x81,0x0e,0x99,0x36,0x00,0x01,0xe9,0x21,0xec,0x26,0xa1,0x20,0x00
0xa3,0xfb,0x36,0xa3,0xaa,0x34,0x26,0xa1,0x22,0x00,0xa3,0xfd,0x36,0xa3,0xac,0x34
0x26,0xa1,0x24,0x00,0xa3,0xff,0x36,0xa3,0xae,0x34,0xc3,0xa1,0x05,0x37,0x26,0x3b
0x06,0x20,0x00,0x75,0x19,0xa1,0x07,0x37,0x26,0x3b,0x06,0x22,0x00,0x75,0x0f,0xa1
0x09,0x37,0x26,0x3b,0x06,0x24,0x00,0x75,0x05,0xe8,0x02,0x00,0xf8,0xc3,0x51,0x1e
0x06,0x8b,0xc7,0x8d,0x36,0x20,0x00,0xbf,0x05,0x37,0xb9,0x03,0x00,0x1e,0x06,0x1f
0x07,0xf3,0xa5,0x8b,0xf8,0x8d,0x36,0x20,0x00,0xbf,0xa0,0x34,0xb9,0x03,0x00,0xf3
0xa5,0x07,0x1f,0x59,0x8b,0xf8,0xa1,0x07,0x37,0xa3,0xa6,0x34,0xa1,0x09,0x37,0xa3
0xa8,0x34,0xf9,0xc3,0xc6,0x06,0xb6,0x34,0x01,0xe9,0x8b,0xeb,0xe8,0x87,0x08,0x8b
0xf0,0x05,0x12,0x00,0x26,0x29,0x06,0x0e,0x00,0x26,0x8b,0x44,0x2a,0x26,0x3a,0x06
0x0e,0x00,0x75,0x5b,0x26,0x83,0x2e,0x0e,0x00,0x02,0x80,0xfc,0x27,0x75,0x50,0x26
0x8b,0x44,0x2c,0xa9,0xff,0xff,0x75,0x47,0x8b,0xfe,0x33,0xc0,0x26,0xf6,0x45,0x3c
0x80,0x74,0x06,0x26,0x8a,0x45,0x3a,0x24,0x1f,0x03,0xf8,0x26,0x80,0x7d,0x45,0x09
0x75,0x2d,0x8c,0xc2,0x8e,0x06,0x38,0x34,0x8e,0xda,0x8b,0x0e,0x0e,0x00,0x26,0x89
0x0e,0x0e,0x00,0x8d,0x74,0x2c,0xbf,0x18,0x00,0xf3,0xa4,0x33,0xc0,0x8e,0xd8,0x26
0xc7,0x06,0x04,0x00,0xb5,0x3f,0x26,0xc7,0x06,0x06,0x00,0x06,0x00,0xcd,0x50,0xb8
0x06,0x80,0xe9,0xef,0xe9,0x26,0xa1,0x0c,0x00,0xa3,0x93,0x37,0x83,0x0e,0x99,0x36
0x01,0xe9,0x00,0xeb,0x26,0x80,0x3e,0x1c,0x00,0xff,0x75,0x2f,0x26,0x80,0x3e,0x1e
0x00,0xff,0x75,0x27,0x26,0xf7,0x06,0x0c,0x00,0x40,0x00,0x75,0x1b,0xa1,0xd1,0x36
0x26,0xa3,0x1a,0x00,0xa1,0xd3,0x36,0x26,0xa3,0x1c,0x00,0xa1,0xd5,0x36,0x26,0xa3
0x1e,0x00,0xb8,0x0a,0x80,0xe8,0x36,0x07,0xe9,0xe2,0xea,0xff,0x06,0x90,0x34,0xbe
0x0a,0x00,0xc6,0x06,0xb6,0x34,0x01,0xf6,0x06,0x9d,0x36,0x80,0x75,0x05,0x83,0x0e
0xc2,0x34,0x01,0xe9,0xb6,0xea,0x80,0x3e,0x9d,0x36,0x0a,0x75,0x0f,0x26,0xa1,0x0c
0x00,0x25,0x07,0x00,0x3d,0x04,0x00,0x75,0x03,0xe8,0x79,0x00,0xa1,0xf3,0x36,0x86
0xe0,0xe7,0x1e,0xa3,0xe3,0x36,0x81,0x26,0x0b,0x37,0x00,0x03,0x81,0x26,0x0d,0x37
0x7b,0x7f,0x83,0x0e,0x0d,0x37,0x48,0xe8,0x1e,0x00,0x26,0xa1,0x0c,0x00,0x25,0x07
0x00,0x3d,0x04,0x00,0x74,0x09,0x26,0xf7,0x06,0x0c,0x00,0x20,0x00,0x75,0x06,0xb8
0x01,0x00,0xe9,0x3f,0xe9,0xe9,0x5f,0xea,0xc7,0x06,0x41,0x37,0x00,0x00,0xb8,0x7f
0x03,0xcd,0x3a,0xa1,0x1d,0x37,0xa3,0xc4,0x34,0x86,0xe0,0x68,0x7f,0x03,0x1f,0xa3
0x06,0x00,0x33,0xc0,0x8e,0xd8,0xa1,0x0b,0x37,0xa3,0xb2,0x34,0xa1,0x0d,0x37,0xa3
0xb4,0x34,0xa1,0xf3,0x36,0xa3,0xc8,0x34,0xa1,0xef,0x36,0xa3,0x9c,0x34,0xa1,0xf1
0x36,0xa3,0x9e,0x34,0xc3,0x80,0x0e,0x9d,0x36,0x80,0xbe,0x00,0x00,0xe8,0xb4,0x07
0xb8,0x7b,0x03,0xcd,0x3a,0xb8,0x7c,0x03,0xcd,0x39,0xc7,0x06,0x33,0x37,0x02,0x00
0xa1,0xe5,0x36,0xe7,0x2e,0xa1,0xe7,0x36,0xe7,0x3e,0xb8,0x82,0x03,0xcd,0x3a,0xf7
0x06,0x9b,0x36,0x00,0x20,0x75,0x03,0xe8,0xfd,0x06,0xa1,0xd3,0x36,0xa3,0xef,0x36
0xa3,0x9c,0x34,0xa1,0xd5,0x36,0xa3,0xf1,0x36,0xa3,0x9e,0x34,0xc3,0xf6,0x06,0x9d
0x36,0x80,0x74,0x31,0xbe,0x22,0x00,0xe9,0x17,0x00,0xf6,0x06,0x9d,0x36,0x80,0x74
0x24,0xbe,0x23,0x00,0xe9,0x0a,0x00,0xf6,0x06,0x9d,0x36,0x80,0x74,0x17,0xbe,0x24
0x00,0x56,0xe8,0xa8,0x05,0x8c,0xc0,0x3d,0xff,0xff,0x5e,0x74,0x05,0xe8,0xd7,0xef
0xcd,0x50,0xe9,0x1f,0xe8,0xe9,0x9f,0xe9,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0xb8,0x84,0x03,0xcd,0x3a,0xb8,0x8a,0x03,0xcd,0x39,0xe9,0xf7,0x00,0x80,0x3e,0xa0
0x36,0x08,0x75,0x2e,0xa9,0xd0,0x07,0x75,0x2c,0xa1,0xb1,0x36,0x0d,0x00,0x04,0xe7
0x08,0xe5,0x00,0x25,0xff,0x73,0xe7,0x00,0xb8,0x8a,0x03,0xcd,0x3a,0xe8,0xc3,0x06
0x33,0xc0,0xe7,0x0e,0xe5,0x0a,0x25,0xc3,0x17,0xe7,0x0a,0xcd,0x54,0xc6,0x06,0xa0
0x36,0x00,0xe9,0x68,0xe9,0xbe,0x04,0x00,0xe9,0x3f,0xe9,0x83,0x26,0x9b,0x36,0xbf
0xc6,0x06,0x71,0x37,0x03,0xb8,0x86,0x03,0xcd,0x3a,0xb8,0x88,0x03,0xcd,0x3a,0xb8
0x83,0x03,0xcd,0x3a,0xb8,0x87,0x03,0xcd,0x39,0x81,0x0e,0xc2,0x34,0x00,0x20,0xe9
0x92,0x00,0xe8,0x49,0x06,0xb8,0x87,0x03,0xcd,0x39,0xbb,0xff,0x7f,0xcd,0x53,0xb8
0x84,0x03,0xcd,0x3a,0xb8,0x88,0x03,0xcd,0x3a,0xb8,0x8b,0x03,0xcd,0x3a,0xb8,0x83
0x03,0xcd,0x3a,0xb8,0x86,0x03,0xcd,0x3a,0xb8,0x85,0x03,0xcd,0x3a,0xc3,0xe5,0x00
0x25,0xff,0x53,0xe7,0x00,0x83,0x0e,0xc2,0x34,0x40,0x83,0x26,0xc2,0x34,0xef,0xe8
0x0c,0x06,0xbb,0xff,0x7f,0xcd,0x53,0xb8,0x8a,0x03,0xcd,0x3a,0xb8,0x85,0x03,0xcd
0x3a,0xb8,0x86,0x03,0xcd,0x3a,0xb8,0x83,0x03,0xcd,0x3a,0xb8,0x87,0x03,0xcd,0x3a
0xb8,0x8b,0x03,0xcd,0x3a,0xb8,0x84,0x03,0xcd,0x3a,0xb8,0x89,0x03,0xcd,0x3a,0xc3
0x83,0x0e,0xc2,0x34,0x50,0xe8,0x18,0x04,0xe8,0xd3,0x05,0xf6,0x06,0x6f,0x37,0x01
0x75,0x12,0xb8,0x89,0x03,0xcd,0x39,0x83,0x3e,0x0f,0x37,0x00,0x75,0x06,0xc7,0x06
0x0f,0x37,0x04,0x00,0xa1,0x9d,0x36,0x80,0xfc,0x08,0x74,0x05,0xb8,0x84,0x03,0xcd
0x39,0xe5,0x02,0x0d,0x01,0x08,0x25,0xef,0xff,0xe7,0x02,0xa1,0x9d,0x36,0x86,0xe0
0x32,0xe4,0x8b,0xf0,0xd1,0xee,0x33,0xc0,0x0d,0x20,0x00,0x09,0x06,0xad,0x36,0xa1
0xad,0x36,0xe7,0x04,0xe9,0x53,0xe8,0xe9,0x5a,0xe8,0x33,0xc0,0xa0,0x1b,0x37,0xd1
0xe0,0x3a,0x06,0xa0,0x36,0x75,0x03,0xe9,0xba,0xff,0xe9,0x60,0xe8,0xc7,0x06,0x41
0x37,0x00,0x00,0xe8,0xc1,0xe1,0xe8,0x6a,0x06,0x33,0xc0,0x0d,0x41,0x00,0xe7,0x56
0xa1,0xb1,0x36,0x0d,0x00,0x10,0xe7,0x08,0xe5,0x02,0x25,0xf9,0xff,0x0d,0x03,0x00
0xe7,0x02,0xa1,0xb3,0x36,0xe7,0x0a,0xa1,0xaf,0x36,0xe7,0x06,0xa1,0xad,0x36,0xe7
0x04,0xe8,0x7c,0x03,0xe8,0x9f,0x03,0xc7,0x06,0x1d,0x37,0x00,0xc8,0xc7,0x06,0x0b
0x37,0x00,0x03,0xc7,0x06,0x0d,0x37,0x7b,0x7f,0x33,0xc0,0xa3,0x99,0x36,0xa3,0x9b
0x36,0xa3,0x9d,0x36,0xa3,0x9f,0x36,0xa3,0x4c,0x37,0xa3,0xf3,0x36,0xa3,0xef,0x36
0xa3,0xf1,0x36,0xe8,0x82,0xfd,0xc6,0x06,0x9f,0x36,0x02,0xe9,0xef,0xe7,0xe5,0x02
0x0d,0x01,0x88,0x25,0xef,0xff,0x0d,0x00,0x40,0x0d,0x00,0x04,0xe7,0x02,0xe8,0xf2
0x05,0xe5,0x0a,0x0d,0x40,0x00,0xe7,0x0a,0x33,0xc0,0xa3,0x81,0x37,0xa3,0x85,0x37
0xa3,0x83,0x37,0xa3,0x87,0x37,0xa3,0x89,0x37,0xe5,0x00,0x0d,0x00,0x84,0xe7,0x00
0xb8,0x8c,0x03,0xcd,0x39,0xb8,0x80,0x00,0xcd,0x35,0xc7,0x06,0xaa,0x02,0xff,0xff
0xe5,0x00,0x25,0xff,0x7b,0xe7,0x00,0x81,0x0e,0x9a,0x37,0x80,0x00,0xb8,0x7e,0x03
0xcd,0x39,0x33,0xc0,0xe7,0x0e,0xbe,0x08,0x00,0x8e,0x06,0x38,0x34,0xe8,0xa7,0xed
0x83,0x26,0xef,0x34,0xdf,0xff,0x06,0x81,0x37,0xcd,0x50,0x83,0x0e,0xef,0x34,0x20
0xc3,0xf7,0x06,0x9a,0x37,0x80,0x00,0x74,0x3d,0xa9,0xd0,0x07,0x74,0x10,0xa9,0x00
0x04,0x74,0x12,0x33,0xc0,0xe7,0x0e,0xff,0x06,0x87,0x37,0xe9,0xd2,0xff,0xff,0x06
0x85,0x37,0xe9,0xcb,0xff,0xff,0x06,0x83,0x37,0xe9,0xc4,0xff,0x83,0x26,0x9a,0x37
0x7f,0xa1,0x89,0x37,0x03,0x06,0x87,0x37,0x3d,0x05,0x00,0x7f,0x01,0xc3,0xbb,0xff
0x7f,0xcd,0x53,0xe9,0x00,0x00,0xe5,0x02,0x25,0xff,0xfb,0x25,0xef,0xff,0x0d,0x01
0x00,0xe7,0x02,0xa1,0x83,0x37,0x3b,0x06,0x46,0x37,0x7f,0x2a,0xa1,0x85,0x37,0x3b
0x06,0x48,0x37,0x7c,0x21,0xa1,0x89,0x37,0x03,0x06,0x87,0x37,0x3d,0x05,0x00,0x7f
0x15,0xc6,0x06,0x9f,0x36,0x04,0xe5,0x02,0x25,0xff,0xf7,0x0d,0x01,0x00,0x25,0xef
0xff,0xe7,0x02,0xe9,0xf7,0xe6,0xbe,0x01,0x00,0xf7,0x06,0x9b,0x36,0x03,0x00,0x74
0x0a,0x83,0x26,0x9b,0x36,0xfc,0x83,0x0e,0xc2,0x34,0x04,0xe9,0xd0,0xe6,0xb8,0x7b
0x03,0xcd,0x39,0xe5,0x02,0x0d,0x01,0x60,0x25,0xef,0xff,0xe7,0x02,0xc7,0x06,0xf1
0x34,0x20,0x03,0xb8,0x8e,0x03,0xcd,0x39,0xc3,0x81,0x26,0xc2,0x34,0x7f,0xff,0x80
0x0e,0x6f,0x37,0x01,0xf7,0x06,0x9b,0x36,0x03,0x00,0x74,0xd2,0xb8,0x7b,0x03,0xcd
0x3a,0xb8,0x7d,0x03,0xcd,0x39,0x83,0x26,0x9b,0x36,0xef,0x33,0xc0,0xb0,0x8a,0xa2
0x9f,0x36,0xa2,0x9d,0x36,0xc7,0x06,0x4c,0x37,0x01,0x00,0xc7,0x06,0x0f,0x37,0x04
0x00,0xf7,0x06,0x9b,0x36,0x40,0x00,0x75,0x06,0xc7,0x06,0x0f,0x37,0x03,0x00,0xb8
0x8d,0x03,0xcd,0x39,0xe8,0x00,0xd5,0xe5,0x02,0x0d,0x01,0x40,0x25,0xef,0xff,0x8b
0xd8,0xb8,0x7c,0x03,0xcd,0x39,0xc7,0x06,0x33,0x37,0x02,0x00,0x8b,0xc3,0x0d,0x00
0x20,0x25,0xf9,0xff,0x0b,0x06,0xe8,0x3a,0xe7,0x02,0xc3,0xff,0x0e,0xf1,0x34,0x75
0x01,0xc3,0xe5,0x4e,0xa9,0x01,0x00,0x75,0x12,0xe5,0x00,0xa9,0x00,0x04,0x75,0x05
0x0d,0x00,0x04,0xe7,0x00,0xb8,0x8e,0x03,0xcd,0x39,0xc3,0xe5,0x00,0xa9,0x00,0x04
0x74,0xf3,0x25,0xff,0xfb,0xe7,0x00,0xe9,0xeb,0xff,0xc6,0x06,0xa0,0x36,0x04,0x83
0x26,0x9b,0x36,0xfc,0x81,0x0e,0x9b,0x36,0x80,0x00,0xe9,0x10,0xe6,0xb8,0x8e,0x03
0xcd,0x3a,0xcd,0x54,0x81,0x0e,0xaf,0x36,0x00,0x18,0xa1,0xaf,0x36,0xe7,0x06,0xb8
0x7b,0x03,0xcd,0x39,0xa1,0xd3,0x36,0xa3,0x8f,0x37,0xa1,0xd5,0x36,0xa3,0x91,0x37
0xc7,0x06,0x8b,0x37,0x02,0x00,0xc7,0x06,0x8d,0x37,0x02,0x00,0x83,0x0e,0x99,0x36
0x40,0xe9,0xd9,0xe5,0x80,0x3e,0x9f,0x36,0x06,0x75,0x15,0xa9,0xd0,0x07,0x75,0xec
0x25,0x00,0x18,0x75,0x0e,0xff,0x0e,0x8b,0x37,0x75,0xe1,0xc6,0x06,0x9f,0x36,0x08
0xe9,0xba,0xe5,0xff,0x0e,0x8d,0x37,0x75,0xd3,0xbe,0x08,0x00,0xe9,0x9f,0xe5,0xb8
0x7b,0x03,0xcd,0x39,0xf7,0x06,0x9b,0x36,0x00,0x20,0x74,0x08,0xc6,0x06,0x9f,0x36
0x0a,0xe9,0x0d,0x00,0xf7,0x06,0x9b,0x36,0x00,0x40,0x74,0x0b,0xb8,0x8b,0x03,0xcd
0x39,0x81,0x0e,0x99,0x36,0x80,0x00,0xe9,0x83,0xe5,0xb8,0x7b,0x03,0xcd,0x39,0xc7
0x06,0x8b,0x37,0x04,0x00,0xc7,0x06,0x8d,0x37,0x04,0x00,0x81,0x0e,0x99,0x36,0x00
0x02,0xe9,0x69,0xe5,0xf6,0x06,0x9d,0x36,0x80,0x75,0x1b,0xa9,0xd0,0x07,0x75,0xeb
0xa9,0x00,0x18,0x75,0x0c,0xff,0x0e,0x8d,0x37,0x75,0xe0,0xe8,0x17,0xfb,0xe9,0x4c
0xe5,0xb8,0x82,0x03,0xcd,0x39,0xc3,0xff,0x0e,0x8b,0x37,0x75,0xce,0xbe,0x09,0x00
0xe9,0x2b,0xe5,0xc7,0x06,0x3d,0x37,0x00,0x00,0xc7,0x06,0x9b,0x36,0x00,0x00,0xe8
0x3c,0x02,0x81,0x26,0xaf,0x36,0xff,0xe7,0xa1,0xaf,0x36,0xe7,0x06,0x81,0x26,0x9b
0x36,0xff,0x7f,0xe5,0x02,0x0d,0x01,0x00,0x25,0xef,0xff,0x25,0xff,0xdf,0xe7,0x02
0xbb,0xff,0x7f,0xcd,0x53,0x33,0xc0,0xa3,0x9d,0x36,0xa3,0x9f,0x36,0xe8,0x50,0x00
0xe8,0x73,0x00,0xb8,0x81,0x03,0xcd,0x39,0xc3,0xf7,0x06,0x9b,0x36,0x03,0x00,0x74
0x0d,0xc6,0x06,0x9f,0x36,0x02,0xc6,0x06,0xa0,0x36,0x00,0xe9,0xdf,0xe4,0x83,0x0e
0x9b,0x36,0x10,0xc7,0x06,0x99,0x36,0x00,0x00,0xe8,0xe7,0x02,0xe5,0x56,0x0d,0x02
0x00,0xe7,0x56,0xc7,0x06,0xa8,0x02,0x00,0x00,0x8b,0x36,0x3d,0x37,0xe8,0x44,0x02
0xc6,0x06,0xa0,0x36,0x0e,0xe9,0xb5,0xe4,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x06,0xb8,0x8a,0x03,0xcd,0x3a,0xb8,0x85,0x03,0xcd,0x3a,0xb8,0x86,0x03,0xcd,0x3a
0xb8,0x83,0x03,0xcd,0x3a,0xb8,0x87,0x03,0xcd,0x3a,0xb8,0x8b,0x03,0xcd,0x3a,0xb8
0x88,0x03,0xcd,0x3a,0x07,0xc3,0x06,0xb8,0x88,0x03,0xcd,0x3a,0xb8,0x7b,0x03,0xcd
0x3a,0xb8,0x82,0x03,0xcd,0x3a,0xb8,0x7f,0x03,0xcd,0x3a,0xb8,0x7c,0x03,0xcd,0x3a
0xb8,0x7e,0x03,0xcd,0x3a,0xb8,0x80,0x03,0xcd,0x3a,0xb8,0x81,0x03,0xcd,0x3a,0xb8
0x84,0x03,0xcd,0x3a,0xb8,0x89,0x03,0xcd,0x3a,0xb8,0x7d,0x03,0xcd,0x3a,0xb8,0x8d
0x03,0xcd,0x3a,0xc7,0x06,0x41,0x37,0x00,0x00,0x07,0xc3,0x06,0x8e,0x06,0x38,0x34
0x1f,0x8b,0x0e,0x0e,0x00,0x26,0x89,0x0e,0x0e,0x00,0xbe,0x18,0x00,0xbf,0x18,0x00
0xf3,0xa4,0x06,0x1e,0x07,0xcd,0x34,0x07,0x33,0xc0,0x8e,0xd8,0xc3,0x26,0xf6,0x06
0x20,0x00,0x80,0x74,0x44,0x33,0xc0,0x26,0xa0,0x26,0x00,0x24,0x1f,0x8b,0xf0,0x26
0x8b,0x5c,0x28,0x89,0x1e,0x6a,0x37,0x06,0x8e,0x06,0x38,0x34,0x1f,0xc0,0xe3,0x04
0x26,0x88,0x5c,0x28,0x8b,0xc6,0xb9,0x06,0x00,0xbe,0x20,0x00,0xbf,0x1a,0x00,0xf3
0xa4,0x8b,0xc8,0x83,0xc7,0x06,0xf3,0xa4,0x26,0x81,0x26,0x26,0x00,0x1f,0x80,0x26
0x81,0x36,0x26,0x00,0x00,0x80,0xe9,0xa9,0xff,0x26,0x8b,0x1e,0x28,0x00,0x89,0x1e
0x6a,0x37,0x06,0x8e,0x06,0x38,0x34,0x1f,0xc0,0xe3,0x04,0x26,0x88,0x1e,0x28,0x00
0xb9,0x06,0x00,0xbe,0x20,0x00,0xbf,0x1a,0x00,0xf3,0xa4,0xe9,0x84,0xff,0x86,0xc4
0xa3,0x68,0x37,0xe8,0x87,0xff,0xf7,0x06,0x6a,0x37,0x0f,0x00,0x74,0x10,0x80,0x3e
0x9e,0x36,0x00,0x75,0x09,0xbe,0x00,0x00,0xe8,0xac,0xe9,0xcd,0x50,0xc3,0xc3,0x50
0x56,0x06,0x33,0xc0,0x26,0xf6,0x06,0x20,0x00,0x80,0x74,0x06,0x26,0xa0,0x26,0x00
0x24,0x1f,0x8b,0xf0,0x26,0x8b,0x5c,0x26,0x86,0xfb,0x83,0xeb,0x04,0x74,0x4f,0x83
0xc6,0x2a,0x8c,0xc0,0x8e,0xd8,0xb9,0x07,0x00,0x33,0xc0,0x8e,0xc0,0xbf,0x72,0x37
0xf3,0xab,0x33,0xc9,0x8a,0x0c,0x80,0xf9,0x00,0x75,0x03,0xe9,0x30,0x00,0x3b,0xd9
0x73,0x03,0xe9,0x29,0x00,0x2b,0xd9,0x8a,0x44,0x01,0x25,0x3f,0x00,0x74,0x19,0x3d
0x0b,0x00,0x7d,0x14,0xd1,0xe0,0x8b,0xf8,0x2e,0x8b,0xbd,0x5c,0x49,0x8d,0x74,0x02
0x83,0xe9,0x02,0xf3,0xa4,0xe9,0x02,0x00,0x03,0xf1,0x23,0xdb,0x75,0xc4,0x33,0xc0
0x8e,0xd8,0x07,0x5e,0x58,0xc3,0x33,0xc0,0x26,0xf6,0x06,0x20,0x00,0x80,0x74,0x06
0x26,0xa0,0x26,0x00,0x24,0x1f,0xc3,0xe5,0x0a,0x25,0xc3,0xbf,0xe7,0x0a,0xb8,0x86
0x03,0xcd,0x39,0xb8,0x83,0x03,0xcd,0x39,0x81,0x26,0x9b,0x36,0x7c,0xdf,0xb8,0x85
0x03,0xcd,0x3a,0xe5,0x02,0x25,0xff,0xf3,0x0d,0x01,0x00,0x25,0xef,0xff,0xe7,0x02
0xe5,0x00,0x25,0xff,0x53,0xe7,0x00,0xa1,0xe7,0x36,0x25,0xff,0xfe,0xa3,0xe7,0x36
0xe7,0x3e,0x83,0x26,0x99,0x36,0xcf,0x81,0x0e,0xaf,0x36,0x00,0x10,0xa1,0xaf,0x36
0xe7,0x06,0xc3,0xe5,0x02,0x0d,0x01,0x0c,0x25,0xef,0xff,0xe7,0x02,0xa1,0xe7,0x36
0x0d,0x00,0x01,0xe7,0x3e,0xa3,0xe7,0x36,0x81,0x0e,0x9b,0x36,0x00,0x20,0x83,0x0e
0x99,0x36,0x20,0x81,0x26,0x9b,0x36,0x7c,0xbf,0x81,0x0e,0xaf,0x36,0x00,0x10,0xa1
0xaf,0x36,0xe7,0x06,0xb8,0x86,0x03,0xcd,0x39,0xb8,0x85,0x03,0xcd,0x39,0xb8,0x83
0x03,0xcd,0x3a,0xc3,0x0b,0xf6,0x75,0x49,0x06,0x8e,0x06,0x32,0x34,0x80,0x3e,0xe0
0x34,0x01,0x75,0x1b,0x26,0x89,0x36,0x06,0x00,0x8e,0x06,0x32,0x34,0x26,0xf7,0x06
0x0a,0x00,0x00,0x20,0x74,0x07,0x26,0x81,0x0e,0x08,0x00,0x00,0x20,0x07,0xc3,0x80
0x3e,0xe3,0x34,0x01,0x75,0x19,0x26,0x89,0x36,0x06,0x00,0x8e,0x06,0x32,0x34,0x26
0xf7,0x06,0x0a,0x00,0x00,0x10,0x74,0x07,0x26,0x81,0x0e,0x08,0x00,0x00,0x10,0x07
0xc3,0xe9,0xb4,0xff,0x50,0x51,0x57,0x33,0xc0,0xb9,0x06,0x00,0x8e,0xc0,0xbf,0xd1
0x36,0xf3,0xae,0x5f,0x74,0x0c,0x26,0xf6,0x06,0x00,0x00,0xc0,0x75,0x04,0xf8,0x59
0x58,0xc3,0xf9,0xe9,0xf9,0xff,0x8b,0x05,0x0b,0x45,0x02,0x0b,0x45,0x04,0xc3,0x52
0x50,0xe5,0x06,0x25,0x1e,0x00,0x3d,0x1e,0x00,0x75,0xf6,0xb8,0x01,0x80,0xe7,0x5a
0x58,0x5a,0xc3,0xe8,0xe9,0xff,0x50,0xe5,0x02,0x25,0xff,0x7f,0x0d,0x01,0x00,0x25
0xef,0xff,0xe7,0x02,0x0d,0x00,0x80,0xe7,0x02,0xa1,0xad,0x36,0xe7,0x04,0xa1,0xaf
0x36,0xe7,0x06,0x58,0xc3,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x2e,0x2b,0xce,0x41,0x10,0x42,0x7b,0x41,0x30,0x41,0xa2,0x41,0xaf,0x45,0x44,0x29
0xc7,0x2a,0xc7,0x2a,0x60,0x39,0xf4,0x3a,0x5c,0x3c,0x09,0x3d,0xb1,0x3d,0x34,0x3f
0xc7,0x2a,0x3c,0x3f,0xc7,0x2a,0xc4,0x3f,0x16,0x40,0x16,0x40,0xed,0x40,0xfa,0x40
0x07,0x41,0xc7,0x2a,0xc7,0x2a,0xc7,0x2a,0xc7,0x2a,0xd6,0x52,0x00,0x00,0x01,0x37
0xe9,0x36,0xf3,0x36,0xef,0x36,0x1d,0x37,0x0d,0x37,0x0b,0x37,0x9c,0x37,0x03,0x37
0xfb,0x36,0x62,0x2d,0x40,0x06,0xd1,0x2d,0xf4,0x01,0xba,0x44,0x40,0x06,0x8c,0x43
0x64,0x00,0xe8,0x2c,0xc8,0x00,0xd8,0x2b,0x05,0x00,0xe9,0x45,0x50,0x00,0x97,0x45
0xfa,0x00,0xae,0x2d,0x04,0x01,0x6a,0x42,0x02,0x00,0xf6,0x2c,0xbc,0x02,0x93,0x2d
0xdc,0x05,0x1d,0x2d,0x64,0x00,0xa1,0x2d,0x14,0x00,0xd7,0x3a,0x08,0x07,0x81,0x2d
0x64,0x00,0xb3,0x3e,0x02,0x00,0x30,0x43,0x64,0x00,0xc5,0x2c,0xf4,0x01,0x8b,0x44
0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x80,0x3e,0xfd,0x34,0x02,0x74,0x0c,0xe8,0x20,0x05,0xc7,0x06,0xa1,0x36,0x00,0x00
0xe9,0x9a,0xf8,0xff,0x06,0xc0,0x33,0xe8,0x10,0x05,0x8b,0x36,0x3d,0x37,0xe8,0x73
0xfe,0xc3,0xcd,0x34,0xe9,0xe8,0x05,0xc7,0x06,0xa3,0x36,0x00,0x00,0xc7,0x06,0x41
0x37,0x00,0x00,0xe8,0xed,0xfe,0x33,0xc0,0x0d,0x41,0x00,0xe7,0x56,0xa1,0xb1,0x36
0x0d,0x00,0x10,0xe7,0x08,0xa1,0xb3,0x36,0xe7,0x0a,0xa1,0xaf,0x36,0xe7,0x06,0xa1
0xad,0x36,0xe7,0x04,0xe8,0x2b,0x09,0xc7,0x06,0x1d,0x37,0x00,0xc8,0xc7,0x06,0x0b
0x37,0x00,0x03,0xc7,0x06,0x0d,0x37,0x7b,0x7f,0x33,0xc0,0xa3,0x9b,0x36,0xa3,0x9d
0x36,0xc7,0x06,0x4c,0x37,0x01,0x00,0xc6,0x06,0x9e,0x36,0xff,0xc7,0x06,0x05,0x37
0x00,0x00,0xc7,0x06,0x07,0x37,0x00,0x00,0xc7,0x06,0x09,0x37,0x00,0x00,0xa3,0xf3
0x36,0xa3,0xef,0x36,0xa3,0xf1,0x36,0xe8,0xfe,0xf5,0xe5,0x02,0x25,0xf9,0xff,0x0d
0x03,0x00,0x0d,0x00,0x88,0x25,0xef,0xff,0x0d,0x00,0x40,0x0d,0x00,0x04,0xe7,0x02
0xb8,0x8f,0x03,0xcd,0x39,0xb8,0x80,0x00,0xcd,0x35,0xc7,0x06,0xaa,0x02,0xff,0xff
0xa1,0xa9,0x36,0xa3,0xa7,0x36,0x0d,0x00,0xa4,0x0d,0x00,0x08,0xe7,0x00,0xa3,0xa9
0x36,0xc7,0x06,0xa3,0x36,0x01,0x00,0xc7,0x06,0xa5,0x36,0x0c,0x00,0x83,0x3e,0xa5
0x36,0x00,0x75,0x09,0xc7,0x06,0x3d,0x37,0x05,0x00,0xe9,0x13,0xff,0xff,0x0e,0xa5
0x36,0xbe,0x11,0x00,0xe8,0x22,0x05,0xb8,0x90,0x03,0xcd,0x39,0xc3,0x83,0x3e,0xa3
0x36,0x01,0x74,0xd9,0xc3,0xb8,0x90,0x03,0xcd,0x3a,0x26,0xa0,0x2b,0x00,0x26,0x8b
0x1e,0x2c,0x00,0xcd,0x34,0x83,0x3e,0xa3,0x36,0x01,0x74,0x03,0xe9,0xf0,0x04,0x3c
0x0f,0x75,0x1e,0x81,0xfb,0x00,0x02,0x75,0x18,0x26,0xa1,0x20,0x00,0xa3,0x05,0x37
0x26,0xa1,0x22,0x00,0xa3,0x07,0x37,0x26,0xa1,0x24,0x00,0xa3,0x09,0x37,0xe9,0x09
0x00,0xc7,0x06,0x3d,0x37,0x01,0x00,0xe9,0xb6,0xfe,0xc7,0x06,0xa3,0x36,0x02,0x00
0xc6,0x06,0x9e,0x36,0xff,0xe8,0xcb,0xfd,0xe8,0x1c,0xd9,0x33,0xc0,0xa3,0x85,0x37
0xa3,0x83,0x37,0xa3,0x87,0x37,0xa3,0x89,0x37,0xb8,0x91,0x03,0xcd,0x39,0xb8,0x80
0x00,0xcd,0x35,0xc7,0x06,0xaa,0x02,0xff,0xff,0xe5,0x00,0x25,0xff,0x53,0xe7,0x00
0x81,0x0e,0x9a,0x37,0x80,0x00,0xb8,0x92,0x03,0xcd,0x39,0x33,0xc0,0xe7,0x0e,0xbe
0x08,0x00,0x8e,0x06,0x38,0x34,0xe8,0x8e,0xe5,0x26,0xc7,0x06,0x04,0x00,0x7d,0x4b
0x83,0x26,0xef,0x34,0xdf,0xcd,0x50,0x83,0x0e,0xef,0x34,0x20,0xc3,0xf7,0x06,0x9a
0x37,0x80,0x00,0x74,0x32,0xa9,0xd0,0x07,0x74,0x0c,0xa9,0x00,0x04,0x74,0x0e,0x33
0xc0,0xe7,0x0e,0xe9,0xda,0xff,0xff,0x06,0x85,0x37,0xe9,0xd3,0xff,0xff,0x06,0x83
0x37,0xe9,0xcc,0xff,0xc7,0x06,0x3d,0x37,0x01,0x00,0xe9,0x36,0xfe,0x83,0x26,0x9a
0x37,0x7f,0xbb,0xff,0x7f,0xcd,0x53,0xe5,0x00,0x0d,0x00,0xac,0xe7,0x00,0xe5,0x02
0x25,0xff,0xfb,0x25,0xef,0xff,0x25,0xff,0xf7,0x0d,0x01,0x00,0xe7,0x02,0xa1,0x83
0x37,0x3b,0x06,0x46,0x37,0x7f,0xcd,0xa1,0x85,0x37,0x3b,0x06,0x48,0x37,0x7c,0xc4
0xc7,0x06,0xa3,0x36,0x03,0x00,0xbe,0x13,0x00,0xe8,0xfd,0x03,0xb8,0x93,0x03,0xcd
0x39,0xb8,0x94,0x03,0xcd,0x39,0xb8,0x96,0x03,0xcd,0x39,0xb8,0x95,0x03,0xcd,0x39
0xbe,0x06,0x00,0xe8,0xe3,0x03,0xe9,0xd6,0x03,0x83,0x3e,0xa3,0x36,0x03,0x74,0x01
0xc3,0xbe,0x13,0x00,0xe8,0xd2,0x03,0xb8,0x94,0x03,0xcd,0x39,0xc3,0xb8,0x94,0x03
0xcd,0x3a,0x26,0xa0,0x2b,0x00,0x26,0x8b,0x1e,0x2c,0x00,0xcd,0x34,0x83,0x3e,0xa3
0x36,0x03,0x74,0x03,0xe9,0xa8,0x03,0x3c,0x0d,0x75,0x3e,0x83,0xfb,0x00,0x75,0x39
0xe5,0x02,0x0d,0x00,0x20,0xe7,0x02,0xb8,0x93,0x03,0xcd,0x3a,0xc7,0x06,0xa3,0x36
0x04,0x00,0xbe,0x00,0x00,0xe8,0x0c,0xfc,0xc6,0x06,0x9d,0x36,0x80,0xc6,0x06,0x9e
0x36,0x00,0xc7,0x06,0x33,0x37,0x02,0x00,0xb8,0x9a,0x03,0xcd,0x39,0xe8,0xfc,0x00
0xc7,0x06,0x4c,0x37,0x00,0x00,0xe9,0x66,0x03,0xc7,0x06,0x3d,0x37,0x08,0x00,0xe9
0x61,0xfd,0x83,0x3e,0xa3,0x36,0x03,0x75,0x09,0xc7,0x06,0x3d,0x37,0x05,0x00,0xe9
0x51,0xfd,0xe9,0x4a,0x03,0x83,0x3e,0xa3,0x36,0x04,0x74,0x12,0x83,0x3e,0xa3,0x36
0x05,0x74,0x0b,0xcd,0x34,0xc7,0x06,0x3d,0x37,0x07,0x00,0xe9,0x35,0xfd,0xc7,0x06
0xa3,0x36,0x06,0x00,0xc6,0x06,0x9e,0x36,0xff,0xb8,0x9a,0x03,0xcd,0x3a,0xb8,0x99
0x03,0xcd,0x3a,0xb8,0x96,0x03,0xcd,0x3a,0xb8,0x97,0x03,0xcd,0x39,0xb8,0x98,0x03
0xcd,0x39,0xb8,0x9b,0x03,0xcd,0x39,0xe9,0x18,0xfd,0xcd,0x34,0x83,0x3e,0xa3,0x36
0x04,0x77,0x18,0x83,0x3e,0xa3,0x36,0x03,0x75,0x08,0xf7,0x06,0x9b,0x36,0x00,0x01
0x75,0x09,0xc7,0x06,0x3d,0x37,0x01,0x00,0xe9,0xe8,0xfc,0xe9,0xe1,0x02,0xcd,0x34
0x83,0x3e,0xa3,0x36,0x02,0x77,0x09,0xc7,0x06,0x3d,0x37,0x01,0x00,0xe9,0xd3,0xfc
0x83,0x3e,0xa3,0x36,0x04,0x77,0x05,0xb8,0x96,0x03,0xcd,0x39,0xe9,0xc0,0x02,0x83
0x3e,0xa3,0x36,0x03,0x75,0x10,0x26,0xa1,0x0c,0x00,0x25,0x07,0x00,0x50,0x3d,0x04
0x00,0x75,0x03,0xe8,0x36,0x00,0xa1,0xf3,0x36,0x86,0xe0,0xe7,0x1e,0xa3,0xe3,0x36
0x81,0x26,0x0b,0x37,0x00,0x03,0x81,0x26,0x0d,0x37,0x7b,0x7f,0x83,0x0e,0x0d,0x37
0x48,0xe8,0x14,0xf3,0x58,0x3d,0x04,0x00,0x74,0x09,0x26,0xf7,0x06,0x0c,0x00,0x20
0x00,0x75,0x06,0xb8,0x01,0x00,0xe9,0x7a,0x02,0xe9,0x86,0xfc,0xa1,0xe5,0x36,0xe7
0x2e,0xa1,0xe7,0x36,0xe7,0x3e,0xa1,0xd3,0x36,0xa3,0x9c,0x34,0xa1,0xd5,0x36,0xa3
0x9e,0x34,0xc3,0x26,0x80,0x3e,0x1c,0x00,0xff,0x75,0x2f,0x26,0x80,0x3e,0x1e,0x00
0xff,0x75,0x27,0x26,0xf7,0x06,0x0c,0x00,0x40,0x00,0x75,0x1b,0xa1,0xd1,0x36,0x26
0xa3,0x1a,0x00,0xa1,0xd3,0x36,0x26,0xa3,0x1c,0x00,0xa1,0xd5,0x36,0x26,0xa3,0x1e
0x00,0xb8,0x0a,0x80,0xe9,0x2c,0x02,0xe9,0x38,0xfc,0xff,0x06,0x90,0x34,0xbe,0x0a
0x00,0xc6,0x06,0xb6,0x34,0x01,0xf6,0x06,0x9d,0x36,0x80,0x75,0x05,0x83,0x0e,0xc2
0x34,0x01,0xcd,0x34,0xe9,0x0c,0xfc,0x83,0x3e,0xa3,0x36,0x03,0x75,0x09,0xc7,0x06
0x3d,0x37,0x05,0x00,0xe9,0xfc,0xfb,0xe5,0x02,0x0d,0x03,0x00,0x0d,0x00,0x88,0x0d
0x00,0x40,0x0d,0x00,0x04,0xe7,0x02,0xc7,0x06,0xa3,0x36,0x05,0x00,0xc6,0x06,0x9e
0x36,0xff,0xbe,0x02,0x00,0xe8,0xe1,0x01,0xb8,0x89,0x03,0xcd,0x3a,0xb8,0x9a,0x03
0xcd,0x3a,0xb8,0x99,0x03,0xcd,0x39,0xb8,0x97,0x03,0xcd,0x39,0xb8,0x98,0x03,0xcd
0x39,0xe9,0xbb,0x01,0x83,0x3e,0xa3,0x36,0x03,0x74,0x0a,0x83,0x3e,0xa3,0x36,0x04
0x74,0x03,0xe9,0xaa,0x01,0xbe,0x06,0x00,0xe8,0xae,0x01,0xb8,0x95,0x03,0xcd,0x39
0xe9,0x9c,0x01,0x83,0x3e,0xa3,0x36,0x05,0x74,0x03,0xe9,0x92,0x01,0xbe,0x02,0x00
0xe8,0x96,0x01,0xb8,0x99,0x03,0xcd,0x39,0xe9,0x84,0x01,0xc7,0x06,0x0f,0x37,0x05
0x00,0xe9,0x7b,0x01,0xe5,0x02,0x25,0xff,0xdf,0xe7,0x02,0xc7,0x06,0xa3,0x36,0x07
0x00,0xc7,0x06,0x0f,0x37,0x05,0x00,0xe9,0x65,0x01,0xe8,0xd5,0x04,0xc6,0x06,0x9d
0x36,0x00,0xc7,0x06,0x9b,0x36,0x00,0x00,0xc7,0x06,0x0f,0x37,0x05,0x00,0xc7,0x06
0xa8,0x02,0x00,0x00,0xc7,0x06,0x4c,0x37,0x01,0x00,0xe5,0x02,0x25,0xf9,0xff,0x0d
0x03,0x00,0x0d,0x00,0x88,0x25,0xef,0xff,0x0d,0x00,0x40,0x0d,0x00,0x04,0xe7,0x02
0xe9,0x67,0xfc,0xb8,0x9a,0x03,0xcd,0x39,0xf7,0x06,0xf4,0x33,0x00,0x10,0x75,0x09
0xc7,0x06,0x33,0x37,0x02,0x00,0xe9,0x16,0x01,0xff,0x0e,0x33,0x37,0x74,0x03,0xe9
0x0d,0x01,0xff,0x06,0x8e,0x34,0x83,0x0e,0xc2,0x34,0x08,0xc7,0x06,0x3d,0x37,0x03
0x00,0xe9,0xff,0xfa,0xc3,0x52,0x50,0xba,0xe0,0x00,0xb8,0x00,0x10,0xef,0x58,0x5a
0xc3,0xc7,0x06,0x3d,0x37,0x00,0x00,0xe9,0xe9,0xfa,0xfa,0xe8,0x54,0x04,0xb8,0x80
0x03,0x8e,0xc0,0x26,0xc7,0x06,0x04,0x00,0xd8,0x2b,0xb8,0x7f,0x03,0x8e,0xc0,0x26
0xc7,0x06,0x04,0x00,0xe8,0x2c,0x33,0xc0,0x8e,0xc0,0xa1,0xa7,0x36,0xa3,0xa9,0x36
0xa1,0xa9,0x36,0xe7,0x00,0xa1,0xab,0x36,0xe7,0x02,0xc7,0x06,0x05,0x37,0x00,0x00
0xc7,0x06,0x07,0x37,0x00,0x00,0xc7,0x06,0x09,0x37,0x00,0x00,0xc6,0x06,0x9d,0x36
0x00,0xc6,0x06,0x9e,0x36,0xff,0xc7,0x06,0x9b,0x36,0x00,0x00,0xc7,0x06,0xa3,0x36
0x00,0x00,0xc7,0x06,0x0f,0x37,0x00,0x00,0xc7,0x06,0xa8,0x02,0x00,0x00,0xc7,0x06
0x4c,0x37,0x01,0x00,0x81,0x26,0xaf,0x36,0xff,0xe7,0xa1,0xaf,0x36,0xe7,0x06,0xbb
0xff,0x7f,0xcd,0x53,0xe8,0x7c,0xf9,0xe5,0x56,0x0d,0x02,0x00,0xe7,0x56,0xfb,0xc3
0x8d,0x3e,0xc0,0x53,0x8d,0x36,0xf0,0x38,0xb9,0x0e,0x00,0x8b,0x1e,0x30,0x34,0x89
0x5c,0x02,0x2e,0x8b,0x45,0x02,0x89,0x44,0x06,0x2e,0x8b,0x05,0x89,0x44,0x04,0x83
0xc7,0x04,0x83,0xc6,0x10,0xe2,0xe8,0xb8,0x80,0x03,0x8e,0xc0,0x26,0xc7,0x06,0x04
0x00,0xe2,0x51,0xb8,0x7f,0x03,0x8e,0xc0,0x26,0xc7,0x06,0x04,0x00,0xb2,0x52,0x33
0xc0,0x8e,0xc0,0xc7,0x06,0xa1,0x36,0x01,0x00,0xc7,0x06,0x0f,0x37,0x05,0x00,0xc3
0x33,0xff,0x8e,0x06,0xa6,0x02,0x8b,0x36,0xa4,0x02,0x2e,0xff,0xa4,0xa0,0x53,0xe8
0x8c,0xdb,0xc3,0xe8,0x48,0xf7,0xe9,0xf6,0xff,0x8e,0x06,0x38,0x34,0xe8,0x07,0xe1
0x26,0xc7,0x06,0x04,0x00,0xdf,0x4f,0xcd,0x50,0xc3,0x26,0xc7,0x06,0x0a,0x00,0x00
0x00,0x26,0xff,0x26,0x04,0x00,0xcd,0x34,0xe9,0xd4,0xff,0xa1,0xd1,0x36,0x26,0x39
0x06,0x1a,0x00,0x75,0x22,0xa1,0xd3,0x36,0x26,0x39,0x06,0x1c,0x00,0x75,0x18,0xa1
0xd5,0x36,0x26,0x39,0x06,0x1e,0x00,0x75,0x0e,0x26,0xf7,0x06,0x0c,0x00,0x40,0x00
0x74,0x05,0x83,0x0e,0x66,0x37,0x40,0x81,0x0e,0xaf,0x36,0x00,0x10,0xa1,0xaf,0x36
0xe7,0x06,0x83,0x3e,0xa3,0x36,0x02,0x75,0x05,0xcd,0x34,0xe9,0x56,0xfb,0x83,0x3e
0xa3,0x36,0x00,0x74,0xb1,0x83,0x3e,0xa3,0x36,0x05,0x77,0xaa,0x26,0xf6,0x06,0x0a
0x00,0xff,0x75,0xa2,0xe8,0xfd,0xdd,0x50,0xf6,0x06,0x93,0x36,0x20,0x75,0x03,0xe9
0x8c,0x00,0x26,0xa1,0x0c,0x00,0x25,0x07,0x00,0x3d,0x07,0x00,0x75,0x03,0xe9,0x76
0x00,0x3d,0x05,0x00,0x75,0x03,0xe9,0x6e,0x00,0xf7,0x06,0xe6,0x34,0x18,0x80,0x75
0x03,0xe9,0x6a,0x00,0xf7,0x06,0xe6,0x34,0x00,0x80,0x74,0x35,0x26,0x80,0x3e,0x29
0x00,0x02,0x75,0x2d,0x51,0x56,0x57,0x8d,0x36,0x3e,0x34,0x8d,0x3e,0x20,0x00,0xb9
0x06,0x00,0xf3,0xa6,0x5f,0x5e,0x59,0x75,0x45,0x26,0xa1,0x20,0x00,0xa3,0x3e,0x34
0x26,0xa1,0x22,0x00,0xa3,0x40,0x34,0x26,0xa1,0x24,0x00,0xa3,0x42,0x34,0xe9,0x26
0x00,0xf7,0x06,0xe6,0x34,0x08,0x00,0x74,0x0b,0x26,0x80,0x3e,0x19,0x00,0x00,0x74
0x03,0xe9,0x13,0x00,0xf7,0x06,0xe6,0x34,0x10,0x00,0x74,0x12,0x26,0xa0,0x28,0x00
0xc0,0xe8,0x04,0x22,0xc0,0x74,0x07,0x26,0xc7,0x06,0x04,0x00,0xff,0xff,0x58,0x23
0xc0,0x74,0x03,0xe9,0xdd,0xfe,0x81,0x26,0x9b,0x36,0xff,0xfe,0x26,0xa1,0x20,0x00
0x3b,0x06,0xd1,0x36,0x75,0x1a,0x26,0xa1,0x22,0x00,0x3b,0x06,0xd3,0x36,0x75,0x10
0x26,0xa1,0x24,0x00,0x3b,0x06,0xd5,0x36,0x75,0x06,0x81,0x0e,0x9b,0x36,0x00,0x01
0x26,0xa1,0x20,0x00,0x25,0x7f,0xff,0xa3,0xb8,0x34,0x26,0xa1,0x22,0x00,0xa3,0xba
0x34,0x26,0xa1,0x24,0x00,0xa3,0xbc,0x34,0x8b,0xc6,0x86,0xc4,0xa3,0xc0,0x34,0xd1
0xe6,0x80,0xfc,0x09,0x74,0x03,0xe8,0xf6,0xf5,0xa1,0x05,0x37,0x0b,0x06,0x07,0x37
0x0b,0x06,0x09,0x37,0x74,0x3e,0x26,0xa1,0x20,0x00,0x3b,0x06,0x05,0x37,0x75,0x17
0x26,0xa1,0x22,0x00,0x3b,0x06,0x07,0x37,0x75,0x0d,0x26,0xa1,0x24,0x00,0x3b,0x06
0x09,0x37,0x75,0x03,0xe9,0x1d,0x00,0x26,0xa0,0x28,0x00,0x24,0x0f,0x3c,0x03,0x74
0x1b,0x3c,0x00,0x75,0x0f,0x83,0x3e,0xa3,0x36,0x04,0x74,0x10,0xf7,0x06,0x9b,0x36
0x00,0x01,0x74,0x08,0x2e,0xff,0x94,0xf8,0x53,0xe9,0x33,0xfe,0xcd,0x34,0xc7,0x06
0x3d,0x37,0x01,0x00,0xe9,0x2c,0xf8,0x83,0x3e,0xa3,0x36,0x05,0x74,0x10,0x83,0x3e
0xa3,0x36,0x01,0x7e,0x09,0x83,0xee,0x16,0x2e,0xff,0x94,0x24,0x54,0xc3,0xcd,0x34
0xc3,0x26,0xa1,0x0c,0x00,0x3d,0xff,0x7f,0x74,0x05,0x26,0xff,0x26,0x04,0x00,0xe9
0xfd,0xfd,0xa1,0xf4,0x33,0xa9,0x00,0x88,0x74,0x0b,0xa9,0x00,0x10,0x75,0x09,0x8b
0x1e,0x43,0x37,0xff,0xe3,0xe9,0x97,0x00,0xc7,0x06,0x35,0x37,0x05,0x00,0xc7,0x06
0x43,0x37,0x28,0x52,0xf7,0x06,0xf4,0x33,0x00,0x08,0x74,0x06,0xc7,0x06,0x43,0x37
0x1a,0x52,0xb8,0x80,0x03,0xcd,0x39,0xe9,0xc5,0xfd,0xa9,0x00,0x08,0x74,0xd9,0xff
0x0e,0x35,0x37,0x75,0xed,0xe9,0x30,0x00,0xa9,0x00,0x08,0x75,0xcb,0xff,0x0e,0x35
0x37,0x75,0xdf,0x81,0x0e,0xc2,0x34,0xc0,0x00,0xf6,0x06,0x9d,0x36,0x80,0x74,0x0f
0x81,0x0e,0x9b,0x36,0x00,0x80,0xc7,0x06,0x0f,0x37,0x02,0x00,0xe9,0x90,0xfd,0xc7
0x06,0x3d,0x37,0x02,0x00,0xe9,0x8b,0xf7,0x80,0x26,0x9e,0x36,0xff,0x75,0x30,0xf6
0x06,0x9d,0x36,0x80,0x74,0x20,0xff,0x06,0x94,0x34,0x83,0x0e,0x66,0x37,0x20,0x8e
0x06,0x30,0x34,0x26,0xf7,0x06,0x0a,0x00,0x00,0x01,0x74,0x07,0x26,0x81,0x0e,0x08
0x00,0x00,0x01,0xe9,0x09,0x00,0xc7,0x06,0x3d,0x37,0x04,0x00,0xe9,0x54,0xf7,0x81
0x0e,0xaf,0x36,0x00,0x08,0xa1,0xaf,0x36,0xe7,0x06,0xe5,0x0a,0xa9,0x00,0x80,0x74
0x0e,0x81,0x26,0xaf,0x36,0xff,0xf7,0xa1,0xaf,0x36,0xe7,0x06,0xe9,0x49,0xff,0xe9
0x2d,0xfd,0xc7,0x06,0x41,0x37,0x00,0x00,0xbe,0x29,0x00,0xe8,0x2b,0xfd,0xe9,0x1e
0xfd,0xcd,0x34,0x83,0x3e,0xa3,0x36,0x04,0x77,0x09,0xc7,0x06,0x3d,0x37,0x01,0x00
0xe9,0x10,0xf7,0xe9,0x09,0xfd,0xcd,0x34,0xc3,0xc7,0x06,0x9b,0x36,0x00,0x00,0xe8
0x0c,0xf5,0x81,0x26,0xaf,0x36,0xff,0xe7,0xa1,0xaf,0x36,0xe7,0x06,0x81,0x26,0x9b
0x36,0xff,0x7f,0xe5,0x02,0x0d,0x01,0x00,0x25,0xef,0xff,0x25,0xff,0xdf,0xe7,0x02
0xbb,0xff,0x7f,0xcd,0x53,0x33,0xc0,0xa3,0x9d,0x36,0xa3,0x9f,0x36,0xe8,0x20,0xf3
0xe8,0x43,0xf3,0x83,0x0e,0x9b,0x36,0x10,0xc7,0x06,0x99,0x36,0x00,0x00,0xe8,0xd2
0xf5,0xe5,0x56,0x0d,0x02,0x00,0xe7,0x56,0xc7,0x06,0xa8,0x02,0x00,0x00,0xbe,0x00
0x00,0xe8,0x30,0xf5,0xc6,0x06,0xa0,0x36,0x0e,0xb8,0x9c,0x03,0xcd,0x39,0xb8,0x80
0x00,0xcd,0x35,0xc7,0x06,0xaa,0x02,0xff,0xff,0xc7,0x06,0xa1,0x36,0x01,0x00,0xe9
0xa5,0xf6,0x06,0xb8,0x8f,0x03,0xcd,0x3a,0xb8,0x90,0x03,0xcd,0x3a,0xb8,0x91,0x03
0xcd,0x3a,0xb8,0x92,0x03,0xcd,0x3a,0xb8,0x93,0x03,0xcd,0x3a,0xb8,0x94,0x03,0xcd
0x3a,0xb8,0x95,0x03,0xcd,0x3a,0xb8,0x96,0x03,0xcd,0x3a,0xb8,0x97,0x03,0xcd,0x3a
0xb8,0x98,0x03,0xcd,0x3a,0xb8,0x99,0x03,0xcd,0x3a,0xb8,0x9a,0x03,0xcd,0x3a,0xb8
0x9b,0x03,0xcd,0x3a,0xb8,0x7f,0x03,0xcd,0x3a,0xb8,0x80,0x03,0xcd,0x3a,0x07,0xc3
0xf7,0x49,0xf1,0x4e,0xdf,0x4f,0xdf,0x4f,0xdf,0x4f,0xdf,0x4f,0xf8,0x51,0xdf,0x4f
0xfa,0x4f,0x0b,0x50,0xd1,0x51,0xdf,0x4f,0xdf,0x4f,0xdf,0x4f,0xdf,0x4f,0xdf,0x4f
0xe4,0x4e,0x06,0x00,0xcd,0x4a,0x04,0x00,0xe4,0x4e,0x19,0x00,0xad,0x4b,0xfa,0x00
0x82,0x4c,0x08,0x07,0x09,0x4c,0x14,0x00,0x24,0x4e,0x64,0x00,0xd7,0x4d,0xf4,0x01
0x64,0x4e,0xbc,0x02,0x7a,0x4e,0xe8,0x03,0x43,0x4e,0x02,0x00,0xb3,0x4e,0xf4,0x01
0x5b,0x4e,0xf4,0x01,0xe5,0x4e,0x14,0x00,0x06,0x50,0x06,0x50,0x95,0x4c,0xc1,0x52
0xc1,0x52,0xfe,0x4c,0xda,0x4c,0x06,0x50,0x06,0x50,0x06,0x50,0x06,0x50,0xb7,0x51
0xb7,0x51,0xb7,0x51,0xb7,0x51,0xb7,0x51,0xb7,0x51,0x06,0x50,0xd5,0x4a,0x06,0x50
0x1d,0x4c,0x06,0x50,0x83,0x4d,0x1f,0x4d,0x1f,0x4d,0xed,0x40,0xfa,0x40,0x07,0x41
0x37,0x37,0x2e,0x37,0x37,0x20,0x20,0x79,0x79,0x2f,0x79,0x79,0x2f,0x79,0x79,0x20
0x30,0x31,0x2e,0x39,0x30,0x20,0x20,0x30,0x32,0x2f,0x31,0x37,0x2f,0x39,0x39,0x20
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1422,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1423,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1424,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1425,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1426,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1427,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1428,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1429,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1430,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1431,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1432,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1433,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1434,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1435,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1436,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1437,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1438,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1439,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1440,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1441,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1442,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1443,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1444,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1445,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1446,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1447,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1448,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1449,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1450,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1451,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1452,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1453,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1454,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1455,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1456,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1457,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1458,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1459,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1460,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1461,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1462,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1463,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1464,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1465,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1466,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1467,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1468,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1469,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1470,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1471,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1472,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1473,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1474,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1475,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1476,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1477,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1478,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1479,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1480,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1481,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1482,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1483,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1484,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1485,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1486,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1487,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1488,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1489,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1490,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1491,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1492,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1493,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1494,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1495,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1496,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1497,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1498,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1499,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1500,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1501,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1502,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1503,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1504,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1505,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1506,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1507,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1508,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1509,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1510,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1511,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1512,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1513,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1514,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1515,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1516,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1517,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1518,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1519,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1520,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1521,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1522,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1523,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1524,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1525,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1526,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1527,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1528,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1529,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1530,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1531,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1532,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1533,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1534,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1535,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1536,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1537,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1538,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1539,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1540,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1541,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1542,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1543,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1544,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1545,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1546,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1547,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1548,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1549,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1550,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1551,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1552,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1553,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1554,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1555,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1556,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1557,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1558,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1559,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1560,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1561,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1562,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1563,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1564,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1565,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1566,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1567,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1568,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1569,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1570,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1571,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1572,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1573,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1574,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1575,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1576,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1577,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1578,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1579,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
1580,0x90,0xea,0xc0,0x15,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x13,0x06
1581} ;
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 933fcfbf35e1..d3f39e86eb95 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -24,7 +24,6 @@
 #include <linux/spinlock.h>
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
-#include <linux/fsl_devices.h>
 #include <linux/mii.h>
 #include <linux/phy.h>
 #include <linux/workqueue.h>
@@ -223,10 +222,10 @@ static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
 	       (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
 	       1)));
 
-	skb->dev = ugeth->dev;
+	skb->dev = ugeth->ndev;
 
 	out_be32(&((struct qe_bd __iomem *)bd)->buf,
-		 dma_map_single(&ugeth->dev->dev,
+		 dma_map_single(ugeth->dev,
 			skb->data,
 			ugeth->ug_info->uf_info.max_rx_buf_length +
 			UCC_GETH_RX_DATA_BUF_ALIGNMENT,
@@ -1872,7 +1871,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
 			continue;
 		for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
 			if (ugeth->tx_skbuff[i][j]) {
-				dma_unmap_single(&ugeth->dev->dev,
+				dma_unmap_single(ugeth->dev,
 					in_be32(&((struct qe_bd __iomem *)bd)->buf),
 					(in_be32((u32 __iomem *)bd) &
 					BD_LENGTH_MASK),
@@ -1900,7 +1899,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
 		bd = ugeth->p_rx_bd_ring[i];
 		for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
 			if (ugeth->rx_skbuff[i][j]) {
-				dma_unmap_single(&ugeth->dev->dev,
+				dma_unmap_single(ugeth->dev,
 					in_be32(&((struct qe_bd __iomem *)bd)->buf),
 					ugeth->ug_info->
 					uf_info.max_rx_buf_length +
@@ -3071,7 +3070,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* set up the buffer descriptor */
 	out_be32(&((struct qe_bd __iomem *)bd)->buf,
-		 dma_map_single(&ugeth->dev->dev, skb->data,
+		 dma_map_single(ugeth->dev, skb->data,
 			 skb->len, DMA_TO_DEVICE));
 
 	/* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */
@@ -3127,7 +3126,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
 
 	ugeth_vdbg("%s: IN", __func__);
 
-	dev = ugeth->dev;
+	dev = ugeth->ndev;
 
 	/* collect received buffers */
 	bd = ugeth->rxBd[rxQ];
@@ -3161,7 +3160,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
 			skb_put(skb, length);
 
 			/* Tell the skb what kind of packet this is */
-			skb->protocol = eth_type_trans(skb, ugeth->dev);
+			skb->protocol = eth_type_trans(skb, ugeth->ndev);
 
 			dev->stats.rx_bytes += length;
 			/* Send the packet up the stack */
@@ -3432,7 +3431,7 @@ static int ucc_geth_close(struct net_device *dev)
 
 	ucc_geth_stop(ugeth);
 
-	free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev);
+	free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);
 
 	netif_stop_queue(dev);
 
@@ -3446,7 +3445,7 @@ static void ucc_geth_timeout_work(struct work_struct *work)
 	struct net_device *dev;
 
 	ugeth = container_of(work, struct ucc_geth_private, timeout_work);
-	dev = ugeth->dev;
+	dev = ugeth->ndev;
 
 	ugeth_vdbg("%s: IN", __func__);
 
@@ -3756,7 +3755,8 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
 	memcpy(dev->dev_addr, mac_addr, 6);
 
 	ugeth->ug_info = ug_info;
-	ugeth->dev = dev;
+	ugeth->dev = device;
+	ugeth->ndev = dev;
 	ugeth->node = np;
 
 	return 0;
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index e3a25e64a652..2f8ee7c87efe 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -20,7 +20,6 @@
 
 #include <linux/kernel.h>
 #include <linux/list.h>
-#include <linux/fsl_devices.h>
 
 #include <asm/immap_qe.h>
 #include <asm/qe.h>
@@ -1129,7 +1128,8 @@ struct ucc_geth_info {
 struct ucc_geth_private {
 	struct ucc_geth_info *ug_info;
 	struct ucc_fast_private *uccf;
-	struct net_device *dev;
+	struct device *dev;
+	struct net_device *ndev;
 	struct napi_struct napi;
 	struct work_struct timeout_work;
 	struct ucc_geth __iomem *ug_regs;
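The hunks above split what was one overloaded pointer: the old ugeth->dev was a
struct net_device, and every DMA call had to reach the generic device through
the awkward &ugeth->dev->dev indirection. A minimal sketch of the resulting
pattern, with illustrative names that are not part of this driver:

struct example_priv {
	struct device *dev;		/* generic device, for the DMA API */
	struct net_device *ndev;	/* network device, for the stack */
};

static void example_map(struct example_priv *p, struct sk_buff *skb)
{
	/* before the split this read dma_map_single(&p->ndev->dev, ...) */
	dma_addr_t addr = dma_map_single(p->dev, skb->data, skb->len,
					 DMA_TO_DEVICE);
	(void)addr;	/* mapping-error checks elided in this sketch */
}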
diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c
index a755bea559b9..6fcb500257bc 100644
--- a/drivers/net/ucc_geth_ethtool.c
+++ b/drivers/net/ucc_geth_ethtool.c
@@ -28,7 +28,6 @@
 #include <linux/mm.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
-#include <linux/fsl_devices.h>
 #include <linux/ethtool.h>
 #include <linux/mii.h>
 #include <linux/phy.h>
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index cde423c6d040..f84b78d94c40 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -5,6 +5,7 @@
  * Copyright (C) 2008 Option International
  *                    Filip Aben <f.aben@option.com>
  *                    Denis Joseph Barrow <d.barow@option.com>
+ *                    Jan Dumon <j.dumon@option.com>
  * Copyright (C) 2007 Andrew Bird (Sphere Systems Ltd)
  *                        <ajb@spheresystems.co.uk>
  * Copyright (C) 2008 Greg Kroah-Hartman <gregkh@suse.de>
@@ -462,9 +463,16 @@ static const struct usb_device_id hso_ids[] = {
 	{USB_DEVICE(0x0af0, 0x7701)},
 	{USB_DEVICE(0x0af0, 0x7801)},
 	{USB_DEVICE(0x0af0, 0x7901)},
-	{USB_DEVICE(0x0af0, 0x7361)},
-	{USB_DEVICE(0x0af0, 0xd057)},
+	{USB_DEVICE(0x0af0, 0x8200)},
+	{USB_DEVICE(0x0af0, 0x8201)},
+	{USB_DEVICE(0x0af0, 0xd035)},
 	{USB_DEVICE(0x0af0, 0xd055)},
+	{USB_DEVICE(0x0af0, 0xd155)},
+	{USB_DEVICE(0x0af0, 0xd255)},
+	{USB_DEVICE(0x0af0, 0xd057)},
+	{USB_DEVICE(0x0af0, 0xd157)},
+	{USB_DEVICE(0x0af0, 0xd257)},
+	{USB_DEVICE(0x0af0, 0xd357)},
 	{}
 };
 MODULE_DEVICE_TABLE(usb, hso_ids);
@@ -2410,20 +2418,22 @@ static void hso_free_net_device(struct hso_device *hso_dev)
 	if (!hso_net)
 		return;
 
+	remove_net_device(hso_net->parent);
+
+	if (hso_net->net) {
+		unregister_netdev(hso_net->net);
+		free_netdev(hso_net->net);
+	}
+
 	/* start freeing */
 	for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) {
 		usb_free_urb(hso_net->mux_bulk_rx_urb_pool[i]);
 		kfree(hso_net->mux_bulk_rx_buf_pool[i]);
+		hso_net->mux_bulk_rx_buf_pool[i] = NULL;
 	}
 	usb_free_urb(hso_net->mux_bulk_tx_urb);
 	kfree(hso_net->mux_bulk_tx_buf);
-
-	remove_net_device(hso_net->parent);
-
-	if (hso_net->net) {
-		unregister_netdev(hso_net->net);
-		free_netdev(hso_net->net);
-	}
+	hso_net->mux_bulk_tx_buf = NULL;
 
 	kfree(hso_dev);
 }
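The reordering above is a teardown-ordering fix: the netdev is now unregistered
before the URBs and buffers it sends and receives with are freed, so a packet
still in flight cannot touch freed memory, and the freed pointers are cleared
to guard against a later double free. A condensed sketch of the rule, with
illustrative names that are not from the driver:

struct example_net {
	struct net_device *net;
	void *rx_buf;
};

static void example_teardown(struct example_net *en)
{
	/* 1. Detach from the network stack first; after this no RX/TX
	 *    path can still be running against our buffers. */
	unregister_netdev(en->net);
	free_netdev(en->net);

	/* 2. Only now release the data-path resources, and NULL the
	 *    pointer so a repeated call cannot free it twice. */
	kfree(en->rx_buf);
	en->rx_buf = NULL;
}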
@@ -2526,14 +2536,15 @@ static void hso_create_rfkill(struct hso_device *hso_dev,
 }
 
 /* Creates our network device */
-static struct hso_device *hso_create_net_device(struct usb_interface *interface)
+static struct hso_device *hso_create_net_device(struct usb_interface *interface,
+						int port_spec)
 {
 	int result, i;
 	struct net_device *net;
 	struct hso_net *hso_net;
 	struct hso_device *hso_dev;
 
-	hso_dev = hso_create_device(interface, HSO_INTF_MUX | HSO_PORT_NETWORK);
+	hso_dev = hso_create_device(interface, port_spec);
 	if (!hso_dev)
 		return NULL;
 
@@ -2613,12 +2624,12 @@ static void hso_free_tiomget(struct hso_serial *serial)
 {
 	struct hso_tiocmget *tiocmget = serial->tiocmget;
 	if (tiocmget) {
-		kfree(tiocmget);
 		if (tiocmget->urb) {
 			usb_free_urb(tiocmget->urb);
 			tiocmget->urb = NULL;
 		}
 		serial->tiocmget = NULL;
+		kfree(tiocmget);
 
 	}
 }
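The hso_free_tiomget() change fixes a use-after-free: the old code called
kfree(tiocmget) and then dereferenced tiocmget->urb on the very next lines.
The fix simply moves the kfree() after the last use of the object. The general
shape of the bug and its fix, with illustrative types:

struct holder {
	struct urb *urb;
};

static void free_holder(struct holder *h)
{
	if (h) {
		usb_free_urb(h->urb);	/* touch the object first... */
		h->urb = NULL;
		kfree(h);		/* ...and free it last */
	}
}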
@@ -2933,7 +2944,8 @@ static int hso_probe(struct usb_interface *interface,
 	if ((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) {
 		/* Create the network device */
 		if (!disable_net) {
-			hso_dev = hso_create_net_device(interface);
+			hso_dev = hso_create_net_device(interface,
+							port_spec);
 			if (!hso_dev)
 				goto exit;
 			tmp_dev = hso_dev;
@@ -2965,7 +2977,7 @@ static int hso_probe(struct usb_interface *interface,
 		/* It's a regular bulk interface */
 		if (((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK)
 		    && !disable_net)
-			hso_dev = hso_create_net_device(interface);
+			hso_dev = hso_create_net_device(interface, port_spec);
 		else
 			hso_dev =
 			    hso_create_bulk_serial_device(interface, port_spec);
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 7cb10a0a5316..3d0d0b0b37c5 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -36,7 +36,6 @@
  *		Run test procedures
  *		Fix bugs from previous two steps
  *		Snoop other OSs for any tricks we're not doing
- *		SMP locking
  *		Reduce arbitrary timeouts
  *		Smart multicast support
  *		Temporary MAC change support
@@ -796,7 +795,7 @@ static int kaweth_start_xmit(struct sk_buff *skb, struct net_device *net)
 
 	int res;
 
-	spin_lock(&kaweth->device_lock);
+	spin_lock_irq(&kaweth->device_lock);
 
 	kaweth_async_set_rx_mode(kaweth);
 	netif_stop_queue(net);
@@ -814,7 +813,7 @@ static int kaweth_start_xmit(struct sk_buff *skb, struct net_device *net)
 		if (!copied_skb) {
 			kaweth->stats.tx_errors++;
 			netif_start_queue(net);
-			spin_unlock(&kaweth->device_lock);
+			spin_unlock_irq(&kaweth->device_lock);
 			return 0;
 		}
 	}
@@ -848,7 +847,7 @@ skip:
 		net->trans_start = jiffies;
 	}
 
-	spin_unlock(&kaweth->device_lock);
+	spin_unlock_irq(&kaweth->device_lock);
 
 	return 0;
 }
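The kaweth hunks swap plain spin_lock() for spin_lock_irq() around the transmit
path, presumably because kaweth->device_lock is also taken from URB completion
handlers that run in interrupt context: if such an interrupt fires on the same
CPU while the lock is held with IRQs enabled, the handler spins on a lock its
own CPU already owns and the machine deadlocks. A sketch of the convention
(illustrative function; the _irqsave form is the conservative variant when the
caller's interrupt state is unknown):

static void example_locked_op(spinlock_t *lock)
{
	unsigned long flags;

	/* ndo_start_xmit is entered with IRQs enabled, so the plain _irq
	 * form suffices there; _irqsave also works from unknown contexts. */
	spin_lock_irqsave(lock, flags);
	/* ... touch state shared with the completion handler ... */
	spin_unlock_irqrestore(lock, flags);
}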
diff --git a/drivers/net/vxge/Makefile b/drivers/net/vxge/Makefile
new file mode 100644
index 000000000000..8992ca26b277
--- /dev/null
+++ b/drivers/net/vxge/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for Neterion Inc's X3100 Series 10 GbE PCIe I/O
3# Virtualized Server Adapter linux driver
4
5obj-$(CONFIG_VXGE) += vxge.o
6
7vxge-objs := vxge-config.o vxge-traffic.o vxge-ethtool.o vxge-main.o
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
new file mode 100644
index 000000000000..6b41c884a337
--- /dev/null
+++ b/drivers/net/vxge/vxge-config.c
@@ -0,0 +1,5264 @@
1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-config.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2009 Neterion Inc.
13 ******************************************************************************/
14#include <linux/vmalloc.h>
15#include <linux/etherdevice.h>
16#include <linux/pci.h>
17#include <linux/pci_hotplug.h>
18
19#include "vxge-traffic.h"
20#include "vxge-config.h"
21
22/*
23 * __vxge_hw_channel_allocate - Allocate memory for channel
24 * This function allocates required memory for the channel and various arrays
25 * in the channel
26 */
27struct __vxge_hw_channel*
28__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
29 enum __vxge_hw_channel_type type,
30 u32 length, u32 per_dtr_space, void *userdata)
31{
32 struct __vxge_hw_channel *channel;
33 struct __vxge_hw_device *hldev;
34 int size = 0;
35 u32 vp_id;
36
37 hldev = vph->vpath->hldev;
38 vp_id = vph->vpath->vp_id;
39
40 switch (type) {
41 case VXGE_HW_CHANNEL_TYPE_FIFO:
42 size = sizeof(struct __vxge_hw_fifo);
43 break;
44 case VXGE_HW_CHANNEL_TYPE_RING:
45 size = sizeof(struct __vxge_hw_ring);
46 break;
47 default:
48 break;
49 }
50
51 channel = kzalloc(size, GFP_KERNEL);
52 if (channel == NULL)
53 goto exit0;
54 INIT_LIST_HEAD(&channel->item);
55
56 channel->common_reg = hldev->common_reg;
57 channel->first_vp_id = hldev->first_vp_id;
58 channel->type = type;
59 channel->devh = hldev;
60 channel->vph = vph;
61 channel->userdata = userdata;
62 channel->per_dtr_space = per_dtr_space;
63 channel->length = length;
64 channel->vp_id = vp_id;
65
66 channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
67 if (channel->work_arr == NULL)
68 goto exit1;
69
70 channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
71 if (channel->free_arr == NULL)
72 goto exit1;
73 channel->free_ptr = length;
74
75 channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
76 if (channel->reserve_arr == NULL)
77 goto exit1;
78 channel->reserve_ptr = length;
79 channel->reserve_top = 0;
80
81 channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
82 if (channel->orig_arr == NULL)
83 goto exit1;
84
85 return channel;
86exit1:
87 __vxge_hw_channel_free(channel);
88
89exit0:
90 return NULL;
91}
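/* A condensed sketch of the error handling used above (illustrative names,
 * not part of the driver): every partial-allocation failure funnels through
 * a single free routine, which is safe because kfree(NULL) is a no-op.
 */
struct example_channel {
	void **work_arr;
	void **free_arr;
};

static struct example_channel *example_alloc(u32 length)
{
	struct example_channel *ch = kzalloc(sizeof(*ch), GFP_KERNEL);

	if (!ch)
		return NULL;
	ch->work_arr = kzalloc(sizeof(void *) * length, GFP_KERNEL);
	ch->free_arr = kzalloc(sizeof(void *) * length, GFP_KERNEL);
	if (!ch->work_arr || !ch->free_arr) {
		kfree(ch->work_arr);	/* kfree(NULL) is harmless */
		kfree(ch->free_arr);
		kfree(ch);
		return NULL;
	}
	return ch;
}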
92
93/*
94 * __vxge_hw_channel_free - Free memory allocated for channel
95 * This function deallocates memory from the channel and various arrays
96 * in the channel
97 */
98void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
99{
100 kfree(channel->work_arr);
101 kfree(channel->free_arr);
102 kfree(channel->reserve_arr);
103 kfree(channel->orig_arr);
104 kfree(channel);
105}
106
107/*
108 * __vxge_hw_channel_initialize - Initialize a channel
109 * This function initializes a channel by properly setting the
110 * various references
111 */
112enum vxge_hw_status
113__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
114{
115 u32 i;
116 struct __vxge_hw_virtualpath *vpath;
117
118 vpath = channel->vph->vpath;
119
120 if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
121 for (i = 0; i < channel->length; i++)
122 channel->orig_arr[i] = channel->reserve_arr[i];
123 }
124
125 switch (channel->type) {
126 case VXGE_HW_CHANNEL_TYPE_FIFO:
127 vpath->fifoh = (struct __vxge_hw_fifo *)channel;
128 channel->stats = &((struct __vxge_hw_fifo *)
129 channel)->stats->common_stats;
130 break;
131 case VXGE_HW_CHANNEL_TYPE_RING:
132 vpath->ringh = (struct __vxge_hw_ring *)channel;
133 channel->stats = &((struct __vxge_hw_ring *)
134 channel)->stats->common_stats;
135 break;
136 default:
137 break;
138 }
139
140 return VXGE_HW_OK;
141}
142
143/*
144 * __vxge_hw_channel_reset - Resets a channel
145 * This function resets a channel by properly setting the various references
146 */
147enum vxge_hw_status
148__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
149{
150 u32 i;
151
152 for (i = 0; i < channel->length; i++) {
153 if (channel->reserve_arr != NULL)
154 channel->reserve_arr[i] = channel->orig_arr[i];
155 if (channel->free_arr != NULL)
156 channel->free_arr[i] = NULL;
157 if (channel->work_arr != NULL)
158 channel->work_arr[i] = NULL;
159 }
160 channel->free_ptr = channel->length;
161 channel->reserve_ptr = channel->length;
162 channel->reserve_top = 0;
163 channel->post_index = 0;
164 channel->compl_index = 0;
165
166 return VXGE_HW_OK;
167}
168
169/*
170 * __vxge_hw_device_pci_e_init
171 * Initialize certain PCI/PCI-X configuration registers
172 * with recommended values. Save config space for future hw resets.
173 */
174void
175__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
176{
177 u16 cmd = 0;
178
179 /* Set the PErr Response bit and SERR in PCI command register. */
180 pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
181 cmd |= 0x140;
182 pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);
183
184 pci_save_state(hldev->pdev);
185
186 return;
187}
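/* The magic 0x140 above is PCI_COMMAND_PARITY (0x40) | PCI_COMMAND_SERR
 * (0x100) from <linux/pci_regs.h>; a self-documenting equivalent of the
 * line in question would be:
 *
 *	cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
 */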
188
189/*
190 * __vxge_hw_device_register_poll
191 * Polls the given register for up to the specified amount of time.
192 * Returns VXGE_HW_OK once the masked bits are cleared, VXGE_HW_FAIL on timeout.
193 */
194enum vxge_hw_status
195__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
196{
197 u64 val64;
198 u32 i = 0;
199 enum vxge_hw_status ret = VXGE_HW_FAIL;
200
201 udelay(10);
202
203 do {
204 val64 = readq(reg);
205 if (!(val64 & mask))
206 return VXGE_HW_OK;
207 udelay(100);
208 } while (++i <= 9);
209
210 i = 0;
211 do {
212 val64 = readq(reg);
213 if (!(val64 & mask))
214 return VXGE_HW_OK;
215 mdelay(1);
216 } while (++i <= max_millis);
217
218 return ret;
219}
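/* Worst case, the poll above busy-waits about 10us plus ten udelay(100)
 * spins (~1ms), then falls back to mdelay(1) for up to max_millis more
 * milliseconds before giving up. Hypothetical usage, with an illustrative
 * register and mask (only the timeout constant is real):
 *
 *	status = __vxge_hw_device_register_poll(&vp_reg->example_cmd,
 *			EXAMPLE_CMD_STROBE, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
 */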
220
221 /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
222 * in progress
223 * This routine checks that the vpath reset-in-progress register has turned zero
224 */
225enum vxge_hw_status
226__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
227{
228 enum vxge_hw_status status;
229 status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
230 VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
231 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
232 return status;
233}
234
235/*
236 * __vxge_hw_device_toc_get
237 * This routine sets the swapper, reads the toc pointer, and returns the
238 * memory-mapped address of the toc
239 */
240struct vxge_hw_toc_reg __iomem *
241__vxge_hw_device_toc_get(void __iomem *bar0)
242{
243 u64 val64;
244 struct vxge_hw_toc_reg __iomem *toc = NULL;
245 enum vxge_hw_status status;
246
247 struct vxge_hw_legacy_reg __iomem *legacy_reg =
248 (struct vxge_hw_legacy_reg __iomem *)bar0;
249
250 status = __vxge_hw_legacy_swapper_set(legacy_reg);
251 if (status != VXGE_HW_OK)
252 goto exit;
253
254 val64 = readq(&legacy_reg->toc_first_pointer);
255 toc = (struct vxge_hw_toc_reg __iomem *)(bar0+val64);
256exit:
257 return toc;
258}
259
260/*
261 * __vxge_hw_device_reg_addr_get
262 * This routine sets the swapper, reads the toc pointer, and initializes the
263 * register location pointers in the device object. It then waits until the
264 * vpath reset-in-progress check reports that register initialization is done.
265 */
266enum vxge_hw_status
267__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
268{
269 u64 val64;
270 u32 i;
271 enum vxge_hw_status status = VXGE_HW_OK;
272
273 hldev->legacy_reg = (struct vxge_hw_legacy_reg __iomem *)hldev->bar0;
274
275 hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
276 if (hldev->toc_reg == NULL) {
277 status = VXGE_HW_FAIL;
278 goto exit;
279 }
280
281 val64 = readq(&hldev->toc_reg->toc_common_pointer);
282 hldev->common_reg =
283 (struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64);
284
285 val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
286 hldev->mrpcim_reg =
287 (struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64);
288
289 for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
290 val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
291 hldev->srpcim_reg[i] =
292 (struct vxge_hw_srpcim_reg __iomem *)
293 (hldev->bar0 + val64);
294 }
295
296 for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
297 val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
298 hldev->vpmgmt_reg[i] =
299 (struct vxge_hw_vpmgmt_reg __iomem *)(hldev->bar0 + val64);
300 }
301
302 for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
303 val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
304 hldev->vpath_reg[i] =
305 (struct vxge_hw_vpath_reg __iomem *)
306 (hldev->bar0 + val64);
307 }
308
309 val64 = readq(&hldev->toc_reg->toc_kdfc);
310
311 switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
312 case 0:
313 hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
314 VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
315 break;
316 case 2:
317 hldev->kdfc = (u8 __iomem *)(hldev->bar1 +
318 VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
319 break;
320 case 4:
321 hldev->kdfc = (u8 __iomem *)(hldev->bar2 +
322 VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
323 break;
324 default:
325 break;
326 }
327
328 status = __vxge_hw_device_vpath_reset_in_prog_check(
329 (u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
330exit:
331 return status;
332}
333
334/*
335 * __vxge_hw_device_id_get
336 * This routine reads the device id and revision numbers into the device
337 * structure
338 */
339void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
340{
341 u64 val64;
342
343 val64 = readq(&hldev->common_reg->titan_asic_id);
344 hldev->device_id =
345 (u16)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(val64);
346
347 hldev->major_revision =
348 (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(val64);
349
350 hldev->minor_revision =
351 (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);
352
353 return;
354}
355
356/*
357 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
358 * This routine returns the Access Rights of the driver
359 */
360static u32
361__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
362{
363 u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;
364
365 switch (host_type) {
366 case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
367 if (func_id == 0) {
368 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
369 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
370 }
371 break;
372 case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
373 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
374 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
375 break;
376 case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
377 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
378 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
379 break;
380 case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
381 case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
382 case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
383 break;
384 case VXGE_HW_SR_VH_FUNCTION0:
385 case VXGE_HW_VH_NORMAL_FUNCTION:
386 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
387 break;
388 }
389
390 return access_rights;
391}
392/*
393 * __vxge_hw_device_host_info_get
394 * This routine returns the host type assignments
395 */
396void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
397{
398 u64 val64;
399 u32 i;
400
401 val64 = readq(&hldev->common_reg->host_type_assignments);
402
403 hldev->host_type =
404 (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
405
406 hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
407
408 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
409
410 if (!(hldev->vpath_assignments & vxge_mBIT(i)))
411 continue;
412
413 hldev->func_id =
414 __vxge_hw_vpath_func_id_get(i, hldev->vpmgmt_reg[i]);
415
416 hldev->access_rights = __vxge_hw_device_access_rights_get(
417 hldev->host_type, hldev->func_id);
418
419 hldev->first_vp_id = i;
420 break;
421 }
422
423 return;
424}
425
426/*
427 * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
428 * link width and signalling rate.
429 */
430static enum vxge_hw_status
431__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
432{
433 int exp_cap;
434 u16 lnk;
435
436 /* Get the negotiated link width and speed from PCI config space */
437 exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
438 pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
439
440 if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
441 return VXGE_HW_ERR_INVALID_PCI_INFO;
442
443 switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
444 case PCIE_LNK_WIDTH_RESRV:
445 case PCIE_LNK_X1:
446 case PCIE_LNK_X2:
447 case PCIE_LNK_X4:
448 case PCIE_LNK_X8:
449 break;
450 default:
451 return VXGE_HW_ERR_INVALID_PCI_INFO;
452 }
453
454 return VXGE_HW_OK;
455}
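/* Unpacking the check above: PCI_EXP_LNKSTA_CLS (bits 3:0 of the link
 * status register) holds the negotiated speed code, where 1 means 2.5 GT/s,
 * the only rate this code accepts; PCI_EXP_LNKSTA_NLW (bits 9:4) holds the
 * negotiated lane count. A simplified standalone version of the same test
 * (the switch above is stricter about which width codes it allows):
 */
static bool example_link_ok(u16 lnksta)
{
	u16 speed = lnksta & PCI_EXP_LNKSTA_CLS;
	u16 width = (lnksta & PCI_EXP_LNKSTA_NLW) >> 4;

	return speed == 1 && width <= 8;
}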
456
457static enum vxge_hw_status
458__vxge_hw_device_is_privilaged(struct __vxge_hw_device *hldev)
459{
460 if ((hldev->host_type == VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION ||
461 hldev->host_type == VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION ||
462 hldev->host_type == VXGE_HW_NO_MR_SR_VH0_FUNCTION0) &&
463 (hldev->func_id == 0))
464 return VXGE_HW_OK;
465 else
466 return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
467}
468
469/*
470 * vxge_hw_wrr_rebalance - Rebalance the RX_WRR and KDFC_WRR calendars.
471 * Rebalance the RX_WRR and KDFC_WRR calendars.
472 */
473static enum
474vxge_hw_status vxge_hw_wrr_rebalance(struct __vxge_hw_device *hldev)
475{
476 u64 val64;
477 u32 wrr_states[VXGE_HW_WEIGHTED_RR_SERVICE_STATES];
478 u32 i, j, how_often = 1;
479 enum vxge_hw_status status = VXGE_HW_OK;
480
481 status = __vxge_hw_device_is_privilaged(hldev);
482 if (status != VXGE_HW_OK)
483 goto exit;
484
485 /* Reset the priorities assigned to the WRR arbitration
486 phases for the receive traffic */
487 for (i = 0; i < VXGE_HW_WRR_RING_COUNT; i++)
488 writeq(0, ((&hldev->mrpcim_reg->rx_w_round_robin_0) + i));
489
490 /* Reset the transmit FIFO servicing calendar for FIFOs */
491 for (i = 0; i < VXGE_HW_WRR_FIFO_COUNT; i++) {
492 writeq(0, ((&hldev->mrpcim_reg->kdfc_w_round_robin_0) + i));
493 writeq(0, ((&hldev->mrpcim_reg->kdfc_w_round_robin_20) + i));
494 }
495
496 /* Assign WRR priority 0 for all FIFOs */
497 for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
498 writeq(VXGE_HW_KDFC_FIFO_0_CTRL_WRR_NUMBER(0),
499 ((&hldev->mrpcim_reg->kdfc_fifo_0_ctrl) + i));
500
501 writeq(VXGE_HW_KDFC_FIFO_17_CTRL_WRR_NUMBER(0),
502 ((&hldev->mrpcim_reg->kdfc_fifo_17_ctrl) + i));
503 }
504
505 /* Reset to service non-offload doorbells */
506 writeq(0, &hldev->mrpcim_reg->kdfc_entry_type_sel_0);
507 writeq(0, &hldev->mrpcim_reg->kdfc_entry_type_sel_1);
508
509 /* Set priority 0 to all receive queues */
510 writeq(0, &hldev->mrpcim_reg->rx_queue_priority_0);
511 writeq(0, &hldev->mrpcim_reg->rx_queue_priority_1);
512 writeq(0, &hldev->mrpcim_reg->rx_queue_priority_2);
513
514 /* Initialize all the slots as unused */
515 for (i = 0; i < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; i++)
516 wrr_states[i] = -1;
517
518 /* Prepare the Fifo service states */
519 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
520
521 if (!hldev->config.vp_config[i].min_bandwidth)
522 continue;
523
524 how_often = VXGE_HW_VPATH_BANDWIDTH_MAX /
525 hldev->config.vp_config[i].min_bandwidth;
526 if (how_often) {
527
528 for (j = 0; j < VXGE_HW_WRR_FIFO_SERVICE_STATES;) {
529 if (wrr_states[j] == -1) {
530 wrr_states[j] = i;
531 /* Make sure each fifo is serviced
532 * at least once */
533 if (i == j)
534 j += VXGE_HW_MAX_VIRTUAL_PATHS;
535 else
536 j += how_often;
537 } else
538 j++;
539 }
540 }
541 }
542
543 /* Fill the unused slots with 0 */
544 for (j = 0; j < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; j++) {
545 if (wrr_states[j] == -1)
546 wrr_states[j] = 0;
547 }
548
549 /* Assign WRR priority number for FIFOs */
550 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
551 writeq(VXGE_HW_KDFC_FIFO_0_CTRL_WRR_NUMBER(i),
552 ((&hldev->mrpcim_reg->kdfc_fifo_0_ctrl) + i));
553
554 writeq(VXGE_HW_KDFC_FIFO_17_CTRL_WRR_NUMBER(i),
555 ((&hldev->mrpcim_reg->kdfc_fifo_17_ctrl) + i));
556 }
557
558 /* Modify the servicing algorithm applied to the 3 types of doorbells.
559 i.e., non-offload, message and offload */
560 writeq(VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_0(0) |
561 VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_1(0) |
562 VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_2(0) |
563 VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_3(0) |
564 VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_4(1) |
565 VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_5(0) |
566 VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_6(0) |
567 VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_7(0),
568 &hldev->mrpcim_reg->kdfc_entry_type_sel_0);
569
570 writeq(VXGE_HW_KDFC_ENTRY_TYPE_SEL_1_NUMBER_8(1),
571 &hldev->mrpcim_reg->kdfc_entry_type_sel_1);
572
573 for (i = 0, j = 0; i < VXGE_HW_WRR_FIFO_COUNT; i++) {
574
575 val64 = VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_0(wrr_states[j++]);
576 val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_1(wrr_states[j++]);
577 val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_2(wrr_states[j++]);
578 val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_3(wrr_states[j++]);
579 val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_4(wrr_states[j++]);
580 val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_5(wrr_states[j++]);
581 val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_6(wrr_states[j++]);
582 val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_7(wrr_states[j++]);
583
584 writeq(val64, (&hldev->mrpcim_reg->kdfc_w_round_robin_0 + i));
585 writeq(val64, (&hldev->mrpcim_reg->kdfc_w_round_robin_20 + i));
586 }
587
588 /* Set up the priorities assigned to receive queues */
589 writeq(VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_0(0) |
590 VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_1(1) |
591 VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_2(2) |
592 VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_3(3) |
593 VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_4(4) |
594 VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_5(5) |
595 VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_6(6) |
596 VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_7(7),
597 &hldev->mrpcim_reg->rx_queue_priority_0);
598
599 writeq(VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_8(8) |
600 VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_9(9) |
601 VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_10(10) |
602 VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_11(11) |
603 VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_12(12) |
604 VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_13(13) |
605 VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_14(14) |
606 VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_15(15),
607 &hldev->mrpcim_reg->rx_queue_priority_1);
608
609 writeq(VXGE_HW_RX_QUEUE_PRIORITY_2_RX_Q_NUMBER_16(16),
610 &hldev->mrpcim_reg->rx_queue_priority_2);
611
612 /* Initialize all the slots as unused */
613 for (i = 0; i < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; i++)
614 wrr_states[i] = -1;
615
616 /* Prepare the Ring service states */
617 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
618
619 if (!hldev->config.vp_config[i].min_bandwidth)
620 continue;
621
622 how_often = VXGE_HW_VPATH_BANDWIDTH_MAX /
623 hldev->config.vp_config[i].min_bandwidth;
624
625 if (how_often) {
626 for (j = 0; j < VXGE_HW_WRR_RING_SERVICE_STATES;) {
627 if (wrr_states[j] == -1) {
628 wrr_states[j] = i;
629 /* Make sure each ring is
630 * serviced at least once */
631 if (i == j)
632 j += VXGE_HW_MAX_VIRTUAL_PATHS;
633 else
634 j += how_often;
635 } else
636 j++;
637 }
638 }
639 }
640
641 /* Fill the unused slots with 0 */
642 for (j = 0; j < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; j++) {
643 if (wrr_states[j] == -1)
644 wrr_states[j] = 0;
645 }
646
647 for (i = 0, j = 0; i < VXGE_HW_WRR_RING_COUNT; i++) {
648 val64 = VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_0(
649 wrr_states[j++]);
650 val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_1(
651 wrr_states[j++]);
652 val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_2(
653 wrr_states[j++]);
654 val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_3(
655 wrr_states[j++]);
656 val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_4(
657 wrr_states[j++]);
658 val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_5(
659 wrr_states[j++]);
660 val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_6(
661 wrr_states[j++]);
662 val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_7(
663 wrr_states[j++]);
664
665 writeq(val64, ((&hldev->mrpcim_reg->rx_w_round_robin_0) + i));
666 }
667exit:
668 return status;
669}
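/* Worked example of the slot math above, assuming VXGE_HW_VPATH_BANDWIDTH_MAX
 * is 100, i.e. bandwidth expressed in percent (an assumption, not taken from
 * this file): a vpath with min_bandwidth = 25 gets how_often = 100 / 25 = 4,
 * so after its first slot it claims roughly every 4th free slot in
 * wrr_states[] -- about a quarter of the service calendar -- while vpaths
 * with no minimum share the remaining slots, which are filled with 0 above.
 */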
670
671/*
672 * __vxge_hw_device_initialize
673 * Initialize Titan-V hardware.
674 */
675enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
676{
677 enum vxge_hw_status status = VXGE_HW_OK;
678
679 /* Validate the pci-e link width and speed */
680 status = __vxge_hw_verify_pci_e_info(hldev);
681 if (status != VXGE_HW_OK)
682 goto exit;
683
684 vxge_hw_wrr_rebalance(hldev);
685exit:
686 return status;
687}
688
689/**
690 * vxge_hw_device_hw_info_get - Get the hw information
691 * Returns the vpath mask that has the bits set for each vpath allocated
692 * for the driver, FW version information and the first MAC address for
693 * each vpath
694 */
695enum vxge_hw_status __devinit
696vxge_hw_device_hw_info_get(void __iomem *bar0,
697 struct vxge_hw_device_hw_info *hw_info)
698{
699 u32 i;
700 u64 val64;
701 struct vxge_hw_toc_reg __iomem *toc;
702 struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
703 struct vxge_hw_common_reg __iomem *common_reg;
704 struct vxge_hw_vpath_reg __iomem *vpath_reg;
705 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
706 enum vxge_hw_status status;
707
708 memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));
709
710 toc = __vxge_hw_device_toc_get(bar0);
711 if (toc == NULL) {
712 status = VXGE_HW_ERR_CRITICAL;
713 goto exit;
714 }
715
716 val64 = readq(&toc->toc_common_pointer);
717 common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64);
718
719 status = __vxge_hw_device_vpath_reset_in_prog_check(
720 (u64 __iomem *)&common_reg->vpath_rst_in_prog);
721 if (status != VXGE_HW_OK)
722 goto exit;
723
724 hw_info->vpath_mask = readq(&common_reg->vpath_assignments);
725
726 val64 = readq(&common_reg->host_type_assignments);
727
728 hw_info->host_type =
729 (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
730
731 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
732
733 if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
734 continue;
735
736 val64 = readq(&toc->toc_vpmgmt_pointer[i]);
737
738 vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
739 (bar0 + val64);
740
741 hw_info->func_id = __vxge_hw_vpath_func_id_get(i, vpmgmt_reg);
742 if (__vxge_hw_device_access_rights_get(hw_info->host_type,
743 hw_info->func_id) &
744 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {
745
746 val64 = readq(&toc->toc_mrpcim_pointer);
747
748 mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *)
749 (bar0 + val64);
750
751 writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
752 wmb();
753 }
754
755 val64 = readq(&toc->toc_vpath_pointer[i]);
756
757 vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);
758
759 hw_info->function_mode =
760 __vxge_hw_vpath_pci_func_mode_get(i, vpath_reg);
761
762 status = __vxge_hw_vpath_fw_ver_get(i, vpath_reg, hw_info);
763 if (status != VXGE_HW_OK)
764 goto exit;
765
766 status = __vxge_hw_vpath_card_info_get(i, vpath_reg, hw_info);
767 if (status != VXGE_HW_OK)
768 goto exit;
769
770 break;
771 }
772
773 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
774
775 if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
776 continue;
777
778 val64 = readq(&toc->toc_vpath_pointer[i]);
779 vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);
780
781 status = __vxge_hw_vpath_addr_get(i, vpath_reg,
782 hw_info->mac_addrs[i],
783 hw_info->mac_addr_masks[i]);
784 if (status != VXGE_HW_OK)
785 goto exit;
786 }
787exit:
788 return status;
789}
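/* Hypothetical probe-time usage: the routine needs only a mapped BAR0, so a
 * driver can inspect the adapter before committing to full initialization.
 *
 *	struct vxge_hw_device_hw_info hw_info;
 *
 *	if (vxge_hw_device_hw_info_get(bar0, &hw_info) == VXGE_HW_OK) {
 *		// hw_info.vpath_mask and hw_info.mac_addrs[] are now valid
 *	}
 */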
790
791/*
792 * vxge_hw_device_initialize - Initialize Titan device.
793 * Initialize Titan device. Note that all the arguments of this public API
794 * are 'IN', including @hldev. The driver cooperates with the OS to find a
795 * new Titan device and locate its PCI and memory spaces.
796 *
797 * When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for
798 * the HW layer, which then performs the Titan hardware initialization.
799 */
800enum vxge_hw_status __devinit
801vxge_hw_device_initialize(
802 struct __vxge_hw_device **devh,
803 struct vxge_hw_device_attr *attr,
804 struct vxge_hw_device_config *device_config)
805{
806 u32 i;
807 u32 nblocks = 0;
808 struct __vxge_hw_device *hldev = NULL;
809 enum vxge_hw_status status = VXGE_HW_OK;
810
811 status = __vxge_hw_device_config_check(device_config);
812 if (status != VXGE_HW_OK)
813 goto exit;
814
815 hldev = (struct __vxge_hw_device *)
816 vmalloc(sizeof(struct __vxge_hw_device));
817 if (hldev == NULL) {
818 status = VXGE_HW_ERR_OUT_OF_MEMORY;
819 goto exit;
820 }
821
822 memset(hldev, 0, sizeof(struct __vxge_hw_device));
823 hldev->magic = VXGE_HW_DEVICE_MAGIC;
824
825 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);
826
827 /* apply config */
828 memcpy(&hldev->config, device_config,
829 sizeof(struct vxge_hw_device_config));
830
831 hldev->bar0 = attr->bar0;
832 hldev->bar1 = attr->bar1;
833 hldev->bar2 = attr->bar2;
834 hldev->pdev = attr->pdev;
835
836 hldev->uld_callbacks.link_up = attr->uld_callbacks.link_up;
837 hldev->uld_callbacks.link_down = attr->uld_callbacks.link_down;
838 hldev->uld_callbacks.crit_err = attr->uld_callbacks.crit_err;
839
840 __vxge_hw_device_pci_e_init(hldev);
841
842 status = __vxge_hw_device_reg_addr_get(hldev);
843 if (status != VXGE_HW_OK)
844 goto exit;
845 __vxge_hw_device_id_get(hldev);
846
847 __vxge_hw_device_host_info_get(hldev);
848
849	/* one extra block is needed for the stats */
850 nblocks++;
851
852 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
853
854 if (!(hldev->vpath_assignments & vxge_mBIT(i)))
855 continue;
856
857 if (device_config->vp_config[i].ring.enable ==
858 VXGE_HW_RING_ENABLE)
859 nblocks += device_config->vp_config[i].ring.ring_blocks;
860
861 if (device_config->vp_config[i].fifo.enable ==
862 VXGE_HW_FIFO_ENABLE)
863 nblocks += device_config->vp_config[i].fifo.fifo_blocks;
864 nblocks++;
865 }
866
867 if (__vxge_hw_blockpool_create(hldev,
868 &hldev->block_pool,
869 device_config->dma_blockpool_initial + nblocks,
870 device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {
871
872 vxge_hw_device_terminate(hldev);
873 status = VXGE_HW_ERR_OUT_OF_MEMORY;
874 goto exit;
875 }
876
877 status = __vxge_hw_device_initialize(hldev);
878
879 if (status != VXGE_HW_OK) {
880 vxge_hw_device_terminate(hldev);
881 goto exit;
882 }
883
884 *devh = hldev;
885exit:
886 return status;
887}
888
889/*
890 * vxge_hw_device_terminate - Terminate Titan device.
891 * Terminate HW device.
892 */
893void
894vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
895{
896 vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);
897
898 hldev->magic = VXGE_HW_DEVICE_DEAD;
899 __vxge_hw_blockpool_destroy(&hldev->block_pool);
900 vfree(hldev);
901}
902
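/*
 * Illustrative usage sketch (not part of the driver): a minimal
 * bring-up/tear-down sequence built from the two routines above.
 * "my_bringup" and "my_attr" are hypothetical names; the attribute
 * structure is assumed to have been filled in by the PCI probe code
 * after mapping BAR0/1/2.
 */
#if 0
static enum vxge_hw_status my_bringup(struct vxge_hw_device_attr *my_attr)
{
	struct vxge_hw_device_config config;
	struct __vxge_hw_device *devh = NULL;
	enum vxge_hw_status status;

	/* start from the flash/compile-time defaults */
	status = vxge_hw_device_config_default_get(&config);
	if (status != VXGE_HW_OK)
		return status;

	status = vxge_hw_device_initialize(&devh, my_attr, &config);
	if (status != VXGE_HW_OK)
		return status;

	/* ... open vpaths and run traffic ... */

	vxge_hw_device_terminate(devh);
	return VXGE_HW_OK;
}
#endif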
903/*
904 * vxge_hw_device_stats_get - Get the device hw statistics.
905 * Returns the vpath h/w stats for the device.
906 */
907enum vxge_hw_status
908vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
909 struct vxge_hw_device_stats_hw_info *hw_stats)
910{
911 u32 i;
912 enum vxge_hw_status status = VXGE_HW_OK;
913
914 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
915
916 if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
917 (hldev->virtual_paths[i].vp_open ==
918 VXGE_HW_VP_NOT_OPEN))
919 continue;
920
921 memcpy(hldev->virtual_paths[i].hw_stats_sav,
922 hldev->virtual_paths[i].hw_stats,
923 sizeof(struct vxge_hw_vpath_stats_hw_info));
924
925 status = __vxge_hw_vpath_stats_get(
926 &hldev->virtual_paths[i],
927 hldev->virtual_paths[i].hw_stats);
928 }
929
930 memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
931 sizeof(struct vxge_hw_device_stats_hw_info));
932
933 return status;
934}
935
936/*
937 * vxge_hw_driver_stats_get - Get the device sw statistics.
938 * Returns the vpath s/w stats for the device.
939 */
940enum vxge_hw_status vxge_hw_driver_stats_get(
941 struct __vxge_hw_device *hldev,
942 struct vxge_hw_device_stats_sw_info *sw_stats)
943{
944 enum vxge_hw_status status = VXGE_HW_OK;
945
946 memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
947 sizeof(struct vxge_hw_device_stats_sw_info));
948
949 return status;
950}
951
952/*
953 * vxge_hw_mrpcim_stats_access - Access the statistics at the given
954 * location and offset and perform the requested operation
955 * On a read, the 64-bit result is returned through @stat.
956 */
957enum vxge_hw_status
958vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
959 u32 operation, u32 location, u32 offset, u64 *stat)
960{
961 u64 val64;
962 enum vxge_hw_status status = VXGE_HW_OK;
963
964 status = __vxge_hw_device_is_privilaged(hldev);
965 if (status != VXGE_HW_OK)
966 goto exit;
967
968 val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
969 VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
970 VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
971 VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);
972
973 status = __vxge_hw_pio_mem_write64(val64,
974 &hldev->mrpcim_reg->xmac_stats_sys_cmd,
975 VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
976 hldev->config.device_poll_millis);
977
978 if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
979 *stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
980 else
981 *stat = 0;
982exit:
983 return status;
984}
985
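/*
 * Illustrative sketch (not part of the driver): reading a single
 * 64-bit counter through the access routine above. Location
 * VXGE_HW_STATS_LOC_AGGR and offset 0 are example values only; real
 * callers derive location and offset from the statistics layout, as
 * the two helpers below do.
 */
#if 0
static u64 my_read_one_stat(struct __vxge_hw_device *hldev)
{
	u64 stat = 0;

	if (vxge_hw_mrpcim_stats_access(hldev, VXGE_HW_STATS_OP_READ,
			VXGE_HW_STATS_LOC_AGGR, 0, &stat) != VXGE_HW_OK)
		return 0;
	return stat;
}
#endif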
986/*
987 * vxge_hw_device_xmac_aggr_stats_get - Get the statistics of an aggregate port
988 * Reads the XMAC statistics for the given aggregate port into @aggr_stats.
989 */
990enum vxge_hw_status
991vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
992 struct vxge_hw_xmac_aggr_stats *aggr_stats)
993{
994 u64 *val64;
995 int i;
996 u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
997 enum vxge_hw_status status = VXGE_HW_OK;
998
999 val64 = (u64 *)aggr_stats;
1000
1001 status = __vxge_hw_device_is_privilaged(hldev);
1002 if (status != VXGE_HW_OK)
1003 goto exit;
1004
1005 for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
1006 status = vxge_hw_mrpcim_stats_access(hldev,
1007 VXGE_HW_STATS_OP_READ,
1008 VXGE_HW_STATS_LOC_AGGR,
1009 ((offset + (104 * port)) >> 3), val64);
1010 if (status != VXGE_HW_OK)
1011 goto exit;
1012
1013 offset += 8;
1014 val64++;
1015 }
1016exit:
1017 return status;
1018}
1019
1020/*
1021 * vxge_hw_device_xmac_port_stats_get - Get the statistics of a port
1022 * Reads the XMAC statistics for the given port into @port_stats.
1023 */
1024enum vxge_hw_status
1025vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
1026 struct vxge_hw_xmac_port_stats *port_stats)
1027{
1028 u64 *val64;
1029 enum vxge_hw_status status = VXGE_HW_OK;
1030 int i;
1031 u32 offset = 0x0;
1032 val64 = (u64 *) port_stats;
1033
1034 status = __vxge_hw_device_is_privilaged(hldev);
1035 if (status != VXGE_HW_OK)
1036 goto exit;
1037
1038 for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
1039 status = vxge_hw_mrpcim_stats_access(hldev,
1040 VXGE_HW_STATS_OP_READ,
1041 VXGE_HW_STATS_LOC_AGGR,
1042 ((offset + (608 * port)) >> 3), val64);
1043 if (status != VXGE_HW_OK)
1044 goto exit;
1045
1046 offset += 8;
1047 val64++;
1048 }
1049
1050exit:
1051 return status;
1052}
1053
1054/*
1055 * vxge_hw_device_xmac_stats_get - Get the XMAC statistics
1056 * Collects the aggregate, per-port and per-vpath XMAC statistics.
1057 */
1058enum vxge_hw_status
1059vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
1060 struct vxge_hw_xmac_stats *xmac_stats)
1061{
1062 enum vxge_hw_status status = VXGE_HW_OK;
1063 u32 i;
1064
1065 status = vxge_hw_device_xmac_aggr_stats_get(hldev,
1066 0, &xmac_stats->aggr_stats[0]);
1067
1068 if (status != VXGE_HW_OK)
1069 goto exit;
1070
1071 status = vxge_hw_device_xmac_aggr_stats_get(hldev,
1072 1, &xmac_stats->aggr_stats[1]);
1073 if (status != VXGE_HW_OK)
1074 goto exit;
1075
1076 for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
1077
1078 status = vxge_hw_device_xmac_port_stats_get(hldev,
1079 i, &xmac_stats->port_stats[i]);
1080 if (status != VXGE_HW_OK)
1081 goto exit;
1082 }
1083
1084 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1085
1086 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
1087 continue;
1088
1089 status = __vxge_hw_vpath_xmac_tx_stats_get(
1090 &hldev->virtual_paths[i],
1091 &xmac_stats->vpath_tx_stats[i]);
1092 if (status != VXGE_HW_OK)
1093 goto exit;
1094
1095 status = __vxge_hw_vpath_xmac_rx_stats_get(
1096 &hldev->virtual_paths[i],
1097 &xmac_stats->vpath_rx_stats[i]);
1098 if (status != VXGE_HW_OK)
1099 goto exit;
1100 }
1101exit:
1102 return status;
1103}
1104
1105/*
1106 * vxge_hw_device_debug_set - Set the debug module mask and level
1107 * This routine is used to dynamically change the debug output.
1108 */
1109void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
1110 enum vxge_debug_level level, u32 mask)
1111{
1112 if (hldev == NULL)
1113 return;
1114
1115#if defined(VXGE_DEBUG_TRACE_MASK) || \
1116 defined(VXGE_DEBUG_ERR_MASK)
1117 hldev->debug_module_mask = mask;
1118 hldev->debug_level = level;
1119#endif
1120
1121#if defined(VXGE_DEBUG_ERR_MASK)
1122 hldev->level_err = level & VXGE_ERR;
1123#endif
1124
1125#if defined(VXGE_DEBUG_TRACE_MASK)
1126 hldev->level_trace = level & VXGE_TRACE;
1127#endif
1128}
1129
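/*
 * Illustrative call-site fragment (not part of the driver): since the
 * level is masked with VXGE_ERR and VXGE_TRACE independently, both
 * outputs can be enabled at once for all components, assuming the
 * corresponding debug masks were configured at build time.
 */
#if 0
	vxge_hw_device_debug_set(hldev, VXGE_ERR | VXGE_TRACE,
				 VXGE_COMPONENT_ALL);
#endif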
1130/*
1131 * vxge_hw_device_error_level_get - Get the error level
1132 * This routine returns the current error level set
1133 */
1134u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
1135{
1136#if defined(VXGE_DEBUG_ERR_MASK)
1137 if (hldev == NULL)
1138 return VXGE_ERR;
1139 else
1140 return hldev->level_err;
1141#else
1142 return 0;
1143#endif
1144}
1145
1146/*
1147 * vxge_hw_device_trace_level_get - Get the trace level
1148 * This routine returns the current trace level set
1149 */
1150u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
1151{
1152#if defined(VXGE_DEBUG_TRACE_MASK)
1153 if (hldev == NULL)
1154 return VXGE_TRACE;
1155 else
1156 return hldev->level_trace;
1157#else
1158 return 0;
1159#endif
1160}
1161/*
1162 * vxge_hw_device_debug_mask_get - Get the debug mask
1163 * This routine returns the current debug mask set
1164 */
1165u32 vxge_hw_device_debug_mask_get(struct __vxge_hw_device *hldev)
1166{
1167#if defined(VXGE_DEBUG_TRACE_MASK) || defined(VXGE_DEBUG_ERR_MASK)
1168 if (hldev == NULL)
1169 return 0;
1170 return hldev->debug_module_mask;
1171#else
1172 return 0;
1173#endif
1174}
1175
1176/*
1177 * vxge_hw_device_getpause_data - Pause frame generation and reception.
1178 * Returns the pause frame generation and reception capability of the NIC.
1179 */
1180enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
1181 u32 port, u32 *tx, u32 *rx)
1182{
1183 u64 val64;
1184 enum vxge_hw_status status = VXGE_HW_OK;
1185
1186 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
1187 status = VXGE_HW_ERR_INVALID_DEVICE;
1188 goto exit;
1189 }
1190
1191 if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
1192 status = VXGE_HW_ERR_INVALID_PORT;
1193 goto exit;
1194 }
1195
1196 if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
1197 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
1198 goto exit;
1199 }
1200
1201 val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1202 if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
1203 *tx = 1;
1204 if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
1205 *rx = 1;
1206exit:
1207 return status;
1208}
1209
1210/*
1211 * vxge_hw_device_setpause_data - set/reset pause frame generation/reception.
1212 * It can be used to set or reset Pause frame generation or reception
1213 * support of the NIC.
1214 */
1215
1216enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
1217 u32 port, u32 tx, u32 rx)
1218{
1219 u64 val64;
1220 enum vxge_hw_status status = VXGE_HW_OK;
1221
1222 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
1223 status = VXGE_HW_ERR_INVALID_DEVICE;
1224 goto exit;
1225 }
1226
1227 if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
1228 status = VXGE_HW_ERR_INVALID_PORT;
1229 goto exit;
1230 }
1231
1232 status = __vxge_hw_device_is_privilaged(hldev);
1233 if (status != VXGE_HW_OK)
1234 goto exit;
1235
1236 val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1237 if (tx)
1238 val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
1239 else
1240 val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
1241 if (rx)
1242 val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
1243 else
1244 val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
1245
1246 writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1247exit:
1248 return status;
1249}
1250
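/*
 * Illustrative sketch (not part of the driver): enable pause frame
 * generation and reception on port 0, then read the setting back.
 * Both calls require MRPCIM access rights and otherwise fail with
 * VXGE_HW_ERR_PRIVILAGED_OPEARATION.
 */
#if 0
static void my_enable_flow_control(struct __vxge_hw_device *hldev)
{
	u32 tx = 0, rx = 0;

	if (vxge_hw_device_setpause_data(hldev, 0, 1, 1) == VXGE_HW_OK)
		vxge_hw_device_getpause_data(hldev, 0, &tx, &rx);
}
#endif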
1251u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
1252{
1253 int link_width, exp_cap;
1254 u16 lnk;
1255
1256 exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
1257 pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
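	/* the negotiated link width is encoded in bits 9:4 of LNKSTA */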
1258 link_width = (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
1259 return link_width;
1260}
1261
1262/*
1263 * __vxge_hw_ring_block_memblock_idx - Return the memblock index
1264 * This function returns the index of memory block
1265 */
1266static inline u32
1267__vxge_hw_ring_block_memblock_idx(u8 *block)
1268{
1269 return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
1270}
1271
1272/*
1273 * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
1274 * This function sets index to a memory block
1275 */
1276static inline void
1277__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
1278{
1279 *((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
1280}
1281
1282/*
1283 * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
1284 * in RxD block
1285 * Sets the next block pointer in RxD block
1286 */
1287static inline void
1288__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
1289{
1290 *((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
1291}
1292
1293/*
1294 * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
1295 * first block
1296 * Returns the dma address of the first RxD block
1297 */
1298u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
1299{
1300 struct vxge_hw_mempool_dma *dma_object;
1301
1302 dma_object = ring->mempool->memblocks_dma_arr;
1303 vxge_assert(dma_object != NULL);
1304
1305 return dma_object->addr;
1306}
1307
1308/*
1309 * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
1310 * This function returns the dma address of a given item
1311 */
1312static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
1313 void *item)
1314{
1315 u32 memblock_idx;
1316 void *memblock;
1317 struct vxge_hw_mempool_dma *memblock_dma_object;
1318 ptrdiff_t dma_item_offset;
1319
1320 /* get owner memblock index */
1321 memblock_idx = __vxge_hw_ring_block_memblock_idx(item);
1322
1323 /* get owner memblock by memblock index */
1324 memblock = mempoolh->memblocks_arr[memblock_idx];
1325
1326 /* get memblock DMA object by memblock index */
1327 memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;
1328
1329 /* calculate offset in the memblock of this item */
1330 dma_item_offset = (u8 *)item - (u8 *)memblock;
1331
1332 return memblock_dma_object->addr + dma_item_offset;
1333}
1334
1335/*
1336 * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
1337 * This function links the "from" RxD block to the "to" RxD block
1338 */
1339static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
1340 struct __vxge_hw_ring *ring, u32 from,
1341 u32 to)
1342{
1343	u8 *to_item, *from_item;
1344 dma_addr_t to_dma;
1345
1346 /* get "from" RxD block */
1347 from_item = mempoolh->items_arr[from];
1348 vxge_assert(from_item);
1349
1350 /* get "to" RxD block */
1351 to_item = mempoolh->items_arr[to];
1352 vxge_assert(to_item);
1353
1354 /* return address of the beginning of previous RxD block */
1355 to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);
1356
1357 /* set next pointer for this RxD block to point on
1358 * previous item's DMA start address */
1359 __vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
1360}
1361
1362/*
1363 * __vxge_hw_ring_mempool_item_alloc - Allocate list blocks for an RxD
1364 * block (mempool item-alloc callback)
1365 * This function is the callback passed to __vxge_hw_mempool_create to
1366 * initialize each RxD block of the memory pool
1367 */
1368static void
1369__vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
1370 u32 memblock_index,
1371 struct vxge_hw_mempool_dma *dma_object,
1372 u32 index, u32 is_last)
1373{
1374 u32 i;
1375 void *item = mempoolh->items_arr[index];
1376 struct __vxge_hw_ring *ring =
1377 (struct __vxge_hw_ring *)mempoolh->userdata;
1378
1379 /* format rxds array */
1380 for (i = 0; i < ring->rxds_per_block; i++) {
1381 void *rxdblock_priv;
1382 void *uld_priv;
1383 struct vxge_hw_ring_rxd_1 *rxdp;
1384
1385 u32 reserve_index = ring->channel.reserve_ptr -
1386 (index * ring->rxds_per_block + i + 1);
1387 u32 memblock_item_idx;
1388
1389 ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
1390 i * ring->rxd_size;
1391
1392 /* Note: memblock_item_idx is index of the item within
1393 * the memblock. For instance, in case of three RxD-blocks
1394 * per memblock this value can be 0, 1 or 2. */
1395 rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
1396 memblock_index, item,
1397 &memblock_item_idx);
1398
1399 rxdp = (struct vxge_hw_ring_rxd_1 *)
1400 ring->channel.reserve_arr[reserve_index];
1401
1402 uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);
1403
1404 /* pre-format Host_Control */
1405 rxdp->host_control = (u64)(size_t)uld_priv;
1406 }
1407
1408 __vxge_hw_ring_block_memblock_idx_set(item, memblock_index);
1409
1410 if (is_last) {
1411 /* link last one with first one */
1412 __vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
1413 }
1414
1415 if (index > 0) {
1416 /* link this RxD block with previous one */
1417 __vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
1418 }
1419
1420 return;
1421}
1422
1423/*
1424 * vxge_hw_ring_replenish - Replenish the RxDs
1425 * This function replenishes the RxDs from the reserve array to the work array
1426 */
1427enum vxge_hw_status
1428vxge_hw_ring_replenish(struct __vxge_hw_ring *ring, u16 min_flag)
1429{
1430 void *rxd;
1431 int i = 0;
1432 struct __vxge_hw_channel *channel;
1433 enum vxge_hw_status status = VXGE_HW_OK;
1434
1435 channel = &ring->channel;
1436
1437 while (vxge_hw_channel_dtr_count(channel) > 0) {
1438
1439 status = vxge_hw_ring_rxd_reserve(ring, &rxd);
1440
1441 vxge_assert(status == VXGE_HW_OK);
1442
1443 if (ring->rxd_init) {
1444 status = ring->rxd_init(rxd, channel->userdata);
1445 if (status != VXGE_HW_OK) {
1446 vxge_hw_ring_rxd_free(ring, rxd);
1447 goto exit;
1448 }
1449 }
1450
1451 vxge_hw_ring_rxd_post(ring, rxd);
1452 if (min_flag) {
1453 i++;
1454 if (i == VXGE_HW_RING_MIN_BUFF_ALLOCATION)
1455 break;
1456 }
1457 }
1458 status = VXGE_HW_OK;
1459exit:
1460 return status;
1461}
1462
1463/*
1464 * __vxge_hw_ring_create - Create a ring
1465 * This function allocates a ring channel, builds the RxD-block mempool
1466 * and performs the initial replenish.
1467 */
1468enum vxge_hw_status
1469__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
1470 struct vxge_hw_ring_attr *attr)
1471{
1472 enum vxge_hw_status status = VXGE_HW_OK;
1473 struct __vxge_hw_ring *ring;
1474 u32 ring_length;
1475 struct vxge_hw_ring_config *config;
1476 struct __vxge_hw_device *hldev;
1477 u32 vp_id;
1478 struct vxge_hw_mempool_cbs ring_mp_callback;
1479
1480 if ((vp == NULL) || (attr == NULL)) {
1481 status = VXGE_HW_FAIL;
1482 goto exit;
1483 }
1484
1485 hldev = vp->vpath->hldev;
1486 vp_id = vp->vpath->vp_id;
1487
1488 config = &hldev->config.vp_config[vp_id].ring;
1489
1490 ring_length = config->ring_blocks *
1491 vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
1492
1493 ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
1494 VXGE_HW_CHANNEL_TYPE_RING,
1495 ring_length,
1496 attr->per_rxd_space,
1497 attr->userdata);
1498
1499 if (ring == NULL) {
1500 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1501 goto exit;
1502 }
1503
1504 vp->vpath->ringh = ring;
1505 ring->vp_id = vp_id;
1506 ring->vp_reg = vp->vpath->vp_reg;
1507 ring->common_reg = hldev->common_reg;
1508 ring->stats = &vp->vpath->sw_stats->ring_stats;
1509 ring->config = config;
1510 ring->callback = attr->callback;
1511 ring->rxd_init = attr->rxd_init;
1512 ring->rxd_term = attr->rxd_term;
1513 ring->buffer_mode = config->buffer_mode;
1514 ring->rxds_limit = config->rxds_limit;
1515
1516 ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
1517 ring->rxd_priv_size =
1518 sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
1519 ring->per_rxd_space = attr->per_rxd_space;
1520
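	/* round the per-RxD private size up to a whole number of cache lines */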
1521 ring->rxd_priv_size =
1522 ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
1523 VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
1524
1525 /* how many RxDs can fit into one block. Depends on configured
1526 * buffer_mode. */
1527 ring->rxds_per_block =
1528 vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
1529
1530 /* calculate actual RxD block private size */
1531 ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
1532 ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
1533 ring->mempool = __vxge_hw_mempool_create(hldev,
1534 VXGE_HW_BLOCK_SIZE,
1535 VXGE_HW_BLOCK_SIZE,
1536 ring->rxdblock_priv_size,
1537 ring->config->ring_blocks,
1538 ring->config->ring_blocks,
1539 &ring_mp_callback,
1540 ring);
1541
1542 if (ring->mempool == NULL) {
1543 __vxge_hw_ring_delete(vp);
1544 return VXGE_HW_ERR_OUT_OF_MEMORY;
1545 }
1546
1547 status = __vxge_hw_channel_initialize(&ring->channel);
1548 if (status != VXGE_HW_OK) {
1549 __vxge_hw_ring_delete(vp);
1550 goto exit;
1551 }
1552
1553 /* Note:
1554 * Specifying rxd_init callback means two things:
1555 * 1) rxds need to be initialized by driver at channel-open time;
1556 * 2) rxds need to be posted at channel-open time
1557 * (that's what the initial_replenish() below does)
1558 * Currently we don't have a case when the 1) is done without the 2).
1559 */
1560 if (ring->rxd_init) {
1561 status = vxge_hw_ring_replenish(ring, 1);
1562 if (status != VXGE_HW_OK) {
1563 __vxge_hw_ring_delete(vp);
1564 goto exit;
1565 }
1566 }
1567
1568 /* initial replenish will increment the counter in its post() routine,
1569 * we have to reset it */
1570 ring->stats->common_stats.usage_cnt = 0;
1571exit:
1572 return status;
1573}
1574
1575/*
1576 * __vxge_hw_ring_abort - Abort outstanding RxDs
1577 * This function terminates the posted RxDs of the ring
1578 */
1579enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
1580{
1581 void *rxdh;
1582 struct __vxge_hw_channel *channel;
1583
1584 channel = &ring->channel;
1585
1586 for (;;) {
1587 vxge_hw_channel_dtr_try_complete(channel, &rxdh);
1588
1589 if (rxdh == NULL)
1590 break;
1591
1592 vxge_hw_channel_dtr_complete(channel);
1593
1594 if (ring->rxd_term)
1595 ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
1596 channel->userdata);
1597
1598 vxge_hw_channel_dtr_free(channel, rxdh);
1599 }
1600
1601 return VXGE_HW_OK;
1602}
1603
1604/*
1605 * __vxge_hw_ring_reset - Resets the ring
1606 * This function resets the ring during vpath reset operation
1607 */
1608enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
1609{
1610 enum vxge_hw_status status = VXGE_HW_OK;
1611 struct __vxge_hw_channel *channel;
1612
1613 channel = &ring->channel;
1614
1615 __vxge_hw_ring_abort(ring);
1616
1617 status = __vxge_hw_channel_reset(channel);
1618
1619 if (status != VXGE_HW_OK)
1620 goto exit;
1621
1622 if (ring->rxd_init) {
1623 status = vxge_hw_ring_replenish(ring, 1);
1624 if (status != VXGE_HW_OK)
1625 goto exit;
1626 }
1627exit:
1628 return status;
1629}
1630
1631/*
1632 * __vxge_hw_ring_delete - Removes the ring
1633 * This function frees up the memory pool and removes the ring
1634 */
1635enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
1636{
1637 struct __vxge_hw_ring *ring = vp->vpath->ringh;
1638
1639 __vxge_hw_ring_abort(ring);
1640
1641 if (ring->mempool)
1642 __vxge_hw_mempool_destroy(ring->mempool);
1643
1644 vp->vpath->ringh = NULL;
1645 __vxge_hw_channel_free(&ring->channel);
1646
1647 return VXGE_HW_OK;
1648}
1649
1650/*
1651 * __vxge_hw_mempool_grow
1652 * Grows the mempool by up to %num_allocate additional memblocks.
1653 */
1654enum vxge_hw_status
1655__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
1656 u32 *num_allocated)
1657{
1658 u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
1659 u32 n_items = mempool->items_per_memblock;
1660 u32 start_block_idx = mempool->memblocks_allocated;
1661 u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
1662 enum vxge_hw_status status = VXGE_HW_OK;
1663
1664 *num_allocated = 0;
1665
1666 if (end_block_idx > mempool->memblocks_max) {
1667 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1668 goto exit;
1669 }
1670
1671 for (i = start_block_idx; i < end_block_idx; i++) {
1672 u32 j;
1673 u32 is_last = ((end_block_idx - 1) == i);
1674 struct vxge_hw_mempool_dma *dma_object =
1675 mempool->memblocks_dma_arr + i;
1676 void *the_memblock;
1677
1678 /* allocate memblock's private part. Each DMA memblock
1679 * has a space allocated for item's private usage upon
1680 * mempool's user request. Each time mempool grows, it will
1681 * allocate new memblock and its private part at once.
1682 * This helps to minimize memory usage a lot. */
1683 mempool->memblocks_priv_arr[i] =
1684 vmalloc(mempool->items_priv_size * n_items);
1685 if (mempool->memblocks_priv_arr[i] == NULL) {
1686 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1687 goto exit;
1688 }
1689
1690 memset(mempool->memblocks_priv_arr[i], 0,
1691 mempool->items_priv_size * n_items);
1692
1693 /* allocate DMA-capable memblock */
1694 mempool->memblocks_arr[i] =
1695 __vxge_hw_blockpool_malloc(mempool->devh,
1696 mempool->memblock_size, dma_object);
1697 if (mempool->memblocks_arr[i] == NULL) {
1698 vfree(mempool->memblocks_priv_arr[i]);
1699 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1700 goto exit;
1701 }
1702
1703 (*num_allocated)++;
1704 mempool->memblocks_allocated++;
1705
1706 memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);
1707
1708 the_memblock = mempool->memblocks_arr[i];
1709
1710 /* fill the items hash array */
1711 for (j = 0; j < n_items; j++) {
1712 u32 index = i * n_items + j;
1713
1714 if (first_time && index >= mempool->items_initial)
1715 break;
1716
1717 mempool->items_arr[index] =
1718 ((char *)the_memblock + j*mempool->item_size);
1719
1720 /* let caller to do more job on each item */
1721 if (mempool->item_func_alloc != NULL)
1722 mempool->item_func_alloc(mempool, i,
1723 dma_object, index, is_last);
1724
1725 mempool->items_current = index + 1;
1726 }
1727
1728 if (first_time && mempool->items_current ==
1729 mempool->items_initial)
1730 break;
1731 }
1732exit:
1733 return status;
1734}
1735
1736/*
1737 * __vxge_hw_mempool_create
1738 * This function creates a memory pool object. The pool may grow but will
1739 * never shrink. It consists of a number of dynamically allocated blocks
1740 * large enough to hold %items_initial items. The memory is DMA-able, but
1741 * the client must map/unmap it before interoperating with the device.
1742 */
1743struct vxge_hw_mempool*
1744__vxge_hw_mempool_create(
1745 struct __vxge_hw_device *devh,
1746 u32 memblock_size,
1747 u32 item_size,
1748 u32 items_priv_size,
1749 u32 items_initial,
1750 u32 items_max,
1751 struct vxge_hw_mempool_cbs *mp_callback,
1752 void *userdata)
1753{
1754 enum vxge_hw_status status = VXGE_HW_OK;
1755 u32 memblocks_to_allocate;
1756 struct vxge_hw_mempool *mempool = NULL;
1757 u32 allocated;
1758
1759 if (memblock_size < item_size) {
1760 status = VXGE_HW_FAIL;
1761 goto exit;
1762 }
1763
1764 mempool = (struct vxge_hw_mempool *)
1765 vmalloc(sizeof(struct vxge_hw_mempool));
1766 if (mempool == NULL) {
1767 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1768 goto exit;
1769 }
1770 memset(mempool, 0, sizeof(struct vxge_hw_mempool));
1771
1772 mempool->devh = devh;
1773 mempool->memblock_size = memblock_size;
1774 mempool->items_max = items_max;
1775 mempool->items_initial = items_initial;
1776 mempool->item_size = item_size;
1777 mempool->items_priv_size = items_priv_size;
1778 mempool->item_func_alloc = mp_callback->item_func_alloc;
1779 mempool->userdata = userdata;
1780
1781 mempool->memblocks_allocated = 0;
1782
1783 mempool->items_per_memblock = memblock_size / item_size;
1784
1785 mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
1786 mempool->items_per_memblock;
1787
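	/*
	 * Worked example (illustrative numbers only): with a 4096 byte
	 * memblock, a 256 byte item and items_max = 100, this yields
	 * items_per_memblock = 16 and memblocks_max = (100 + 15) / 16 = 7.
	 */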
1788 /* allocate array of memblocks */
1789 mempool->memblocks_arr =
1790 (void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
1791 if (mempool->memblocks_arr == NULL) {
1792 __vxge_hw_mempool_destroy(mempool);
1793 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1794 mempool = NULL;
1795 goto exit;
1796 }
1797 memset(mempool->memblocks_arr, 0,
1798 sizeof(void *) * mempool->memblocks_max);
1799
1800 /* allocate array of private parts of items per memblocks */
1801 mempool->memblocks_priv_arr =
1802 (void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
1803 if (mempool->memblocks_priv_arr == NULL) {
1804 __vxge_hw_mempool_destroy(mempool);
1805 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1806 mempool = NULL;
1807 goto exit;
1808 }
1809 memset(mempool->memblocks_priv_arr, 0,
1810 sizeof(void *) * mempool->memblocks_max);
1811
1812 /* allocate array of memblocks DMA objects */
1813 mempool->memblocks_dma_arr = (struct vxge_hw_mempool_dma *)
1814 vmalloc(sizeof(struct vxge_hw_mempool_dma) *
1815 mempool->memblocks_max);
1816
1817 if (mempool->memblocks_dma_arr == NULL) {
1818 __vxge_hw_mempool_destroy(mempool);
1819 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1820 mempool = NULL;
1821 goto exit;
1822 }
1823 memset(mempool->memblocks_dma_arr, 0,
1824 sizeof(struct vxge_hw_mempool_dma) *
1825 mempool->memblocks_max);
1826
1827 /* allocate hash array of items */
1828 mempool->items_arr =
1829 (void **) vmalloc(sizeof(void *) * mempool->items_max);
1830 if (mempool->items_arr == NULL) {
1831 __vxge_hw_mempool_destroy(mempool);
1832 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1833 mempool = NULL;
1834 goto exit;
1835 }
1836 memset(mempool->items_arr, 0, sizeof(void *) * mempool->items_max);
1837
1838 /* calculate initial number of memblocks */
1839 memblocks_to_allocate = (mempool->items_initial +
1840 mempool->items_per_memblock - 1) /
1841 mempool->items_per_memblock;
1842
1843 /* pre-allocate the mempool */
1844 status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
1845 &allocated);
1846 if (status != VXGE_HW_OK) {
1847 __vxge_hw_mempool_destroy(mempool);
1848 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1849 mempool = NULL;
1850 goto exit;
1851 }
1852
1853exit:
1854 return mempool;
1855}
1856
1857/*
1858 * __vxge_hw_mempool_destroy - Free all memblocks and the pool itself
1859 */
1860void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
1861{
1862 u32 i, j;
1863 struct __vxge_hw_device *devh = mempool->devh;
1864
1865 for (i = 0; i < mempool->memblocks_allocated; i++) {
1866 struct vxge_hw_mempool_dma *dma_object;
1867
1868 vxge_assert(mempool->memblocks_arr[i]);
1869 vxge_assert(mempool->memblocks_dma_arr + i);
1870
1871 dma_object = mempool->memblocks_dma_arr + i;
1872
1873 for (j = 0; j < mempool->items_per_memblock; j++) {
1874 u32 index = i * mempool->items_per_memblock + j;
1875
1876 /* to skip last partially filled(if any) memblock */
1877 if (index >= mempool->items_current)
1878 break;
1879 }
1880
1881 vfree(mempool->memblocks_priv_arr[i]);
1882
1883 __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
1884 mempool->memblock_size, dma_object);
1885 }
1886
1887 if (mempool->items_arr)
1888 vfree(mempool->items_arr);
1889
1890 if (mempool->memblocks_dma_arr)
1891 vfree(mempool->memblocks_dma_arr);
1892
1893 if (mempool->memblocks_priv_arr)
1894 vfree(mempool->memblocks_priv_arr);
1895
1896 if (mempool->memblocks_arr)
1897 vfree(mempool->memblocks_arr);
1898
1899 vfree(mempool);
1900}
1901
1902/*
1903 * __vxge_hw_device_fifo_config_check - Check fifo configuration.
1904 * Check the fifo configuration
1905 */
1906enum vxge_hw_status
1907__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
1908{
1909 if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
1910 (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
1911 return VXGE_HW_BADCFG_FIFO_BLOCKS;
1912
1913 return VXGE_HW_OK;
1914}
1915
1916/*
1917 * __vxge_hw_device_vpath_config_check - Check vpath configuration.
1918 * Check the vpath configuration
1919 */
1920enum vxge_hw_status
1921__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
1922{
1923 enum vxge_hw_status status;
1924
1925 if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
1926 (vp_config->min_bandwidth >
1927 VXGE_HW_VPATH_BANDWIDTH_MAX))
1928 return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
1929
1930 status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
1931 if (status != VXGE_HW_OK)
1932 return status;
1933
1934 if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
1935 ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
1936 (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
1937 return VXGE_HW_BADCFG_VPATH_MTU;
1938
1939 if ((vp_config->rpa_strip_vlan_tag !=
1940 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
1941 (vp_config->rpa_strip_vlan_tag !=
1942 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
1943 (vp_config->rpa_strip_vlan_tag !=
1944 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
1945 return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
1946
1947 return VXGE_HW_OK;
1948}
1949
1950/*
1951 * __vxge_hw_device_config_check - Check device configuration.
1952 * Check the device configuration
1953 */
1954enum vxge_hw_status
1955__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
1956{
1957 u32 i;
1958 enum vxge_hw_status status;
1959
1960 if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
1961 (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
1962 (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
1963 (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
1964 return VXGE_HW_BADCFG_INTR_MODE;
1965
1966 if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
1967 (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
1968 return VXGE_HW_BADCFG_RTS_MAC_EN;
1969
1970 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1971 status = __vxge_hw_device_vpath_config_check(
1972 &new_config->vp_config[i]);
1973 if (status != VXGE_HW_OK)
1974 return status;
1975 }
1976
1977 return VXGE_HW_OK;
1978}
1979
1980/*
1981 * vxge_hw_device_config_default_get - Initialize device config with defaults.
1982 * Initialize Titan device config with default values.
1983 */
1984enum vxge_hw_status __devinit
1985vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
1986{
1987 u32 i;
1988
1989 device_config->dma_blockpool_initial =
1990 VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
1991 device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
1992 device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
1993 device_config->rth_en = VXGE_HW_RTH_DEFAULT;
1994 device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
1995 device_config->device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS;
1996 device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT;
1997
1998 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1999
2000 device_config->vp_config[i].vp_id = i;
2001
2002 device_config->vp_config[i].min_bandwidth =
2003 VXGE_HW_VPATH_BANDWIDTH_DEFAULT;
2004
2005 device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;
2006
2007 device_config->vp_config[i].ring.ring_blocks =
2008 VXGE_HW_DEF_RING_BLOCKS;
2009
2010 device_config->vp_config[i].ring.buffer_mode =
2011 VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;
2012
2013 device_config->vp_config[i].ring.scatter_mode =
2014 VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;
2015
2016 device_config->vp_config[i].ring.rxds_limit =
2017 VXGE_HW_DEF_RING_RXDS_LIMIT;
2018
2019 device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;
2020
2021 device_config->vp_config[i].fifo.fifo_blocks =
2022 VXGE_HW_MIN_FIFO_BLOCKS;
2023
2024 device_config->vp_config[i].fifo.max_frags =
2025 VXGE_HW_MAX_FIFO_FRAGS;
2026
2027 device_config->vp_config[i].fifo.memblock_size =
2028 VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;
2029
2030 device_config->vp_config[i].fifo.alignment_size =
2031 VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;
2032
2033 device_config->vp_config[i].fifo.intr =
2034 VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;
2035
2036 device_config->vp_config[i].fifo.no_snoop_bits =
2037 VXGE_HW_FIFO_NO_SNOOP_DEFAULT;
2038 device_config->vp_config[i].tti.intr_enable =
2039 VXGE_HW_TIM_INTR_DEFAULT;
2040
2041 device_config->vp_config[i].tti.btimer_val =
2042 VXGE_HW_USE_FLASH_DEFAULT;
2043
2044 device_config->vp_config[i].tti.timer_ac_en =
2045 VXGE_HW_USE_FLASH_DEFAULT;
2046
2047 device_config->vp_config[i].tti.timer_ci_en =
2048 VXGE_HW_USE_FLASH_DEFAULT;
2049
2050 device_config->vp_config[i].tti.timer_ri_en =
2051 VXGE_HW_USE_FLASH_DEFAULT;
2052
2053 device_config->vp_config[i].tti.rtimer_val =
2054 VXGE_HW_USE_FLASH_DEFAULT;
2055
2056 device_config->vp_config[i].tti.util_sel =
2057 VXGE_HW_USE_FLASH_DEFAULT;
2058
2059 device_config->vp_config[i].tti.ltimer_val =
2060 VXGE_HW_USE_FLASH_DEFAULT;
2061
2062 device_config->vp_config[i].tti.urange_a =
2063 VXGE_HW_USE_FLASH_DEFAULT;
2064
2065 device_config->vp_config[i].tti.uec_a =
2066 VXGE_HW_USE_FLASH_DEFAULT;
2067
2068 device_config->vp_config[i].tti.urange_b =
2069 VXGE_HW_USE_FLASH_DEFAULT;
2070
2071 device_config->vp_config[i].tti.uec_b =
2072 VXGE_HW_USE_FLASH_DEFAULT;
2073
2074 device_config->vp_config[i].tti.urange_c =
2075 VXGE_HW_USE_FLASH_DEFAULT;
2076
2077 device_config->vp_config[i].tti.uec_c =
2078 VXGE_HW_USE_FLASH_DEFAULT;
2079
2080 device_config->vp_config[i].tti.uec_d =
2081 VXGE_HW_USE_FLASH_DEFAULT;
2082
2083 device_config->vp_config[i].rti.intr_enable =
2084 VXGE_HW_TIM_INTR_DEFAULT;
2085
2086 device_config->vp_config[i].rti.btimer_val =
2087 VXGE_HW_USE_FLASH_DEFAULT;
2088
2089 device_config->vp_config[i].rti.timer_ac_en =
2090 VXGE_HW_USE_FLASH_DEFAULT;
2091
2092 device_config->vp_config[i].rti.timer_ci_en =
2093 VXGE_HW_USE_FLASH_DEFAULT;
2094
2095 device_config->vp_config[i].rti.timer_ri_en =
2096 VXGE_HW_USE_FLASH_DEFAULT;
2097
2098 device_config->vp_config[i].rti.rtimer_val =
2099 VXGE_HW_USE_FLASH_DEFAULT;
2100
2101 device_config->vp_config[i].rti.util_sel =
2102 VXGE_HW_USE_FLASH_DEFAULT;
2103
2104 device_config->vp_config[i].rti.ltimer_val =
2105 VXGE_HW_USE_FLASH_DEFAULT;
2106
2107 device_config->vp_config[i].rti.urange_a =
2108 VXGE_HW_USE_FLASH_DEFAULT;
2109
2110 device_config->vp_config[i].rti.uec_a =
2111 VXGE_HW_USE_FLASH_DEFAULT;
2112
2113 device_config->vp_config[i].rti.urange_b =
2114 VXGE_HW_USE_FLASH_DEFAULT;
2115
2116 device_config->vp_config[i].rti.uec_b =
2117 VXGE_HW_USE_FLASH_DEFAULT;
2118
2119 device_config->vp_config[i].rti.urange_c =
2120 VXGE_HW_USE_FLASH_DEFAULT;
2121
2122 device_config->vp_config[i].rti.uec_c =
2123 VXGE_HW_USE_FLASH_DEFAULT;
2124
2125 device_config->vp_config[i].rti.uec_d =
2126 VXGE_HW_USE_FLASH_DEFAULT;
2127
2128 device_config->vp_config[i].mtu =
2129 VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;
2130
2131 device_config->vp_config[i].rpa_strip_vlan_tag =
2132 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
2133 }
2134
2135 return VXGE_HW_OK;
2136}
2137
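/*
 * Illustrative sketch (not part of the driver): callers typically
 * fetch the defaults above and then override selected fields before
 * handing the structure to vxge_hw_device_initialize(). The MSI-X
 * mode and the 9000-byte MTU are example values, not recommendations.
 */
#if 0
static void my_tune_config(struct vxge_hw_device_config *cfg)
{
	vxge_hw_device_config_default_get(cfg);
	cfg->intr_mode = VXGE_HW_INTR_MODE_MSIX;
	cfg->vp_config[0].mtu = 9000;
}
#endif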
2138/*
2139 * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
2140 * Set the swapper bits appropriately for the legacy section.
2141 */
2142enum vxge_hw_status
2143__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
2144{
2145 u64 val64;
2146 enum vxge_hw_status status = VXGE_HW_OK;
2147
2148 val64 = readq(&legacy_reg->toc_swapper_fb);
2149
2150 wmb();
2151
2152 switch (val64) {
2153
2154 case VXGE_HW_SWAPPER_INITIAL_VALUE:
2155 return status;
2156
2157 case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
2158 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
2159 &legacy_reg->pifm_rd_swap_en);
2160 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
2161 &legacy_reg->pifm_rd_flip_en);
2162 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
2163 &legacy_reg->pifm_wr_swap_en);
2164 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
2165 &legacy_reg->pifm_wr_flip_en);
2166 break;
2167
2168 case VXGE_HW_SWAPPER_BYTE_SWAPPED:
2169 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
2170 &legacy_reg->pifm_rd_swap_en);
2171 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
2172 &legacy_reg->pifm_wr_swap_en);
2173 break;
2174
2175 case VXGE_HW_SWAPPER_BIT_FLIPPED:
2176 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
2177 &legacy_reg->pifm_rd_flip_en);
2178 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
2179 &legacy_reg->pifm_wr_flip_en);
2180 break;
2181 }
2182
2183 wmb();
2184
2185 val64 = readq(&legacy_reg->toc_swapper_fb);
2186
2187 if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
2188 status = VXGE_HW_ERR_SWAPPER_CTRL;
2189
2190 return status;
2191}
2192
2193/*
2194 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
2195 * Set the swapper bits appropriately for the vpath.
2196 */
2197enum vxge_hw_status
2198__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
2199{
2200#ifndef __BIG_ENDIAN
2201 u64 val64;
2202
2203 val64 = readq(&vpath_reg->vpath_general_cfg1);
2204 wmb();
2205 val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
2206 writeq(val64, &vpath_reg->vpath_general_cfg1);
2207 wmb();
2208#endif
2209 return VXGE_HW_OK;
2210}
2211
2212/*
2213 * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
2214 * Set the KDFC FIFO swapper bits appropriately for the vpath.
2215 */
2216enum vxge_hw_status
2217__vxge_hw_kdfc_swapper_set(
2218 struct vxge_hw_legacy_reg __iomem *legacy_reg,
2219 struct vxge_hw_vpath_reg __iomem *vpath_reg)
2220{
2221 u64 val64;
2222
2223 val64 = readq(&legacy_reg->pifm_wr_swap_en);
2224
2225 if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
2226 val64 = readq(&vpath_reg->kdfcctl_cfg0);
2227 wmb();
2228
2229 val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 |
2230 VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 |
2231 VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;
2232
2233 writeq(val64, &vpath_reg->kdfcctl_cfg0);
2234 wmb();
2235 }
2236
2237 return VXGE_HW_OK;
2238}
2239
2240/*
2241 * vxge_hw_mgmt_device_config - Retrieve device configuration.
2242 * Get device configuration. Permits to retrieve at run-time configuration
2243 * values that were used to initialize and configure the device.
2244 */
2245enum vxge_hw_status
2246vxge_hw_mgmt_device_config(struct __vxge_hw_device *hldev,
2247 struct vxge_hw_device_config *dev_config, int size)
2248{
2249
2250 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC))
2251 return VXGE_HW_ERR_INVALID_DEVICE;
2252
2253 if (size != sizeof(struct vxge_hw_device_config))
2254 return VXGE_HW_ERR_VERSION_CONFLICT;
2255
2256 memcpy(dev_config, &hldev->config,
2257 sizeof(struct vxge_hw_device_config));
2258
2259 return VXGE_HW_OK;
2260}
2261
2262/*
2263 * vxge_hw_mgmt_reg_read - Read Titan register.
2264 */
2265enum vxge_hw_status
2266vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
2267 enum vxge_hw_mgmt_reg_type type,
2268 u32 index, u32 offset, u64 *value)
2269{
2270 enum vxge_hw_status status = VXGE_HW_OK;
2271
2272 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
2273 status = VXGE_HW_ERR_INVALID_DEVICE;
2274 goto exit;
2275 }
2276
2277 switch (type) {
2278 case vxge_hw_mgmt_reg_type_legacy:
2279 if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
2280 status = VXGE_HW_ERR_INVALID_OFFSET;
2281 break;
2282 }
2283 *value = readq((void __iomem *)hldev->legacy_reg + offset);
2284 break;
2285 case vxge_hw_mgmt_reg_type_toc:
2286 if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
2287 status = VXGE_HW_ERR_INVALID_OFFSET;
2288 break;
2289 }
2290 *value = readq((void __iomem *)hldev->toc_reg + offset);
2291 break;
2292 case vxge_hw_mgmt_reg_type_common:
2293 if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
2294 status = VXGE_HW_ERR_INVALID_OFFSET;
2295 break;
2296 }
2297 *value = readq((void __iomem *)hldev->common_reg + offset);
2298 break;
2299 case vxge_hw_mgmt_reg_type_mrpcim:
2300 if (!(hldev->access_rights &
2301 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
2302 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2303 break;
2304 }
2305 if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
2306 status = VXGE_HW_ERR_INVALID_OFFSET;
2307 break;
2308 }
2309 *value = readq((void __iomem *)hldev->mrpcim_reg + offset);
2310 break;
2311 case vxge_hw_mgmt_reg_type_srpcim:
2312 if (!(hldev->access_rights &
2313 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
2314 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2315 break;
2316 }
2317 if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
2318 status = VXGE_HW_ERR_INVALID_INDEX;
2319 break;
2320 }
2321 if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
2322 status = VXGE_HW_ERR_INVALID_OFFSET;
2323 break;
2324 }
2325 *value = readq((void __iomem *)hldev->srpcim_reg[index] +
2326 offset);
2327 break;
2328 case vxge_hw_mgmt_reg_type_vpmgmt:
2329 if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
2330 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2331 status = VXGE_HW_ERR_INVALID_INDEX;
2332 break;
2333 }
2334 if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
2335 status = VXGE_HW_ERR_INVALID_OFFSET;
2336 break;
2337 }
2338 *value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
2339 offset);
2340 break;
2341	case vxge_hw_mgmt_reg_type_vpath:
2342		if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
2343		    (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2344			status = VXGE_HW_ERR_INVALID_INDEX;
2345			break;
2346		}
2351 if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
2352 status = VXGE_HW_ERR_INVALID_OFFSET;
2353 break;
2354 }
2355 *value = readq((void __iomem *)hldev->vpath_reg[index] +
2356 offset);
2357 break;
2358 default:
2359 status = VXGE_HW_ERR_INVALID_TYPE;
2360 break;
2361 }
2362
2363exit:
2364 return status;
2365}
2366
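/*
 * Illustrative sketch (not part of the driver): read the
 * vpath_assignments register through the management interface. The
 * offset is computed from the register-block layout rather than
 * hard-coded; the index argument is unused for the common space.
 */
#if 0
static u64 my_read_vpath_assignments(struct __vxge_hw_device *hldev)
{
	u64 val = 0;

	vxge_hw_mgmt_reg_read(hldev, vxge_hw_mgmt_reg_type_common, 0,
			offsetof(struct vxge_hw_common_reg,
				 vpath_assignments), &val);
	return val;
}
#endif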
2367/*
2368 * vxge_hw_mgmt_reg_write - Write Titan register.
2369 */
2370enum vxge_hw_status
2371vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
2372 enum vxge_hw_mgmt_reg_type type,
2373 u32 index, u32 offset, u64 value)
2374{
2375 enum vxge_hw_status status = VXGE_HW_OK;
2376
2377 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
2378 status = VXGE_HW_ERR_INVALID_DEVICE;
2379 goto exit;
2380 }
2381
2382 switch (type) {
2383 case vxge_hw_mgmt_reg_type_legacy:
2384 if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
2385 status = VXGE_HW_ERR_INVALID_OFFSET;
2386 break;
2387 }
2388 writeq(value, (void __iomem *)hldev->legacy_reg + offset);
2389 break;
2390 case vxge_hw_mgmt_reg_type_toc:
2391 if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
2392 status = VXGE_HW_ERR_INVALID_OFFSET;
2393 break;
2394 }
2395 writeq(value, (void __iomem *)hldev->toc_reg + offset);
2396 break;
2397 case vxge_hw_mgmt_reg_type_common:
2398 if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
2399 status = VXGE_HW_ERR_INVALID_OFFSET;
2400 break;
2401 }
2402 writeq(value, (void __iomem *)hldev->common_reg + offset);
2403 break;
2404 case vxge_hw_mgmt_reg_type_mrpcim:
2405 if (!(hldev->access_rights &
2406 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
2407 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2408 break;
2409 }
2410 if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
2411 status = VXGE_HW_ERR_INVALID_OFFSET;
2412 break;
2413 }
2414 writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
2415 break;
2416 case vxge_hw_mgmt_reg_type_srpcim:
2417 if (!(hldev->access_rights &
2418 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
2419 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2420 break;
2421 }
2422 if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
2423 status = VXGE_HW_ERR_INVALID_INDEX;
2424 break;
2425 }
2426 if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
2427 status = VXGE_HW_ERR_INVALID_OFFSET;
2428 break;
2429 }
2430 writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
2431 offset);
2432
2433 break;
2434 case vxge_hw_mgmt_reg_type_vpmgmt:
2435 if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
2436 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2437 status = VXGE_HW_ERR_INVALID_INDEX;
2438 break;
2439 }
2440 if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
2441 status = VXGE_HW_ERR_INVALID_OFFSET;
2442 break;
2443 }
2444 writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
2445 offset);
2446 break;
2447 case vxge_hw_mgmt_reg_type_vpath:
2448 if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES-1) ||
2449 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2450 status = VXGE_HW_ERR_INVALID_INDEX;
2451 break;
2452 }
2453 if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
2454 status = VXGE_HW_ERR_INVALID_OFFSET;
2455 break;
2456 }
2457 writeq(value, (void __iomem *)hldev->vpath_reg[index] +
2458 offset);
2459 break;
2460 default:
2461 status = VXGE_HW_ERR_INVALID_TYPE;
2462 break;
2463 }
2464exit:
2465 return status;
2466}
2467
2468/*
2469 * __vxge_hw_fifo_mempool_item_alloc - Allocate list blocks for a TxD
2470 * list (mempool item-alloc callback)
2471 * This function is the callback passed to __vxge_hw_mempool_create to
2472 * initialize each TxD list of the memory pool
2473 */
2474static void
2475__vxge_hw_fifo_mempool_item_alloc(
2476 struct vxge_hw_mempool *mempoolh,
2477 u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
2478 u32 index, u32 is_last)
2479{
2480 u32 memblock_item_idx;
2481 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
2482 struct vxge_hw_fifo_txd *txdp =
2483 (struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
2484 struct __vxge_hw_fifo *fifo =
2485 (struct __vxge_hw_fifo *)mempoolh->userdata;
2486 void *memblock = mempoolh->memblocks_arr[memblock_index];
2487
2488 vxge_assert(txdp);
2489
2490 txdp->host_control = (u64) (size_t)
2491 __vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
2492 &memblock_item_idx);
2493
2494 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
2495
2496 vxge_assert(txdl_priv);
2497
2498 fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;
2499
2500 /* pre-format HW's TxDL's private */
2501 txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
2502 txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
2503 txdl_priv->dma_handle = dma_object->handle;
2504 txdl_priv->memblock = memblock;
2505 txdl_priv->first_txdp = txdp;
2506 txdl_priv->next_txdl_priv = NULL;
2507 txdl_priv->alloc_frags = 0;
2508
2509 return;
2510}
2511
2512/*
2513 * __vxge_hw_fifo_create - Create a FIFO
2514 * This function creates FIFO and initializes it.
2515 */
2516enum vxge_hw_status
2517__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
2518 struct vxge_hw_fifo_attr *attr)
2519{
2520 enum vxge_hw_status status = VXGE_HW_OK;
2521 struct __vxge_hw_fifo *fifo;
2522 struct vxge_hw_fifo_config *config;
2523 u32 txdl_size, txdl_per_memblock;
2524 struct vxge_hw_mempool_cbs fifo_mp_callback;
2525 struct __vxge_hw_virtualpath *vpath;
2526
2527 if ((vp == NULL) || (attr == NULL)) {
2528 status = VXGE_HW_ERR_INVALID_HANDLE;
2529 goto exit;
2530 }
2531 vpath = vp->vpath;
2532 config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;
2533
2534 txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);
2535
2536 txdl_per_memblock = config->memblock_size / txdl_size;
2537
2538 fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
2539 VXGE_HW_CHANNEL_TYPE_FIFO,
2540 config->fifo_blocks * txdl_per_memblock,
2541 attr->per_txdl_space, attr->userdata);
2542
2543 if (fifo == NULL) {
2544 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2545 goto exit;
2546 }
2547
2548 vpath->fifoh = fifo;
2549 fifo->nofl_db = vpath->nofl_db;
2550
2551 fifo->vp_id = vpath->vp_id;
2552 fifo->vp_reg = vpath->vp_reg;
2553 fifo->stats = &vpath->sw_stats->fifo_stats;
2554
2555 fifo->config = config;
2556
2557 /* apply "interrupts per txdl" attribute */
2558 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
2559
2560 if (fifo->config->intr)
2561 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
2562
2563 fifo->no_snoop_bits = config->no_snoop_bits;
2564
2565 /*
2566 * FIFO memory management strategy:
2567 *
2568 * TxDL split into three independent parts:
2569 * - set of TxD's
2570 * - TxD HW private part
2571 * - driver private part
2572 *
2573	 * Adaptive memory allocation is used, i.e. memory is allocated on
2574	 * demand, with a size that fits into one memory block.
2575 * One memory block may contain more than one TxDL.
2576 *
2577 * During "reserve" operations more memory can be allocated on demand
2578 * for example due to FIFO full condition.
2579 *
2580 * Pool of memory memblocks never shrinks except in __vxge_hw_fifo_close
2581 * routine which will essentially stop the channel and free resources.
2582 */
2583
2584 /* TxDL common private size == TxDL private + driver private */
2585 fifo->priv_size =
2586 sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
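	/* align the per-TxDL private area to the cache line size */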
2587 fifo->priv_size = ((fifo->priv_size + VXGE_CACHE_LINE_SIZE - 1) /
2588 VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
2589
2590 fifo->per_txdl_space = attr->per_txdl_space;
2591
2592	/* store the TxDL size and the TxDL count per memblock */
2593 fifo->txdl_size = txdl_size;
2594 fifo->txdl_per_memblock = txdl_per_memblock;
2595
2596 fifo->txdl_term = attr->txdl_term;
2597 fifo->callback = attr->callback;
2598
2599 if (fifo->txdl_per_memblock == 0) {
2600 __vxge_hw_fifo_delete(vp);
2601 status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
2602 goto exit;
2603 }
2604
2605 fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
2606
2607 fifo->mempool =
2608 __vxge_hw_mempool_create(vpath->hldev,
2609 fifo->config->memblock_size,
2610 fifo->txdl_size,
2611 fifo->priv_size,
2612 (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
2613 (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
2614 &fifo_mp_callback,
2615 fifo);
2616
2617 if (fifo->mempool == NULL) {
2618 __vxge_hw_fifo_delete(vp);
2619 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2620 goto exit;
2621 }
2622
2623 status = __vxge_hw_channel_initialize(&fifo->channel);
2624 if (status != VXGE_HW_OK) {
2625 __vxge_hw_fifo_delete(vp);
2626 goto exit;
2627 }
2628
2629 vxge_assert(fifo->channel.reserve_ptr);
2630exit:
2631 return status;
2632}
2633
2634/*
2635 * __vxge_hw_fifo_abort - Abort outstanding TxDs
2636 * This function terminates the posted TxDs of the fifo
2637 */
2638enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
2639{
2640 void *txdlh;
2641
2642 for (;;) {
2643 vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
2644
2645 if (txdlh == NULL)
2646 break;
2647
2648 vxge_hw_channel_dtr_complete(&fifo->channel);
2649
2650 if (fifo->txdl_term) {
2651 fifo->txdl_term(txdlh,
2652 VXGE_HW_TXDL_STATE_POSTED,
2653 fifo->channel.userdata);
2654 }
2655
2656 vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
2657 }
2658
2659 return VXGE_HW_OK;
2660}
2661
2662/*
2663 * __vxge_hw_fifo_reset - Resets the fifo
2664 * This function resets the fifo during vpath reset operation
2665 */
2666enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
2667{
2668 enum vxge_hw_status status = VXGE_HW_OK;
2669
2670 __vxge_hw_fifo_abort(fifo);
2671 status = __vxge_hw_channel_reset(&fifo->channel);
2672
2673 return status;
2674}
2675
2676/*
2677 * __vxge_hw_fifo_delete - Removes the FIFO
2678 * This function frees up the memory pool and removes the FIFO
2679 */
2680enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
2681{
2682 struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
2683
2684 __vxge_hw_fifo_abort(fifo);
2685
2686 if (fifo->mempool)
2687 __vxge_hw_mempool_destroy(fifo->mempool);
2688
2689 vp->vpath->fifoh = NULL;
2690
2691 __vxge_hw_channel_free(&fifo->channel);
2692
2693 return VXGE_HW_OK;
2694}
2695
2696/*
2697 * __vxge_hw_vpath_pci_read - Read the content of given address
2698 * in pci config space.
2699 * Read from the vpath pci config space.
2700 */
2701enum vxge_hw_status
2702__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
2703 u32 phy_func_0, u32 offset, u32 *val)
2704{
2705 u64 val64;
2706 enum vxge_hw_status status = VXGE_HW_OK;
2707 struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2708
2709 val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);
2710
2711 if (phy_func_0)
2712 val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;
2713
2714 writeq(val64, &vp_reg->pci_config_access_cfg1);
2715 wmb();
2716 writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
2717 &vp_reg->pci_config_access_cfg2);
2718 wmb();
2719
2720 status = __vxge_hw_device_register_poll(
2721 &vp_reg->pci_config_access_cfg2,
2722 VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2723
2724 if (status != VXGE_HW_OK)
2725 goto exit;
2726
2727 val64 = readq(&vp_reg->pci_config_access_status);
2728
2729 if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
2730 status = VXGE_HW_FAIL;
2731 *val = 0;
2732 } else
2733 *val = (u32)vxge_bVALn(val64, 32, 32);
2734exit:
2735 return status;
2736}
2737
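/*
 * Illustrative sketch (not part of the driver): read the 32-bit word
 * at config-space offset 0 (vendor and device ID) of physical
 * function 0. Offset 0 holds the vendor ID in every PCI
 * configuration space.
 */
#if 0
static u32 my_read_pci_id(struct __vxge_hw_virtualpath *vpath)
{
	u32 id = 0;

	__vxge_hw_vpath_pci_read(vpath, 1, 0, &id);
	return id;
}
#endif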
2738/*
2739 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
2740 * Returns the function number of the vpath.
2741 */
2742u32
2743__vxge_hw_vpath_func_id_get(u32 vp_id,
2744 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
2745{
2746 u64 val64;
2747
2748 val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
2749
2750 return
2751 (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
2752}
2753
2754/*
2755 * __vxge_hw_read_rts_ds - Program RTS steering criteria
2756 */
2757static inline void
2758__vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
2759 u64 dta_struct_sel)
2760{
2761 writeq(0, &vpath_reg->rts_access_steer_ctrl);
2762 wmb();
2763 writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
2764 writeq(0, &vpath_reg->rts_access_steer_data1);
2765 wmb();
2766 return;
2767}
2768
2770/*
2771 * __vxge_hw_vpath_card_info_get - Get the serial numbers,
2772 * part number and product description.
2773 */
2774enum vxge_hw_status
2775__vxge_hw_vpath_card_info_get(
2776 u32 vp_id,
2777 struct vxge_hw_vpath_reg __iomem *vpath_reg,
2778 struct vxge_hw_device_hw_info *hw_info)
2779{
2780 u32 i, j;
2781 u64 val64;
2782 u64 data1 = 0ULL;
2783 u64 data2 = 0ULL;
2784 enum vxge_hw_status status = VXGE_HW_OK;
2785 u8 *serial_number = hw_info->serial_number;
2786 u8 *part_number = hw_info->part_number;
2787 u8 *product_desc = hw_info->product_desc;
2788
2789 __vxge_hw_read_rts_ds(vpath_reg,
2790 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER);
2791
2792 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2793 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2794 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2795 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2796 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2797 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2798
2799 status = __vxge_hw_pio_mem_write64(val64,
2800 &vpath_reg->rts_access_steer_ctrl,
2801 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2802 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2803
2804 if (status != VXGE_HW_OK)
2805 return status;
2806
2807 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2808
2809 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2810 data1 = readq(&vpath_reg->rts_access_steer_data0);
2811 ((u64 *)serial_number)[0] = be64_to_cpu(data1);
2812
2813 data2 = readq(&vpath_reg->rts_access_steer_data1);
2814 ((u64 *)serial_number)[1] = be64_to_cpu(data2);
2815 status = VXGE_HW_OK;
2816 } else
2817 *serial_number = 0;
2818
2819 __vxge_hw_read_rts_ds(vpath_reg,
2820 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER);
2821
2822 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2823 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2824 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2825 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2826 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2827 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2828
2829 status = __vxge_hw_pio_mem_write64(val64,
2830 &vpath_reg->rts_access_steer_ctrl,
2831 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2832 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2833
2834 if (status != VXGE_HW_OK)
2835 return status;
2836
2837 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2838
2839 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2840
2841 data1 = readq(&vpath_reg->rts_access_steer_data0);
2842 ((u64 *)part_number)[0] = be64_to_cpu(data1);
2843
2844 data2 = readq(&vpath_reg->rts_access_steer_data1);
2845 ((u64 *)part_number)[1] = be64_to_cpu(data2);
2846
2847 status = VXGE_HW_OK;
2848
2849 } else
2850 *part_number = 0;
2851
2852 j = 0;
2853
2854 for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
2855 i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
2856
2857 __vxge_hw_read_rts_ds(vpath_reg, i);
2858
2859 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2860 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2861 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2862 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2863 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2864 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2865
2866 status = __vxge_hw_pio_mem_write64(val64,
2867 &vpath_reg->rts_access_steer_ctrl,
2868 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2869 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2870
2871 if (status != VXGE_HW_OK)
2872 return status;
2873
2874 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2875
2876 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2877
2878 data1 = readq(&vpath_reg->rts_access_steer_data0);
2879 ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
2880
2881 data2 = readq(&vpath_reg->rts_access_steer_data1);
2882 ((u64 *)product_desc)[j++] = be64_to_cpu(data2);
2883
2884 status = VXGE_HW_OK;
2885 } else
2886 *product_desc = 0;
2887 }
2888
2889 return status;
2890}
2891
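/*
 * Note (derived from the reads above): the serial and part numbers are
 * each filled as two big-endian qwords (16 bytes) and the product
 * description as up to eight qwords (64 bytes), so the corresponding
 * buffers in struct vxge_hw_device_hw_info are assumed to be at least
 * that large.
 */
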
2892/*
 * __vxge_hw_vpath_fw_ver_get - Get the firmware version
 * Returns the firmware and flash versions along with their build dates
2895 */
2896enum vxge_hw_status
2897__vxge_hw_vpath_fw_ver_get(
2898 u32 vp_id,
2899 struct vxge_hw_vpath_reg __iomem *vpath_reg,
2900 struct vxge_hw_device_hw_info *hw_info)
2901{
2902 u64 val64;
2903 u64 data1 = 0ULL;
2904 u64 data2 = 0ULL;
2905 struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
2906 struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
2907 struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
2908 struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
2909 enum vxge_hw_status status = VXGE_HW_OK;
2910
2911 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2912 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) |
2913 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2914 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2915 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2916 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2917
2918 status = __vxge_hw_pio_mem_write64(val64,
2919 &vpath_reg->rts_access_steer_ctrl,
2920 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2921 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2922
2923 if (status != VXGE_HW_OK)
2924 goto exit;
2925
2926 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2927
2928 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2929
2930 data1 = readq(&vpath_reg->rts_access_steer_data0);
2931 data2 = readq(&vpath_reg->rts_access_steer_data1);
2932
2933 fw_date->day =
2934 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(
2935 data1);
2936 fw_date->month =
2937 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(
2938 data1);
2939 fw_date->year =
2940 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(
2941 data1);
2942
2943 snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
2944 fw_date->month, fw_date->day, fw_date->year);
2945
2946 fw_version->major =
2947 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1);
2948 fw_version->minor =
2949 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1);
2950 fw_version->build =
2951 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1);
2952
2953 snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
2954 fw_version->major, fw_version->minor, fw_version->build);
2955
2956 flash_date->day =
2957 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2);
2958 flash_date->month =
2959 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2);
2960 flash_date->year =
2961 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2);
2962
2963 snprintf(flash_date->date, VXGE_HW_FW_STRLEN,
2964 "%2.2d/%2.2d/%4.4d",
2965 flash_date->month, flash_date->day, flash_date->year);
2966
2967 flash_version->major =
2968 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2);
2969 flash_version->minor =
2970 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2);
2971 flash_version->build =
2972 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2);
2973
2974 snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
2975 flash_version->major, flash_version->minor,
2976 flash_version->build);
2977
2978 status = VXGE_HW_OK;
2979
2980 } else
2981 status = VXGE_HW_FAIL;
2982exit:
2983 return status;
2984}
2985
2986/*
 * __vxge_hw_vpath_pci_func_mode_get - Get the PCI function mode
 * Returns the PCI function mode
2989 */
2990u64
2991__vxge_hw_vpath_pci_func_mode_get(
2992 u32 vp_id,
2993 struct vxge_hw_vpath_reg __iomem *vpath_reg)
2994{
2995 u64 val64;
2996 u64 data1 = 0ULL;
2997 enum vxge_hw_status status = VXGE_HW_OK;
2998
2999 __vxge_hw_read_rts_ds(vpath_reg,
3000 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE);
3001
3002 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
3003 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
3004 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
3005 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
3006 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3007 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
3008
3009 status = __vxge_hw_pio_mem_write64(val64,
3010 &vpath_reg->rts_access_steer_ctrl,
3011 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3012 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3013
3014 if (status != VXGE_HW_OK)
3015 goto exit;
3016
3017 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
3018
3019 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
3020 data1 = readq(&vpath_reg->rts_access_steer_data0);
3021 status = VXGE_HW_OK;
3022 } else {
3023 data1 = 0;
3024 status = VXGE_HW_FAIL;
3025 }
3026exit:
3027 return data1;
3028}
3029
3030/**
3031 * vxge_hw_device_flick_link_led - Flick (blink) link LED.
3032 * @hldev: HW device.
 * @on_off: TRUE to turn flickering on, FALSE to turn it off
3034 *
3035 * Flicker the link LED.
3036 */
3037enum vxge_hw_status
3038vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev,
3039 u64 on_off)
3040{
3041 u64 val64;
3042 enum vxge_hw_status status = VXGE_HW_OK;
3043 struct vxge_hw_vpath_reg __iomem *vp_reg;
3044
3045 if (hldev == NULL) {
3046 status = VXGE_HW_ERR_INVALID_DEVICE;
3047 goto exit;
3048 }
3049
3050 vp_reg = hldev->vpath_reg[hldev->first_vp_id];
3051
3052 writeq(0, &vp_reg->rts_access_steer_ctrl);
3053 wmb();
3054 writeq(on_off, &vp_reg->rts_access_steer_data0);
3055 writeq(0, &vp_reg->rts_access_steer_data1);
3056 wmb();
3057
3058 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
3059 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL) |
3060 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
3061 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
3062 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3063 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
3064
3065 status = __vxge_hw_pio_mem_write64(val64,
3066 &vp_reg->rts_access_steer_ctrl,
3067 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3068 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3069exit:
3070 return status;
3071}
3072
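/*
 * Usage sketch (illustrative; "my_hldev" is a hypothetical device
 * handle): blink the link LED to locate a port, then restore it:
 *
 *	vxge_hw_device_flick_link_led(my_hldev, 1);
 *	(identify the port)
 *	vxge_hw_device_flick_link_led(my_hldev, 0);
 */
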
3073/*
3074 * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
3075 */
3076enum vxge_hw_status
3077__vxge_hw_vpath_rts_table_get(
3078 struct __vxge_hw_vpath_handle *vp,
3079 u32 action, u32 rts_table, u32 offset, u64 *data1, u64 *data2)
3080{
3081 u64 val64;
3082 struct __vxge_hw_virtualpath *vpath;
3083 struct vxge_hw_vpath_reg __iomem *vp_reg;
3084
3085 enum vxge_hw_status status = VXGE_HW_OK;
3086
3087 if (vp == NULL) {
3088 status = VXGE_HW_ERR_INVALID_HANDLE;
3089 goto exit;
3090 }
3091
3092 vpath = vp->vpath;
3093 vp_reg = vpath->vp_reg;
3094
3095 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
3096 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
3097 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3098 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
3099
3100 if ((rts_table ==
3101 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
3102 (rts_table ==
3103 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
3104 (rts_table ==
3105 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
3106 (rts_table ==
3107 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
3108 val64 = val64 | VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
3109 }
3110
3111 status = __vxge_hw_pio_mem_write64(val64,
3112 &vp_reg->rts_access_steer_ctrl,
3113 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3114 vpath->hldev->config.device_poll_millis);
3115
3116 if (status != VXGE_HW_OK)
3117 goto exit;
3118
3119 val64 = readq(&vp_reg->rts_access_steer_ctrl);
3120
3121 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
3122
3123 *data1 = readq(&vp_reg->rts_access_steer_data0);
3124
3125 if ((rts_table ==
3126 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
3127 (rts_table ==
3128 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
3129 *data2 = readq(&vp_reg->rts_access_steer_data1);
3130 }
3131 status = VXGE_HW_OK;
3132 } else
3133 status = VXGE_HW_FAIL;
3134exit:
3135 return status;
3136}
3137
3138/*
3139 * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
3140 */
3141enum vxge_hw_status
3142__vxge_hw_vpath_rts_table_set(
3143 struct __vxge_hw_vpath_handle *vp, u32 action, u32 rts_table,
3144 u32 offset, u64 data1, u64 data2)
3145{
3146 u64 val64;
3147 struct __vxge_hw_virtualpath *vpath;
3148 enum vxge_hw_status status = VXGE_HW_OK;
3149 struct vxge_hw_vpath_reg __iomem *vp_reg;
3150
3151 if (vp == NULL) {
3152 status = VXGE_HW_ERR_INVALID_HANDLE;
3153 goto exit;
3154 }
3155
3156 vpath = vp->vpath;
3157 vp_reg = vpath->vp_reg;
3158
3159 writeq(data1, &vp_reg->rts_access_steer_data0);
3160 wmb();
3161
3162 if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
3163 (rts_table ==
3164 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
3165 writeq(data2, &vp_reg->rts_access_steer_data1);
3166 wmb();
3167 }
3168
3169 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
3170 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
3171 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3172 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
3173
3174 status = __vxge_hw_pio_mem_write64(val64,
3175 &vp_reg->rts_access_steer_ctrl,
3176 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3177 vpath->hldev->config.device_poll_millis);
3178
3179 if (status != VXGE_HW_OK)
3180 goto exit;
3181
3182 val64 = readq(&vp_reg->rts_access_steer_ctrl);
3183
3184 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS)
3185 status = VXGE_HW_OK;
3186 else
3187 status = VXGE_HW_FAIL;
3188exit:
3189 return status;
3190}
3191
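/*
 * The get/set pair above is normally used as a read-modify-write
 * sequence. A minimal sketch (locals are hypothetical), mirroring what
 * vxge_hw_vpath_rts_rth_set() below does for the RTH_GEN_CFG entry:
 *
 *	u64 data0, data1;
 *
 *	__vxge_hw_vpath_rts_table_get(vp,
 *		VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
 *		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
 *		0, &data0, &data1);
 *	data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN;
 *	__vxge_hw_vpath_rts_table_set(vp,
 *		VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
 *		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
 *		0, data0, 0);
 */
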
3192/*
3193 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
3194 * from MAC address table.
3195 */
3196enum vxge_hw_status
3197__vxge_hw_vpath_addr_get(
3198 u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
3199 u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
3200{
3201 u32 i;
3202 u64 val64;
3203 u64 data1 = 0ULL;
3204 u64 data2 = 0ULL;
3205 enum vxge_hw_status status = VXGE_HW_OK;
3206
3207 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
3208 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY) |
3209 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
3210 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) |
3211 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3212 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
3213
3214 status = __vxge_hw_pio_mem_write64(val64,
3215 &vpath_reg->rts_access_steer_ctrl,
3216 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3217 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3218
3219 if (status != VXGE_HW_OK)
3220 goto exit;
3221
3222 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
3223
3224 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
3225
3226 data1 = readq(&vpath_reg->rts_access_steer_data0);
3227 data2 = readq(&vpath_reg->rts_access_steer_data1);
3228
3229 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
3230 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
3231 data2);
3232
3233 for (i = ETH_ALEN; i > 0; i--) {
3234 macaddr[i-1] = (u8)(data1 & 0xFF);
3235 data1 >>= 8;
3236
3237 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
3238 data2 >>= 8;
3239 }
3240 status = VXGE_HW_OK;
3241 } else
3242 status = VXGE_HW_FAIL;
3243exit:
3244 return status;
3245}
3246
3247/*
3248 * vxge_hw_vpath_rts_rth_set - Set/configure RTS hashing.
3249 */
3250enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
3251 struct __vxge_hw_vpath_handle *vp,
3252 enum vxge_hw_rth_algoritms algorithm,
3253 struct vxge_hw_rth_hash_types *hash_type,
3254 u16 bucket_size)
3255{
3256 u64 data0, data1;
3257 enum vxge_hw_status status = VXGE_HW_OK;
3258
3259 if (vp == NULL) {
3260 status = VXGE_HW_ERR_INVALID_HANDLE;
3261 goto exit;
3262 }
3263
3264 status = __vxge_hw_vpath_rts_table_get(vp,
3265 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
3266 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3267 0, &data0, &data1);
3268
3269 data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
3270 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
3271
3272 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
3273 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
3274 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);
3275
3276 if (hash_type->hash_type_tcpipv4_en)
3277 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;
3278
3279 if (hash_type->hash_type_ipv4_en)
3280 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;
3281
3282 if (hash_type->hash_type_tcpipv6_en)
3283 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;
3284
3285 if (hash_type->hash_type_ipv6_en)
3286 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;
3287
3288 if (hash_type->hash_type_tcpipv6ex_en)
3289 data0 |=
3290 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;
3291
3292 if (hash_type->hash_type_ipv6ex_en)
3293 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;
3294
3295 if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
3296 data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3297 else
3298 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3299
3300 status = __vxge_hw_vpath_rts_table_set(vp,
3301 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
3302 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3303 0, data0, 0);
3304exit:
3305 return status;
3306}
3307
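/*
 * Usage sketch (illustrative; locals are hypothetical and the Jenkins
 * enumerator name is assumed from enum vxge_hw_rth_algoritms): enable
 * RTH for IPv4/TCP traffic with a 2^8-entry indirection space:
 *
 *	struct vxge_hw_rth_hash_types ht = {0};
 *
 *	ht.hash_type_tcpipv4_en = 1;
 *	ht.hash_type_ipv4_en = 1;
 *	status = vxge_hw_vpath_rts_rth_set(vp, RTH_ALG_JENKINS, &ht, 8);
 *
 * The bucket_size argument is taken to be log2 of the table size, as
 * suggested by the 4-bit BUCKET_SIZE mask cleared above.
 */
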
3308static void
3309vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
3310 u16 flag, u8 *itable)
3311{
	switch (flag) {
	case 1:
		*data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
			itable[j]);
		break;
	case 2:
		*data0 |=
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
			itable[j]);
		break;
	case 3:
		*data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
			itable[j]);
		break;
	case 4:
		*data1 |=
			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
			itable[j]);
		break;
	default:
		break;
	}
}

3339/*
3340 * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
3341 */
3342enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
3343 struct __vxge_hw_vpath_handle **vpath_handles,
3344 u32 vpath_count,
3345 u8 *mtable,
3346 u8 *itable,
3347 u32 itable_size)
3348{
3349 u32 i, j, action, rts_table;
3350 u64 data0;
3351 u64 data1;
3352 u32 max_entries;
3353 enum vxge_hw_status status = VXGE_HW_OK;
3354 struct __vxge_hw_vpath_handle *vp = vpath_handles[0];
3355
3356 if (vp == NULL) {
3357 status = VXGE_HW_ERR_INVALID_HANDLE;
3358 goto exit;
3359 }
3360
3361 max_entries = (((u32)1) << itable_size);
3362
3363 if (vp->vpath->hldev->config.rth_it_type
3364 == VXGE_HW_RTH_IT_TYPE_SOLO_IT) {
3365 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3366 rts_table =
3367 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT;
3368
3369 for (j = 0; j < max_entries; j++) {
3370
3371 data1 = 0;
3372
3373 data0 =
3374 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3375 itable[j]);
3376
3377 status = __vxge_hw_vpath_rts_table_set(vpath_handles[0],
3378 action, rts_table, j, data0, data1);
3379
3380 if (status != VXGE_HW_OK)
3381 goto exit;
3382 }
3383
3384 for (j = 0; j < max_entries; j++) {
3385
3386 data1 = 0;
3387
3388 data0 =
3389 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN |
3390 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3391 itable[j]);
3392
3393 status = __vxge_hw_vpath_rts_table_set(
3394 vpath_handles[mtable[itable[j]]], action,
3395 rts_table, j, data0, data1);
3396
3397 if (status != VXGE_HW_OK)
3398 goto exit;
3399 }
3400 } else {
3401 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3402 rts_table =
3403 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
3404 for (i = 0; i < vpath_count; i++) {
3405
3406 for (j = 0; j < max_entries;) {
3407
3408 data0 = 0;
3409 data1 = 0;
3410
3411 while (j < max_entries) {
3412 if (mtable[itable[j]] != i) {
3413 j++;
3414 continue;
3415 }
3416 vxge_hw_rts_rth_data0_data1_get(j,
3417 &data0, &data1, 1, itable);
3418 j++;
3419 break;
3420 }
3421
3422 while (j < max_entries) {
3423 if (mtable[itable[j]] != i) {
3424 j++;
3425 continue;
3426 }
3427 vxge_hw_rts_rth_data0_data1_get(j,
3428 &data0, &data1, 2, itable);
3429 j++;
3430 break;
3431 }
3432
3433 while (j < max_entries) {
3434 if (mtable[itable[j]] != i) {
3435 j++;
3436 continue;
3437 }
3438 vxge_hw_rts_rth_data0_data1_get(j,
3439 &data0, &data1, 3, itable);
3440 j++;
3441 break;
3442 }
3443
3444 while (j < max_entries) {
3445 if (mtable[itable[j]] != i) {
3446 j++;
3447 continue;
3448 }
3449 vxge_hw_rts_rth_data0_data1_get(j,
3450 &data0, &data1, 4, itable);
3451 j++;
3452 break;
3453 }
3454
3455 if (data0 != 0) {
3456 status = __vxge_hw_vpath_rts_table_set(
3457 vpath_handles[i],
3458 action, rts_table,
3459 0, data0, data1);
3460
3461 if (status != VXGE_HW_OK)
3462 goto exit;
3463 }
3464 }
3465 }
3466 }
3467exit:
3468 return status;
3469}
3470
3471/**
3472 * vxge_hw_vpath_check_leak - Check for memory leak
 * @ring: Handle to the ring object used for receive
 *
 * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is greater than or equal to
 * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred.
3477 * Returns: VXGE_HW_FAIL, if leak has occurred.
3478 *
3479 */
3480enum vxge_hw_status
3481vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
3482{
3483 enum vxge_hw_status status = VXGE_HW_OK;
3484 u64 rxd_new_count, rxd_spat;
3485
3486 if (ring == NULL)
3487 return status;
3488
3489 rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
3490 rxd_spat = readq(&ring->vp_reg->prc_cfg6);
3491 rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);
3492
3493 if (rxd_new_count >= rxd_spat)
3494 status = VXGE_HW_FAIL;
3495
3496 return status;
3497}
3498
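/*
 * Usage sketch (an illustrative recovery policy, not mandated by the
 * hardware): a driver may poll this from its watchdog path and request
 * a vpath reset when a leak is detected:
 *
 *	if (vxge_hw_vpath_check_leak(ring) == VXGE_HW_FAIL)
 *		status = vxge_hw_vpath_reset(vp);
 */
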
3499/*
3500 * __vxge_hw_vpath_mgmt_read
3501 * This routine reads the vpath_mgmt registers
3502 */
3503static enum vxge_hw_status
3504__vxge_hw_vpath_mgmt_read(
3505 struct __vxge_hw_device *hldev,
3506 struct __vxge_hw_virtualpath *vpath)
3507{
3508 u32 i, mtu = 0, max_pyld = 0;
3509 u64 val64;
3510 enum vxge_hw_status status = VXGE_HW_OK;
3511
3512 for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
3513
3514 val64 = readq(&vpath->vpmgmt_reg->
3515 rxmac_cfg0_port_vpmgmt_clone[i]);
3516 max_pyld =
3517 (u32)
3518 VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
3519 (val64);
3520 if (mtu < max_pyld)
3521 mtu = max_pyld;
3522 }
3523
3524 vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;
3525
3526 val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);
3527
3528 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3529 if (val64 & vxge_mBIT(i))
3530 vpath->vsport_number = i;
3531 }
3532
3533 val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);
3534
3535 if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
3536 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
3537 else
3538 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
3539
3540 return status;
3541}
3542
3543/*
3544 * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
 * This routine checks the vpath_rst_in_prog register to see if the
 * adapter has completed the reset process for the vpath
3547 */
3548enum vxge_hw_status
3549__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
3550{
3551 enum vxge_hw_status status;
3552
3553 status = __vxge_hw_device_register_poll(
3554 &vpath->hldev->common_reg->vpath_rst_in_prog,
3555 VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
3556 1 << (16 - vpath->vp_id)),
3557 vpath->hldev->config.device_poll_millis);
3558
3559 return status;
3560}
3561
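/*
 * Note on the encoding polled above: vpath N is tracked by bit
 * (16 - N) of vpath_rst_in_prog, so vp_id 0 maps to bit 16 and
 * vp_id 16 would map to bit 0. The same encoding is used by
 * __vxge_hw_vpath_reset() below when requesting the reset.
 */
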
3562/*
3563 * __vxge_hw_vpath_reset
3564 * This routine resets the vpath on the device
3565 */
3566enum vxge_hw_status
3567__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
3568{
3569 u64 val64;
3570 enum vxge_hw_status status = VXGE_HW_OK;
3571
3572 val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
3573
3574 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
3575 &hldev->common_reg->cmn_rsthdlr_cfg0);
3576
3577 return status;
3578}
3579
3580/*
3581 * __vxge_hw_vpath_sw_reset
3582 * This routine resets the vpath structures
3583 */
3584enum vxge_hw_status
3585__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
3586{
3587 enum vxge_hw_status status = VXGE_HW_OK;
3588 struct __vxge_hw_virtualpath *vpath;
3589
	vpath = &hldev->virtual_paths[vp_id];
3591
3592 if (vpath->ringh) {
3593 status = __vxge_hw_ring_reset(vpath->ringh);
3594 if (status != VXGE_HW_OK)
3595 goto exit;
3596 }
3597
3598 if (vpath->fifoh)
3599 status = __vxge_hw_fifo_reset(vpath->fifoh);
3600exit:
3601 return status;
3602}
3603
3604/*
3605 * __vxge_hw_vpath_prc_configure
3606 * This routine configures the prc registers of virtual path using the config
3607 * passed
3608 */
3609void
3610__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3611{
3612 u64 val64;
3613 struct __vxge_hw_virtualpath *vpath;
3614 struct vxge_hw_vp_config *vp_config;
3615 struct vxge_hw_vpath_reg __iomem *vp_reg;
3616
3617 vpath = &hldev->virtual_paths[vp_id];
3618 vp_reg = vpath->vp_reg;
3619 vp_config = vpath->vp_config;
3620
3621 if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
3622 return;
3623
3624 val64 = readq(&vp_reg->prc_cfg1);
3625 val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
3626 writeq(val64, &vp_reg->prc_cfg1);
3627
3628 val64 = readq(&vpath->vp_reg->prc_cfg6);
3629 val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
3630 writeq(val64, &vpath->vp_reg->prc_cfg6);
3631
3632 val64 = readq(&vp_reg->prc_cfg7);
3633
3634 if (vpath->vp_config->ring.scatter_mode !=
3635 VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) {
3636
3637 val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);
3638
3639 switch (vpath->vp_config->ring.scatter_mode) {
3640 case VXGE_HW_RING_SCATTER_MODE_A:
3641 val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3642 VXGE_HW_PRC_CFG7_SCATTER_MODE_A);
3643 break;
3644 case VXGE_HW_RING_SCATTER_MODE_B:
3645 val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3646 VXGE_HW_PRC_CFG7_SCATTER_MODE_B);
3647 break;
3648 case VXGE_HW_RING_SCATTER_MODE_C:
3649 val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3650 VXGE_HW_PRC_CFG7_SCATTER_MODE_C);
3651 break;
3652 }
3653 }
3654
3655 writeq(val64, &vp_reg->prc_cfg7);
3656
3657 writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
3658 __vxge_hw_ring_first_block_address_get(
3659 vpath->ringh) >> 3), &vp_reg->prc_cfg5);
3660
3661 val64 = readq(&vp_reg->prc_cfg4);
3662 val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
3663 val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);
3664
3665 val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
3666 VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);
3667
3668 if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
3669 val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
3670 else
3671 val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;
3672
3673 writeq(val64, &vp_reg->prc_cfg4);
3675}
3676
3677/*
3678 * __vxge_hw_vpath_kdfc_configure
3679 * This routine configures the kdfc registers of virtual path using the
3680 * config passed
3681 */
3682enum vxge_hw_status
3683__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3684{
3685 u64 val64;
3686 u64 vpath_stride;
3687 enum vxge_hw_status status = VXGE_HW_OK;
3688 struct __vxge_hw_virtualpath *vpath;
3689 struct vxge_hw_vpath_reg __iomem *vp_reg;
3690
3691 vpath = &hldev->virtual_paths[vp_id];
3692 vp_reg = vpath->vp_reg;
3693 status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);
3694
3695 if (status != VXGE_HW_OK)
3696 goto exit;
3697
3698 val64 = readq(&vp_reg->kdfc_drbl_triplet_total);
3699
3700 vpath->max_kdfc_db =
3701 (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
3702 val64+1)/2;
3703
3704 if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
3705
3706 vpath->max_nofl_db = vpath->max_kdfc_db;
3707
3708 if (vpath->max_nofl_db <
3709 ((vpath->vp_config->fifo.memblock_size /
3710 (vpath->vp_config->fifo.max_frags *
3711 sizeof(struct vxge_hw_fifo_txd))) *
3712 vpath->vp_config->fifo.fifo_blocks)) {
3713
3714 return VXGE_HW_BADCFG_FIFO_BLOCKS;
3715 }
3716 val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
3717 (vpath->max_nofl_db*2)-1);
3718 }
3719
3720 writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);
3721
3722 writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
3723 &vp_reg->kdfc_fifo_trpl_ctrl);
3724
3725 val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);
3726
3727 val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
3728 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));
3729
3730 val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
3731 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
3732#ifndef __BIG_ENDIAN
3733 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
3734#endif
3735 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);
3736
3737 writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
3738 writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
3739 wmb();
3740 vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);
3741
3742 vpath->nofl_db =
3743 (struct __vxge_hw_non_offload_db_wrapper __iomem *)
3744 (hldev->kdfc + (vp_id *
3745 VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
3746 vpath_stride)));
3747exit:
3748 return status;
3749}
3750
3751/*
3752 * __vxge_hw_vpath_mac_configure
3753 * This routine configures the mac of virtual path using the config passed
3754 */
3755enum vxge_hw_status
3756__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3757{
3758 u64 val64;
3759 enum vxge_hw_status status = VXGE_HW_OK;
3760 struct __vxge_hw_virtualpath *vpath;
3761 struct vxge_hw_vp_config *vp_config;
3762 struct vxge_hw_vpath_reg __iomem *vp_reg;
3763
3764 vpath = &hldev->virtual_paths[vp_id];
3765 vp_reg = vpath->vp_reg;
3766 vp_config = vpath->vp_config;
3767
3768 writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
3769 vpath->vsport_number), &vp_reg->xmac_vsport_choice);
3770
3771 if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
3772
3773 val64 = readq(&vp_reg->xmac_rpa_vcfg);
3774
3775 if (vp_config->rpa_strip_vlan_tag !=
3776 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) {
3777 if (vp_config->rpa_strip_vlan_tag)
3778 val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
3779 else
3780 val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
3781 }
3782
3783 writeq(val64, &vp_reg->xmac_rpa_vcfg);
3784 val64 = readq(&vp_reg->rxmac_vcfg0);
3785
3786 if (vp_config->mtu !=
3787 VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) {
3788 val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
3789 if ((vp_config->mtu +
3790 VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
3791 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
3792 vp_config->mtu +
3793 VXGE_HW_MAC_HEADER_MAX_SIZE);
3794 else
3795 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
3796 vpath->max_mtu);
3797 }
3798
3799 writeq(val64, &vp_reg->rxmac_vcfg0);
3800
3801 val64 = readq(&vp_reg->rxmac_vcfg1);
3802
3803 val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
3804 VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);
3805
3806 if (hldev->config.rth_it_type ==
3807 VXGE_HW_RTH_IT_TYPE_MULTI_IT) {
3808 val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
3809 0x2) |
3810 VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE;
3811 }
3812
3813 writeq(val64, &vp_reg->rxmac_vcfg1);
3814 }
3815 return status;
3816}
3817
3818/*
3819 * __vxge_hw_vpath_tim_configure
3820 * This routine configures the tim registers of virtual path using the config
3821 * passed
3822 */
3823enum vxge_hw_status
3824__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3825{
3826 u64 val64;
3827 enum vxge_hw_status status = VXGE_HW_OK;
3828 struct __vxge_hw_virtualpath *vpath;
3829 struct vxge_hw_vpath_reg __iomem *vp_reg;
3830 struct vxge_hw_vp_config *config;
3831
3832 vpath = &hldev->virtual_paths[vp_id];
3833 vp_reg = vpath->vp_reg;
3834 config = vpath->vp_config;
3835
3836 writeq((u64)0, &vp_reg->tim_dest_addr);
3837 writeq((u64)0, &vp_reg->tim_vpath_map);
3838 writeq((u64)0, &vp_reg->tim_bitmap);
3839 writeq((u64)0, &vp_reg->tim_remap);
3840
3841 if (config->ring.enable == VXGE_HW_RING_ENABLE)
3842 writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
3843 (vp_id * VXGE_HW_MAX_INTR_PER_VP) +
3844 VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);
3845
3846 val64 = readq(&vp_reg->tim_pci_cfg);
3847 val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
3848 writeq(val64, &vp_reg->tim_pci_cfg);
3849
3850 if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
3851
3852 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3853
3854 if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3855 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3856 0x3ffffff);
3857 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3858 config->tti.btimer_val);
3859 }
3860
3861 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
3862
3863 if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
3864 if (config->tti.timer_ac_en)
3865 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3866 else
3867 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3868 }
3869
3870 if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
3871 if (config->tti.timer_ci_en)
3872 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3873 else
3874 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3875 }
3876
3877 if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
3878 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
3879 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
3880 config->tti.urange_a);
3881 }
3882
3883 if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
3884 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
3885 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
3886 config->tti.urange_b);
3887 }
3888
3889 if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
3890 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
3891 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
3892 config->tti.urange_c);
3893 }
3894
3895 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3896 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
3897
3898 if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
3899 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
3900 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
3901 config->tti.uec_a);
3902 }
3903
3904 if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
3905 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
3906 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
3907 config->tti.uec_b);
3908 }
3909
3910 if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
3911 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
3912 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
3913 config->tti.uec_c);
3914 }
3915
3916 if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
3917 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
3918 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
3919 config->tti.uec_d);
3920 }
3921
3922 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
3923 val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
3924
3925 if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
3926 if (config->tti.timer_ri_en)
3927 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3928 else
3929 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3930 }
3931
3932 if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3933 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3934 0x3ffffff);
3935 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3936 config->tti.rtimer_val);
3937 }
3938
3939 if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
3940 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
3941 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
3942 config->tti.util_sel);
3943 }
3944
3945 if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3946 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3947 0x3ffffff);
3948 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3949 config->tti.ltimer_val);
3950 }
3951
3952 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
3953 }
3954
3955 if (config->ring.enable == VXGE_HW_RING_ENABLE) {
3956
3957 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
3958
3959 if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3960 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3961 0x3ffffff);
3962 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3963 config->rti.btimer_val);
3964 }
3965
3966 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
3967
3968 if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
3969 if (config->rti.timer_ac_en)
3970 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3971 else
3972 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3973 }
3974
3975 if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
3976 if (config->rti.timer_ci_en)
3977 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3978 else
3979 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3980 }
3981
3982 if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
3983 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
3984 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
3985 config->rti.urange_a);
3986 }
3987
3988 if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
3989 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
3990 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
3991 config->rti.urange_b);
3992 }
3993
3994 if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
3995 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
3996 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
3997 config->rti.urange_c);
3998 }
3999
4000 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
4001 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
4002
4003 if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
4004 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
4005 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
4006 config->rti.uec_a);
4007 }
4008
4009 if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
4010 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
4011 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
4012 config->rti.uec_b);
4013 }
4014
4015 if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
4016 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
4017 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
4018 config->rti.uec_c);
4019 }
4020
4021 if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
4022 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
4023 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
4024 config->rti.uec_d);
4025 }
4026
4027 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
4028 val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
4029
4030 if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
4031 if (config->rti.timer_ri_en)
4032 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
4033 else
4034 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
4035 }
4036
4037 if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4038 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
4039 0x3ffffff);
4040 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
4041 config->rti.rtimer_val);
4042 }
4043
4044 if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
4045 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
4046 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
4047 config->rti.util_sel);
4048 }
4049
4050 if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4051 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
4052 0x3ffffff);
4053 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
4054 config->rti.ltimer_val);
4055 }
4056
4057 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
4058 }
4059
4060 val64 = 0;
4061 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4062 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4063 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4064 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4065 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4066 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4067
4068 return status;
4069}
4070
4071/*
4072 * __vxge_hw_vpath_initialize
4073 * This routine is the final phase of init which initializes the
4074 * registers of the vpath using the configuration passed.
4075 */
4076enum vxge_hw_status
4077__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
4078{
4079 u64 val64;
4080 u32 val32;
4081 enum vxge_hw_status status = VXGE_HW_OK;
4082 struct __vxge_hw_virtualpath *vpath;
4083 struct vxge_hw_vpath_reg __iomem *vp_reg;
4084
4085 vpath = &hldev->virtual_paths[vp_id];
4086
4087 if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
4088 status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
4089 goto exit;
4090 }
4091 vp_reg = vpath->vp_reg;
4092
4093 status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);
4094
4095 if (status != VXGE_HW_OK)
4096 goto exit;
4097
4098 status = __vxge_hw_vpath_mac_configure(hldev, vp_id);
4099
4100 if (status != VXGE_HW_OK)
4101 goto exit;
4102
4103 status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
4104
4105 if (status != VXGE_HW_OK)
4106 goto exit;
4107
4108 status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
4109
4110 if (status != VXGE_HW_OK)
4111 goto exit;
4112
4113 writeq(0, &vp_reg->gendma_int);
4114
4115 val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
4116
4117 /* Get MRRS value from device control */
4118 status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
4119
4120 if (status == VXGE_HW_OK) {
4121 val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
4122 val64 &=
4123 ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
4124 val64 |=
4125 VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);
4126
4127 val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
4128 }
4129
4130 val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
4131 val64 |=
4132 VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
4133 VXGE_HW_MAX_PAYLOAD_SIZE_512);
4134
4135 val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
4136 writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);
4137
4138exit:
4139 return status;
4140}
4141
4142/*
4143 * __vxge_hw_vp_initialize - Initialize Virtual Path structure
4144 * This routine is the initial phase of init which resets the vpath and
4145 * initializes the software support structures.
4146 */
4147enum vxge_hw_status
4148__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
4149 struct vxge_hw_vp_config *config)
4150{
4151 struct __vxge_hw_virtualpath *vpath;
4152 enum vxge_hw_status status = VXGE_HW_OK;
4153
4154 if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
4155 status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
4156 goto exit;
4157 }
4158
4159 vpath = &hldev->virtual_paths[vp_id];
4160
4161 vpath->vp_id = vp_id;
4162 vpath->vp_open = VXGE_HW_VP_OPEN;
4163 vpath->hldev = hldev;
4164 vpath->vp_config = config;
4165 vpath->vp_reg = hldev->vpath_reg[vp_id];
4166 vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
4167
4168 __vxge_hw_vpath_reset(hldev, vp_id);
4169
4170 status = __vxge_hw_vpath_reset_check(vpath);
4171
4172 if (status != VXGE_HW_OK) {
4173 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4174 goto exit;
4175 }
4176
4177 status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
4178
4179 if (status != VXGE_HW_OK) {
4180 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4181 goto exit;
4182 }
4183
4184 INIT_LIST_HEAD(&vpath->vpath_handles);
4185
4186 vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];
4187
4188 VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
4189 hldev->tim_int_mask1, vp_id);
4190
4191 status = __vxge_hw_vpath_initialize(hldev, vp_id);
4192
4193 if (status != VXGE_HW_OK)
4194 __vxge_hw_vp_terminate(hldev, vp_id);
4195exit:
4196 return status;
4197}
4198
4199/*
4200 * __vxge_hw_vp_terminate - Terminate Virtual Path structure
 * This routine closes all channels it opened and frees up memory
4202 */
4203void
4204__vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
4205{
4206 struct __vxge_hw_virtualpath *vpath;
4207
4208 vpath = &hldev->virtual_paths[vp_id];
4209
4210 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
4211 goto exit;
4212
4213 VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
4214 vpath->hldev->tim_int_mask1, vpath->vp_id);
4215 hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
4216
4217 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4218exit:
4219 return;
4220}
4221
4222/*
4223 * vxge_hw_vpath_mtu_set - Set MTU.
 * Set new MTU value. For example, to use jumbo frames:
4225 * vxge_hw_vpath_mtu_set(my_device, 9600);
4226 */
4227enum vxge_hw_status
4228vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
4229{
4230 u64 val64;
4231 enum vxge_hw_status status = VXGE_HW_OK;
4232 struct __vxge_hw_virtualpath *vpath;
4233
4234 if (vp == NULL) {
4235 status = VXGE_HW_ERR_INVALID_HANDLE;
4236 goto exit;
4237 }
4238 vpath = vp->vpath;
4239
4240 new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;
4241
	if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu)) {
		status = VXGE_HW_ERR_INVALID_MTU_SIZE;
		goto exit;
	}
4244
4245 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
4246
4247 val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
4248 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);
4249
4250 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
4251
4252 vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;
4253
4254exit:
4255 return status;
4256}
4257
4258/*
4259 * vxge_hw_vpath_open - Open a virtual path on a given adapter
 * This function is used to open access to a virtual path of an
 * adapter for offload and GRO operations. This function returns
4262 * synchronously.
4263 */
4264enum vxge_hw_status
4265vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
4266 struct vxge_hw_vpath_attr *attr,
4267 struct __vxge_hw_vpath_handle **vpath_handle)
4268{
4269 struct __vxge_hw_virtualpath *vpath;
4270 struct __vxge_hw_vpath_handle *vp;
4271 enum vxge_hw_status status;
4272
4273 vpath = &hldev->virtual_paths[attr->vp_id];
4274
4275 if (vpath->vp_open == VXGE_HW_VP_OPEN) {
4276 status = VXGE_HW_ERR_INVALID_STATE;
4277 goto vpath_open_exit1;
4278 }
4279
4280 status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
4281 &hldev->config.vp_config[attr->vp_id]);
4282
4283 if (status != VXGE_HW_OK)
4284 goto vpath_open_exit1;
4285
	vp = vmalloc(sizeof(struct __vxge_hw_vpath_handle));
4288 if (vp == NULL) {
4289 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4290 goto vpath_open_exit2;
4291 }
4292
4293 memset(vp, 0, sizeof(struct __vxge_hw_vpath_handle));
4294
4295 vp->vpath = vpath;
4296
4297 if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4298 status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
4299 if (status != VXGE_HW_OK)
4300 goto vpath_open_exit6;
4301 }
4302
4303 if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
4304 status = __vxge_hw_ring_create(vp, &attr->ring_attr);
4305 if (status != VXGE_HW_OK)
4306 goto vpath_open_exit7;
4307
4308 __vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
4309 }
4310
	if (vpath->fifoh)
		vpath->fifoh->tx_intr_num =
			(attr->vp_id * VXGE_HW_MAX_INTR_PER_VP) +
				VXGE_HW_VPATH_INTR_TX;
4314
4315 vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
4316 VXGE_HW_BLOCK_SIZE);
4317
4318 if (vpath->stats_block == NULL) {
4319 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4320 goto vpath_open_exit8;
4321 }
4322
4323 vpath->hw_stats = (struct vxge_hw_vpath_stats_hw_info *)vpath->
4324 stats_block->memblock;
4325 memset(vpath->hw_stats, 0,
4326 sizeof(struct vxge_hw_vpath_stats_hw_info));
4327
4328 hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
4329 vpath->hw_stats;
4330
4331 vpath->hw_stats_sav =
4332 &hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
4333 memset(vpath->hw_stats_sav, 0,
4334 sizeof(struct vxge_hw_vpath_stats_hw_info));
4335
4336 writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);
4337
4338 status = vxge_hw_vpath_stats_enable(vp);
4339 if (status != VXGE_HW_OK)
4340 goto vpath_open_exit8;
4341
4342 list_add(&vp->item, &vpath->vpath_handles);
4343
4344 hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);
4345
4346 *vpath_handle = vp;
4347
4348 attr->fifo_attr.userdata = vpath->fifoh;
4349 attr->ring_attr.userdata = vpath->ringh;
4350
4351 return VXGE_HW_OK;
4352
4353vpath_open_exit8:
4354 if (vpath->ringh != NULL)
4355 __vxge_hw_ring_delete(vp);
4356vpath_open_exit7:
4357 if (vpath->fifoh != NULL)
4358 __vxge_hw_fifo_delete(vp);
4359vpath_open_exit6:
4360 vfree(vp);
4361vpath_open_exit2:
4362 __vxge_hw_vp_terminate(hldev, attr->vp_id);
4363vpath_open_exit1:
4364
4365 return status;
4366}
4367
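/*
 * Usage sketch (illustrative; locals are hypothetical and the fifo/ring
 * callback fields are elided): open vpath 0 and bring it into service:
 *
 *	struct vxge_hw_vpath_attr attr;
 *	struct __vxge_hw_vpath_handle *vp;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.vp_id = 0;
 *	status = vxge_hw_vpath_open(hldev, &attr, &vp);
 *	if (status == VXGE_HW_OK)
 *		vxge_hw_vpath_enable(vp);
 */
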
4368/**
 * vxge_hw_vpath_rx_doorbell_init - Post the initial receive descriptor count
 * @vp: Handle got from previous vpath open
 *
 * This function posts the count of RxDs available in the receive
 * descriptor memory to the doorbell register and lowers the ring's
 * replenish threshold accordingly.
4375 */
4376void
4377vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
4378{
4379 struct __vxge_hw_virtualpath *vpath = NULL;
4380 u64 new_count, val64, val164;
4381 struct __vxge_hw_ring *ring;
4382
4383 vpath = vp->vpath;
4384 ring = vpath->ringh;
4385
4386 new_count = readq(&vpath->vp_reg->rxdmem_size);
4387 new_count &= 0x1fff;
4388 val164 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));
4389
4390 writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
4391 &vpath->vp_reg->prc_rxd_doorbell);
4392 readl(&vpath->vp_reg->prc_rxd_doorbell);
4393
4394 val164 /= 2;
4395 val64 = readq(&vpath->vp_reg->prc_cfg6);
4396 val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
4397 val64 &= 0x1ff;
4398
4399 /*
4400 * Each RxD is of 4 qwords
4401 */
4402 new_count -= (val64 + 1);
4403 val64 = min(val164, new_count) / 4;
4404
4405 ring->rxds_limit = min(ring->rxds_limit, val64);
4406 if (ring->rxds_limit < 4)
4407 ring->rxds_limit = 4;
4408}
4409
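/*
 * Worked example of the arithmetic above (illustrative numbers,
 * assuming the RXDMEM_SIZE macro is a plain field extraction): with
 * rxdmem_size reporting 4096 qwords and an RXD_SPAT of 255, the
 * doorbell posts 4096 new qwords, val164 halves to 2048, new_count
 * becomes 4096 - 256 = 3840, and since each RxD spans 4 qwords the
 * replenish limit is capped at min(2048, 3840) / 4 = 512 RxDs, with a
 * lower bound of 4.
 */
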
4410/*
 * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open
 * This function is used to close access to a virtual path opened
4413 * earlier.
4414 */
4415enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
4416{
4417 struct __vxge_hw_virtualpath *vpath = NULL;
4418 struct __vxge_hw_device *devh = NULL;
4419 u32 vp_id = vp->vpath->vp_id;
4420 u32 is_empty = TRUE;
4421 enum vxge_hw_status status = VXGE_HW_OK;
4422
4423 vpath = vp->vpath;
4424 devh = vpath->hldev;
4425
4426 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4427 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4428 goto vpath_close_exit;
4429 }
4430
4431 list_del(&vp->item);
4432
4433 if (!list_empty(&vpath->vpath_handles)) {
4434 list_add(&vp->item, &vpath->vpath_handles);
4435 is_empty = FALSE;
4436 }
4437
4438 if (!is_empty) {
4439 status = VXGE_HW_FAIL;
4440 goto vpath_close_exit;
4441 }
4442
4443 devh->vpaths_deployed &= ~vxge_mBIT(vp_id);
4444
4445 if (vpath->ringh != NULL)
4446 __vxge_hw_ring_delete(vp);
4447
4448 if (vpath->fifoh != NULL)
4449 __vxge_hw_fifo_delete(vp);
4450
4451 if (vpath->stats_block != NULL)
4452 __vxge_hw_blockpool_block_free(devh, vpath->stats_block);
4453
4454 vfree(vp);
4455
4456 __vxge_hw_vp_terminate(devh, vp_id);
4457
4458 vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
4459
4460vpath_close_exit:
4461 return status;
4462}
4463
4464/*
4465 * vxge_hw_vpath_reset - Resets vpath
 * This function is used to request a reset of the vpath
4467 */
4468enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
4469{
4470 enum vxge_hw_status status;
4471 u32 vp_id;
4472 struct __vxge_hw_virtualpath *vpath = vp->vpath;
4473
4474 vp_id = vpath->vp_id;
4475
4476 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4477 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4478 goto exit;
4479 }
4480
4481 status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
4482 if (status == VXGE_HW_OK)
4483 vpath->sw_stats->soft_reset_cnt++;
4484exit:
4485 return status;
4486}
4487
4488/*
4489 * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
 * This function polls for the vpath reset completion and re-initializes
4491 * the vpath.
4492 */
4493enum vxge_hw_status
4494vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
4495{
4496 struct __vxge_hw_virtualpath *vpath = NULL;
4497 enum vxge_hw_status status;
4498 struct __vxge_hw_device *hldev;
4499 u32 vp_id;
4500
4501 vp_id = vp->vpath->vp_id;
4502 vpath = vp->vpath;
4503 hldev = vpath->hldev;
4504
4505 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4506 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4507 goto exit;
4508 }
4509
4510 status = __vxge_hw_vpath_reset_check(vpath);
4511 if (status != VXGE_HW_OK)
4512 goto exit;
4513
4514 status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
4515 if (status != VXGE_HW_OK)
4516 goto exit;
4517
4518 status = __vxge_hw_vpath_initialize(hldev, vp_id);
4519 if (status != VXGE_HW_OK)
4520 goto exit;
4521
4522 if (vpath->ringh != NULL)
4523 __vxge_hw_vpath_prc_configure(hldev, vp_id);
4524
4525 memset(vpath->hw_stats, 0,
4526 sizeof(struct vxge_hw_vpath_stats_hw_info));
4527
4528 memset(vpath->hw_stats_sav, 0,
4529 sizeof(struct vxge_hw_vpath_stats_hw_info));
4530
4531 writeq(vpath->stats_block->dma_addr,
4532 &vpath->vp_reg->stats_cfg);
4533
4534 status = vxge_hw_vpath_stats_enable(vp);
4535
4536exit:
4537 return status;
4538}
4539
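/*
 * Usage sketch (illustrative): the expected sequence is a reset request
 * followed by this poll-and-rebuild helper:
 *
 *	status = vxge_hw_vpath_reset(vp);
 *	if (status == VXGE_HW_OK)
 *		status = vxge_hw_vpath_recover_from_reset(vp);
 *	if (status == VXGE_HW_OK)
 *		vxge_hw_vpath_enable(vp);
 */
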
4540/*
4541 * vxge_hw_vpath_enable - Enable vpath.
4542 * This routine clears the vpath reset thereby enabling a vpath
4543 * to start forwarding frames and generating interrupts.
4544 */
4545void
4546vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
4547{
4548 struct __vxge_hw_device *hldev;
4549 u64 val64;
4550
4551 hldev = vp->vpath->hldev;
4552
4553 val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
4554 1 << (16 - vp->vpath->vp_id));
4555
4556 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
4557 &hldev->common_reg->cmn_rsthdlr_cfg1);
4558}
4559
4560/*
 * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
 * Enable the DMA vpath statistics. The function is to be called to re-enable
 * the adapter to update stats into the host memory.
4564 */
4565enum vxge_hw_status
4566vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
4567{
4568 enum vxge_hw_status status = VXGE_HW_OK;
4569 struct __vxge_hw_virtualpath *vpath;
4570
4571 vpath = vp->vpath;
4572
4573 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4574 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4575 goto exit;
4576 }
4577
4578 memcpy(vpath->hw_stats_sav, vpath->hw_stats,
4579 sizeof(struct vxge_hw_vpath_stats_hw_info));
4580
4581 status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
4582exit:
4583 return status;
4584}
4585
4586/*
 * __vxge_hw_vpath_stats_access - Access the statistic at the given
 * location and offset and perform the requested operation
4589 */
4590enum vxge_hw_status
4591__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
4592 u32 operation, u32 offset, u64 *stat)
4593{
4594 u64 val64;
4595 enum vxge_hw_status status = VXGE_HW_OK;
4596 struct vxge_hw_vpath_reg __iomem *vp_reg;
4597
4598 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4599 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4600 goto vpath_stats_access_exit;
4601 }
4602
4603 vp_reg = vpath->vp_reg;
4604
4605 val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
4606 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
4607 VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
4608
4609 status = __vxge_hw_pio_mem_write64(val64,
4610 &vp_reg->xmac_stats_access_cmd,
4611 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
4612 vpath->hldev->config.device_poll_millis);
4613
4614 if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
4615 *stat = readq(&vp_reg->xmac_stats_access_data);
4616 else
4617 *stat = 0;
4618
4619vpath_stats_access_exit:
4620 return status;
4621}
4622
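/*
 * Usage sketch (illustrative; "stat" is a hypothetical local): the
 * xmac tx/rx helpers below read a whole stats block one 64-bit counter
 * at a time through this accessor:
 *
 *	u64 stat;
 *
 *	status = __vxge_hw_vpath_stats_access(vpath,
 *			VXGE_HW_STATS_OP_READ, offset, &stat);
 */
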
4623/*
4624 * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
4625 */
4626enum vxge_hw_status
4627__vxge_hw_vpath_xmac_tx_stats_get(
4628 struct __vxge_hw_virtualpath *vpath,
4629 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
4630{
4631 u64 *val64;
4632 int i;
4633 u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
4634 enum vxge_hw_status status = VXGE_HW_OK;
4635
4636 val64 = (u64 *) vpath_tx_stats;
4637
4638 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4639 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4640 goto exit;
4641 }
4642
4643 for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
4644 status = __vxge_hw_vpath_stats_access(vpath,
4645 VXGE_HW_STATS_OP_READ,
4646 offset, val64);
4647 if (status != VXGE_HW_OK)
4648 goto exit;
4649 offset++;
4650 val64++;
4651 }
4652exit:
4653 return status;
4654}
4655
4656/*
4657 * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
4658 */
4659enum vxge_hw_status
4660__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
4661 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
4662{
4663 u64 *val64;
4664 enum vxge_hw_status status = VXGE_HW_OK;
4665 int i;
4666 u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
4667 val64 = (u64 *) vpath_rx_stats;
4668
4669 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4670 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4671 goto exit;
4672 }
4673 for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
4674 status = __vxge_hw_vpath_stats_access(vpath,
4675 VXGE_HW_STATS_OP_READ,
4676 offset >> 3, val64);
4677 if (status != VXGE_HW_OK)
4678 goto exit;
4679
4680 offset += 8;
4681 val64++;
4682 }
4683exit:
4684 return status;
4685}
4686
4687/*
4688 * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
4689 */
4690enum vxge_hw_status __vxge_hw_vpath_stats_get(
4691 struct __vxge_hw_virtualpath *vpath,
4692 struct vxge_hw_vpath_stats_hw_info *hw_stats)
4693{
4694 u64 val64;
4695 enum vxge_hw_status status = VXGE_HW_OK;
4696 struct vxge_hw_vpath_reg __iomem *vp_reg;
4697
4698 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4699 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4700 goto exit;
4701 }
4702 vp_reg = vpath->vp_reg;
4703
4704 val64 = readq(&vp_reg->vpath_debug_stats0);
4705 hw_stats->ini_num_mwr_sent =
4706 (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
4707
4708 val64 = readq(&vp_reg->vpath_debug_stats1);
4709 hw_stats->ini_num_mrd_sent =
4710 (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
4711
4712 val64 = readq(&vp_reg->vpath_debug_stats2);
4713 hw_stats->ini_num_cpl_rcvd =
4714 (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
4715
4716 val64 = readq(&vp_reg->vpath_debug_stats3);
4717 hw_stats->ini_num_mwr_byte_sent =
4718 VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
4719
4720 val64 = readq(&vp_reg->vpath_debug_stats4);
4721 hw_stats->ini_num_cpl_byte_rcvd =
4722 VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
4723
4724 val64 = readq(&vp_reg->vpath_debug_stats5);
4725 hw_stats->wrcrdtarb_xoff =
4726 (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
4727
4728 val64 = readq(&vp_reg->vpath_debug_stats6);
4729 hw_stats->rdcrdtarb_xoff =
4730 (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
4731
4732 val64 = readq(&vp_reg->vpath_genstats_count01);
4733 hw_stats->vpath_genstats_count0 =
4734 (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
4735 val64);
4736
4737 val64 = readq(&vp_reg->vpath_genstats_count01);
4738 hw_stats->vpath_genstats_count1 =
4739 (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
4740 val64);
4741
4742 val64 = readq(&vp_reg->vpath_genstats_count23);
4743 hw_stats->vpath_genstats_count2 =
4744 (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
4745 val64);
4746
4747	val64 = readq(&vp_reg->vpath_genstats_count23);
4748 hw_stats->vpath_genstats_count3 =
4749 (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
4750 val64);
4751
4752 val64 = readq(&vp_reg->vpath_genstats_count4);
4753 hw_stats->vpath_genstats_count4 =
4754 (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
4755 val64);
4756
4757 val64 = readq(&vp_reg->vpath_genstats_count5);
4758 hw_stats->vpath_genstats_count5 =
4759 (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
4760 val64);
4761
4762 status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
4763 if (status != VXGE_HW_OK)
4764 goto exit;
4765
4766 status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
4767 if (status != VXGE_HW_OK)
4768 goto exit;
4769
4770 VXGE_HW_VPATH_STATS_PIO_READ(
4771 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
4772
4773 hw_stats->prog_event_vnum0 =
4774 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
4775
4776 hw_stats->prog_event_vnum1 =
4777 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
4778
4779 VXGE_HW_VPATH_STATS_PIO_READ(
4780 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
4781
4782 hw_stats->prog_event_vnum2 =
4783 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
4784
4785 hw_stats->prog_event_vnum3 =
4786 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
4787
4788 val64 = readq(&vp_reg->rx_multi_cast_stats);
4789 hw_stats->rx_multi_cast_frame_discard =
4790 (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
4791
4792 val64 = readq(&vp_reg->rx_frm_transferred);
4793 hw_stats->rx_frm_transferred =
4794 (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
4795
4796 val64 = readq(&vp_reg->rxd_returned);
4797 hw_stats->rxd_returned =
4798 (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
4799
4800 val64 = readq(&vp_reg->dbg_stats_rx_mpa);
4801 hw_stats->rx_mpa_len_fail_frms =
4802 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
4803 hw_stats->rx_mpa_mrk_fail_frms =
4804 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
4805 hw_stats->rx_mpa_crc_fail_frms =
4806 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
4807
4808 val64 = readq(&vp_reg->dbg_stats_rx_fau);
4809 hw_stats->rx_permitted_frms =
4810 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
4811 hw_stats->rx_vp_reset_discarded_frms =
4812 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
4813 hw_stats->rx_wol_frms =
4814 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
4815
4816 val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
4817 hw_stats->tx_vp_reset_discarded_frms =
4818 (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
4819 val64);
4820exit:
4821 return status;
4822}
4823
4824/*
4825 * __vxge_hw_blockpool_create - Create block pool
4826 */
4827
4828enum vxge_hw_status
4829__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
4830 struct __vxge_hw_blockpool *blockpool,
4831 u32 pool_size,
4832 u32 pool_max)
4833{
4834 u32 i;
4835 struct __vxge_hw_blockpool_entry *entry = NULL;
4836 void *memblock;
4837 dma_addr_t dma_addr;
4838 struct pci_dev *dma_handle;
4839 struct pci_dev *acc_handle;
4840 enum vxge_hw_status status = VXGE_HW_OK;
4841
4842 if (blockpool == NULL) {
4843 status = VXGE_HW_FAIL;
4844 goto blockpool_create_exit;
4845 }
4846
4847 blockpool->hldev = hldev;
4848 blockpool->block_size = VXGE_HW_BLOCK_SIZE;
4849 blockpool->pool_size = 0;
4850 blockpool->pool_max = pool_max;
4851 blockpool->req_out = 0;
4852
4853 INIT_LIST_HEAD(&blockpool->free_block_list);
4854 INIT_LIST_HEAD(&blockpool->free_entry_list);
4855
4856 for (i = 0; i < pool_size + pool_max; i++) {
4857 entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
4858 GFP_KERNEL);
4859 if (entry == NULL) {
4860 __vxge_hw_blockpool_destroy(blockpool);
4861 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4862 goto blockpool_create_exit;
4863 }
4864 list_add(&entry->item, &blockpool->free_entry_list);
4865 }
4866
4867 for (i = 0; i < pool_size; i++) {
4868
4869 memblock = vxge_os_dma_malloc(
4870 hldev->pdev,
4871 VXGE_HW_BLOCK_SIZE,
4872 &dma_handle,
4873 &acc_handle);
4874
4875 if (memblock == NULL) {
4876 __vxge_hw_blockpool_destroy(blockpool);
4877 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4878 goto blockpool_create_exit;
4879 }
4880
4881 dma_addr = pci_map_single(hldev->pdev, memblock,
4882 VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
4883
4884 if (unlikely(pci_dma_mapping_error(hldev->pdev,
4885 dma_addr))) {
4886
4887 vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
4888 __vxge_hw_blockpool_destroy(blockpool);
4889 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4890 goto blockpool_create_exit;
4891 }
4892
4893 if (!list_empty(&blockpool->free_entry_list))
4894 entry = (struct __vxge_hw_blockpool_entry *)
4895 list_first_entry(&blockpool->free_entry_list,
4896 struct __vxge_hw_blockpool_entry,
4897 item);
4898
4899 if (entry == NULL)
4900 entry =
4901 kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
4902 GFP_KERNEL);
4903 if (entry != NULL) {
4904 list_del(&entry->item);
4905 entry->length = VXGE_HW_BLOCK_SIZE;
4906 entry->memblock = memblock;
4907 entry->dma_addr = dma_addr;
4908 entry->acc_handle = acc_handle;
4909 entry->dma_handle = dma_handle;
4910 list_add(&entry->item,
4911 &blockpool->free_block_list);
4912 blockpool->pool_size++;
4913 } else {
4914 __vxge_hw_blockpool_destroy(blockpool);
4915 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4916 goto blockpool_create_exit;
4917 }
4918 }
4919
4920blockpool_create_exit:
4921 return status;
4922}
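/*
 * A hedged setup/teardown sketch, assuming a valid hldev. The pool
 * sizing fields mirror struct vxge_hw_device_config in vxge-config.h;
 * on failure the create routine has already torn the pool down. The
 * function name is hypothetical.
 */
static enum vxge_hw_status
vxge_example_pool_setup(struct __vxge_hw_device *hldev)
{
	enum vxge_hw_status status;

	status = __vxge_hw_blockpool_create(hldev,
			&hldev->block_pool,
			hldev->config.dma_blockpool_initial,
			hldev->config.dma_blockpool_max);
	if (status != VXGE_HW_OK)
		return status;

	/* ... pool is ready for __vxge_hw_blockpool_malloc() etc. ... */

	__vxge_hw_blockpool_destroy(&hldev->block_pool);
	return VXGE_HW_OK;
}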
4923
4924/*
4925 * __vxge_hw_blockpool_destroy - Deallocates the block pool
4926 */
4927
4928void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
4929{
4930
4931 struct __vxge_hw_device *hldev;
4932 struct list_head *p, *n;
4933 u16 ret;
4934
4935 if (blockpool == NULL) {
4936 ret = 1;
4937 goto exit;
4938 }
4939
4940 hldev = blockpool->hldev;
4941
4942 list_for_each_safe(p, n, &blockpool->free_block_list) {
4943
4944 pci_unmap_single(hldev->pdev,
4945 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
4946 ((struct __vxge_hw_blockpool_entry *)p)->length,
4947 PCI_DMA_BIDIRECTIONAL);
4948
4949 vxge_os_dma_free(hldev->pdev,
4950 ((struct __vxge_hw_blockpool_entry *)p)->memblock,
4951 &((struct __vxge_hw_blockpool_entry *) p)->acc_handle);
4952
4953 list_del(
4954 &((struct __vxge_hw_blockpool_entry *)p)->item);
4955 kfree(p);
4956 blockpool->pool_size--;
4957 }
4958
4959 list_for_each_safe(p, n, &blockpool->free_entry_list) {
4960 list_del(
4961 &((struct __vxge_hw_blockpool_entry *)p)->item);
4962 kfree((void *)p);
4963 }
4964 ret = 0;
4965exit:
4966 return;
4967}
4968
4969/*
4970 * __vxge_hw_blockpool_blocks_add - Request additional blocks
4971 */
4972static
4973void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
4974{
4975 u32 nreq = 0, i;
4976
4977 if ((blockpool->pool_size + blockpool->req_out) <
4978 VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
4979 nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
4980 blockpool->req_out += nreq;
4981 }
4982
4983 for (i = 0; i < nreq; i++)
4984 vxge_os_dma_malloc_async(
4985 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4986 blockpool->hldev, VXGE_HW_BLOCK_SIZE);
4987}
4988
4989/*
4990 * __vxge_hw_blockpool_blocks_remove - Free additional blocks
4991 */
4992static
4993void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
4994{
4995 struct list_head *p, *n;
4996
4997 list_for_each_safe(p, n, &blockpool->free_block_list) {
4998
4999 if (blockpool->pool_size < blockpool->pool_max)
5000 break;
5001
5002 pci_unmap_single(
5003 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
5004 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
5005 ((struct __vxge_hw_blockpool_entry *)p)->length,
5006 PCI_DMA_BIDIRECTIONAL);
5007
5008 vxge_os_dma_free(
5009 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
5010 ((struct __vxge_hw_blockpool_entry *)p)->memblock,
5011 &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
5012
5013 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
5014
5015 list_add(p, &blockpool->free_entry_list);
5016
5017 blockpool->pool_size--;
5018
5019 }
5020}
5021
5022/*
5023 * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
5024 * Adds a block to block pool
5025 */
5026void vxge_hw_blockpool_block_add(
5027 struct __vxge_hw_device *devh,
5028 void *block_addr,
5029 u32 length,
5030 struct pci_dev *dma_h,
5031 struct pci_dev *acc_handle)
5032{
5033 struct __vxge_hw_blockpool *blockpool;
5034 struct __vxge_hw_blockpool_entry *entry = NULL;
5035 dma_addr_t dma_addr;
5036 enum vxge_hw_status status = VXGE_HW_OK;
5037 u32 req_out;
5038
5039 blockpool = &devh->block_pool;
5040
5041 if (block_addr == NULL) {
5042 blockpool->req_out--;
5043 status = VXGE_HW_FAIL;
5044 goto exit;
5045 }
5046
5047 dma_addr = pci_map_single(devh->pdev, block_addr, length,
5048 PCI_DMA_BIDIRECTIONAL);
5049
5050 if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
5051
5052 vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
5053 blockpool->req_out--;
5054 status = VXGE_HW_FAIL;
5055 goto exit;
5056 }
5057
5058
5059 if (!list_empty(&blockpool->free_entry_list))
5060 entry = (struct __vxge_hw_blockpool_entry *)
5061 list_first_entry(&blockpool->free_entry_list,
5062 struct __vxge_hw_blockpool_entry,
5063 item);
5064
5065 if (entry == NULL)
5066 entry = (struct __vxge_hw_blockpool_entry *)
5067 vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
5068 else
5069 list_del(&entry->item);
5070
5071 if (entry != NULL) {
5072 entry->length = length;
5073 entry->memblock = block_addr;
5074 entry->dma_addr = dma_addr;
5075 entry->acc_handle = acc_handle;
5076 entry->dma_handle = dma_h;
5077 list_add(&entry->item, &blockpool->free_block_list);
5078 blockpool->pool_size++;
5079 status = VXGE_HW_OK;
5080 } else
5081 status = VXGE_HW_ERR_OUT_OF_MEMORY;
5082
5083 blockpool->req_out--;
5084
5085 req_out = blockpool->req_out;
5086exit:
5087 return;
5088}
5089
5090/*
5091 * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
5092 * Allocates a block of memory of given size, either from block pool
5093 * or by calling vxge_os_dma_malloc()
5094 */
5095void *
5096__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
5097 struct vxge_hw_mempool_dma *dma_object)
5098{
5099 struct __vxge_hw_blockpool_entry *entry = NULL;
5100 struct __vxge_hw_blockpool *blockpool;
5101 void *memblock = NULL;
5102 enum vxge_hw_status status = VXGE_HW_OK;
5103
5104 blockpool = &devh->block_pool;
5105
5106 if (size != blockpool->block_size) {
5107
5108 memblock = vxge_os_dma_malloc(devh->pdev, size,
5109 &dma_object->handle,
5110 &dma_object->acc_handle);
5111
5112 if (memblock == NULL) {
5113 status = VXGE_HW_ERR_OUT_OF_MEMORY;
5114 goto exit;
5115 }
5116
5117 dma_object->addr = pci_map_single(devh->pdev, memblock, size,
5118 PCI_DMA_BIDIRECTIONAL);
5119
5120 if (unlikely(pci_dma_mapping_error(devh->pdev,
5121 dma_object->addr))) {
5122 vxge_os_dma_free(devh->pdev, memblock,
5123 &dma_object->acc_handle);
5124 status = VXGE_HW_ERR_OUT_OF_MEMORY;
5125 goto exit;
5126 }
5127
5128 } else {
5129
5130 if (!list_empty(&blockpool->free_block_list))
5131 entry = (struct __vxge_hw_blockpool_entry *)
5132 list_first_entry(&blockpool->free_block_list,
5133 struct __vxge_hw_blockpool_entry,
5134 item);
5135
5136 if (entry != NULL) {
5137 list_del(&entry->item);
5138 dma_object->addr = entry->dma_addr;
5139 dma_object->handle = entry->dma_handle;
5140 dma_object->acc_handle = entry->acc_handle;
5141 memblock = entry->memblock;
5142
5143 list_add(&entry->item,
5144 &blockpool->free_entry_list);
5145 blockpool->pool_size--;
5146 }
5147
5148 if (memblock != NULL)
5149 __vxge_hw_blockpool_blocks_add(blockpool);
5150 }
5151exit:
5152 return memblock;
5153}
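/*
 * A minimal sketch (hypothetical helper) pairing the pool allocator
 * with its free routine; struct vxge_hw_mempool_dma receives the DMA
 * address and handles that __vxge_hw_blockpool_malloc() fills in.
 */
static void vxge_example_pool_alloc_free(struct __vxge_hw_device *devh)
{
	struct vxge_hw_mempool_dma dma_object;
	void *block;

	block = __vxge_hw_blockpool_malloc(devh, VXGE_HW_BLOCK_SIZE,
					   &dma_object);
	if (block == NULL)
		return;

	/* ... device DMA to/from dma_object.addr ... */

	__vxge_hw_blockpool_free(devh, block, VXGE_HW_BLOCK_SIZE,
				 &dma_object);
}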
5154
5155/*
5156 * __vxge_hw_blockpool_free - Frees the memory allocated with
5157 * __vxge_hw_blockpool_malloc
5158 */
5159void
5160__vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
5161 void *memblock, u32 size,
5162 struct vxge_hw_mempool_dma *dma_object)
5163{
5164 struct __vxge_hw_blockpool_entry *entry = NULL;
5165 struct __vxge_hw_blockpool *blockpool;
5166 enum vxge_hw_status status = VXGE_HW_OK;
5167
5168 blockpool = &devh->block_pool;
5169
5170 if (size != blockpool->block_size) {
5171 pci_unmap_single(devh->pdev, dma_object->addr, size,
5172 PCI_DMA_BIDIRECTIONAL);
5173 vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
5174 } else {
5175
5176 if (!list_empty(&blockpool->free_entry_list))
5177 entry = (struct __vxge_hw_blockpool_entry *)
5178 list_first_entry(&blockpool->free_entry_list,
5179 struct __vxge_hw_blockpool_entry,
5180 item);
5181
5182 if (entry == NULL)
5183 entry = (struct __vxge_hw_blockpool_entry *)
5184 vmalloc(sizeof(
5185 struct __vxge_hw_blockpool_entry));
5186 else
5187 list_del(&entry->item);
5188
5189 if (entry != NULL) {
5190 entry->length = size;
5191 entry->memblock = memblock;
5192 entry->dma_addr = dma_object->addr;
5193 entry->acc_handle = dma_object->acc_handle;
5194 entry->dma_handle = dma_object->handle;
5195 list_add(&entry->item,
5196 &blockpool->free_block_list);
5197 blockpool->pool_size++;
5198 status = VXGE_HW_OK;
5199 } else
5200 status = VXGE_HW_ERR_OUT_OF_MEMORY;
5201
5202 if (status == VXGE_HW_OK)
5203 __vxge_hw_blockpool_blocks_remove(blockpool);
5204 }
5205
5206 return;
5207}
5208
5209/*
5210 * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
5211 * This function allocates a block from block pool or from the system
5212 */
5213struct __vxge_hw_blockpool_entry *
5214__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
5215{
5216 struct __vxge_hw_blockpool_entry *entry = NULL;
5217 struct __vxge_hw_blockpool *blockpool;
5218
5219 blockpool = &devh->block_pool;
5220
5221 if (size == blockpool->block_size) {
5222
5223 if (!list_empty(&blockpool->free_block_list))
5224 entry = (struct __vxge_hw_blockpool_entry *)
5225 list_first_entry(&blockpool->free_block_list,
5226 struct __vxge_hw_blockpool_entry,
5227 item);
5228
5229 if (entry != NULL) {
5230 list_del(&entry->item);
5231 blockpool->pool_size--;
5232 }
5233 }
5234
5235 if (entry != NULL)
5236 __vxge_hw_blockpool_blocks_add(blockpool);
5237
5238 return entry;
5239}
5240
5241/*
5242 * __vxge_hw_blockpool_block_free - Frees a block from block pool
5243 * @devh: Hal device
5244 * @entry: Entry of block to be freed
5245 *
5246 * This function frees a block from block pool
5247 */
5248void
5249__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
5250 struct __vxge_hw_blockpool_entry *entry)
5251{
5252 struct __vxge_hw_blockpool *blockpool;
5253
5254 blockpool = &devh->block_pool;
5255
5256 if (entry->length == blockpool->block_size) {
5257 list_add(&entry->item, &blockpool->free_block_list);
5258 blockpool->pool_size++;
5259 }
5260
5261 __vxge_hw_blockpool_blocks_remove(blockpool);
5262
5263 return;
5264}
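/*
 * Sketch of the entry-based path. Entries returned by
 * __vxge_hw_blockpool_block_allocate() keep their DMA mapping and are
 * recycled with __vxge_hw_blockpool_block_free(); the function name
 * is hypothetical.
 */
static void vxge_example_entry_cycle(struct __vxge_hw_device *devh)
{
	struct __vxge_hw_blockpool_entry *entry;

	entry = __vxge_hw_blockpool_block_allocate(devh,
						   VXGE_HW_BLOCK_SIZE);
	if (entry == NULL)
		return;	/* pool empty, or size != block_size */

	/* ... program entry->dma_addr into the device ... */

	__vxge_hw_blockpool_block_free(devh, entry);
}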
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
new file mode 100644
index 000000000000..afbdf6f4d224
--- /dev/null
+++ b/drivers/net/vxge/vxge-config.h
@@ -0,0 +1,2259 @@
1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-config.h: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2009 Neterion Inc.
13 ******************************************************************************/
14#ifndef VXGE_CONFIG_H
15#define VXGE_CONFIG_H
16#include <linux/list.h>
17
18#ifndef VXGE_CACHE_LINE_SIZE
19#define VXGE_CACHE_LINE_SIZE 128
20#endif
21
22#define vxge_os_vaprintf(level, mask, fmt, ...) { \
23 char buff[255]; \
24 snprintf(buff, 255, fmt, __VA_ARGS__); \
25	printk("%s", buff); \
26 printk("\n"); \
27}
28
29#ifndef VXGE_ALIGN
30#define VXGE_ALIGN(adrs, size) \
31 (((size) - (((u64)adrs) & ((size)-1))) & ((size)-1))
32#endif
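/*
 * VXGE_ALIGN() yields the number of pad bytes needed to round "adrs"
 * up to the next "size" boundary (size must be a power of two), e.g.
 * VXGE_ALIGN(0x1003, 8) == 5 and VXGE_ALIGN(0x1000, 8) == 0.
 */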
33
34#define VXGE_HW_MIN_MTU 68
35#define VXGE_HW_MAX_MTU 9600
36#define VXGE_HW_DEFAULT_MTU 1500
37
38#ifdef VXGE_DEBUG_ASSERT
39
40/**
41 * vxge_assert
42 * @test: C-condition to check
43 *
44 * This macro implements a traditional assert.
45 * Assertions are enabled by default.
46 * They can be disabled by leaving the
47 * VXGE_DEBUG_ASSERT macro undefined at
48 * compile time.
49 */
50#define vxge_assert(test) { \
51 if (!(test)) \
52 vxge_os_bug("bad cond: "#test" at %s:%d\n", \
53 __FILE__, __LINE__); }
54#else
55#define vxge_assert(test)
56#endif /* end of VXGE_DEBUG_ASSERT */
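/*
 * Typical use: with VXGE_DEBUG_ASSERT defined, a failed condition is
 * reported through vxge_os_bug() with file and line, e.g.
 *
 *	vxge_assert(vpath != NULL);
 */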
57
58/**
59 * enum vxge_debug_level
60 * @VXGE_NONE: debug disabled
61 * @VXGE_ERR: all errors will be logged
62 * @VXGE_TRACE: all errors plus all kinds of verbose tracing printouts
63 * will be logged. Very noisy.
64 *
65 * This enumeration is used to switch between different
66 * debug levels at runtime if the DEBUG macro is defined at
67 * compile time. If the DEBUG macro is not defined, the code is
68 * compiled out.
69 */
70enum vxge_debug_level {
71 VXGE_NONE = 0,
72 VXGE_TRACE = 1,
73 VXGE_ERR = 2
74};
75
76#define NULL_VPID 0xFFFFFFFF
77#ifdef CONFIG_VXGE_DEBUG_TRACE_ALL
78#define VXGE_DEBUG_MODULE_MASK 0xffffffff
79#define VXGE_DEBUG_TRACE_MASK 0xffffffff
80#define VXGE_DEBUG_ERR_MASK 0xffffffff
81#define VXGE_DEBUG_MASK 0x000001ff
82#else
83#define VXGE_DEBUG_MODULE_MASK 0x20000000
84#define VXGE_DEBUG_TRACE_MASK 0x20000000
85#define VXGE_DEBUG_ERR_MASK 0x20000000
86#define VXGE_DEBUG_MASK 0x00000001
87#endif
88
89/*
90 * @VXGE_COMPONENT_LL: do debug for vxge link layer module
91 * @VXGE_COMPONENT_ALL: activate debug for all modules with no exceptions
92 *
93 * These values are used to distinguish modules
94 * or libraries at compile time and at runtime. The Makefile must
95 * declare the VXGE_DEBUG_MODULE_MASK macro and set it to a proper value.
96 */
97#define VXGE_COMPONENT_LL 0x20000000
98#define VXGE_COMPONENT_ALL 0xffffffff
99
100#define VXGE_HW_BASE_INF 100
101#define VXGE_HW_BASE_ERR 200
102#define VXGE_HW_BASE_BADCFG 300
103
104enum vxge_hw_status {
105 VXGE_HW_OK = 0,
106 VXGE_HW_FAIL = 1,
107 VXGE_HW_PENDING = 2,
108 VXGE_HW_COMPLETIONS_REMAIN = 3,
109
110 VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS = VXGE_HW_BASE_INF + 1,
111 VXGE_HW_INF_OUT_OF_DESCRIPTORS = VXGE_HW_BASE_INF + 2,
112
113 VXGE_HW_ERR_INVALID_HANDLE = VXGE_HW_BASE_ERR + 1,
114 VXGE_HW_ERR_OUT_OF_MEMORY = VXGE_HW_BASE_ERR + 2,
115 VXGE_HW_ERR_VPATH_NOT_AVAILABLE = VXGE_HW_BASE_ERR + 3,
116 VXGE_HW_ERR_VPATH_NOT_OPEN = VXGE_HW_BASE_ERR + 4,
117 VXGE_HW_ERR_WRONG_IRQ = VXGE_HW_BASE_ERR + 5,
118 VXGE_HW_ERR_SWAPPER_CTRL = VXGE_HW_BASE_ERR + 6,
119 VXGE_HW_ERR_INVALID_MTU_SIZE = VXGE_HW_BASE_ERR + 7,
120 VXGE_HW_ERR_INVALID_INDEX = VXGE_HW_BASE_ERR + 8,
121 VXGE_HW_ERR_INVALID_TYPE = VXGE_HW_BASE_ERR + 9,
122 VXGE_HW_ERR_INVALID_OFFSET = VXGE_HW_BASE_ERR + 10,
123 VXGE_HW_ERR_INVALID_DEVICE = VXGE_HW_BASE_ERR + 11,
124 VXGE_HW_ERR_VERSION_CONFLICT = VXGE_HW_BASE_ERR + 12,
125 VXGE_HW_ERR_INVALID_PCI_INFO = VXGE_HW_BASE_ERR + 13,
126 VXGE_HW_ERR_INVALID_TCODE = VXGE_HW_BASE_ERR + 14,
127 VXGE_HW_ERR_INVALID_BLOCK_SIZE = VXGE_HW_BASE_ERR + 15,
128 VXGE_HW_ERR_INVALID_STATE = VXGE_HW_BASE_ERR + 16,
129 VXGE_HW_ERR_PRIVILAGED_OPEARATION = VXGE_HW_BASE_ERR + 17,
130 VXGE_HW_ERR_INVALID_PORT = VXGE_HW_BASE_ERR + 18,
131 VXGE_HW_ERR_FIFO = VXGE_HW_BASE_ERR + 19,
132 VXGE_HW_ERR_VPATH = VXGE_HW_BASE_ERR + 20,
133 VXGE_HW_ERR_CRITICAL = VXGE_HW_BASE_ERR + 21,
134 VXGE_HW_ERR_SLOT_FREEZE = VXGE_HW_BASE_ERR + 22,
135
136 VXGE_HW_BADCFG_RING_INDICATE_MAX_PKTS = VXGE_HW_BASE_BADCFG + 1,
137 VXGE_HW_BADCFG_FIFO_BLOCKS = VXGE_HW_BASE_BADCFG + 2,
138 VXGE_HW_BADCFG_VPATH_MTU = VXGE_HW_BASE_BADCFG + 3,
139 VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG = VXGE_HW_BASE_BADCFG + 4,
140 VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH = VXGE_HW_BASE_BADCFG + 5,
141 VXGE_HW_BADCFG_INTR_MODE = VXGE_HW_BASE_BADCFG + 6,
142 VXGE_HW_BADCFG_RTS_MAC_EN = VXGE_HW_BASE_BADCFG + 7,
143
144 VXGE_HW_EOF_TRACE_BUF = -1
145};
146
147/**
148 * enum vxge_hw_device_link_state - Link state enumeration.
149 * @VXGE_HW_LINK_NONE: Invalid link state.
150 * @VXGE_HW_LINK_DOWN: Link is down.
151 * @VXGE_HW_LINK_UP: Link is up.
152 *
153 */
154enum vxge_hw_device_link_state {
155 VXGE_HW_LINK_NONE,
156 VXGE_HW_LINK_DOWN,
157 VXGE_HW_LINK_UP
158};
159
160/**
161 * struct vxge_hw_device_date - Date Format
162 * @day: Day
163 * @month: Month
164 * @year: Year
165 * @date: Date in string format
166 *
167 * Structure for returning date
168 */
169
170#define VXGE_HW_FW_STRLEN 32
171struct vxge_hw_device_date {
172 u32 day;
173 u32 month;
174 u32 year;
175 char date[VXGE_HW_FW_STRLEN];
176};
177
178struct vxge_hw_device_version {
179 u32 major;
180 u32 minor;
181 u32 build;
182 char version[VXGE_HW_FW_STRLEN];
183};
184
185u64
186__vxge_hw_vpath_pci_func_mode_get(
187 u32 vp_id,
188 struct vxge_hw_vpath_reg __iomem *vpath_reg);
189
190/**
191 * struct vxge_hw_fifo_config - Configuration of fifo.
192 * @enable: Is this fifo to be commissioned
193 * @fifo_blocks: Numbers of TxDL (that is, lists of Tx descriptors)
194 * blocks per queue.
195 * @max_frags: Max number of Tx buffers per TxDL (that is, per single
196 * transmit operation).
197 * No more than 256 transmit buffers can be specified.
198 * @memblock_size: Fifo descriptors are allocated in blocks of @memblock_size
199 * bytes. Setting @memblock_size to page size ensures
200 * by-page allocation of descriptors. 128K bytes is the
201 * maximum supported block size.
202 * @alignment_size: per Tx fragment DMA-able memory used to align transmit data
203 * (e.g., to align on a cache line).
204 * @intr: Boolean. Use 1 to generate interrupt for each completed TxDL.
205 * Use 0 otherwise.
206 * @no_snoop_bits: If non-zero, specifies no-snoop PCI operation,
207 * which generally improves latency of the host bridge operation
208 * (see PCI specification). For valid values please refer
209 * to struct vxge_hw_fifo_config{} in the driver sources.
210 * Configuration of all Titan fifos.
211 * Note: Valid (min, max) range for each attribute is specified in the body of
212 * the struct vxge_hw_fifo_config{} structure.
213 */
214struct vxge_hw_fifo_config {
215 u32 enable;
216#define VXGE_HW_FIFO_ENABLE 1
217#define VXGE_HW_FIFO_DISABLE 0
218
219 u32 fifo_blocks;
220#define VXGE_HW_MIN_FIFO_BLOCKS 2
221#define VXGE_HW_MAX_FIFO_BLOCKS 128
222
223 u32 max_frags;
224#define VXGE_HW_MIN_FIFO_FRAGS 1
225#define VXGE_HW_MAX_FIFO_FRAGS 256
226
227 u32 memblock_size;
228#define VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE VXGE_HW_BLOCK_SIZE
229#define VXGE_HW_MAX_FIFO_MEMBLOCK_SIZE 131072
230#define VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE 8096
231
232 u32 alignment_size;
233#define VXGE_HW_MIN_FIFO_ALIGNMENT_SIZE 0
234#define VXGE_HW_MAX_FIFO_ALIGNMENT_SIZE 65536
235#define VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE VXGE_CACHE_LINE_SIZE
236
237 u32 intr;
238#define VXGE_HW_FIFO_QUEUE_INTR_ENABLE 1
239#define VXGE_HW_FIFO_QUEUE_INTR_DISABLE 0
240#define VXGE_HW_FIFO_QUEUE_INTR_DEFAULT 0
241
242 u32 no_snoop_bits;
243#define VXGE_HW_FIFO_NO_SNOOP_DISABLED 0
244#define VXGE_HW_FIFO_NO_SNOOP_TXD 1
245#define VXGE_HW_FIFO_NO_SNOOP_FRM 2
246#define VXGE_HW_FIFO_NO_SNOOP_ALL 3
247#define VXGE_HW_FIFO_NO_SNOOP_DEFAULT 0
248
249};
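/*
 * A hedged example of a fifo configuration built purely from the
 * bounds and defaults defined above; a real driver derives these
 * values from module parameters and device capabilities.
 */
static const struct vxge_hw_fifo_config vxge_example_fifo_config = {
	.enable		= VXGE_HW_FIFO_ENABLE,
	.fifo_blocks	= VXGE_HW_MIN_FIFO_BLOCKS,
	.max_frags	= VXGE_HW_MAX_FIFO_FRAGS,
	.memblock_size	= VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE,
	.alignment_size	= VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE,
	.intr		= VXGE_HW_FIFO_QUEUE_INTR_DEFAULT,
	.no_snoop_bits	= VXGE_HW_FIFO_NO_SNOOP_DEFAULT,
};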
250/**
251 * struct vxge_hw_ring_config - Ring configurations.
252 * @enable: Is this ring to be commissioned
253 * @ring_blocks: Numbers of RxD blocks in the ring
254 * @buffer_mode: Receive buffer mode (1, 2, 3, or 5); for details please refer
255 * to Titan User Guide.
256 * @scatter_mode: Titan supports two receive scatter modes: A and B.
257 * For details please refer to Titan User Guide.
258 * @rx_timer_val: The number of 32ns periods that would be counted between two
259 * timer interrupts.
260 * @greedy_return: If set, forces the device to return absolutely all RxDs
261 * that are consumed and still on board when a timer interrupt
262 * triggers. If clear, and the device has already returned
263 * RxDs after the previous timer interrupt triggered but before
264 * the current one, then the device is not
265 * forced to return the rest of the consumed RxDs that it has
266 * on board, which account for a byte count less than the one
267 * programmed into the PRC_CFG6.RXD_CRXDT field.
268 * @rx_timer_ci: TBD
269 * @backoff_interval_us: Time (in microseconds), after which Titan
270 * tries to download RxDs posted by the host.
271 * Note that the "backoff" does not happen if the host posts receive
272 * descriptors in a timely fashion.
273 * Ring configuration.
274 */
275struct vxge_hw_ring_config {
276 u32 enable;
277#define VXGE_HW_RING_ENABLE 1
278#define VXGE_HW_RING_DISABLE 0
279#define VXGE_HW_RING_DEFAULT 1
280
281 u32 ring_blocks;
282#define VXGE_HW_MIN_RING_BLOCKS 1
283#define VXGE_HW_MAX_RING_BLOCKS 128
284#define VXGE_HW_DEF_RING_BLOCKS 2
285
286 u32 buffer_mode;
287#define VXGE_HW_RING_RXD_BUFFER_MODE_1 1
288#define VXGE_HW_RING_RXD_BUFFER_MODE_3 3
289#define VXGE_HW_RING_RXD_BUFFER_MODE_5 5
290#define VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT 1
291
292 u32 scatter_mode;
293#define VXGE_HW_RING_SCATTER_MODE_A 0
294#define VXGE_HW_RING_SCATTER_MODE_B 1
295#define VXGE_HW_RING_SCATTER_MODE_C 2
296#define VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT 0xffffffff
297
298 u64 rxds_limit;
299#define VXGE_HW_DEF_RING_RXDS_LIMIT 44
300};
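/* The matching ring example, again built from the defaults above. */
static const struct vxge_hw_ring_config vxge_example_ring_config = {
	.enable		= VXGE_HW_RING_DEFAULT,
	.ring_blocks	= VXGE_HW_DEF_RING_BLOCKS,
	.buffer_mode	= VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT,
	.scatter_mode	= VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT,
	.rxds_limit	= VXGE_HW_DEF_RING_RXDS_LIMIT,
};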
301
302/**
303 * struct vxge_hw_vp_config - Configuration of virtual path
304 * @vp_id: Virtual Path Id
305 * @min_bandwidth: Minimum Guaranteed bandwidth
306 * @ring: See struct vxge_hw_ring_config{}.
307 * @fifo: See struct vxge_hw_fifo_config{}.
308 * @tti: Configuration of interrupt associated with Transmit.
309 * see struct vxge_hw_tim_intr_config();
310 * @rti: Configuration of interrupt associated with Receive.
311 * see struct vxge_hw_tim_intr_config();
312 * @mtu: mtu size used on this port.
313 * @rpa_strip_vlan_tag: Strip VLAN Tag enable/disable. Instructs the device to
314 * remove the VLAN tag from all received tagged frames that are not
315 * replicated at the internal L2 switch.
316 * 0 - Do not strip the VLAN tag.
317 * 1 - Strip the VLAN tag. Regardless of this setting, VLAN tags are
318 * always placed into the RxDMA descriptor.
319 *
320 * This structure is used by the driver to pass the configuration parameters to
321 * configure Virtual Path.
322 */
323struct vxge_hw_vp_config {
324 u32 vp_id;
325
326#define VXGE_HW_VPATH_PRIORITY_MIN 0
327#define VXGE_HW_VPATH_PRIORITY_MAX 16
328#define VXGE_HW_VPATH_PRIORITY_DEFAULT 0
329
330 u32 min_bandwidth;
331#define VXGE_HW_VPATH_BANDWIDTH_MIN 0
332#define VXGE_HW_VPATH_BANDWIDTH_MAX 100
333#define VXGE_HW_VPATH_BANDWIDTH_DEFAULT 0
334
335 struct vxge_hw_ring_config ring;
336 struct vxge_hw_fifo_config fifo;
337 struct vxge_hw_tim_intr_config tti;
338 struct vxge_hw_tim_intr_config rti;
339
340 u32 mtu;
341#define VXGE_HW_VPATH_MIN_INITIAL_MTU VXGE_HW_MIN_MTU
342#define VXGE_HW_VPATH_MAX_INITIAL_MTU VXGE_HW_MAX_MTU
343#define VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU 0xffffffff
344
345 u32 rpa_strip_vlan_tag;
346#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE 1
347#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE 0
348#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT 0xffffffff
349
350};
351/**
352 * struct vxge_hw_device_config - Device configuration.
353 * @dma_blockpool_initial: Initial size of DMA Pool
354 * @dma_blockpool_max: Maximum blocks in DMA pool
355 * @intr_mode: Line, or MSI-X interrupt.
356 *
357 * @rth_en: Enable Receive Traffic Hashing(RTH) using IT(Indirection Table).
358 * @rth_it_type: RTH IT table programming type
359 * @rts_mac_en: Enable Receive Traffic Steering using MAC destination address
360 * @vp_config: Configuration for virtual paths
361 * @device_poll_millis: Specify the interval (in milliseconds)
362 * to wait for register reads
363 *
364 * Titan configuration.
365 * Contains per-device configuration parameters, including:
366 * - stats sampling interval, etc.
367 *
368 * In addition, struct vxge_hw_device_config{} includes "subordinate"
369 * configurations, including:
370 * - fifos and rings;
371 * - MAC (done at firmware level).
372 *
373 * See Titan User Guide for more details.
374 * Note: Valid (min, max) range for each attribute is specified in the body of
375 * the struct vxge_hw_device_config{} structure. Please refer to the
376 * corresponding include file.
377 * See also: struct vxge_hw_tim_intr_config{}.
378 */
379struct vxge_hw_device_config {
380 u32 dma_blockpool_initial;
381 u32 dma_blockpool_max;
382#define VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE 0
383#define VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE 0
384#define VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE 4
385#define VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE 4096
386
387#define VXGE_HW_MAX_PAYLOAD_SIZE_512 2
388
389 u32 intr_mode;
390#define VXGE_HW_INTR_MODE_IRQLINE 0
391#define VXGE_HW_INTR_MODE_MSIX 1
392#define VXGE_HW_INTR_MODE_MSIX_ONE_SHOT 2
393
394#define VXGE_HW_INTR_MODE_DEF 0
395
396 u32 rth_en;
397#define VXGE_HW_RTH_DISABLE 0
398#define VXGE_HW_RTH_ENABLE 1
399#define VXGE_HW_RTH_DEFAULT 0
400
401 u32 rth_it_type;
402#define VXGE_HW_RTH_IT_TYPE_SOLO_IT 0
403#define VXGE_HW_RTH_IT_TYPE_MULTI_IT 1
404#define VXGE_HW_RTH_IT_TYPE_DEFAULT 0
405
406 u32 rts_mac_en;
407#define VXGE_HW_RTS_MAC_DISABLE 0
408#define VXGE_HW_RTS_MAC_ENABLE 1
409#define VXGE_HW_RTS_MAC_DEFAULT 0
410
411 struct vxge_hw_vp_config vp_config[VXGE_HW_MAX_VIRTUAL_PATHS];
412
413 u32 device_poll_millis;
414#define VXGE_HW_MIN_DEVICE_POLL_MILLIS 1
415#define VXGE_HW_MAX_DEVICE_POLL_MILLIS 100000
416#define VXGE_HW_DEF_DEVICE_POLL_MILLIS 1000
417
418};
419
420/**
421 * function vxge_uld_link_up_f - Link-Up callback provided by driver.
422 * @devh: HW device handle.
423 * Link-up notification callback provided by the driver.
424 * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
425 *
426 * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_down_f{},
427 * vxge_hw_driver_initialize().
428 */
429
430/**
431 * function vxge_uld_link_down_f - Link-Down callback provided by
432 * driver.
433 * @devh: HW device handle.
434 *
435 * Link-Down notification callback provided by the driver.
436 * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
437 *
438 * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{},
439 * vxge_hw_driver_initialize().
440 */
441
442/**
443 * function vxge_uld_crit_err_f - Critical Error notification callback.
444 * @devh: HW device handle.
445 * (typically - at HW device initialization time).
446 * @type: Enumerated hw error, e.g.: double ECC.
447 * @serr_data: Titan status.
448 * @ext_data: Extended data. The contents depend on @type.
449 *
450 * Critical-error notification callback provided by the driver.
451 * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
452 *
453 * See also: struct vxge_hw_uld_cbs{}, enum vxge_hw_event{},
454 * vxge_hw_driver_initialize().
455 */
456
457/**
458 * struct vxge_hw_uld_cbs - driver "slow-path" callbacks.
459 * @link_up: See vxge_uld_link_up_f{}.
460 * @link_down: See vxge_uld_link_down_f{}.
461 * @crit_err: See vxge_uld_crit_err_f{}.
462 *
463 * Driver slow-path (per-driver) callbacks.
464 * Implemented by driver and provided to HW via
465 * vxge_hw_driver_initialize().
466 * Note that these callbacks are not mandatory: HW will not invoke
467 * a callback if NULL is specified.
468 *
469 * See also: vxge_hw_driver_initialize().
470 */
471struct vxge_hw_uld_cbs {
472
473 void (*link_up)(struct __vxge_hw_device *devh);
474 void (*link_down)(struct __vxge_hw_device *devh);
475 void (*crit_err)(struct __vxge_hw_device *devh,
476 enum vxge_hw_event type, u64 ext_data);
477};
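/*
 * Sketch of a driver wiring the slow-path callbacks; the handler
 * below is hypothetical, and NULL members are simply never invoked
 * by HW.
 */
static void vxge_example_link_up(struct __vxge_hw_device *devh)
{
	/* e.g. mark the associated net_device's carrier as up */
}

static const struct vxge_hw_uld_cbs vxge_example_uld_cbs = {
	.link_up	= vxge_example_link_up,
	.link_down	= NULL,
	.crit_err	= NULL,
};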
478
479/*
480 * struct __vxge_hw_blockpool_entry - Block private data structure
481 * @item: List header used to link.
482 * @length: Length of the block
483 * @memblock: Virtual address block
484 * @dma_addr: DMA Address of the block.
485 * @dma_handle: DMA handle of the block.
486 * @acc_handle: DMA acc handle
487 *
488 * Block is allocated with a header to put the blocks into list.
489 *
490 */
491struct __vxge_hw_blockpool_entry {
492 struct list_head item;
493 u32 length;
494 void *memblock;
495 dma_addr_t dma_addr;
496 struct pci_dev *dma_handle;
497 struct pci_dev *acc_handle;
498};
499
500/*
501 * struct __vxge_hw_blockpool - Block Pool
502 * @hldev: HW device
503 * @block_size: size of each block.
504 * @Pool_size: Number of blocks in the pool
505 * @pool_max: Maximum number of blocks above which to free additional blocks
506 * @req_out: Number of block requests with OS out standing
507 * @free_block_list: List of free blocks
508 *
509 * Block pool contains the DMA blocks preallocated.
510 *
511 */
512struct __vxge_hw_blockpool {
513 struct __vxge_hw_device *hldev;
514 u32 block_size;
515 u32 pool_size;
516 u32 pool_max;
517 u32 req_out;
518 struct list_head free_block_list;
519 struct list_head free_entry_list;
520};
521
522/*
523 * enum __vxge_hw_channel_type - Enumerated channel types.
524 * @VXGE_HW_CHANNEL_TYPE_UNKNOWN: Unknown channel.
525 * @VXGE_HW_CHANNEL_TYPE_FIFO: fifo.
526 * @VXGE_HW_CHANNEL_TYPE_RING: ring.
527 * @VXGE_HW_CHANNEL_TYPE_MAX: Maximum number of HW-supported
528 * (and recognized) channel types. Currently: 2.
529 *
530 * Enumerated channel types. Currently there are only two link-layer
531 * channels - Titan fifo and Titan ring. In the future the list will grow.
532 */
533enum __vxge_hw_channel_type {
534 VXGE_HW_CHANNEL_TYPE_UNKNOWN = 0,
535 VXGE_HW_CHANNEL_TYPE_FIFO = 1,
536 VXGE_HW_CHANNEL_TYPE_RING = 2,
537 VXGE_HW_CHANNEL_TYPE_MAX = 3
538};
539
540/*
541 * struct __vxge_hw_channel
542 * @item: List item; used to maintain a list of open channels.
543 * @type: Channel type. See enum vxge_hw_channel_type{}.
544 * @devh: Device handle. HW device object that contains _this_ channel.
545 * @vph: Virtual path handle. Virtual Path Object that contains _this_ channel.
546 * @length: Channel length. Currently allocated number of descriptors.
547 * The channel length "grows" when more descriptors get allocated.
548 * See _hw_mempool_grow.
549 * @reserve_arr: Reserve array. Contains descriptors that can be reserved
550 * by driver for the subsequent send or receive operation.
551 * See vxge_hw_fifo_txdl_reserve(),
552 * vxge_hw_ring_rxd_reserve().
553 * @reserve_ptr: Current pointer in the reserve array
554 * @reserve_top: Reserve top gives the maximum number of dtrs available in
555 * reserve array.
556 * @work_arr: Work array. Contains descriptors posted to the channel.
557 * Note that at any point in time @work_arr contains 3 types of
558 * descriptors:
559 * 1) posted but not yet consumed by Titan device;
560 * 2) consumed but not yet completed;
561 * 3) completed but not yet freed
562 * (via vxge_hw_fifo_txdl_free() or vxge_hw_ring_rxd_free())
563 * @post_index: Post index. At any point in time points on the
564 * position in the channel, which'll contain next to-be-posted
565 * descriptor.
566 * @compl_index: Completion index. At any point in time points on the
567 * position in the channel, which will contain next
568 * to-be-completed descriptor.
569 * @free_arr: Free array. Contains completed descriptors that were freed
570 * (i.e., handed over back to HW) by driver.
571 * See vxge_hw_fifo_txdl_free(), vxge_hw_ring_rxd_free().
572 * @free_ptr: current pointer in free array
573 * @per_dtr_space: Per-descriptor space (in bytes) that channel user can utilize
574 * to store per-operation control information.
575 * @stats: Pointer to common statistics
576 * @userdata: Per-channel opaque (void*) user-defined context, which may be
577 * driver object, ULP connection, etc.
578 * Once channel is open, @userdata is passed back to user via
579 * vxge_hw_channel_callback_f.
580 *
581 * HW channel object.
582 *
583 * See also: enum vxge_hw_channel_type{}, enum vxge_hw_channel_flag
584 */
585struct __vxge_hw_channel {
586 struct list_head item;
587 enum __vxge_hw_channel_type type;
588 struct __vxge_hw_device *devh;
589 struct __vxge_hw_vpath_handle *vph;
590 u32 length;
591 u32 vp_id;
592 void **reserve_arr;
593 u32 reserve_ptr;
594 u32 reserve_top;
595 void **work_arr;
596 u32 post_index ____cacheline_aligned;
597 u32 compl_index ____cacheline_aligned;
598 void **free_arr;
599 u32 free_ptr;
600 void **orig_arr;
601 u32 per_dtr_space;
602 void *userdata;
603 struct vxge_hw_common_reg __iomem *common_reg;
604 u32 first_vp_id;
605 struct vxge_hw_vpath_stats_sw_common_info *stats;
606
607} ____cacheline_aligned;
608
609/*
610 * struct __vxge_hw_virtualpath - Virtual Path
611 *
612 * @vp_id: Virtual path id
613 * @vp_open: This flag specifies if vxge_hw_vp_open is called from LL Driver
614 * @hldev: Hal device
615 * @vp_config: Virtual Path Config
616 * @vp_reg: VPATH Register map address in BAR0
617 * @vpmgmt_reg: VPATH_MGMT register map address
618 * @max_mtu: Max mtu that can be supported
619 * @vsport_number: vsport attached to this vpath
620 * @max_kdfc_db: Maximum kernel mode doorbells
621 * @max_nofl_db: Maximum non offload doorbells
622 * @tx_intr_num: Interrupt Number associated with the TX
623 *
624 * @ringh: Ring Queue
625 * @fifoh: FIFO Queue
626 * @vpath_handles: Virtual Path handles list
627 * @stats_block: Memory for DMAing stats
628 * @stats: Vpath statistics
629 *
630 * Virtual path structure to encapsulate the data related to a virtual path.
631 * Virtual paths are allocated by the HW upon getting configuration from the
632 * driver and inserted into the list of virtual paths.
633 */
634struct __vxge_hw_virtualpath {
635 u32 vp_id;
636
637 u32 vp_open;
638#define VXGE_HW_VP_NOT_OPEN 0
639#define VXGE_HW_VP_OPEN 1
640
641 struct __vxge_hw_device *hldev;
642 struct vxge_hw_vp_config *vp_config;
643 struct vxge_hw_vpath_reg __iomem *vp_reg;
644 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
645 struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db;
646
647 u32 max_mtu;
648 u32 vsport_number;
649 u32 max_kdfc_db;
650 u32 max_nofl_db;
651
652 struct __vxge_hw_ring *____cacheline_aligned ringh;
653 struct __vxge_hw_fifo *____cacheline_aligned fifoh;
654 struct list_head vpath_handles;
655 struct __vxge_hw_blockpool_entry *stats_block;
656 struct vxge_hw_vpath_stats_hw_info *hw_stats;
657 struct vxge_hw_vpath_stats_hw_info *hw_stats_sav;
658 struct vxge_hw_vpath_stats_sw_info *sw_stats;
659};
660
661/*
662 * struct __vxge_hw_vpath_handle - List item to store callback information
663 * @item: List head to keep the item in linked list
664 * @vpath: Virtual path to which this item belongs
665 *
666 * This structure is used to store the callback information.
667 */
668struct __vxge_hw_vpath_handle {
669 struct list_head item;
670 struct __vxge_hw_virtualpath *vpath;
671};
672
673/*
674 * struct __vxge_hw_device
675 *
676 * HW device object.
677 */
678/**
679 * struct __vxge_hw_device - Hal device object
680 * @magic: Magic Number
681 * @device_id: PCI Device Id of the adapter
682 * @major_revision: PCI Device major revision
683 * @minor_revision: PCI Device minor revision
684 * @bar0: BAR0 virtual address.
685 * @bar1: BAR1 virtual address.
686 * @bar2: BAR2 virtual address.
687 * @pdev: Physical device handle
688 * @config: Configuration passed by the LL driver at initialization
689 * @link_state: Link state
690 *
691 * HW device object. Represents Titan adapter
692 */
693struct __vxge_hw_device {
694 u32 magic;
695#define VXGE_HW_DEVICE_MAGIC 0x12345678
696#define VXGE_HW_DEVICE_DEAD 0xDEADDEAD
697 u16 device_id;
698 u8 major_revision;
699 u8 minor_revision;
700 void __iomem *bar0;
701 void __iomem *bar1;
702 void __iomem *bar2;
703 struct pci_dev *pdev;
704 struct net_device *ndev;
705 struct vxge_hw_device_config config;
706 enum vxge_hw_device_link_state link_state;
707
708 struct vxge_hw_uld_cbs uld_callbacks;
709
710 u32 host_type;
711 u32 func_id;
712 u32 access_rights;
713#define VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH 0x1
714#define VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM 0x2
715#define VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM 0x4
716 struct vxge_hw_legacy_reg __iomem *legacy_reg;
717 struct vxge_hw_toc_reg __iomem *toc_reg;
718 struct vxge_hw_common_reg __iomem *common_reg;
719 struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
720 struct vxge_hw_srpcim_reg __iomem *srpcim_reg \
721 [VXGE_HW_TITAN_SRPCIM_REG_SPACES];
722 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg \
723 [VXGE_HW_TITAN_VPMGMT_REG_SPACES];
724 struct vxge_hw_vpath_reg __iomem *vpath_reg \
725 [VXGE_HW_TITAN_VPATH_REG_SPACES];
726 u8 __iomem *kdfc;
727 u8 __iomem *usdc;
728 struct __vxge_hw_virtualpath virtual_paths \
729 [VXGE_HW_MAX_VIRTUAL_PATHS];
730 u64 vpath_assignments;
731 u64 vpaths_deployed;
732 u32 first_vp_id;
733 u64 tim_int_mask0[4];
734 u32 tim_int_mask1[4];
735
736 struct __vxge_hw_blockpool block_pool;
737 struct vxge_hw_device_stats stats;
738 u32 debug_module_mask;
739 u32 debug_level;
740 u32 level_err;
741 u32 level_trace;
742};
743
744#define VXGE_HW_INFO_LEN 64
745/**
746 * struct vxge_hw_device_hw_info - Device information
747 * @host_type: Host Type
748 * @func_id: Function Id
749 * @vpath_mask: vpath bit mask
750 * @fw_version: Firmware version
751 * @fw_date: Firmware Date
752 * @flash_version: Firmware version
753 * @flash_date: Firmware Date
754 * @mac_addrs: Mac addresses for each vpath
755 * @mac_addr_masks: Mac address masks for each vpath
756 *
757 * Returns the vpath mask that has the bits set for each vpath allocated
758 * for the driver and the first mac address for each vpath
759 */
760struct vxge_hw_device_hw_info {
761 u32 host_type;
762#define VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION 0
763#define VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION 1
764#define VXGE_HW_NO_MR_SR_VH0_FUNCTION0 2
765#define VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION 3
766#define VXGE_HW_MR_SR_VH0_INVALID_CONFIG 4
767#define VXGE_HW_SR_VH_FUNCTION0 5
768#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION 6
769#define VXGE_HW_VH_NORMAL_FUNCTION 7
770 u64 function_mode;
771#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 0
772#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 1
773#define VXGE_HW_FUNCTION_MODE_SRIOV 2
774#define VXGE_HW_FUNCTION_MODE_MRIOV 3
775 u32 func_id;
776 u64 vpath_mask;
777 struct vxge_hw_device_version fw_version;
778 struct vxge_hw_device_date fw_date;
779 struct vxge_hw_device_version flash_version;
780 struct vxge_hw_device_date flash_date;
781 u8 serial_number[VXGE_HW_INFO_LEN];
782 u8 part_number[VXGE_HW_INFO_LEN];
783 u8 product_desc[VXGE_HW_INFO_LEN];
784 u8 (mac_addrs)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
785 u8 (mac_addr_masks)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
786};
787
788/**
789 * struct vxge_hw_device_attr - Device memory spaces.
790 * @bar0: BAR0 virtual address.
791 * @bar1: BAR1 virtual address.
792 * @bar2: BAR2 virtual address.
793 * @pdev: PCI device object.
794 *
795 * Device memory spaces. Includes configuration, BAR0, BAR1, etc. per device
796 * mapped memories. Also, includes a pointer to OS-specific PCI device object.
797 */
798struct vxge_hw_device_attr {
799 void __iomem *bar0;
800 void __iomem *bar1;
801 void __iomem *bar2;
802 struct pci_dev *pdev;
803 struct vxge_hw_uld_cbs uld_callbacks;
804};
805
806#define VXGE_HW_DEVICE_LINK_STATE_SET(hldev, ls) (hldev->link_state = ls)
807
808#define VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, i) { \
809 if (i < 16) { \
810 m0[0] |= vxge_vBIT(0x8, (i*4), 4); \
811 m0[1] |= vxge_vBIT(0x4, (i*4), 4); \
812 } \
813 else { \
814 m1[0] = 0x80000000; \
815 m1[1] = 0x40000000; \
816 } \
817}
818
819#define VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, i) { \
820 if (i < 16) { \
821 m0[0] &= ~vxge_vBIT(0x8, (i*4), 4); \
822 m0[1] &= ~vxge_vBIT(0x4, (i*4), 4); \
823 } \
824 else { \
825 m1[0] = 0; \
826 m1[1] = 0; \
827 } \
828}
829
830#define VXGE_HW_DEVICE_STATS_PIO_READ(loc, offset) { \
831 status = vxge_hw_mrpcim_stats_access(hldev, \
832 VXGE_HW_STATS_OP_READ, \
833 loc, \
834 offset, \
835 &val64); \
836 \
837 if (status != VXGE_HW_OK) \
838 return status; \
839}
840
841#define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \
842 status = __vxge_hw_vpath_stats_access(vpath, \
843 VXGE_HW_STATS_OP_READ, \
844 offset, \
845 &val64); \
846 if (status != VXGE_HW_OK) \
847 return status; \
848}
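/*
 * Note: both *_STATS_PIO_READ macros above expand to code that uses
 * local "status" and "val64" variables (plus "hldev" or "vpath") and
 * return from the *calling* function on error, as in
 * __vxge_hw_vpath_stats_get() in vxge-config.c; callers must declare
 * those locals.
 */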
849
850/*
851 * struct __vxge_hw_ring - Ring channel.
852 * @channel: Channel "base" of this ring, the common part of all HW
853 * channels.
854 * @mempool: Memory pool, the pool from which descriptors get allocated.
855 * (See vxge_hw_mm.h).
856 * @config: Ring configuration, part of device configuration
857 * (see struct vxge_hw_device_config{}).
858 * @ring_length: Length of the ring
859 * @buffer_mode: 1, 3, or 5. The value specifies a receive buffer mode,
860 * as per Titan User Guide.
861 * @rxd_size: RxD sizes for 1-, 3- or 5- buffer modes. As per Titan spec,
862 * 1-buffer mode descriptor is 32 bytes long, etc.
863 * @rxd_priv_size: Per RxD size reserved (by HW) for driver to keep
864 * per-descriptor data (e.g., DMA handle for Solaris)
865 * @per_rxd_space: Per rxd space requested by driver
866 * @rxds_per_block: Number of descriptors per hardware-defined RxD
867 * block. Depends on the (1-, 3-, 5-) buffer mode.
868 * @rxdblock_priv_size: Reserved at the end of each RxD block. HW internal
869 * usage. Not to be confused with @rxd_priv_size.
870 * @cmpl_cnt: Completion counter. Is reset to zero upon entering the ISR.
871 * @callback: Channel completion callback. HW invokes the callback when there
872 * are new completions on that channel. In many implementations
873 * the @callback executes in the hw interrupt context.
874 * @rxd_init: Channel's descriptor-initialize callback.
875 * See vxge_hw_ring_rxd_init_f{}.
876 * If not NULL, HW invokes the callback when opening
877 * the ring.
878 * @rxd_term: Channel's descriptor-terminate callback. If not NULL,
879 * HW invokes the callback when closing the corresponding channel.
880 * See also vxge_hw_channel_rxd_term_f{}.
881 * @stats: Statistics for ring
882 * Ring channel.
883 *
884 * Note: The structure is cache line aligned to better utilize
885 * CPU cache performance.
886 */
887struct __vxge_hw_ring {
888 struct __vxge_hw_channel channel;
889 struct vxge_hw_mempool *mempool;
890 struct vxge_hw_vpath_reg __iomem *vp_reg;
891 struct vxge_hw_common_reg __iomem *common_reg;
892 u32 ring_length;
893 u32 buffer_mode;
894 u32 rxd_size;
895 u32 rxd_priv_size;
896 u32 per_rxd_space;
897 u32 rxds_per_block;
898 u32 rxdblock_priv_size;
899 u32 cmpl_cnt;
900 u32 vp_id;
901 u32 doorbell_cnt;
902 u32 total_db_cnt;
903 u64 rxds_limit;
904
905 enum vxge_hw_status (*callback)(
906 struct __vxge_hw_ring *ringh,
907 void *rxdh,
908 u8 t_code,
909 void *userdata);
910
911 enum vxge_hw_status (*rxd_init)(
912 void *rxdh,
913 void *userdata);
914
915 void (*rxd_term)(
916 void *rxdh,
917 enum vxge_hw_rxd_state state,
918 void *userdata);
919
920 struct vxge_hw_vpath_stats_sw_ring_info *stats ____cacheline_aligned;
921 struct vxge_hw_ring_config *config;
922} ____cacheline_aligned;
923
924/**
925 * enum vxge_hw_txdl_state - Descriptor (TXDL) state.
926 * @VXGE_HW_TXDL_STATE_NONE: Invalid state.
927 * @VXGE_HW_TXDL_STATE_AVAIL: Descriptor is available for reservation.
928 * @VXGE_HW_TXDL_STATE_POSTED: Descriptor is posted for processing by the
929 * device.
930 * @VXGE_HW_TXDL_STATE_FREED: Descriptor is free and can be reused for
931 * filling-in and posting later.
932 *
933 * Titan/HW descriptor states.
934 *
935 */
936enum vxge_hw_txdl_state {
937 VXGE_HW_TXDL_STATE_NONE = 0,
938 VXGE_HW_TXDL_STATE_AVAIL = 1,
939 VXGE_HW_TXDL_STATE_POSTED = 2,
940 VXGE_HW_TXDL_STATE_FREED = 3
941};
942/*
943 * struct __vxge_hw_fifo - Fifo.
944 * @channel: Channel "base" of this fifo, the common part of all HW
945 * channels.
946 * @mempool: Memory pool, from which descriptors get allocated.
947 * @config: Fifo configuration, part of device configuration
948 * (see struct vxge_hw_device_config{}).
949 * @interrupt_type: Interrupt type to be used
950 * @no_snoop_bits: See struct vxge_hw_fifo_config{}.
951 * @txdl_per_memblock: Number of TxDLs (TxD lists) per memblock.
952 * For details on TxDLs please refer to the Titan UG.
953 * @txdl_size: Configured TxDL size (i.e., number of TxDs in a list), plus
954 * per-TxDL HW private space (struct __vxge_hw_fifo_txdl_priv).
955 * @priv_size: Per-Tx descriptor space reserved for driver
956 * usage.
957 * @per_txdl_space: Per txdl private space for the driver
958 * @callback: Fifo completion callback. HW invokes the callback when there
959 * are new completions on that fifo. In many implementations
960 * the @callback executes in the hw interrupt context.
961 * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
962 * HW invokes the callback when closing the corresponding fifo.
963 * See also vxge_hw_fifo_txdl_term_f{}.
964 * @stats: Statistics of this fifo
965 *
966 * Fifo channel.
967 * Note: The structure is cache line aligned.
968 */
969struct __vxge_hw_fifo {
970 struct __vxge_hw_channel channel;
971 struct vxge_hw_mempool *mempool;
972 struct vxge_hw_fifo_config *config;
973 struct vxge_hw_vpath_reg __iomem *vp_reg;
974 struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db;
975 u64 interrupt_type;
976 u32 no_snoop_bits;
977 u32 txdl_per_memblock;
978 u32 txdl_size;
979 u32 priv_size;
980 u32 per_txdl_space;
981 u32 vp_id;
982 u32 tx_intr_num;
983
984 enum vxge_hw_status (*callback)(
985 struct __vxge_hw_fifo *fifo_handle,
986 void *txdlh,
987 enum vxge_hw_fifo_tcode t_code,
988 void *userdata,
989 void **skb_ptr);
990
991 void (*txdl_term)(
992 void *txdlh,
993 enum vxge_hw_txdl_state state,
994 void *userdata);
995
996 struct vxge_hw_vpath_stats_sw_fifo_info *stats ____cacheline_aligned;
997} ____cacheline_aligned;
998
999/*
1000 * struct __vxge_hw_fifo_txdl_priv - Transmit descriptor HW-private data.
1001 * @dma_addr: DMA (mapped) address of _this_ descriptor.
1002 * @dma_handle: DMA handle used to map the descriptor onto device.
1003 * @dma_offset: Descriptor's offset in the memory block. HW allocates
1004 * descriptors in memory blocks (see struct vxge_hw_fifo_config{}).
1005 * Each memblock is a contiguous block of DMA-able memory.
1006 * @frags: Total number of fragments (that is, contiguous data buffers)
1007 * carried by this TxDL.
1008 * @align_vaddr_start: Aligned virtual address start
1009 * @align_vaddr: Virtual address of the per-TxDL area in memory used for
1010 * alignment. Used to place one or more mis-aligned fragments
1011 * @align_dma_addr: DMA address translated from the @align_vaddr.
1012 * @align_dma_handle: DMA handle that corresponds to @align_dma_addr.
1013 * @align_dma_acch: DMA access handle corresponds to @align_dma_addr.
1014 * @align_dma_offset: The current offset into the @align_vaddr area.
1015 * Grows while filling the descriptor, gets reset.
1016 * @align_used_frags: Number of fragments used.
1017 * @alloc_frags: Total number of fragments allocated.
1018 * @unused: TODO
1019 * @next_txdl_priv: (TODO).
1020 * @first_txdp: (TODO).
1021 * @linked_txdl_priv: Pointer to any linked TxDL for creating contiguous
1022 * TxDL list.
1023 * @txdlh: Corresponding txdlh to this TxDL.
1024 * @memblock: Pointer to the TxDL memory block or memory page.
1025 * on the next send operation.
1026 * @dma_object: DMA address and handle of the memory block that contains
1027 * the descriptor. This member is used only in the "checked"
1028 * version of the HW (to enforce certain assertions);
1029 * otherwise it gets compiled out.
1030 * @allocated: True if the descriptor is reserved, 0 otherwise. Internal usage.
1031 *
1032 * Per-transmit descriptor HW-private data. HW uses the space to keep DMA
1033 * information associated with the descriptor. Note that driver can ask HW
1034 * to allocate additional per-descriptor space for its own (driver-specific)
1035 * purposes.
1036 *
1037 * See also: struct vxge_hw_ring_rxd_priv{}.
1038 */
1039struct __vxge_hw_fifo_txdl_priv {
1040 dma_addr_t dma_addr;
1041 struct pci_dev *dma_handle;
1042 ptrdiff_t dma_offset;
1043 u32 frags;
1044 u8 *align_vaddr_start;
1045 u8 *align_vaddr;
1046 dma_addr_t align_dma_addr;
1047 struct pci_dev *align_dma_handle;
1048 struct pci_dev *align_dma_acch;
1049 ptrdiff_t align_dma_offset;
1050 u32 align_used_frags;
1051 u32 alloc_frags;
1052 u32 unused;
1053 struct __vxge_hw_fifo_txdl_priv *next_txdl_priv;
1054 struct vxge_hw_fifo_txd *first_txdp;
1055 void *memblock;
1056};
1057
1058/*
1059 * struct __vxge_hw_non_offload_db_wrapper - Non-offload Doorbell Wrapper
1060 * @control_0: Bits 0 to 7 - Doorbell type.
1061 * Bits 8 to 31 - Reserved.
1062 * Bits 32 to 39 - The highest TxD in this TxDL.
1063 * Bits 40 to 47 - Reserved.
1064 * Bits 48 to 55 - Reserved.
1065 * Bits 56 to 63 - No snoop flags.
1066 * @txdl_ptr: The starting location of the TxDL in host memory.
1067 *
1068 * Created by the host and written to the adapter via PIO to a Kernel Doorbell
1069 * FIFO. All non-offload doorbell wrapper fields must be written by the host as
1070 * part of a doorbell write. Consumed by the adapter but is not written by the
1071 * adapter.
1072 */
1073struct __vxge_hw_non_offload_db_wrapper {
1074 u64 control_0;
1075#define VXGE_HW_NODBW_GET_TYPE(ctrl0) vxge_bVALn(ctrl0, 0, 8)
1076#define VXGE_HW_NODBW_TYPE(val) vxge_vBIT(val, 0, 8)
1077#define VXGE_HW_NODBW_TYPE_NODBW 0
1078
1079#define VXGE_HW_NODBW_GET_LAST_TXD_NUMBER(ctrl0) vxge_bVALn(ctrl0, 32, 8)
1080#define VXGE_HW_NODBW_LAST_TXD_NUMBER(val) vxge_vBIT(val, 32, 8)
1081
1082#define VXGE_HW_NODBW_GET_NO_SNOOP(ctrl0) vxge_bVALn(ctrl0, 56, 8)
1083#define VXGE_HW_NODBW_LIST_NO_SNOOP(val) vxge_vBIT(val, 56, 8)
1084#define VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE 0x2
1085#define VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ 0x1
1086
1087 u64 txdl_ptr;
1088};
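/*
 * A hedged sketch of composing the control_0 doorbell word from the
 * field macros above; "last_txd" and "no_snoop" are placeholder
 * values supplied by the caller, and the helper name is hypothetical.
 */
static inline u64 vxge_example_nodbw_control0(u64 last_txd, u64 no_snoop)
{
	return VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
	       VXGE_HW_NODBW_LAST_TXD_NUMBER(last_txd) |
	       VXGE_HW_NODBW_LIST_NO_SNOOP(no_snoop);
}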
1089
1090/*
1091 * TX Descriptor
1092 */
1093
1094/**
1095 * struct vxge_hw_fifo_txd - Transmit Descriptor
1096 * @control_0: Bits 0 to 6 - Reserved.
1097 * Bit 7 - List Ownership. This field should be initialized
1098 * to '1' by the driver before the transmit list pointer is
1099 * written to the adapter. This field will be set to '0' by the
1100 * adapter once it has completed transmitting the frame or frames in
1101 * the list. Note - This field is only valid in TxD0. Additionally,
1102 * for multi-list sequences, the driver should not release any
1103 * buffers until the ownership of the last list in the multi-list
1104 * sequence has been returned to the host.
1105 * Bits 8 to 11 - Reserved
1106 * Bits 12 to 15 - Transfer_Code. This field is only valid in
1107 * TxD0. It is used to describe the status of the transmit data
1108 * buffer transfer. This field is always overwritten by the
1109 * adapter, so this field may be initialized to any value.
1110 * Bits 16 to 17 - Host steering. This field allows the host to
1111 * override the selection of the physical transmit port.
1112 * Attention:
 1113 * "Normal" behaves as if the transmit port were learned from
 1114 * the switch rather than from the aggregation algorithms.
1115 * 00: Normal. Use Destination/MAC Address
1116 * lookup to determine the transmit port.
1117 * 01: Send on physical Port1.
1118 * 10: Send on physical Port0.
1119 * 11: Send on both ports.
1120 * Bits 18 to 21 - Reserved
1121 * Bits 22 to 23 - Gather_Code. This field is set by the host and
1122 * is used to describe how individual buffers comprise a frame.
1123 * 10: First descriptor of a frame.
1124 * 00: Middle of a multi-descriptor frame.
1125 * 01: Last descriptor of a frame.
1126 * 11: First and last descriptor of a frame (the entire frame
1127 * resides in a single buffer).
1128 * For multi-descriptor frames, the only valid gather code sequence
1129 * is {10, [00], 01}. In other words, the descriptors must be placed
1130 * in the list in the correct order.
1131 * Bits 24 to 27 - Reserved
1132 * Bits 28 to 29 - LSO_Frm_Encap. LSO Frame Encapsulation
1133 * definition. Only valid in TxD0. This field allows the host to
1134 * indicate the Ethernet encapsulation of an outbound LSO packet.
1135 * 00 - classic mode (best guess)
1136 * 01 - LLC
1137 * 10 - SNAP
1138 * 11 - DIX
1139 * If "classic mode" is selected, the adapter will attempt to
1140 * decode the frame's Ethernet encapsulation by examining the L/T
1141 * field as follows:
1142 * <= 0x05DC LLC/SNAP encoding; must examine DSAP/SSAP to determine
1143 * if packet is IPv4 or IPv6.
1144 * 0x8870 Jumbo-SNAP encoding.
1145 * 0x0800 IPv4 DIX encoding
1146 * 0x86DD IPv6 DIX encoding
1147 * others illegal encapsulation
1148 * Bits 30 - LSO_ Flag. Large Send Offload (LSO) flag.
1149 * Set to 1 to perform segmentation offload for TCP/UDP.
1150 * This field is valid only in TxD0.
1151 * Bits 31 to 33 - Reserved.
1152 * Bits 34 to 47 - LSO_MSS. TCP/UDP LSO Maximum Segment Size
1153 * This field is meaningful only when LSO_Control is non-zero.
1154 * When LSO_Control is set to TCP_LSO, the single (possibly large)
1155 * TCP segment described by this TxDL will be sent as a series of
1156 * TCP segments each of which contains no more than LSO_MSS
1157 * payload bytes.
1158 * When LSO_Control is set to UDP_LSO, the single (possibly large)
1159 * UDP datagram described by this TxDL will be sent as a series of
1160 * UDP datagrams each of which contains no more than LSO_MSS
1161 * payload bytes.
1162 * All outgoing frames from this TxDL will have LSO_MSS bytes of UDP
1163 * or TCP payload, with the exception of the last, which will have
1164 * <= LSO_MSS bytes of payload.
1165 * Bits 48 to 63 - Buffer_Size. Number of valid bytes in the
1166 * buffer to be read by the adapter. This field is written by the
1167 * host. A value of 0 is illegal.
1168 * Bits 32 to 63 - This value is written by the adapter upon
1169 * completion of a UDP or TCP LSO operation and indicates the number
1170 * of UDP or TCP payload bytes that were transmitted. 0x0000 will be
1171 * returned for any non-LSO operation.
1172 * @control_1: Bits 0 to 4 - Reserved.
1173 * Bit 5 - Tx_CKO_IPv4 Set to a '1' to enable IPv4 header checksum
1174 * offload. This field is only valid in the first TxD of a frame.
1175 * Bit 6 - Tx_CKO_TCP Set to a '1' to enable TCP checksum offload.
1176 * This field is only valid in the first TxD of a frame (the TxD's
1177 * gather code must be 10 or 11). The driver should only set this
1178 * bit if it can guarantee that TCP is present.
1179 * Bit 7 - Tx_CKO_UDP Set to a '1' to enable UDP checksum offload.
1180 * This field is only valid in the first TxD of a frame (the TxD's
1181 * gather code must be 10 or 11). The driver should only set this
1182 * bit if it can guarantee that UDP is present.
1183 * Bits 8 to 14 - Reserved.
1184 * Bit 15 - Tx_VLAN_Enable VLAN tag insertion flag. Set to a '1' to
1185 * instruct the adapter to insert the VLAN tag specified by the
1186 * Tx_VLAN_Tag field. This field is only valid in the first TxD of
1187 * a frame.
1188 * Bits 16 to 31 - Tx_VLAN_Tag. Variable portion of the VLAN tag
1189 * to be inserted into the frame by the adapter (the first two bytes
1190 * of a VLAN tag are always 0x8100). This field is only valid if the
1191 * Tx_VLAN_Enable field is set to '1'.
1192 * Bits 32 to 33 - Reserved.
1193 * Bits 34 to 39 - Tx_Int_Number. Indicates which Tx interrupt
 1194 * number the frame is associated with. This field is written by the
1195 * host. It is only valid in the first TxD of a frame.
1196 * Bits 40 to 42 - Reserved.
1197 * Bit 43 - Set to 1 to exclude the frame from bandwidth metering
1198 * functions. This field is valid only in the first TxD
1199 * of a frame.
1200 * Bits 44 to 45 - Reserved.
1201 * Bit 46 - Tx_Int_Per_List Set to a '1' to instruct the adapter to
1202 * generate an interrupt as soon as all of the frames in the list
1203 * have been transmitted. In order to have per-frame interrupts,
1204 * the driver should place a maximum of one frame per list. This
1205 * field is only valid in the first TxD of a frame.
1206 * Bit 47 - Tx_Int_Utilization Set to a '1' to instruct the adapter
1207 * to count the frame toward the utilization interrupt specified in
1208 * the Tx_Int_Number field. This field is only valid in the first
1209 * TxD of a frame.
1210 * Bits 48 to 63 - Reserved.
1211 * @buffer_pointer: Buffer start address.
 1212 * @host_control: Host_Control. Opaque 64-bit data stored by the driver inside
 1213 * the Titan descriptor prior to posting the latter on the fifo
 1214 * via vxge_hw_fifo_txdl_post(). The %host_control is returned as is
1215 * to the driver with each completed descriptor.
1216 *
 1217 * Transmit descriptor (TxD). A fifo descriptor contains a configured number
 1218 * (list) of TxDs. For more details please refer to the Titan User Guide,
1219 * Section 5.4.2 "Transmit Descriptor (TxD) Format".
1220 */
1221struct vxge_hw_fifo_txd {
1222 u64 control_0;
1223#define VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER vxge_mBIT(7)
1224
1225#define VXGE_HW_FIFO_TXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4)
1226#define VXGE_HW_FIFO_TXD_T_CODE(val) vxge_vBIT(val, 12, 4)
1227#define VXGE_HW_FIFO_TXD_T_CODE_UNUSED VXGE_HW_FIFO_T_CODE_UNUSED
1228
1229
1230#define VXGE_HW_FIFO_TXD_GATHER_CODE(val) vxge_vBIT(val, 22, 2)
1231#define VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST VXGE_HW_FIFO_GATHER_CODE_FIRST
1232#define VXGE_HW_FIFO_TXD_GATHER_CODE_LAST VXGE_HW_FIFO_GATHER_CODE_LAST
1233
1234
1235#define VXGE_HW_FIFO_TXD_LSO_EN vxge_mBIT(30)
1236
1237#define VXGE_HW_FIFO_TXD_LSO_MSS(val) vxge_vBIT(val, 34, 14)
1238
1239#define VXGE_HW_FIFO_TXD_BUFFER_SIZE(val) vxge_vBIT(val, 48, 16)
1240
1241 u64 control_1;
1242#define VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN vxge_mBIT(5)
1243#define VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN vxge_mBIT(6)
1244#define VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN vxge_mBIT(7)
1245#define VXGE_HW_FIFO_TXD_VLAN_ENABLE vxge_mBIT(15)
1246
1247#define VXGE_HW_FIFO_TXD_VLAN_TAG(val) vxge_vBIT(val, 16, 16)
1248
1249#define VXGE_HW_FIFO_TXD_INT_NUMBER(val) vxge_vBIT(val, 34, 6)
1250
1251#define VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST vxge_mBIT(46)
1252#define VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ vxge_mBIT(47)
1253
1254 u64 buffer_pointer;
1255
1256 u64 host_control;
1257};
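A minimal sketch (editor's illustration) of filling in a TxD that carries an entire frame in one buffer; example_txd_fill, buf_dma and buf_len are hypothetical names. Assuming the gather-code encodings documented above (10b first, 01b last), OR-ing FIRST and LAST yields the 11b "first and last" code:

static void example_txd_fill(struct vxge_hw_fifo_txd *txdp,
			     dma_addr_t buf_dma, u32 buf_len)
{
	txdp->buffer_pointer = buf_dma;
	txdp->control_0 =
		VXGE_HW_FIFO_TXD_GATHER_CODE(
			VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST |
			VXGE_HW_FIFO_TXD_GATHER_CODE_LAST) |
		VXGE_HW_FIFO_TXD_BUFFER_SIZE(buf_len);
	/* Hand the list to the adapter only after all fields are valid. */
	txdp->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
}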
1258
1259/**
1260 * struct vxge_hw_ring_rxd_1 - One buffer mode RxD for ring
1261 * @host_control: This field is exclusively for host use and is "readonly"
1262 * from the adapter's perspective.
 1263 * @control_0: Bits 0 to 6 - RTH_Bucket
1264 * Bit 7 - Own Descriptor ownership bit. This bit is set to 1
1265 * by the host, and is set to 0 by the adapter.
1266 * 0 - Host owns RxD and buffer.
1267 * 1 - The adapter owns RxD and buffer.
1268 * Bit 8 - Fast_Path_Eligible When set, indicates that the
1269 * received frame meets all of the criteria for fast path processing.
1270 * The required criteria are as follows:
1271 * !SYN &
1272 * (Transfer_Code == "Transfer OK") &
1273 * (!Is_IP_Fragment) &
1274 * ((Is_IPv4 & computed_L3_checksum == 0xFFFF) |
1275 * (Is_IPv6)) &
1276 * ((Is_TCP & computed_L4_checksum == 0xFFFF) |
1277 * (Is_UDP & (computed_L4_checksum == 0xFFFF |
 1278 * computed_L4_checksum == 0x0000)))
1279 * (same meaning for all RxD buffer modes)
1280 * Bit 9 - L3 Checksum Correct
1281 * Bit 10 - L4 Checksum Correct
1282 * Bit 11 - Reserved
1283 * Bit 12 to 15 - This field is written by the adapter. It is
1284 * used to report the status of the frame transfer to the host.
1285 * 0x0 - Transfer OK
1286 * 0x4 - RDA Failure During Transfer
1287 * 0x5 - Unparseable Packet, such as unknown IPv6 header.
1288 * 0x6 - Frame integrity error (FCS or ECC).
1289 * 0x7 - Buffer Size Error. The provided buffer(s) were not
1290 * appropriately sized and data loss occurred.
1291 * 0x8 - Internal ECC Error. RxD corrupted.
1292 * 0x9 - IPv4 Checksum error
1293 * 0xA - TCP/UDP Checksum error
1294 * 0xF - Unknown Error or Multiple Error. Indicates an
 1295 * unknown problem or that more than one transfer code applies.
1296 * Bit 16 - SYN The adapter sets this field to indicate that
1297 * the incoming frame contained a TCP segment with its SYN bit
1298 * set and its ACK bit NOT set. (same meaning for all RxD buffer
1299 * modes)
1300 * Bit 17 - Is ICMP
1301 * Bit 18 - RTH_SPDM_HIT Set to 1 if there was a match in the
1302 * Socket Pair Direct Match Table and the frame was steered based
1303 * on SPDM.
1304 * Bit 19 - RTH_IT_HIT Set to 1 if there was a match in the
1305 * Indirection Table and the frame was steered based on hash
1306 * indirection.
1307 * Bit 20 to 23 - RTH_HASH_TYPE Indicates the function (hash
1308 * type) that was used to calculate the hash.
 1309 * Bit 24 - IS_VLAN Set to '1' if the frame is VLAN
1310 * tagged.
1311 * Bit 25 to 26 - ETHER_ENCAP Reflects the Ethernet encapsulation
1312 * of the received frame.
1313 * 0x0 - Ethernet DIX
1314 * 0x1 - LLC
1315 * 0x2 - SNAP (includes Jumbo-SNAP)
1316 * 0x3 - IPX
1317 * Bit 27 - IS_IPV4 Set to '1' if the frame contains an IPv4 packet.
1318 * Bit 28 - IS_IPV6 Set to '1' if the frame contains an IPv6 packet.
1319 * Bit 29 - IS_IP_FRAG Set to '1' if the frame contains a fragmented
1320 * IP packet.
1321 * Bit 30 - IS_TCP Set to '1' if the frame contains a TCP segment.
1322 * Bit 31 - IS_UDP Set to '1' if the frame contains a UDP message.
1323 * Bit 32 to 47 - L3_Checksum[0:15] The IPv4 checksum value that
1324 * arrived with the frame. If the resulting computed IPv4 header
1325 * checksum for the frame did not produce the expected 0xFFFF value,
1326 * then the transfer code would be set to 0x9.
1327 * Bit 48 to 63 - L4_Checksum[0:15] The TCP/UDP checksum value that
1328 * arrived with the frame. If the resulting computed TCP/UDP checksum
1329 * for the frame did not produce the expected 0xFFFF value, then the
1330 * transfer code would be set to 0xA.
 1331 * @control_1: Bits 0 to 1 - Reserved
1332 * Bits 2 to 15 - Buffer0_Size.This field is set by the host and
1333 * eventually overwritten by the adapter. The host writes the
1334 * available buffer size in bytes when it passes the descriptor to
 1335 * the adapter. When a frame is delivered to the host, the adapter
 1336 * populates this field with the number of bytes written into the
 1337 * buffer. The largest supported buffer is 16,383 bytes.
1338 * Bit 16 to 47 - RTH Hash Value 32-bit RTH hash value. Only valid if
1339 * RTH_HASH_TYPE (Control_0, bits 20:23) is nonzero.
1340 * Bit 48 to 63 - VLAN_Tag[0:15] The contents of the variable portion
1341 * of the VLAN tag, if one was detected by the adapter. This field is
1342 * populated even if VLAN-tag stripping is enabled.
1343 * @buffer0_ptr: Pointer to buffer. This field is populated by the driver.
1344 *
1345 * One buffer mode RxD for ring structure
1346 */
1347struct vxge_hw_ring_rxd_1 {
1348 u64 host_control;
1349 u64 control_0;
1350#define VXGE_HW_RING_RXD_RTH_BUCKET_GET(ctrl0) vxge_bVALn(ctrl0, 0, 7)
1351
1352#define VXGE_HW_RING_RXD_LIST_OWN_ADAPTER vxge_mBIT(7)
1353
1354#define VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(ctrl0) vxge_bVALn(ctrl0, 8, 1)
1355
1356#define VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 9, 1)
1357
1358#define VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 10, 1)
1359
1360#define VXGE_HW_RING_RXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4)
1361#define VXGE_HW_RING_RXD_T_CODE(val) vxge_vBIT(val, 12, 4)
1362
1363#define VXGE_HW_RING_RXD_T_CODE_UNUSED VXGE_HW_RING_T_CODE_UNUSED
1364
1365#define VXGE_HW_RING_RXD_SYN_GET(ctrl0) vxge_bVALn(ctrl0, 16, 1)
1366
1367#define VXGE_HW_RING_RXD_IS_ICMP_GET(ctrl0) vxge_bVALn(ctrl0, 17, 1)
1368
1369#define VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 18, 1)
1370
1371#define VXGE_HW_RING_RXD_RTH_IT_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 19, 1)
1372
1373#define VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(ctrl0) vxge_bVALn(ctrl0, 20, 4)
1374
1375#define VXGE_HW_RING_RXD_IS_VLAN_GET(ctrl0) vxge_bVALn(ctrl0, 24, 1)
1376
1377#define VXGE_HW_RING_RXD_ETHER_ENCAP_GET(ctrl0) vxge_bVALn(ctrl0, 25, 2)
1378
1379#define VXGE_HW_RING_RXD_FRAME_PROTO_GET(ctrl0) vxge_bVALn(ctrl0, 27, 5)
1380
1381#define VXGE_HW_RING_RXD_L3_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 32, 16)
1382
1383#define VXGE_HW_RING_RXD_L4_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 48, 16)
1384
1385 u64 control_1;
1386
1387#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(ctrl1) vxge_bVALn(ctrl1, 2, 14)
1388#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE(val) vxge_vBIT(val, 2, 14)
1389#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK vxge_vBIT(0x3FFF, 2, 14)
1390
1391#define VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(ctrl1) vxge_bVALn(ctrl1, 16, 32)
1392
1393#define VXGE_HW_RING_RXD_VLAN_TAG_GET(ctrl1) vxge_bVALn(ctrl1, 48, 16)
1394
1395 u64 buffer0_ptr;
1396};
1397
1398enum vxge_hw_rth_algoritms {
1399 RTH_ALG_JENKINS = 0,
1400 RTH_ALG_MS_RSS = 1,
1401 RTH_ALG_CRC32C = 2
1402};
1403
1404/**
1405 * struct vxge_hw_rth_hash_types - RTH hash types.
1406 * @hash_type_tcpipv4_en: Enables RTH field type HashTypeTcpIPv4
1407 * @hash_type_ipv4_en: Enables RTH field type HashTypeIPv4
1408 * @hash_type_tcpipv6_en: Enables RTH field type HashTypeTcpIPv6
1409 * @hash_type_ipv6_en: Enables RTH field type HashTypeIPv6
1410 * @hash_type_tcpipv6ex_en: Enables RTH field type HashTypeTcpIPv6Ex
1411 * @hash_type_ipv6ex_en: Enables RTH field type HashTypeIPv6Ex
1412 *
 1413 * Used to pass RTH hash types to vxge_hw_vpath_rts_rth_set().
1414 *
1415 * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get().
1416 */
1417struct vxge_hw_rth_hash_types {
1418 u8 hash_type_tcpipv4_en;
1419 u8 hash_type_ipv4_en;
1420 u8 hash_type_tcpipv6_en;
1421 u8 hash_type_ipv6_en;
1422 u8 hash_type_tcpipv6ex_en;
1423 u8 hash_type_ipv6ex_en;
1424};
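A minimal sketch (editor's illustration) of enabling TCP/IPv4 and plain IPv4 hashing through vxge_hw_vpath_rts_rth_set(), declared later in this header; example_rth_enable is a hypothetical name and the bucket size of 8 is an assumed example value:

static enum vxge_hw_status
example_rth_enable(struct __vxge_hw_vpath_handle *vp)
{
	struct vxge_hw_rth_hash_types types = {
		.hash_type_tcpipv4_en	= 1,
		.hash_type_ipv4_en	= 1,
	};

	/* Jenkins hash over the enabled field types; bucket_size of 8
	 * is illustrative only. */
	return vxge_hw_vpath_rts_rth_set(vp, RTH_ALG_JENKINS, &types, 8);
}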
1425
1426u32
1427vxge_hw_device_debug_mask_get(struct __vxge_hw_device *devh);
1428
1429void vxge_hw_device_debug_set(
1430 struct __vxge_hw_device *devh,
1431 enum vxge_debug_level level,
1432 u32 mask);
1433
1434u32
1435vxge_hw_device_error_level_get(struct __vxge_hw_device *devh);
1436
1437u32
1438vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh);
1439
1440u32
1441vxge_hw_device_debug_mask_get(struct __vxge_hw_device *devh);
1442
1443/**
1444 * vxge_hw_ring_rxd_size_get - Get the size of ring descriptor.
 1445 * @buf_mode: Buffer mode (only 1 buffer mode is currently supported)
 1446 *
 1447 * This function returns the size of an RxD for the given buffer mode
1448 */
1449static inline u32 vxge_hw_ring_rxd_size_get(u32 buf_mode)
1450{
1451 return sizeof(struct vxge_hw_ring_rxd_1);
1452}
1453
1454/**
1455 * vxge_hw_ring_rxds_per_block_get - Get the number of rxds per block.
1456 * @buf_mode: Buffer mode (1 buffer mode only)
1457 *
 1458 * This function returns the number of RxDs per RxD block for the given buffer mode
1459 */
1460static inline u32 vxge_hw_ring_rxds_per_block_get(u32 buf_mode)
1461{
1462 return (u32)((VXGE_HW_BLOCK_SIZE-16) /
1463 sizeof(struct vxge_hw_ring_rxd_1));
1464}
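For instance, assuming VXGE_HW_BLOCK_SIZE is the usual 4096 bytes (an assumption; the constant is defined elsewhere in this driver) and given the 32-byte one-buffer RxD above (four u64 words), this works out to (4096 - 16) / 32 = 127 RxDs per block; the 16 reserved bytes presumably hold the block's link to the next RxD block.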
1465
1466/**
1467 * vxge_hw_ring_rxd_1b_set - Prepare 1-buffer-mode descriptor.
1468 * @rxdh: Descriptor handle.
1469 * @dma_pointer: DMA address of a single receive buffer this descriptor
1470 * should carry. Note that by the time vxge_hw_ring_rxd_1b_set is called,
 1471 * the receive buffer must already be mapped to the device.
1472 * @size: Size of the receive @dma_pointer buffer.
1473 *
1474 * Prepare 1-buffer-mode Rx descriptor for posting
1475 * (via vxge_hw_ring_rxd_post()).
1476 *
 1477 * This inline helper-function does not return a value and always
1478 * succeeds.
1479 *
1480 */
1481static inline
1482void vxge_hw_ring_rxd_1b_set(
1483 void *rxdh,
1484 dma_addr_t dma_pointer,
1485 u32 size)
1486{
1487 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1488 rxdp->buffer0_ptr = dma_pointer;
1489 rxdp->control_1 &= ~VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK;
1490 rxdp->control_1 |= VXGE_HW_RING_RXD_1_BUFFER0_SIZE(size);
1491}
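Hypothetical usage (editor's illustration): a driver's rxd_init callback maps a freshly allocated buffer for the device and hands it to the descriptor. my_rxd_init and struct my_rx_ctx are assumed names, not part of this interface:

struct my_rx_ctx {			/* hypothetical driver context */
	struct pci_dev *pdev;
	void *buf;
	u32 buf_size;
};

static enum vxge_hw_status my_rxd_init(void *rxdh, void *userdata)
{
	struct my_rx_ctx *ctx = userdata;
	dma_addr_t dma;

	/* Map the buffer before handing it to the descriptor. */
	dma = pci_map_single(ctx->pdev, ctx->buf, ctx->buf_size,
			     PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(ctx->pdev, dma))
		return VXGE_HW_FAIL;

	vxge_hw_ring_rxd_1b_set(rxdh, dma, ctx->buf_size);
	return VXGE_HW_OK;
}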
1492
1493/**
1494 * vxge_hw_ring_rxd_1b_get - Get data from the completed 1-buf
1495 * descriptor.
 1496 * @ring_handle: Ring handle.
 1497 * @rxdh: Descriptor handle.
 1498 * @pkt_length: Length (in bytes) of the data in the buffer this
 1499 * descriptor carries, taken from the descriptor's
 1500 * Buffer0_Size field. Returned by HW.
 1501 *
1502 * Retrieve protocol data from the completed 1-buffer-mode Rx descriptor.
 1503 * This inline helper-function uses the completed descriptor to populate
 1504 * the "out" parameters. The function always succeeds.
1505 *
1506 */
1507static inline
1508void vxge_hw_ring_rxd_1b_get(
1509 struct __vxge_hw_ring *ring_handle,
1510 void *rxdh,
1511 u32 *pkt_length)
1512{
1513 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1514
1515 *pkt_length =
1516 (u32)VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxdp->control_1);
1517}
1518
1519/**
1520 * vxge_hw_ring_rxd_1b_info_get - Get extended information associated with
1521 * a completed receive descriptor for 1b mode.
 1522 * @ring_handle: Ring handle.
1523 * @rxdh: Descriptor handle.
1524 * @rxd_info: Descriptor information
1525 *
1526 * Retrieve extended information associated with a completed receive descriptor.
1527 *
1528 */
1529static inline
1530void vxge_hw_ring_rxd_1b_info_get(
1531 struct __vxge_hw_ring *ring_handle,
1532 void *rxdh,
1533 struct vxge_hw_ring_rxd_info *rxd_info)
1534{
1535
1536 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1537 rxd_info->syn_flag =
1538 (u32)VXGE_HW_RING_RXD_SYN_GET(rxdp->control_0);
1539 rxd_info->is_icmp =
1540 (u32)VXGE_HW_RING_RXD_IS_ICMP_GET(rxdp->control_0);
1541 rxd_info->fast_path_eligible =
1542 (u32)VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(rxdp->control_0);
1543 rxd_info->l3_cksum_valid =
1544 (u32)VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(rxdp->control_0);
1545 rxd_info->l3_cksum =
1546 (u32)VXGE_HW_RING_RXD_L3_CKSUM_GET(rxdp->control_0);
1547 rxd_info->l4_cksum_valid =
1548 (u32)VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(rxdp->control_0);
1549 rxd_info->l4_cksum =
 1550		(u32)VXGE_HW_RING_RXD_L4_CKSUM_GET(rxdp->control_0);
1551 rxd_info->frame =
1552 (u32)VXGE_HW_RING_RXD_ETHER_ENCAP_GET(rxdp->control_0);
1553 rxd_info->proto =
1554 (u32)VXGE_HW_RING_RXD_FRAME_PROTO_GET(rxdp->control_0);
1555 rxd_info->is_vlan =
1556 (u32)VXGE_HW_RING_RXD_IS_VLAN_GET(rxdp->control_0);
1557 rxd_info->vlan =
1558 (u32)VXGE_HW_RING_RXD_VLAN_TAG_GET(rxdp->control_1);
1559 rxd_info->rth_bucket =
1560 (u32)VXGE_HW_RING_RXD_RTH_BUCKET_GET(rxdp->control_0);
1561 rxd_info->rth_it_hit =
1562 (u32)VXGE_HW_RING_RXD_RTH_IT_HIT_GET(rxdp->control_0);
1563 rxd_info->rth_spdm_hit =
1564 (u32)VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(rxdp->control_0);
1565 rxd_info->rth_hash_type =
1566 (u32)VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(rxdp->control_0);
1567 rxd_info->rth_value =
1568 (u32)VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(rxdp->control_1);
1569}
1570
1571/**
1572 * vxge_hw_ring_rxd_private_get - Get driver private per-descriptor data
 1573 * of a 1-buffer or 3-buffer mode ring.
1574 * @rxdh: Descriptor handle.
1575 *
1576 * Returns: private driver info associated with the descriptor.
 1577 * The driver requests per-descriptor space via struct vxge_hw_ring_attr.
1578 *
1579 */
1580static inline void *vxge_hw_ring_rxd_private_get(void *rxdh)
1581{
1582 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1583 return (void *)(size_t)rxdp->host_control;
1584}
1585
1586/**
1587 * vxge_hw_fifo_txdl_cksum_set_bits - Offload checksum.
1588 * @txdlh: Descriptor handle.
1589 * @cksum_bits: Specifies which checksums are to be offloaded: IPv4,
1590 * and/or TCP and/or UDP.
1591 *
1592 * Ask Titan to calculate IPv4 & transport checksums for _this_ transmit
1593 * descriptor.
1594 * This API is part of the preparation of the transmit descriptor for posting
1595 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
1596 * vxge_hw_fifo_txdl_mss_set(), vxge_hw_fifo_txdl_buffer_set_aligned(),
1597 * and vxge_hw_fifo_txdl_buffer_set().
1598 * All these APIs fill in the fields of the fifo descriptor,
1599 * in accordance with the Titan specification.
1600 *
1601 */
1602static inline void vxge_hw_fifo_txdl_cksum_set_bits(void *txdlh, u64 cksum_bits)
1603{
1604 struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
1605 txdp->control_1 |= cksum_bits;
1606}
1607
1608/**
1609 * vxge_hw_fifo_txdl_mss_set - Set MSS.
1610 * @txdlh: Descriptor handle.
1611 * @mss: MSS size for _this_ TCP connection. Passed by TCP stack down to the
1612 * driver, which in turn inserts the MSS into the @txdlh.
1613 *
1614 * This API is part of the preparation of the transmit descriptor for posting
1615 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
1616 * vxge_hw_fifo_txdl_buffer_set(), vxge_hw_fifo_txdl_buffer_set_aligned(),
1617 * and vxge_hw_fifo_txdl_cksum_set_bits().
1618 * All these APIs fill in the fields of the fifo descriptor,
1619 * in accordance with the Titan specification.
1620 *
1621 */
1622static inline void vxge_hw_fifo_txdl_mss_set(void *txdlh, int mss)
1623{
1624 struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
1625
1626 txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_EN;
1627 txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_MSS(mss);
1628}
1629
1630/**
1631 * vxge_hw_fifo_txdl_vlan_set - Set VLAN tag.
1632 * @txdlh: Descriptor handle.
1633 * @vlan_tag: 16bit VLAN tag.
1634 *
1635 * Insert VLAN tag into specified transmit descriptor.
1636 * The actual insertion of the tag into outgoing frame is done by the hardware.
1637 */
1638static inline void vxge_hw_fifo_txdl_vlan_set(void *txdlh, u16 vlan_tag)
1639{
1640 struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
1641
1642 txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_ENABLE;
1643 txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_TAG(vlan_tag);
1644}
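Putting the three helpers together, a sketch (editor's illustration; example_txdl_prepare is a hypothetical name) of the "preparation" sequence the comments above describe, applied to a reserved descriptor before it is posted:

static void example_txdl_prepare(void *txdlh, int mss, u16 vlan_tag)
{
	/* IPv4 header and TCP checksum offload for this frame. */
	vxge_hw_fifo_txdl_cksum_set_bits(txdlh,
			VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
			VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN);
	vxge_hw_fifo_txdl_mss_set(txdlh, mss);
	vxge_hw_fifo_txdl_vlan_set(txdlh, vlan_tag);
	/* ...then vxge_hw_fifo_txdl_buffer_set() per fragment and
	 * vxge_hw_fifo_txdl_post() to hand the list to the adapter. */
}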
1645
1646/**
1647 * vxge_hw_fifo_txdl_private_get - Retrieve per-descriptor private data.
1648 * @txdlh: Descriptor handle.
1649 *
1650 * Retrieve per-descriptor private data.
1651 * Note that driver requests per-descriptor space via
1652 * struct vxge_hw_fifo_attr passed to
1653 * vxge_hw_vpath_open().
1654 *
1655 * Returns: private driver data associated with the descriptor.
1656 */
1657static inline void *vxge_hw_fifo_txdl_private_get(void *txdlh)
1658{
1659 struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
1660
1661 return (void *)(size_t)txdp->host_control;
1662}
1663
1664/**
1665 * struct vxge_hw_ring_attr - Ring open "template".
1666 * @callback: Ring completion callback. HW invokes the callback when there
1667 * are new completions on that ring. In many implementations
1668 * the @callback executes in the hw interrupt context.
1669 * @rxd_init: Ring's descriptor-initialize callback.
1670 * See vxge_hw_ring_rxd_init_f{}.
1671 * If not NULL, HW invokes the callback when opening
1672 * the ring.
1673 * @rxd_term: Ring's descriptor-terminate callback. If not NULL,
1674 * HW invokes the callback when closing the corresponding ring.
1675 * See also vxge_hw_ring_rxd_term_f{}.
1676 * @userdata: User-defined "context" of _that_ ring. Passed back to the
1677 * user as one of the @callback, @rxd_init, and @rxd_term arguments.
1678 * @per_rxd_space: If specified (i.e., greater than zero): extra space
 1679 * reserved by HW for each receive descriptor.
 1680 * Can be used to store,
 1681 * and retrieve on completion, information specific
 1682 * to the driver.
1683 *
1684 * Ring open "template". User fills the structure with ring
1685 * attributes and passes it to vxge_hw_vpath_open().
1686 */
1687struct vxge_hw_ring_attr {
1688 enum vxge_hw_status (*callback)(
1689 struct __vxge_hw_ring *ringh,
1690 void *rxdh,
1691 u8 t_code,
1692 void *userdata);
1693
1694 enum vxge_hw_status (*rxd_init)(
1695 void *rxdh,
1696 void *userdata);
1697
1698 void (*rxd_term)(
1699 void *rxdh,
1700 enum vxge_hw_rxd_state state,
1701 void *userdata);
1702
1703 void *userdata;
1704 u32 per_rxd_space;
1705};
1706
1707/**
1708 * function vxge_hw_fifo_callback_f - FIFO callback.
 1709 * @vpath_handle: Virtual path whose fifo contains one or more completed
 1710 * descriptors.
1711 * @txdlh: First completed descriptor.
1712 * @txdl_priv: Pointer to per txdl space allocated
1713 * @t_code: Transfer code, as per Titan User Guide.
1714 * Returned by HW.
1715 * @host_control: Opaque 64bit data stored by driver inside the Titan
1716 * descriptor prior to posting the latter on the fifo
1717 * via vxge_hw_fifo_txdl_post(). The @host_control is returned
1718 * as is to the driver with each completed descriptor.
1719 * @userdata: Opaque per-fifo data specified at fifo open
1720 * time, via vxge_hw_vpath_open().
1721 *
1722 * Fifo completion callback (type declaration). A single per-fifo
1723 * callback is specified at fifo open time, via
1724 * vxge_hw_vpath_open(). Typically gets called as part of the processing
1725 * of the Interrupt Service Routine.
1726 *
1727 * Fifo callback gets called by HW if, and only if, there is at least
 1728 * one new completion on a given fifo. Upon processing the first @txdlh the
 1729 * driver is expected to continue consuming completions using:
1730 * - vxge_hw_fifo_txdl_next_completed()
1731 *
1732 * Note that failure to process new completions in a timely fashion
1733 * leads to VXGE_HW_INF_OUT_OF_DESCRIPTORS condition.
1734 *
 1735 * Non-zero @t_code means failure to process the transmit descriptor.
1736 *
1737 * In the "transmit" case the failure could happen, for instance, when the
1738 * link is down, in which case Titan completes the descriptor because it
1739 * is not able to send the data out.
1740 *
1741 * For details please refer to Titan User Guide.
1742 *
1743 * See also: vxge_hw_fifo_txdl_next_completed(), vxge_hw_fifo_txdl_term_f{}.
1744 */
1745/**
1746 * function vxge_hw_fifo_txdl_term_f - Terminate descriptor callback.
1747 * @txdlh: First completed descriptor.
1748 * @txdl_priv: Pointer to per txdl space allocated
1749 * @state: One of the enum vxge_hw_txdl_state{} enumerated states.
1750 * @userdata: Per-fifo user data (a.k.a. context) specified at
1751 * fifo open time, via vxge_hw_vpath_open().
1752 *
1753 * Terminate descriptor callback. Unless NULL is specified in the
 1754 * struct vxge_hw_fifo_attr{} structure passed to vxge_hw_vpath_open(),
 1755 * HW invokes the callback as part of closing the fifo, prior to
1756 * de-allocating the ring and associated data structures
1757 * (including descriptors).
 1758 * The driver should use the callback to (for instance) unmap
1759 * and free DMA data buffers associated with the posted (state =
1760 * VXGE_HW_TXDL_STATE_POSTED) descriptors,
1761 * as well as other relevant cleanup functions.
1762 *
1763 * See also: struct vxge_hw_fifo_attr{}
1764 */
1765/**
1766 * struct vxge_hw_fifo_attr - Fifo open "template".
1767 * @callback: Fifo completion callback. HW invokes the callback when there
1768 * are new completions on that fifo. In many implementations
1769 * the @callback executes in the hw interrupt context.
1770 * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
1771 * HW invokes the callback when closing the corresponding fifo.
1772 * See also vxge_hw_fifo_txdl_term_f{}.
1773 * @userdata: User-defined "context" of _that_ fifo. Passed back to the
1774 * user as one of the @callback, and @txdl_term arguments.
1775 * @per_txdl_space: If specified (i.e., greater than zero): extra space
1776 * reserved by HW per each transmit descriptor. Can be used to
1777 * store, and retrieve on completion, information specific
1778 * to the driver.
1779 *
1780 * Fifo open "template". User fills the structure with fifo
1781 * attributes and passes it to vxge_hw_vpath_open().
1782 */
1783struct vxge_hw_fifo_attr {
1784
1785 enum vxge_hw_status (*callback)(
1786 struct __vxge_hw_fifo *fifo_handle,
1787 void *txdlh,
1788 enum vxge_hw_fifo_tcode t_code,
1789 void *userdata,
1790 void **skb_ptr);
1791
1792 void (*txdl_term)(
1793 void *txdlh,
1794 enum vxge_hw_txdl_state state,
1795 void *userdata);
1796
1797 void *userdata;
1798 u32 per_txdl_space;
1799};
1800
1801/**
1802 * struct vxge_hw_vpath_attr - Attributes of virtual path
1803 * @vp_id: Identifier of Virtual Path
1804 * @ring_attr: Attributes of ring for non-offload receive
1805 * @fifo_attr: Attributes of fifo for non-offload transmit
1806 *
1807 * Attributes of virtual path. This structure is passed as parameter
1808 * to the vxge_hw_vpath_open() routine to set the attributes of ring and fifo.
1809 */
1810struct vxge_hw_vpath_attr {
1811 u32 vp_id;
1812 struct vxge_hw_ring_attr ring_attr;
1813 struct vxge_hw_fifo_attr fifo_attr;
1814};
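A minimal sketch (editor's illustration) of filling the attribute structures and opening a virtual path; example_vpath_open and the my_* callbacks are hypothetical driver functions matching the callback prototypes in the two attr structures above:

static enum vxge_hw_status example_vpath_open(struct __vxge_hw_device *devh,
		u32 vp_id, void *drv_ctx, struct __vxge_hw_vpath_handle **vph)
{
	struct vxge_hw_vpath_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.vp_id = vp_id;

	attr.ring_attr.callback	 = my_rx_done;
	attr.ring_attr.rxd_init	 = my_rxd_init;
	attr.ring_attr.rxd_term	 = my_rxd_term;
	attr.ring_attr.userdata	 = drv_ctx;

	attr.fifo_attr.callback	 = my_tx_done;
	attr.fifo_attr.txdl_term = my_txdl_term;
	attr.fifo_attr.userdata	 = drv_ctx;

	return vxge_hw_vpath_open(devh, &attr, vph);
}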
1815
1816enum vxge_hw_status
1817__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
1818 struct __vxge_hw_blockpool *blockpool,
1819 u32 pool_size,
1820 u32 pool_max);
1821
1822void
1823__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool);
1824
1825struct __vxge_hw_blockpool_entry *
1826__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
1827 u32 size);
1828
1829void
1830__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
1831 struct __vxge_hw_blockpool_entry *entry);
1832
1833void *
1834__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
1835 u32 size,
1836 struct vxge_hw_mempool_dma *dma_object);
1837
1838void
1839__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
1840 void *memblock,
1841 u32 size,
1842 struct vxge_hw_mempool_dma *dma_object);
1843
1844enum vxge_hw_status
1845__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
1846
1847enum vxge_hw_status
1848__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
1849
1850enum vxge_hw_status
1851vxge_hw_mgmt_device_config(struct __vxge_hw_device *devh,
1852 struct vxge_hw_device_config *dev_config, int size);
1853
1854enum vxge_hw_status __devinit vxge_hw_device_hw_info_get(
1855 void __iomem *bar0,
1856 struct vxge_hw_device_hw_info *hw_info);
1857
1858enum vxge_hw_status
1859__vxge_hw_vpath_fw_ver_get(
1860 u32 vp_id,
1861 struct vxge_hw_vpath_reg __iomem *vpath_reg,
1862 struct vxge_hw_device_hw_info *hw_info);
1863
1864enum vxge_hw_status
1865__vxge_hw_vpath_card_info_get(
1866 u32 vp_id,
1867 struct vxge_hw_vpath_reg __iomem *vpath_reg,
1868 struct vxge_hw_device_hw_info *hw_info);
1869
1870enum vxge_hw_status __devinit vxge_hw_device_config_default_get(
1871 struct vxge_hw_device_config *device_config);
1872
1873/**
1874 * vxge_hw_device_link_state_get - Get link state.
1875 * @devh: HW device handle.
1876 *
1877 * Get link state.
1878 * Returns: link state.
1879 */
1880static inline
1881enum vxge_hw_device_link_state vxge_hw_device_link_state_get(
1882 struct __vxge_hw_device *devh)
1883{
1884 return devh->link_state;
1885}
1886
1887void vxge_hw_device_terminate(struct __vxge_hw_device *devh);
1888
1889const u8 *
1890vxge_hw_device_serial_number_get(struct __vxge_hw_device *devh);
1891
1892u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *devh);
1893
1894const u8 *
1895vxge_hw_device_product_name_get(struct __vxge_hw_device *devh);
1896
1897enum vxge_hw_status __devinit vxge_hw_device_initialize(
1898 struct __vxge_hw_device **devh,
1899 struct vxge_hw_device_attr *attr,
1900 struct vxge_hw_device_config *device_config);
1901
1902enum vxge_hw_status vxge_hw_device_getpause_data(
1903 struct __vxge_hw_device *devh,
1904 u32 port,
1905 u32 *tx,
1906 u32 *rx);
1907
1908enum vxge_hw_status vxge_hw_device_setpause_data(
1909 struct __vxge_hw_device *devh,
1910 u32 port,
1911 u32 tx,
1912 u32 rx);
1913
1914static inline void *vxge_os_dma_malloc(struct pci_dev *pdev,
1915 unsigned long size,
1916 struct pci_dev **p_dmah,
1917 struct pci_dev **p_dma_acch)
1918{
1919 gfp_t flags;
1920 void *vaddr;
1921 unsigned long misaligned = 0;
1922 *p_dma_acch = *p_dmah = NULL;
1923
1924 if (in_interrupt())
1925 flags = GFP_ATOMIC | GFP_DMA;
1926 else
1927 flags = GFP_KERNEL | GFP_DMA;
1928
 1929	size += VXGE_CACHE_LINE_SIZE;	/* room to advance to a cache-line boundary */
 1930
 1931	vaddr = kmalloc(size, flags);
 1932	if (vaddr == NULL)
 1933		return vaddr;
 1934	misaligned = (unsigned long)VXGE_ALIGN(*((u64 *)&vaddr),
 1935			VXGE_CACHE_LINE_SIZE);	/* shift needed to reach alignment */
 1936	*(unsigned long *)p_dma_acch = misaligned;	/* saved for vxge_os_dma_free() */
 1937	vaddr = (void *)((u8 *)vaddr + misaligned);	/* return the aligned address */
1938 return vaddr;
1939}
1940
1941extern void vxge_hw_blockpool_block_add(
1942 struct __vxge_hw_device *devh,
1943 void *block_addr,
1944 u32 length,
1945 struct pci_dev *dma_h,
1946 struct pci_dev *acc_handle);
1947
1948static inline void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
1949 unsigned long size)
1950{
1951 gfp_t flags;
1952 void *vaddr;
1953
1954 if (in_interrupt())
1955 flags = GFP_ATOMIC | GFP_DMA;
1956 else
1957 flags = GFP_KERNEL | GFP_DMA;
1958
 1959	vaddr = kmalloc(size, flags);
 1960	if (vaddr == NULL)	/* don't add a failed allocation to the pool */
 1961		return;
 1962
 1963	vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
1962}
1963
1964static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
1965 struct pci_dev **p_dma_acch)
1966{
1967 unsigned long misaligned = *(unsigned long *)p_dma_acch;
1968 u8 *tmp = (u8 *)vaddr;
1969 tmp -= misaligned;
1970 kfree((void *)tmp);
1971}
1972
1973/*
 1974 * __vxge_hw_mempool_item_priv - return a pointer to the per-item private space
1975 */
1976static inline void*
1977__vxge_hw_mempool_item_priv(
1978 struct vxge_hw_mempool *mempool,
1979 u32 memblock_idx,
1980 void *item,
1981 u32 *memblock_item_idx)
1982{
1983 ptrdiff_t offset;
1984 void *memblock = mempool->memblocks_arr[memblock_idx];
1985
1986
1987 offset = (u32)((u8 *)item - (u8 *)memblock);
1988 vxge_assert(offset >= 0 && (u32)offset < mempool->memblock_size);
1989
1990 (*memblock_item_idx) = (u32) offset / mempool->item_size;
1991 vxge_assert((*memblock_item_idx) < mempool->items_per_memblock);
1992
1993 return (u8 *)mempool->memblocks_priv_arr[memblock_idx] +
1994 (*memblock_item_idx) * mempool->items_priv_size;
1995}
1996
1997enum vxge_hw_status
1998__vxge_hw_mempool_grow(
1999 struct vxge_hw_mempool *mempool,
2000 u32 num_allocate,
2001 u32 *num_allocated);
2002
2003struct vxge_hw_mempool*
2004__vxge_hw_mempool_create(
2005 struct __vxge_hw_device *devh,
2006 u32 memblock_size,
2007 u32 item_size,
2008 u32 private_size,
2009 u32 items_initial,
2010 u32 items_max,
2011 struct vxge_hw_mempool_cbs *mp_callback,
2012 void *userdata);
2013
2014struct __vxge_hw_channel*
2015__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
2016 enum __vxge_hw_channel_type type, u32 length,
2017 u32 per_dtr_space, void *userdata);
2018
2019void
2020__vxge_hw_channel_free(
2021 struct __vxge_hw_channel *channel);
2022
2023enum vxge_hw_status
2024__vxge_hw_channel_initialize(
2025 struct __vxge_hw_channel *channel);
2026
2027enum vxge_hw_status
2028__vxge_hw_channel_reset(
2029 struct __vxge_hw_channel *channel);
2030
2031/*
 2032 * __vxge_hw_fifo_txdl_priv - Return a pointer to the per-TxDL private
 2033 * data for the given TxD of the fifo.
 2034 * @fifo: Fifo
 2035 * @txdp: Pointer to a TxD
2036 */
2037static inline struct __vxge_hw_fifo_txdl_priv *
2038__vxge_hw_fifo_txdl_priv(
2039 struct __vxge_hw_fifo *fifo,
2040 struct vxge_hw_fifo_txd *txdp)
2041{
2042 return (struct __vxge_hw_fifo_txdl_priv *)
2043 (((char *)((ulong)txdp->host_control)) +
2044 fifo->per_txdl_space);
2045}
2046
2047enum vxge_hw_status vxge_hw_vpath_open(
2048 struct __vxge_hw_device *devh,
2049 struct vxge_hw_vpath_attr *attr,
2050 struct __vxge_hw_vpath_handle **vpath_handle);
2051
2052enum vxge_hw_status
2053__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog);
2054
2055enum vxge_hw_status vxge_hw_vpath_close(
2056 struct __vxge_hw_vpath_handle *vpath_handle);
2057
2058enum vxge_hw_status
2059vxge_hw_vpath_reset(
2060 struct __vxge_hw_vpath_handle *vpath_handle);
2061
2062enum vxge_hw_status
2063vxge_hw_vpath_recover_from_reset(
2064 struct __vxge_hw_vpath_handle *vpath_handle);
2065
2066void
2067vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp);
2068
2069enum vxge_hw_status
2070vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ringh);
2071
2072enum vxge_hw_status vxge_hw_vpath_mtu_set(
2073 struct __vxge_hw_vpath_handle *vpath_handle,
2074 u32 new_mtu);
2075
2076enum vxge_hw_status vxge_hw_vpath_stats_enable(
2077 struct __vxge_hw_vpath_handle *vpath_handle);
2078
2079enum vxge_hw_status
2080__vxge_hw_vpath_stats_access(
2081 struct __vxge_hw_virtualpath *vpath,
2082 u32 operation,
2083 u32 offset,
2084 u64 *stat);
2085
2086enum vxge_hw_status
2087__vxge_hw_vpath_xmac_tx_stats_get(
2088 struct __vxge_hw_virtualpath *vpath,
2089 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
2090
2091enum vxge_hw_status
2092__vxge_hw_vpath_xmac_rx_stats_get(
2093 struct __vxge_hw_virtualpath *vpath,
2094 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
2095
2096enum vxge_hw_status
2097__vxge_hw_vpath_stats_get(
2098 struct __vxge_hw_virtualpath *vpath,
2099 struct vxge_hw_vpath_stats_hw_info *hw_stats);
2100
2101void
2102vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);
2103
2104enum vxge_hw_status
2105__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config);
2106
2107void
2108__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
2109
2110enum vxge_hw_status
2111__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
2112
2113enum vxge_hw_status
2114__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg);
2115
2116enum vxge_hw_status
2117__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
2118 struct vxge_hw_vpath_reg __iomem *vpath_reg);
2119
2120enum vxge_hw_status
2121__vxge_hw_device_register_poll(
2122 void __iomem *reg,
2123 u64 mask, u32 max_millis);
2124
2125#ifndef readq
2126static inline u64 readq(void __iomem *addr)
2127{
2128 u64 ret = 0;
2129 ret = readl(addr + 4);
2130 ret <<= 32;
2131 ret |= readl(addr);
2132
2133 return ret;
2134}
2135#endif
2136
2137#ifndef writeq
2138static inline void writeq(u64 val, void __iomem *addr)
2139{
2140 writel((u32) (val), addr);
2141 writel((u32) (val >> 32), (addr + 4));
2142}
2143#endif
2144
2145static inline void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr)
2146{
2147 writel(val, addr + 4);
2148}
2149
2150static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
2151{
2152 writel(val, addr);
2153}
2154
2155static inline enum vxge_hw_status
2156__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
2157 u64 mask, u32 max_millis)
2158{
2159 enum vxge_hw_status status = VXGE_HW_OK;
2160
2161 __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
2162 wmb();
2163 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
2164 wmb();
2165
2166 status = __vxge_hw_device_register_poll(addr, mask, max_millis);
2167 return status;
2168}
2169
2170struct vxge_hw_toc_reg __iomem *
2171__vxge_hw_device_toc_get(void __iomem *bar0);
2172
2173enum vxge_hw_status
2174__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
2175
2176void
2177__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
2178
2179void
2180__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
2181
2182enum vxge_hw_status
2183vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off);
2184
2185enum vxge_hw_status
2186__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
2187
2188enum vxge_hw_status
2189__vxge_hw_vpath_pci_read(
2190 struct __vxge_hw_virtualpath *vpath,
2191 u32 phy_func_0,
2192 u32 offset,
2193 u32 *val);
2194
2195enum vxge_hw_status
2196__vxge_hw_vpath_addr_get(
2197 u32 vp_id,
2198 struct vxge_hw_vpath_reg __iomem *vpath_reg,
2199 u8 (macaddr)[ETH_ALEN],
2200 u8 (macaddr_mask)[ETH_ALEN]);
2201
2202u32
2203__vxge_hw_vpath_func_id_get(
2204 u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
2205
2206enum vxge_hw_status
2207__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
2208
2209/**
2210 * vxge_debug
2211 * @level: level of debug verbosity.
2212 * @mask: mask for the debug
2213 * @buf: Circular buffer for tracing
2214 * @fmt: printf like format string
2215 *
 2216 * Provides logging facilities. Can be customized on a per-module
 2217 * basis and/or with debug levels. Input parameters, except
2218 * module and level, are the same as posix printf. This function
2219 * may be compiled out if DEBUG macro was never defined.
2220 * See also: enum vxge_debug_level{}.
2221 */
2222
2223#define vxge_trace_aux(level, mask, fmt, ...) \
2224{\
2225 vxge_os_vaprintf(level, mask, fmt, __VA_ARGS__);\
2226}
2227
2228#define vxge_debug(module, level, mask, fmt, ...) { \
2229if ((level >= VXGE_TRACE && ((module & VXGE_DEBUG_TRACE_MASK) == module)) || \
2230 (level >= VXGE_ERR && ((module & VXGE_DEBUG_ERR_MASK) == module))) {\
2231 if ((mask & VXGE_DEBUG_MASK) == mask)\
2232 vxge_trace_aux(level, mask, fmt, __VA_ARGS__); \
2233} \
2234}
2235
2236#if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK)
2237#define vxge_debug_ll(level, mask, fmt, ...) \
2238{\
2239 vxge_debug(VXGE_COMPONENT_LL, level, mask, fmt, __VA_ARGS__);\
2240}
2241
2242#else
2243#define vxge_debug_ll(level, mask, fmt, ...)
2244#endif
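Hypothetical usage (editor's illustration) of the macro defined above: an error-level message for the link-layer component. Passing VXGE_DEBUG_MASK as the mask argument trivially satisfies the macro's mask check; vp_id is an assumed local variable:

	vxge_debug_ll(VXGE_ERR, VXGE_DEBUG_MASK,
		      "%s: vpath %u reset failed", __func__, vp_id);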
2245
2246enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
2247 struct __vxge_hw_vpath_handle **vpath_handles,
2248 u32 vpath_count,
2249 u8 *mtable,
2250 u8 *itable,
2251 u32 itable_size);
2252
2253enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
2254 struct __vxge_hw_vpath_handle *vpath_handle,
2255 enum vxge_hw_rth_algoritms algorithm,
2256 struct vxge_hw_rth_hash_types *hash_type,
2257 u16 bucket_size);
2258
2259#endif
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
new file mode 100644
index 000000000000..c6736b972635
--- /dev/null
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -0,0 +1,1148 @@
1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-ethtool.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2009 Neterion Inc.
13 ******************************************************************************/
 14#include <linux/ethtool.h>
15#include <linux/pci.h>
16#include <linux/etherdevice.h>
17
18#include "vxge-ethtool.h"
19
20/**
21 * vxge_ethtool_sset - Sets different link parameters.
22 * @dev: device pointer.
23 * @info: pointer to the structure with parameters given by ethtool to set
24 * link information.
25 *
26 * The function sets different link parameters provided by the user onto
27 * the NIC.
28 * Return value:
29 * 0 on success.
30 */
31
32static int vxge_ethtool_sset(struct net_device *dev, struct ethtool_cmd *info)
33{
34 /* We currently only support 10Gb/FULL */
35 if ((info->autoneg == AUTONEG_ENABLE) ||
36 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
37 return -EINVAL;
38
39 return 0;
40}
41
42/**
43 * vxge_ethtool_gset - Return link specific information.
44 * @dev: device pointer.
45 * @info: pointer to the structure with parameters given by ethtool
46 * to return link information.
47 *
48 * Returns link specific information like speed, duplex etc.. to ethtool.
49 * Return value :
50 * return 0 on success.
51 */
52static int vxge_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
53{
54 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
55 info->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
56 info->port = PORT_FIBRE;
57
58 info->transceiver = XCVR_EXTERNAL;
59
60 if (netif_carrier_ok(dev)) {
61 info->speed = SPEED_10000;
62 info->duplex = DUPLEX_FULL;
63 } else {
64 info->speed = -1;
65 info->duplex = -1;
66 }
67
68 info->autoneg = AUTONEG_DISABLE;
69 return 0;
70}
71
72/**
73 * vxge_ethtool_gdrvinfo - Returns driver specific information.
74 * @dev: device pointer.
75 * @info: pointer to the structure with parameters given by ethtool to
76 * return driver information.
77 *
 78 * Returns driver-specific information such as name and version to ethtool.
79 */
80static void vxge_ethtool_gdrvinfo(struct net_device *dev,
81 struct ethtool_drvinfo *info)
82{
83 struct vxgedev *vdev;
84 vdev = (struct vxgedev *)netdev_priv(dev);
85 strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(VXGE_DRIVER_NAME));
86 strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION));
87 strlcpy(info->fw_version, vdev->fw_version, VXGE_HW_FW_STRLEN);
88 strlcpy(info->bus_info, pci_name(vdev->pdev), sizeof(info->bus_info));
89 info->regdump_len = sizeof(struct vxge_hw_vpath_reg)
90 * vdev->no_of_vpath;
91
92 info->n_stats = STAT_LEN;
93}
94
95/**
 96 * vxge_ethtool_gregs - dumps the vpath register space of Titan into the buffer.
97 * @dev: device pointer.
98 * @regs: pointer to the structure with parameters given by ethtool for
99 * dumping the registers.
 100 * @space: The buffer into which all the registers are dumped.
101 *
102 * Dumps the vpath register space of Titan NIC into the user given
103 * buffer area.
104 */
105static void vxge_ethtool_gregs(struct net_device *dev,
106 struct ethtool_regs *regs, void *space)
107{
108 int index, offset;
109 enum vxge_hw_status status;
110 u64 reg;
111 u8 *reg_space = (u8 *) space;
112 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
113 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
114 pci_get_drvdata(vdev->pdev);
115
116 regs->len = sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
117 regs->version = vdev->pdev->subsystem_device;
118 for (index = 0; index < vdev->no_of_vpath; index++) {
119 for (offset = 0; offset < sizeof(struct vxge_hw_vpath_reg);
120 offset += 8) {
121 status = vxge_hw_mgmt_reg_read(hldev,
122 vxge_hw_mgmt_reg_type_vpath,
123 vdev->vpaths[index].device_id,
124 offset, &reg);
125 if (status != VXGE_HW_OK) {
126 vxge_debug_init(VXGE_ERR,
127 "%s:%d Getting reg dump Failed",
128 __func__, __LINE__);
129 return;
130 }
131
132 memcpy((reg_space + offset), &reg, 8);
133 }
134 }
135}
136
137/**
138 * vxge_ethtool_idnic - To physically identify the nic on the system.
139 * @dev : device pointer.
140 * @id : pointer to the structure with identification parameters given by
141 * ethtool.
142 *
143 * Used to physically identify the NIC on the system.
144 * The Link LED will blink for a time specified by the user.
145 * Return value:
146 * 0 on success
147 */
148static int vxge_ethtool_idnic(struct net_device *dev, u32 data)
149{
150 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
151 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
152 pci_get_drvdata(vdev->pdev);
153
154 vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON);
155 msleep_interruptible(data ? (data * HZ) : VXGE_MAX_FLICKER_TIME);
156 vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_OFF);
157
158 return 0;
159}
160
161/**
 162 * vxge_ethtool_getpause_data - Pause frame generation and reception.
163 * @dev : device pointer.
164 * @ep : pointer to the structure with pause parameters given by ethtool.
165 * Description:
166 * Returns the Pause frame generation and reception capability of the NIC.
167 * Return value:
168 * void
169 */
170static void vxge_ethtool_getpause_data(struct net_device *dev,
171 struct ethtool_pauseparam *ep)
172{
173 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
174 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
175 pci_get_drvdata(vdev->pdev);
176
177 vxge_hw_device_getpause_data(hldev, 0, &ep->tx_pause, &ep->rx_pause);
178}
179
180/**
181 * vxge_ethtool_setpause_data - set/reset pause frame generation.
182 * @dev : device pointer.
183 * @ep : pointer to the structure with pause parameters given by ethtool.
184 * Description:
185 * It can be used to set or reset Pause frame generation or reception
186 * support of the NIC.
187 * Return value:
188 * int, returns 0 on Success
189 */
190static int vxge_ethtool_setpause_data(struct net_device *dev,
191 struct ethtool_pauseparam *ep)
192{
193 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
194 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
195 pci_get_drvdata(vdev->pdev);
196
197 vxge_hw_device_setpause_data(hldev, 0, ep->tx_pause, ep->rx_pause);
198
199 vdev->config.tx_pause_enable = ep->tx_pause;
200 vdev->config.rx_pause_enable = ep->rx_pause;
201
202 return 0;
203}
204
205static void vxge_get_ethtool_stats(struct net_device *dev,
206 struct ethtool_stats *estats, u64 *tmp_stats)
207{
208 int j, k;
209 enum vxge_hw_status status;
210 enum vxge_hw_status swstatus;
211 struct vxge_vpath *vpath = NULL;
212
213 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
214 struct __vxge_hw_device *hldev = vdev->devh;
215 struct vxge_hw_xmac_stats *xmac_stats;
216 struct vxge_hw_device_stats_sw_info *sw_stats;
217 struct vxge_hw_device_stats_hw_info *hw_stats;
218
219 u64 *ptr = tmp_stats;
220
221 memset(tmp_stats, 0,
222 vxge_ethtool_get_sset_count(dev, ETH_SS_STATS) * sizeof(u64));
223
224 xmac_stats = kzalloc(sizeof(struct vxge_hw_xmac_stats), GFP_KERNEL);
225 if (xmac_stats == NULL) {
226 vxge_debug_init(VXGE_ERR,
227 "%s : %d Memory Allocation failed for xmac_stats",
228 __func__, __LINE__);
229 return;
230 }
231
232 sw_stats = kzalloc(sizeof(struct vxge_hw_device_stats_sw_info),
233 GFP_KERNEL);
234 if (sw_stats == NULL) {
235 kfree(xmac_stats);
236 vxge_debug_init(VXGE_ERR,
237 "%s : %d Memory Allocation failed for sw_stats",
238 __func__, __LINE__);
239 return;
240 }
241
242 hw_stats = kzalloc(sizeof(struct vxge_hw_device_stats_hw_info),
243 GFP_KERNEL);
244 if (hw_stats == NULL) {
245 kfree(xmac_stats);
246 kfree(sw_stats);
247 vxge_debug_init(VXGE_ERR,
248 "%s : %d Memory Allocation failed for hw_stats",
249 __func__, __LINE__);
250 return;
251 }
252
253 *ptr++ = 0;
254 status = vxge_hw_device_xmac_stats_get(hldev, xmac_stats);
255 if (status != VXGE_HW_OK) {
256 if (status != VXGE_HW_ERR_PRIVILAGED_OPEARATION) {
257 vxge_debug_init(VXGE_ERR,
258 "%s : %d Failure in getting xmac stats",
259 __func__, __LINE__);
260 }
261 }
262 swstatus = vxge_hw_driver_stats_get(hldev, sw_stats);
263 if (swstatus != VXGE_HW_OK) {
264 vxge_debug_init(VXGE_ERR,
265 "%s : %d Failure in getting sw stats",
266 __func__, __LINE__);
267 }
268
269 status = vxge_hw_device_stats_get(hldev, hw_stats);
270 if (status != VXGE_HW_OK) {
271 vxge_debug_init(VXGE_ERR,
272 "%s : %d hw_stats_get error", __func__, __LINE__);
273 }
274
275 for (k = 0; k < vdev->no_of_vpath; k++) {
276 struct vxge_hw_vpath_stats_hw_info *vpath_info;
277
278 vpath = &vdev->vpaths[k];
279 j = vpath->device_id;
280 vpath_info = hw_stats->vpath_info[j];
281 if (!vpath_info) {
282 memset(ptr, 0, (VXGE_HW_VPATH_TX_STATS_LEN +
283 VXGE_HW_VPATH_RX_STATS_LEN) * sizeof(u64));
284 ptr += (VXGE_HW_VPATH_TX_STATS_LEN +
285 VXGE_HW_VPATH_RX_STATS_LEN);
286 continue;
287 }
288
289 *ptr++ = vpath_info->tx_stats.tx_ttl_eth_frms;
290 *ptr++ = vpath_info->tx_stats.tx_ttl_eth_octets;
291 *ptr++ = vpath_info->tx_stats.tx_data_octets;
292 *ptr++ = vpath_info->tx_stats.tx_mcast_frms;
293 *ptr++ = vpath_info->tx_stats.tx_bcast_frms;
294 *ptr++ = vpath_info->tx_stats.tx_ucast_frms;
295 *ptr++ = vpath_info->tx_stats.tx_tagged_frms;
296 *ptr++ = vpath_info->tx_stats.tx_vld_ip;
297 *ptr++ = vpath_info->tx_stats.tx_vld_ip_octets;
298 *ptr++ = vpath_info->tx_stats.tx_icmp;
299 *ptr++ = vpath_info->tx_stats.tx_tcp;
300 *ptr++ = vpath_info->tx_stats.tx_rst_tcp;
301 *ptr++ = vpath_info->tx_stats.tx_udp;
302 *ptr++ = vpath_info->tx_stats.tx_unknown_protocol;
303 *ptr++ = vpath_info->tx_stats.tx_lost_ip;
304 *ptr++ = vpath_info->tx_stats.tx_parse_error;
305 *ptr++ = vpath_info->tx_stats.tx_tcp_offload;
306 *ptr++ = vpath_info->tx_stats.tx_retx_tcp_offload;
307 *ptr++ = vpath_info->tx_stats.tx_lost_ip_offload;
308 *ptr++ = vpath_info->rx_stats.rx_ttl_eth_frms;
309 *ptr++ = vpath_info->rx_stats.rx_vld_frms;
310 *ptr++ = vpath_info->rx_stats.rx_offload_frms;
311 *ptr++ = vpath_info->rx_stats.rx_ttl_eth_octets;
312 *ptr++ = vpath_info->rx_stats.rx_data_octets;
313 *ptr++ = vpath_info->rx_stats.rx_offload_octets;
314 *ptr++ = vpath_info->rx_stats.rx_vld_mcast_frms;
315 *ptr++ = vpath_info->rx_stats.rx_vld_bcast_frms;
316 *ptr++ = vpath_info->rx_stats.rx_accepted_ucast_frms;
317 *ptr++ = vpath_info->rx_stats.rx_accepted_nucast_frms;
318 *ptr++ = vpath_info->rx_stats.rx_tagged_frms;
319 *ptr++ = vpath_info->rx_stats.rx_long_frms;
320 *ptr++ = vpath_info->rx_stats.rx_usized_frms;
321 *ptr++ = vpath_info->rx_stats.rx_osized_frms;
322 *ptr++ = vpath_info->rx_stats.rx_frag_frms;
323 *ptr++ = vpath_info->rx_stats.rx_jabber_frms;
324 *ptr++ = vpath_info->rx_stats.rx_ttl_64_frms;
325 *ptr++ = vpath_info->rx_stats.rx_ttl_65_127_frms;
326 *ptr++ = vpath_info->rx_stats.rx_ttl_128_255_frms;
327 *ptr++ = vpath_info->rx_stats.rx_ttl_256_511_frms;
328 *ptr++ = vpath_info->rx_stats.rx_ttl_512_1023_frms;
329 *ptr++ = vpath_info->rx_stats.rx_ttl_1024_1518_frms;
330 *ptr++ = vpath_info->rx_stats.rx_ttl_1519_4095_frms;
331 *ptr++ = vpath_info->rx_stats.rx_ttl_4096_8191_frms;
332 *ptr++ = vpath_info->rx_stats.rx_ttl_8192_max_frms;
333 *ptr++ = vpath_info->rx_stats.rx_ttl_gt_max_frms;
334 *ptr++ = vpath_info->rx_stats.rx_ip;
335 *ptr++ = vpath_info->rx_stats.rx_accepted_ip;
336 *ptr++ = vpath_info->rx_stats.rx_ip_octets;
337 *ptr++ = vpath_info->rx_stats.rx_err_ip;
338 *ptr++ = vpath_info->rx_stats.rx_icmp;
339 *ptr++ = vpath_info->rx_stats.rx_tcp;
340 *ptr++ = vpath_info->rx_stats.rx_udp;
341 *ptr++ = vpath_info->rx_stats.rx_err_tcp;
342 *ptr++ = vpath_info->rx_stats.rx_lost_frms;
343 *ptr++ = vpath_info->rx_stats.rx_lost_ip;
344 *ptr++ = vpath_info->rx_stats.rx_lost_ip_offload;
345 *ptr++ = vpath_info->rx_stats.rx_various_discard;
346 *ptr++ = vpath_info->rx_stats.rx_sleep_discard;
347 *ptr++ = vpath_info->rx_stats.rx_red_discard;
348 *ptr++ = vpath_info->rx_stats.rx_queue_full_discard;
349 *ptr++ = vpath_info->rx_stats.rx_mpa_ok_frms;
350 }
351 *ptr++ = 0;
352 for (k = 0; k < vdev->max_config_port; k++) {
353 *ptr++ = xmac_stats->aggr_stats[k].tx_frms;
354 *ptr++ = xmac_stats->aggr_stats[k].tx_data_octets;
355 *ptr++ = xmac_stats->aggr_stats[k].tx_mcast_frms;
356 *ptr++ = xmac_stats->aggr_stats[k].tx_bcast_frms;
357 *ptr++ = xmac_stats->aggr_stats[k].tx_discarded_frms;
358 *ptr++ = xmac_stats->aggr_stats[k].tx_errored_frms;
359 *ptr++ = xmac_stats->aggr_stats[k].rx_frms;
360 *ptr++ = xmac_stats->aggr_stats[k].rx_data_octets;
361 *ptr++ = xmac_stats->aggr_stats[k].rx_mcast_frms;
362 *ptr++ = xmac_stats->aggr_stats[k].rx_bcast_frms;
363 *ptr++ = xmac_stats->aggr_stats[k].rx_discarded_frms;
364 *ptr++ = xmac_stats->aggr_stats[k].rx_errored_frms;
365 *ptr++ = xmac_stats->aggr_stats[k].rx_unknown_slow_proto_frms;
366 }
367 *ptr++ = 0;
368 for (k = 0; k < vdev->max_config_port; k++) {
369 *ptr++ = xmac_stats->port_stats[k].tx_ttl_frms;
370 *ptr++ = xmac_stats->port_stats[k].tx_ttl_octets;
371 *ptr++ = xmac_stats->port_stats[k].tx_data_octets;
372 *ptr++ = xmac_stats->port_stats[k].tx_mcast_frms;
373 *ptr++ = xmac_stats->port_stats[k].tx_bcast_frms;
374 *ptr++ = xmac_stats->port_stats[k].tx_ucast_frms;
375 *ptr++ = xmac_stats->port_stats[k].tx_tagged_frms;
376 *ptr++ = xmac_stats->port_stats[k].tx_vld_ip;
377 *ptr++ = xmac_stats->port_stats[k].tx_vld_ip_octets;
378 *ptr++ = xmac_stats->port_stats[k].tx_icmp;
379 *ptr++ = xmac_stats->port_stats[k].tx_tcp;
380 *ptr++ = xmac_stats->port_stats[k].tx_rst_tcp;
381 *ptr++ = xmac_stats->port_stats[k].tx_udp;
382 *ptr++ = xmac_stats->port_stats[k].tx_parse_error;
383 *ptr++ = xmac_stats->port_stats[k].tx_unknown_protocol;
384 *ptr++ = xmac_stats->port_stats[k].tx_pause_ctrl_frms;
385 *ptr++ = xmac_stats->port_stats[k].tx_marker_pdu_frms;
386 *ptr++ = xmac_stats->port_stats[k].tx_lacpdu_frms;
387 *ptr++ = xmac_stats->port_stats[k].tx_drop_ip;
388 *ptr++ = xmac_stats->port_stats[k].tx_marker_resp_pdu_frms;
389 *ptr++ = xmac_stats->port_stats[k].tx_xgmii_char2_match;
390 *ptr++ = xmac_stats->port_stats[k].tx_xgmii_char1_match;
391 *ptr++ = xmac_stats->port_stats[k].tx_xgmii_column2_match;
392 *ptr++ = xmac_stats->port_stats[k].tx_xgmii_column1_match;
393 *ptr++ = xmac_stats->port_stats[k].tx_any_err_frms;
394 *ptr++ = xmac_stats->port_stats[k].tx_drop_frms;
395 *ptr++ = xmac_stats->port_stats[k].rx_ttl_frms;
396 *ptr++ = xmac_stats->port_stats[k].rx_vld_frms;
397 *ptr++ = xmac_stats->port_stats[k].rx_offload_frms;
398 *ptr++ = xmac_stats->port_stats[k].rx_ttl_octets;
399 *ptr++ = xmac_stats->port_stats[k].rx_data_octets;
400 *ptr++ = xmac_stats->port_stats[k].rx_offload_octets;
401 *ptr++ = xmac_stats->port_stats[k].rx_vld_mcast_frms;
402 *ptr++ = xmac_stats->port_stats[k].rx_vld_bcast_frms;
403 *ptr++ = xmac_stats->port_stats[k].rx_accepted_ucast_frms;
404 *ptr++ = xmac_stats->port_stats[k].rx_accepted_nucast_frms;
405 *ptr++ = xmac_stats->port_stats[k].rx_tagged_frms;
406 *ptr++ = xmac_stats->port_stats[k].rx_long_frms;
407 *ptr++ = xmac_stats->port_stats[k].rx_usized_frms;
408 *ptr++ = xmac_stats->port_stats[k].rx_osized_frms;
409 *ptr++ = xmac_stats->port_stats[k].rx_frag_frms;
410 *ptr++ = xmac_stats->port_stats[k].rx_jabber_frms;
411 *ptr++ = xmac_stats->port_stats[k].rx_ttl_64_frms;
412 *ptr++ = xmac_stats->port_stats[k].rx_ttl_65_127_frms;
413 *ptr++ = xmac_stats->port_stats[k].rx_ttl_128_255_frms;
414 *ptr++ = xmac_stats->port_stats[k].rx_ttl_256_511_frms;
415 *ptr++ = xmac_stats->port_stats[k].rx_ttl_512_1023_frms;
416 *ptr++ = xmac_stats->port_stats[k].rx_ttl_1024_1518_frms;
417 *ptr++ = xmac_stats->port_stats[k].rx_ttl_1519_4095_frms;
418 *ptr++ = xmac_stats->port_stats[k].rx_ttl_4096_8191_frms;
419 *ptr++ = xmac_stats->port_stats[k].rx_ttl_8192_max_frms;
420 *ptr++ = xmac_stats->port_stats[k].rx_ttl_gt_max_frms;
421 *ptr++ = xmac_stats->port_stats[k].rx_ip;
422 *ptr++ = xmac_stats->port_stats[k].rx_accepted_ip;
423 *ptr++ = xmac_stats->port_stats[k].rx_ip_octets;
424 *ptr++ = xmac_stats->port_stats[k].rx_err_ip;
425 *ptr++ = xmac_stats->port_stats[k].rx_icmp;
426 *ptr++ = xmac_stats->port_stats[k].rx_tcp;
427 *ptr++ = xmac_stats->port_stats[k].rx_udp;
428 *ptr++ = xmac_stats->port_stats[k].rx_err_tcp;
429 *ptr++ = xmac_stats->port_stats[k].rx_pause_count;
430 *ptr++ = xmac_stats->port_stats[k].rx_pause_ctrl_frms;
431 *ptr++ = xmac_stats->port_stats[k].rx_unsup_ctrl_frms;
432 *ptr++ = xmac_stats->port_stats[k].rx_fcs_err_frms;
433 *ptr++ = xmac_stats->port_stats[k].rx_in_rng_len_err_frms;
434 *ptr++ = xmac_stats->port_stats[k].rx_out_rng_len_err_frms;
435 *ptr++ = xmac_stats->port_stats[k].rx_drop_frms;
436 *ptr++ = xmac_stats->port_stats[k].rx_discarded_frms;
437 *ptr++ = xmac_stats->port_stats[k].rx_drop_ip;
438 *ptr++ = xmac_stats->port_stats[k].rx_drop_udp;
439 *ptr++ = xmac_stats->port_stats[k].rx_marker_pdu_frms;
440 *ptr++ = xmac_stats->port_stats[k].rx_lacpdu_frms;
441 *ptr++ = xmac_stats->port_stats[k].rx_unknown_pdu_frms;
442 *ptr++ = xmac_stats->port_stats[k].rx_marker_resp_pdu_frms;
443 *ptr++ = xmac_stats->port_stats[k].rx_fcs_discard;
444 *ptr++ = xmac_stats->port_stats[k].rx_illegal_pdu_frms;
445 *ptr++ = xmac_stats->port_stats[k].rx_switch_discard;
446 *ptr++ = xmac_stats->port_stats[k].rx_len_discard;
447 *ptr++ = xmac_stats->port_stats[k].rx_rpa_discard;
448 *ptr++ = xmac_stats->port_stats[k].rx_l2_mgmt_discard;
449 *ptr++ = xmac_stats->port_stats[k].rx_rts_discard;
450 *ptr++ = xmac_stats->port_stats[k].rx_trash_discard;
451 *ptr++ = xmac_stats->port_stats[k].rx_buff_full_discard;
452 *ptr++ = xmac_stats->port_stats[k].rx_red_discard;
453 *ptr++ = xmac_stats->port_stats[k].rx_xgmii_ctrl_err_cnt;
454 *ptr++ = xmac_stats->port_stats[k].rx_xgmii_data_err_cnt;
455 *ptr++ = xmac_stats->port_stats[k].rx_xgmii_char1_match;
456 *ptr++ = xmac_stats->port_stats[k].rx_xgmii_err_sym;
457 *ptr++ = xmac_stats->port_stats[k].rx_xgmii_column1_match;
458 *ptr++ = xmac_stats->port_stats[k].rx_xgmii_char2_match;
459 *ptr++ = xmac_stats->port_stats[k].rx_local_fault;
460 *ptr++ = xmac_stats->port_stats[k].rx_xgmii_column2_match;
461 *ptr++ = xmac_stats->port_stats[k].rx_jettison;
462 *ptr++ = xmac_stats->port_stats[k].rx_remote_fault;
463 }
464
465 *ptr++ = 0;
466 for (k = 0; k < vdev->no_of_vpath; k++) {
467 struct vxge_hw_vpath_stats_sw_info *vpath_info;
468
469 vpath = &vdev->vpaths[k];
470 j = vpath->device_id;
471 vpath_info = (struct vxge_hw_vpath_stats_sw_info *)
472 &sw_stats->vpath_info[j];
473 *ptr++ = vpath_info->soft_reset_cnt;
474 *ptr++ = vpath_info->error_stats.unknown_alarms;
475 *ptr++ = vpath_info->error_stats.network_sustained_fault;
476 *ptr++ = vpath_info->error_stats.network_sustained_ok;
477 *ptr++ = vpath_info->error_stats.kdfcctl_fifo0_overwrite;
478 *ptr++ = vpath_info->error_stats.kdfcctl_fifo0_poison;
479 *ptr++ = vpath_info->error_stats.kdfcctl_fifo0_dma_error;
480 *ptr++ = vpath_info->error_stats.dblgen_fifo0_overflow;
481 *ptr++ = vpath_info->error_stats.statsb_pif_chain_error;
482 *ptr++ = vpath_info->error_stats.statsb_drop_timeout;
483 *ptr++ = vpath_info->error_stats.target_illegal_access;
484 *ptr++ = vpath_info->error_stats.ini_serr_det;
485 *ptr++ = vpath_info->error_stats.prc_ring_bumps;
486 *ptr++ = vpath_info->error_stats.prc_rxdcm_sc_err;
487 *ptr++ = vpath_info->error_stats.prc_rxdcm_sc_abort;
488 *ptr++ = vpath_info->error_stats.prc_quanta_size_err;
489 *ptr++ = vpath_info->ring_stats.common_stats.full_cnt;
490 *ptr++ = vpath_info->ring_stats.common_stats.usage_cnt;
491 *ptr++ = vpath_info->ring_stats.common_stats.usage_max;
492 *ptr++ = vpath_info->ring_stats.common_stats.
493 reserve_free_swaps_cnt;
494 *ptr++ = vpath_info->ring_stats.common_stats.total_compl_cnt;
495 for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
496 *ptr++ = vpath_info->ring_stats.rxd_t_code_err_cnt[j];
497 *ptr++ = vpath_info->fifo_stats.common_stats.full_cnt;
498 *ptr++ = vpath_info->fifo_stats.common_stats.usage_cnt;
499 *ptr++ = vpath_info->fifo_stats.common_stats.usage_max;
500 *ptr++ = vpath_info->fifo_stats.common_stats.
501 reserve_free_swaps_cnt;
502 *ptr++ = vpath_info->fifo_stats.common_stats.total_compl_cnt;
503 *ptr++ = vpath_info->fifo_stats.total_posts;
504 *ptr++ = vpath_info->fifo_stats.total_buffers;
505 for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
506 *ptr++ = vpath_info->fifo_stats.txd_t_code_err_cnt[j];
507 }
508
509 *ptr++ = 0;
510 for (k = 0; k < vdev->no_of_vpath; k++) {
511 struct vxge_hw_vpath_stats_hw_info *vpath_info;
512 vpath = &vdev->vpaths[k];
513 j = vpath->device_id;
514 vpath_info = hw_stats->vpath_info[j];
515 if (!vpath_info) {
516 memset(ptr, 0, VXGE_HW_VPATH_STATS_LEN * sizeof(u64));
517 ptr += VXGE_HW_VPATH_STATS_LEN;
518 continue;
519 }
520 *ptr++ = vpath_info->ini_num_mwr_sent;
521 *ptr++ = vpath_info->ini_num_mrd_sent;
522 *ptr++ = vpath_info->ini_num_cpl_rcvd;
523 *ptr++ = vpath_info->ini_num_mwr_byte_sent;
524 *ptr++ = vpath_info->ini_num_cpl_byte_rcvd;
525 *ptr++ = vpath_info->wrcrdtarb_xoff;
526 *ptr++ = vpath_info->rdcrdtarb_xoff;
527 *ptr++ = vpath_info->vpath_genstats_count0;
528 *ptr++ = vpath_info->vpath_genstats_count1;
529 *ptr++ = vpath_info->vpath_genstats_count2;
530 *ptr++ = vpath_info->vpath_genstats_count3;
531 *ptr++ = vpath_info->vpath_genstats_count4;
532 *ptr++ = vpath_info->vpath_genstats_count5;
533 *ptr++ = vpath_info->prog_event_vnum0;
534 *ptr++ = vpath_info->prog_event_vnum1;
535 *ptr++ = vpath_info->prog_event_vnum2;
536 *ptr++ = vpath_info->prog_event_vnum3;
537 *ptr++ = vpath_info->rx_multi_cast_frame_discard;
538 *ptr++ = vpath_info->rx_frm_transferred;
539 *ptr++ = vpath_info->rxd_returned;
540 *ptr++ = vpath_info->rx_mpa_len_fail_frms;
541 *ptr++ = vpath_info->rx_mpa_mrk_fail_frms;
542 *ptr++ = vpath_info->rx_mpa_crc_fail_frms;
543 *ptr++ = vpath_info->rx_permitted_frms;
544 *ptr++ = vpath_info->rx_vp_reset_discarded_frms;
545 *ptr++ = vpath_info->rx_wol_frms;
546 *ptr++ = vpath_info->tx_vp_reset_discarded_frms;
547 }
548
549 *ptr++ = 0;
550 *ptr++ = vdev->stats.vpaths_open;
551 *ptr++ = vdev->stats.vpath_open_fail;
552 *ptr++ = vdev->stats.link_up;
553 *ptr++ = vdev->stats.link_down;
554
555 for (k = 0; k < vdev->no_of_vpath; k++) {
556 *ptr += vdev->vpaths[k].fifo.stats.tx_frms;
557 *(ptr + 1) += vdev->vpaths[k].fifo.stats.tx_errors;
558 *(ptr + 2) += vdev->vpaths[k].fifo.stats.tx_bytes;
559 *(ptr + 3) += vdev->vpaths[k].fifo.stats.txd_not_free;
560 *(ptr + 4) += vdev->vpaths[k].fifo.stats.txd_out_of_desc;
561 *(ptr + 5) += vdev->vpaths[k].ring.stats.rx_frms;
562 *(ptr + 6) += vdev->vpaths[k].ring.stats.rx_errors;
563 *(ptr + 7) += vdev->vpaths[k].ring.stats.rx_bytes;
564 *(ptr + 8) += vdev->vpaths[k].ring.stats.rx_mcast;
565 *(ptr + 9) += vdev->vpaths[k].fifo.stats.pci_map_fail +
566 vdev->vpaths[k].ring.stats.pci_map_fail;
567 *(ptr + 10) += vdev->vpaths[k].ring.stats.skb_alloc_fail;
568 }
569
570 ptr += 12;
571
572 kfree(xmac_stats);
573 kfree(sw_stats);
574 kfree(hw_stats);
575}
576
577static void vxge_ethtool_get_strings(struct net_device *dev,
578 u32 stringset, u8 *data)
579{
580 int stat_size = 0;
581 int i, j;
582 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
583 switch (stringset) {
584 case ETH_SS_STATS:
585 vxge_add_string("VPATH STATISTICS%s\t\t\t",
586 &stat_size, data, "");
587 for (i = 0; i < vdev->no_of_vpath; i++) {
588 vxge_add_string("tx_ttl_eth_frms_%d\t\t\t",
589 &stat_size, data, i);
590 vxge_add_string("tx_ttl_eth_octets_%d\t\t",
591 &stat_size, data, i);
592 vxge_add_string("tx_data_octets_%d\t\t\t",
593 &stat_size, data, i);
594 vxge_add_string("tx_mcast_frms_%d\t\t\t",
595 &stat_size, data, i);
596 vxge_add_string("tx_bcast_frms_%d\t\t\t",
597 &stat_size, data, i);
598 vxge_add_string("tx_ucast_frms_%d\t\t\t",
599 &stat_size, data, i);
600 vxge_add_string("tx_tagged_frms_%d\t\t\t",
601 &stat_size, data, i);
602 vxge_add_string("tx_vld_ip_%d\t\t\t",
603 &stat_size, data, i);
604 vxge_add_string("tx_vld_ip_octets_%d\t\t",
605 &stat_size, data, i);
606 vxge_add_string("tx_icmp_%d\t\t\t\t",
607 &stat_size, data, i);
608 vxge_add_string("tx_tcp_%d\t\t\t\t",
609 &stat_size, data, i);
610 vxge_add_string("tx_rst_tcp_%d\t\t\t",
611 &stat_size, data, i);
612 vxge_add_string("tx_udp_%d\t\t\t\t",
613 &stat_size, data, i);
614 vxge_add_string("tx_unknown_proto_%d\t\t\t",
615 &stat_size, data, i);
616 vxge_add_string("tx_lost_ip_%d\t\t\t",
617 &stat_size, data, i);
618 vxge_add_string("tx_parse_error_%d\t\t\t",
619 &stat_size, data, i);
620 vxge_add_string("tx_tcp_offload_%d\t\t\t",
621 &stat_size, data, i);
622 vxge_add_string("tx_retx_tcp_offload_%d\t\t",
623 &stat_size, data, i);
624 vxge_add_string("tx_lost_ip_offload_%d\t\t",
625 &stat_size, data, i);
626 vxge_add_string("rx_ttl_eth_frms_%d\t\t\t",
627 &stat_size, data, i);
628 vxge_add_string("rx_vld_frms_%d\t\t\t",
629 &stat_size, data, i);
630 vxge_add_string("rx_offload_frms_%d\t\t\t",
631 &stat_size, data, i);
632 vxge_add_string("rx_ttl_eth_octets_%d\t\t",
633 &stat_size, data, i);
634 vxge_add_string("rx_data_octets_%d\t\t\t",
635 &stat_size, data, i);
636 vxge_add_string("rx_offload_octets_%d\t\t",
637 &stat_size, data, i);
638 vxge_add_string("rx_vld_mcast_frms_%d\t\t",
639 &stat_size, data, i);
640 vxge_add_string("rx_vld_bcast_frms_%d\t\t",
641 &stat_size, data, i);
642 vxge_add_string("rx_accepted_ucast_frms_%d\t\t",
643 &stat_size, data, i);
644 vxge_add_string("rx_accepted_nucast_frms_%d\t\t",
645 &stat_size, data, i);
646 vxge_add_string("rx_tagged_frms_%d\t\t\t",
647 &stat_size, data, i);
648 vxge_add_string("rx_long_frms_%d\t\t\t",
649 &stat_size, data, i);
650 vxge_add_string("rx_usized_frms_%d\t\t\t",
651 &stat_size, data, i);
652 vxge_add_string("rx_osized_frms_%d\t\t\t",
653 &stat_size, data, i);
654 vxge_add_string("rx_frag_frms_%d\t\t\t",
655 &stat_size, data, i);
656 vxge_add_string("rx_jabber_frms_%d\t\t\t",
657 &stat_size, data, i);
658 vxge_add_string("rx_ttl_64_frms_%d\t\t\t",
659 &stat_size, data, i);
660 vxge_add_string("rx_ttl_65_127_frms_%d\t\t",
661 &stat_size, data, i);
662 vxge_add_string("rx_ttl_128_255_frms_%d\t\t",
663 &stat_size, data, i);
664 vxge_add_string("rx_ttl_256_511_frms_%d\t\t",
665 &stat_size, data, i);
666 vxge_add_string("rx_ttl_512_1023_frms_%d\t\t",
667 &stat_size, data, i);
668 vxge_add_string("rx_ttl_1024_1518_frms_%d\t\t",
669 &stat_size, data, i);
670 vxge_add_string("rx_ttl_1519_4095_frms_%d\t\t",
671 &stat_size, data, i);
672 vxge_add_string("rx_ttl_4096_8191_frms_%d\t\t",
673 &stat_size, data, i);
674 vxge_add_string("rx_ttl_8192_max_frms_%d\t\t",
675 &stat_size, data, i);
676 vxge_add_string("rx_ttl_gt_max_frms_%d\t\t",
677 &stat_size, data, i);
678 vxge_add_string("rx_ip_%d\t\t\t\t",
679 &stat_size, data, i);
680 vxge_add_string("rx_accepted_ip_%d\t\t\t",
681 &stat_size, data, i);
682 vxge_add_string("rx_ip_octets_%d\t\t\t",
683 &stat_size, data, i);
684 vxge_add_string("rx_err_ip_%d\t\t\t",
685 &stat_size, data, i);
686 vxge_add_string("rx_icmp_%d\t\t\t\t",
687 &stat_size, data, i);
688 vxge_add_string("rx_tcp_%d\t\t\t\t",
689 &stat_size, data, i);
690 vxge_add_string("rx_udp_%d\t\t\t\t",
691 &stat_size, data, i);
692 vxge_add_string("rx_err_tcp_%d\t\t\t",
693 &stat_size, data, i);
694 vxge_add_string("rx_lost_frms_%d\t\t\t",
695 &stat_size, data, i);
696 vxge_add_string("rx_lost_ip_%d\t\t\t",
697 &stat_size, data, i);
698 vxge_add_string("rx_lost_ip_offload_%d\t\t",
699 &stat_size, data, i);
700 vxge_add_string("rx_various_discard_%d\t\t",
701 &stat_size, data, i);
702 vxge_add_string("rx_sleep_discard_%d\t\t\t",
703 &stat_size, data, i);
704 vxge_add_string("rx_red_discard_%d\t\t\t",
705 &stat_size, data, i);
706 vxge_add_string("rx_queue_full_discard_%d\t\t",
707 &stat_size, data, i);
708 vxge_add_string("rx_mpa_ok_frms_%d\t\t\t",
709 &stat_size, data, i);
710 }
711
712 vxge_add_string("\nAGGR STATISTICS%s\t\t\t\t",
713 &stat_size, data, "");
714 for (i = 0; i < vdev->max_config_port; i++) {
715 vxge_add_string("tx_frms_%d\t\t\t\t",
716 &stat_size, data, i);
717 vxge_add_string("tx_data_octets_%d\t\t\t",
718 &stat_size, data, i);
719 vxge_add_string("tx_mcast_frms_%d\t\t\t",
720 &stat_size, data, i);
721 vxge_add_string("tx_bcast_frms_%d\t\t\t",
722 &stat_size, data, i);
723 vxge_add_string("tx_discarded_frms_%d\t\t",
724 &stat_size, data, i);
725 vxge_add_string("tx_errored_frms_%d\t\t\t",
726 &stat_size, data, i);
727 vxge_add_string("rx_frms_%d\t\t\t\t",
728 &stat_size, data, i);
729 vxge_add_string("rx_data_octets_%d\t\t\t",
730 &stat_size, data, i);
731 vxge_add_string("rx_mcast_frms_%d\t\t\t",
732 &stat_size, data, i);
733 vxge_add_string("rx_bcast_frms_%d\t\t\t",
734 &stat_size, data, i);
735 vxge_add_string("rx_discarded_frms_%d\t\t",
736 &stat_size, data, i);
737 vxge_add_string("rx_errored_frms_%d\t\t\t",
738 &stat_size, data, i);
739 vxge_add_string("rx_unknown_slow_proto_frms_%d\t",
740 &stat_size, data, i);
741 }
742
743 vxge_add_string("\nPORT STATISTICS%s\t\t\t\t",
744 &stat_size, data, "");
745 for (i = 0; i < vdev->max_config_port; i++) {
746 vxge_add_string("tx_ttl_frms_%d\t\t\t",
747 &stat_size, data, i);
748 vxge_add_string("tx_ttl_octets_%d\t\t\t",
749 &stat_size, data, i);
750 vxge_add_string("tx_data_octets_%d\t\t\t",
751 &stat_size, data, i);
752 vxge_add_string("tx_mcast_frms_%d\t\t\t",
753 &stat_size, data, i);
754 vxge_add_string("tx_bcast_frms_%d\t\t\t",
755 &stat_size, data, i);
756 vxge_add_string("tx_ucast_frms_%d\t\t\t",
757 &stat_size, data, i);
758 vxge_add_string("tx_tagged_frms_%d\t\t\t",
759 &stat_size, data, i);
760 vxge_add_string("tx_vld_ip_%d\t\t\t",
761 &stat_size, data, i);
762 vxge_add_string("tx_vld_ip_octets_%d\t\t",
763 &stat_size, data, i);
764 vxge_add_string("tx_icmp_%d\t\t\t\t",
765 &stat_size, data, i);
766 vxge_add_string("tx_tcp_%d\t\t\t\t",
767 &stat_size, data, i);
768 vxge_add_string("tx_rst_tcp_%d\t\t\t",
769 &stat_size, data, i);
770 vxge_add_string("tx_udp_%d\t\t\t\t",
771 &stat_size, data, i);
772 vxge_add_string("tx_parse_error_%d\t\t\t",
773 &stat_size, data, i);
774 vxge_add_string("tx_unknown_protocol_%d\t\t",
775 &stat_size, data, i);
776 vxge_add_string("tx_pause_ctrl_frms_%d\t\t",
777 &stat_size, data, i);
778 vxge_add_string("tx_marker_pdu_frms_%d\t\t",
779 &stat_size, data, i);
780 vxge_add_string("tx_lacpdu_frms_%d\t\t\t",
781 &stat_size, data, i);
782 vxge_add_string("tx_drop_ip_%d\t\t\t",
783 &stat_size, data, i);
784 vxge_add_string("tx_marker_resp_pdu_frms_%d\t\t",
785 &stat_size, data, i);
786 vxge_add_string("tx_xgmii_char2_match_%d\t\t",
787 &stat_size, data, i);
788 vxge_add_string("tx_xgmii_char1_match_%d\t\t",
789 &stat_size, data, i);
790 vxge_add_string("tx_xgmii_column2_match_%d\t\t",
791 &stat_size, data, i);
792 vxge_add_string("tx_xgmii_column1_match_%d\t\t",
793 &stat_size, data, i);
794 vxge_add_string("tx_any_err_frms_%d\t\t\t",
795 &stat_size, data, i);
796 vxge_add_string("tx_drop_frms_%d\t\t\t",
797 &stat_size, data, i);
798 vxge_add_string("rx_ttl_frms_%d\t\t\t",
799 &stat_size, data, i);
800 vxge_add_string("rx_vld_frms_%d\t\t\t",
801 &stat_size, data, i);
802 vxge_add_string("rx_offload_frms_%d\t\t\t",
803 &stat_size, data, i);
804 vxge_add_string("rx_ttl_octets_%d\t\t\t",
805 &stat_size, data, i);
806 vxge_add_string("rx_data_octets_%d\t\t\t",
807 &stat_size, data, i);
808 vxge_add_string("rx_offload_octets_%d\t\t",
809 &stat_size, data, i);
810 vxge_add_string("rx_vld_mcast_frms_%d\t\t",
811 &stat_size, data, i);
812 vxge_add_string("rx_vld_bcast_frms_%d\t\t",
813 &stat_size, data, i);
814 vxge_add_string("rx_accepted_ucast_frms_%d\t\t",
815 &stat_size, data, i);
816 vxge_add_string("rx_accepted_nucast_frms_%d\t\t",
817 &stat_size, data, i);
818 vxge_add_string("rx_tagged_frms_%d\t\t\t",
819 &stat_size, data, i);
820 vxge_add_string("rx_long_frms_%d\t\t\t",
821 &stat_size, data, i);
822 vxge_add_string("rx_usized_frms_%d\t\t\t",
823 &stat_size, data, i);
824 vxge_add_string("rx_osized_frms_%d\t\t\t",
825 &stat_size, data, i);
826 vxge_add_string("rx_frag_frms_%d\t\t\t",
827 &stat_size, data, i);
828 vxge_add_string("rx_jabber_frms_%d\t\t\t",
829 &stat_size, data, i);
830 vxge_add_string("rx_ttl_64_frms_%d\t\t\t",
831 &stat_size, data, i);
832 vxge_add_string("rx_ttl_65_127_frms_%d\t\t",
833 &stat_size, data, i);
834 vxge_add_string("rx_ttl_128_255_frms_%d\t\t",
835 &stat_size, data, i);
836 vxge_add_string("rx_ttl_256_511_frms_%d\t\t",
837 &stat_size, data, i);
838 vxge_add_string("rx_ttl_512_1023_frms_%d\t\t",
839 &stat_size, data, i);
840 vxge_add_string("rx_ttl_1024_1518_frms_%d\t\t",
841 &stat_size, data, i);
842 vxge_add_string("rx_ttl_1519_4095_frms_%d\t\t",
843 &stat_size, data, i);
844 vxge_add_string("rx_ttl_4096_8191_frms_%d\t\t",
845 &stat_size, data, i);
846 vxge_add_string("rx_ttl_8192_max_frms_%d\t\t",
847 &stat_size, data, i);
848 vxge_add_string("rx_ttl_gt_max_frms_%d\t\t",
849 &stat_size, data, i);
850 vxge_add_string("rx_ip_%d\t\t\t\t",
851 &stat_size, data, i);
852 vxge_add_string("rx_accepted_ip_%d\t\t\t",
853 &stat_size, data, i);
854 vxge_add_string("rx_ip_octets_%d\t\t\t",
855 &stat_size, data, i);
856 vxge_add_string("rx_err_ip_%d\t\t\t",
857 &stat_size, data, i);
858 vxge_add_string("rx_icmp_%d\t\t\t\t",
859 &stat_size, data, i);
860 vxge_add_string("rx_tcp_%d\t\t\t\t",
861 &stat_size, data, i);
862 vxge_add_string("rx_udp_%d\t\t\t\t",
863 &stat_size, data, i);
864 vxge_add_string("rx_err_tcp_%d\t\t\t",
865 &stat_size, data, i);
866 vxge_add_string("rx_pause_count_%d\t\t\t",
867 &stat_size, data, i);
868 vxge_add_string("rx_pause_ctrl_frms_%d\t\t",
869 &stat_size, data, i);
870 vxge_add_string("rx_unsup_ctrl_frms_%d\t\t",
871 &stat_size, data, i);
872 vxge_add_string("rx_fcs_err_frms_%d\t\t\t",
873 &stat_size, data, i);
874 vxge_add_string("rx_in_rng_len_err_frms_%d\t\t",
875 &stat_size, data, i);
876 vxge_add_string("rx_out_rng_len_err_frms_%d\t\t",
877 &stat_size, data, i);
878 vxge_add_string("rx_drop_frms_%d\t\t\t",
879 &stat_size, data, i);
880 vxge_add_string("rx_discarded_frms_%d\t\t",
881 &stat_size, data, i);
882 vxge_add_string("rx_drop_ip_%d\t\t\t",
883 &stat_size, data, i);
884 vxge_add_string("rx_drop_udp_%d\t\t\t",
885 &stat_size, data, i);
886 vxge_add_string("rx_marker_pdu_frms_%d\t\t",
887 &stat_size, data, i);
888 vxge_add_string("rx_lacpdu_frms_%d\t\t\t",
889 &stat_size, data, i);
890 vxge_add_string("rx_unknown_pdu_frms_%d\t\t",
891 &stat_size, data, i);
892 vxge_add_string("rx_marker_resp_pdu_frms_%d\t\t",
893 &stat_size, data, i);
894 vxge_add_string("rx_fcs_discard_%d\t\t\t",
895 &stat_size, data, i);
896 vxge_add_string("rx_illegal_pdu_frms_%d\t\t",
897 &stat_size, data, i);
898 vxge_add_string("rx_switch_discard_%d\t\t",
899 &stat_size, data, i);
900 vxge_add_string("rx_len_discard_%d\t\t\t",
901 &stat_size, data, i);
902 vxge_add_string("rx_rpa_discard_%d\t\t\t",
903 &stat_size, data, i);
904 vxge_add_string("rx_l2_mgmt_discard_%d\t\t",
905 &stat_size, data, i);
906 vxge_add_string("rx_rts_discard_%d\t\t\t",
907 &stat_size, data, i);
908 vxge_add_string("rx_trash_discard_%d\t\t\t",
909 &stat_size, data, i);
910 vxge_add_string("rx_buff_full_discard_%d\t\t",
911 &stat_size, data, i);
912 vxge_add_string("rx_red_discard_%d\t\t\t",
913 &stat_size, data, i);
914 vxge_add_string("rx_xgmii_ctrl_err_cnt_%d\t\t",
915 &stat_size, data, i);
916 vxge_add_string("rx_xgmii_data_err_cnt_%d\t\t",
917 &stat_size, data, i);
918 vxge_add_string("rx_xgmii_char1_match_%d\t\t",
919 &stat_size, data, i);
920 vxge_add_string("rx_xgmii_err_sym_%d\t\t\t",
921 &stat_size, data, i);
922 vxge_add_string("rx_xgmii_column1_match_%d\t\t",
923 &stat_size, data, i);
924 vxge_add_string("rx_xgmii_char2_match_%d\t\t",
925 &stat_size, data, i);
926 vxge_add_string("rx_local_fault_%d\t\t\t",
927 &stat_size, data, i);
928 vxge_add_string("rx_xgmii_column2_match_%d\t\t",
929 &stat_size, data, i);
930 vxge_add_string("rx_jettison_%d\t\t\t",
931 &stat_size, data, i);
932 vxge_add_string("rx_remote_fault_%d\t\t\t",
933 &stat_size, data, i);
934 }
935
936 vxge_add_string("\n SOFTWARE STATISTICS%s\t\t\t",
937 &stat_size, data, "");
938 for (i = 0; i < vdev->no_of_vpath; i++) {
939 vxge_add_string("soft_reset_cnt_%d\t\t\t",
940 &stat_size, data, i);
941 vxge_add_string("unknown_alarms_%d\t\t\t",
942 &stat_size, data, i);
943 vxge_add_string("network_sustained_fault_%d\t\t",
944 &stat_size, data, i);
945 vxge_add_string("network_sustained_ok_%d\t\t",
946 &stat_size, data, i);
947 vxge_add_string("kdfcctl_fifo0_overwrite_%d\t\t",
948 &stat_size, data, i);
949 vxge_add_string("kdfcctl_fifo0_poison_%d\t\t",
950 &stat_size, data, i);
951 vxge_add_string("kdfcctl_fifo0_dma_error_%d\t\t",
952 &stat_size, data, i);
953 vxge_add_string("dblgen_fifo0_overflow_%d\t\t",
954 &stat_size, data, i);
955 vxge_add_string("statsb_pif_chain_error_%d\t\t",
956 &stat_size, data, i);
957 vxge_add_string("statsb_drop_timeout_%d\t\t",
958 &stat_size, data, i);
959 vxge_add_string("target_illegal_access_%d\t\t",
960 &stat_size, data, i);
961 vxge_add_string("ini_serr_det_%d\t\t\t",
962 &stat_size, data, i);
963 vxge_add_string("prc_ring_bumps_%d\t\t\t",
964 &stat_size, data, i);
965 vxge_add_string("prc_rxdcm_sc_err_%d\t\t\t",
966 &stat_size, data, i);
967 vxge_add_string("prc_rxdcm_sc_abort_%d\t\t",
968 &stat_size, data, i);
969 vxge_add_string("prc_quanta_size_err_%d\t\t",
970 &stat_size, data, i);
971 vxge_add_string("ring_full_cnt_%d\t\t\t",
972 &stat_size, data, i);
973 vxge_add_string("ring_usage_cnt_%d\t\t\t",
974 &stat_size, data, i);
975 vxge_add_string("ring_usage_max_%d\t\t\t",
976 &stat_size, data, i);
977 vxge_add_string("ring_reserve_free_swaps_cnt_%d\t",
978 &stat_size, data, i);
979 vxge_add_string("ring_total_compl_cnt_%d\t\t",
980 &stat_size, data, i);
981 for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
982 vxge_add_string("rxd_t_code_err_cnt%d_%d\t\t",
983 &stat_size, data, j, i);
984 vxge_add_string("fifo_full_cnt_%d\t\t\t",
985 &stat_size, data, i);
986 vxge_add_string("fifo_usage_cnt_%d\t\t\t",
987 &stat_size, data, i);
988 vxge_add_string("fifo_usage_max_%d\t\t\t",
989 &stat_size, data, i);
990 vxge_add_string("fifo_reserve_free_swaps_cnt_%d\t",
991 &stat_size, data, i);
992 vxge_add_string("fifo_total_compl_cnt_%d\t\t",
993 &stat_size, data, i);
994 vxge_add_string("fifo_total_posts_%d\t\t\t",
995 &stat_size, data, i);
996 vxge_add_string("fifo_total_buffers_%d\t\t",
997 &stat_size, data, i);
998 for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
999 vxge_add_string("txd_t_code_err_cnt%d_%d\t\t",
1000 &stat_size, data, j, i);
1001 }
1002
1003 vxge_add_string("\n HARDWARE STATISTICS%s\t\t\t",
1004 &stat_size, data, "");
1005 for (i = 0; i < vdev->no_of_vpath; i++) {
1006 vxge_add_string("ini_num_mwr_sent_%d\t\t\t",
1007 &stat_size, data, i);
1008 vxge_add_string("ini_num_mrd_sent_%d\t\t\t",
1009 &stat_size, data, i);
1010 vxge_add_string("ini_num_cpl_rcvd_%d\t\t\t",
1011 &stat_size, data, i);
1012 vxge_add_string("ini_num_mwr_byte_sent_%d\t\t",
1013 &stat_size, data, i);
1014 vxge_add_string("ini_num_cpl_byte_rcvd_%d\t\t",
1015 &stat_size, data, i);
1016 vxge_add_string("wrcrdtarb_xoff_%d\t\t\t",
1017 &stat_size, data, i);
1018 vxge_add_string("rdcrdtarb_xoff_%d\t\t\t",
1019 &stat_size, data, i);
1020 vxge_add_string("vpath_genstats_count0_%d\t\t",
1021 &stat_size, data, i);
1022 vxge_add_string("vpath_genstats_count1_%d\t\t",
1023 &stat_size, data, i);
1024 vxge_add_string("vpath_genstats_count2_%d\t\t",
1025 &stat_size, data, i);
1026 vxge_add_string("vpath_genstats_count3_%d\t\t",
1027 &stat_size, data, i);
1028 vxge_add_string("vpath_genstats_count4_%d\t\t",
1029 &stat_size, data, i);
1030 vxge_add_string("vpath_genstats_count5_%d\t\t",
1031 &stat_size, data, i);
1032 vxge_add_string("prog_event_vnum0_%d\t\t\t",
1033 &stat_size, data, i);
1034 vxge_add_string("prog_event_vnum1_%d\t\t\t",
1035 &stat_size, data, i);
1036 vxge_add_string("prog_event_vnum2_%d\t\t\t",
1037 &stat_size, data, i);
1038 vxge_add_string("prog_event_vnum3_%d\t\t\t",
1039 &stat_size, data, i);
1040 vxge_add_string("rx_multi_cast_frame_discard_%d\t",
1041 &stat_size, data, i);
1042 vxge_add_string("rx_frm_transferred_%d\t\t",
1043 &stat_size, data, i);
1044 vxge_add_string("rxd_returned_%d\t\t\t",
1045 &stat_size, data, i);
1046 vxge_add_string("rx_mpa_len_fail_frms_%d\t\t",
1047 &stat_size, data, i);
1048 vxge_add_string("rx_mpa_mrk_fail_frms_%d\t\t",
1049 &stat_size, data, i);
1050 vxge_add_string("rx_mpa_crc_fail_frms_%d\t\t",
1051 &stat_size, data, i);
1052 vxge_add_string("rx_permitted_frms_%d\t\t",
1053 &stat_size, data, i);
1054 vxge_add_string("rx_vp_reset_discarded_frms_%d\t",
1055 &stat_size, data, i);
1056 vxge_add_string("rx_wol_frms_%d\t\t\t",
1057 &stat_size, data, i);
1058 vxge_add_string("tx_vp_reset_discarded_frms_%d\t",
1059 &stat_size, data, i);
1060 }
1061
1062 memcpy(data + stat_size, &ethtool_driver_stats_keys,
1063 sizeof(ethtool_driver_stats_keys));
1064 }
1065}
1066
1067static int vxge_ethtool_get_regs_len(struct net_device *dev)
1068{
1069 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
1070
1071 return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
1072}
1073
1074static u32 vxge_get_rx_csum(struct net_device *dev)
1075{
1076 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
1077
1078 return vdev->rx_csum;
1079}
1080
1081static int vxge_set_rx_csum(struct net_device *dev, u32 data)
1082{
1083 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
1084
1085 if (data)
1086 vdev->rx_csum = 1;
1087 else
1088 vdev->rx_csum = 0;
1089
1090 return 0;
1091}
1092
1093static int vxge_ethtool_op_set_tso(struct net_device *dev, u32 data)
1094{
1095 if (data)
1096 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
1097 else
1098 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
1099
1100 return 0;
1101}
1102
1103static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
1104{
1105 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
1106
1107 switch (sset) {
1108 case ETH_SS_STATS:
1109 return VXGE_TITLE_LEN +
1110 (vdev->no_of_vpath * VXGE_HW_VPATH_STATS_LEN) +
1111 (vdev->max_config_port * VXGE_HW_AGGR_STATS_LEN) +
1112 (vdev->max_config_port * VXGE_HW_PORT_STATS_LEN) +
1113 (vdev->no_of_vpath * VXGE_HW_VPATH_TX_STATS_LEN) +
1114 (vdev->no_of_vpath * VXGE_HW_VPATH_RX_STATS_LEN) +
1115 (vdev->no_of_vpath * VXGE_SW_STATS_LEN) +
1116 DRIVER_STAT_LEN;
1117 default:
1118 return -EOPNOTSUPP;
1119 }
1120}
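/*
 * The count returned for ETH_SS_STATS must match both the number of
 * ETH_GSTRING_LEN labels emitted by vxge_ethtool_get_strings() and the
 * number of u64 values emitted by vxge_get_ethtool_stats(); ethtool
 * sizes its user buffers from this value. A worked example, assuming a
 * hypothetical adapter with 4 vpaths and 2 ports:
 *
 *	5 + 4*27 + 2*13 + 2*94 + 4*19 + 4*42 + 4*60 + 16 = 827 entries
 *
 * (the per-section lengths and DRIVER_STAT_LEN come from vxge-ethtool.h)
 */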
1121
1122static const struct ethtool_ops vxge_ethtool_ops = {
1123 .get_settings = vxge_ethtool_gset,
1124 .set_settings = vxge_ethtool_sset,
1125 .get_drvinfo = vxge_ethtool_gdrvinfo,
1126 .get_regs_len = vxge_ethtool_get_regs_len,
1127 .get_regs = vxge_ethtool_gregs,
1128 .get_link = ethtool_op_get_link,
1129 .get_pauseparam = vxge_ethtool_getpause_data,
1130 .set_pauseparam = vxge_ethtool_setpause_data,
1131 .get_rx_csum = vxge_get_rx_csum,
1132 .set_rx_csum = vxge_set_rx_csum,
1133 .get_tx_csum = ethtool_op_get_tx_csum,
1134 .set_tx_csum = ethtool_op_set_tx_hw_csum,
1135 .get_sg = ethtool_op_get_sg,
1136 .set_sg = ethtool_op_set_sg,
1137 .get_tso = ethtool_op_get_tso,
1138 .set_tso = vxge_ethtool_op_set_tso,
1139 .get_strings = vxge_ethtool_get_strings,
1140 .phys_id = vxge_ethtool_idnic,
1141 .get_sset_count = vxge_ethtool_get_sset_count,
1142 .get_ethtool_stats = vxge_get_ethtool_stats,
1143};
1144
1145void initialize_ethtool_ops(struct net_device *ndev)
1146{
1147 SET_ETHTOOL_OPS(ndev, &vxge_ethtool_ops);
1148}
diff --git a/drivers/net/vxge/vxge-ethtool.h b/drivers/net/vxge/vxge-ethtool.h
new file mode 100644
index 000000000000..1c3df0a34acc
--- /dev/null
+++ b/drivers/net/vxge/vxge-ethtool.h
@@ -0,0 +1,67 @@
1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-ethtool.h: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2009 Neterion Inc.
13 ******************************************************************************/
14#ifndef _VXGE_ETHTOOL_H
15#define _VXGE_ETHTOOL_H
16
17#include "vxge-main.h"
18
19/* Ethtool related variables and Macros. */
20static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset);
21
22static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
23 {"\n DRIVER STATISTICS"},
24 {"vpaths_opened"},
25 {"vpath_open_fail_cnt"},
26 {"link_up_cnt"},
27 {"link_down_cnt"},
28 {"tx_frms"},
29 {"tx_errors"},
30 {"tx_bytes"},
31 {"txd_not_free"},
32 {"txd_out_of_desc"},
33 {"rx_frms"},
34 {"rx_errors"},
35 {"rx_bytes"},
36 {"rx_mcast"},
37 {"pci_map_fail_cnt"},
38 {"skb_alloc_fail_cnt"}
39};
40
41#define VXGE_TITLE_LEN 5
42#define VXGE_HW_VPATH_STATS_LEN 27
43#define VXGE_HW_AGGR_STATS_LEN 13
44#define VXGE_HW_PORT_STATS_LEN 94
45#define VXGE_HW_VPATH_TX_STATS_LEN 19
46#define VXGE_HW_VPATH_RX_STATS_LEN 42
47#define VXGE_SW_STATS_LEN 60
48#define VXGE_HW_STATS_LEN (VXGE_HW_VPATH_STATS_LEN +\
49 VXGE_HW_AGGR_STATS_LEN +\
50 VXGE_HW_PORT_STATS_LEN +\
51 VXGE_HW_VPATH_TX_STATS_LEN +\
52 VXGE_HW_VPATH_RX_STATS_LEN)
53
54#define DRIVER_STAT_LEN (sizeof(ethtool_driver_stats_keys)/ETH_GSTRING_LEN)
55#define STAT_LEN (VXGE_HW_STATS_LEN + DRIVER_STAT_LEN + VXGE_SW_STATS_LEN)
56
57/* Maximum flicker time of adapter LED */
58#define VXGE_MAX_FLICKER_TIME (60 * HZ) /* 60 seconds */
59#define VXGE_FLICKER_ON 1
60#define VXGE_FLICKER_OFF 0
61
62#define vxge_add_string(fmt, size, buf, ...) {\
63 snprintf(buf + *size, ETH_GSTRING_LEN, fmt, __VA_ARGS__); \
64 *size += ETH_GSTRING_LEN; \
65}
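/*
 * vxge_add_string() formats one label into the next fixed-width
 * ETH_GSTRING_LEN (32 byte) slot of the ethtool strings buffer and
 * advances the running offset; the "%s" / "" pairs seen at the section
 * titles exist only because __VA_ARGS__ may not be empty. A minimal
 * usage sketch with made-up values:
 *
 *	int size = 0;
 *	vxge_add_string("rx_frms_%d\t\t\t\t", &size, data, 3);
 *	// data now begins with "rx_frms_3", size == ETH_GSTRING_LEN
 */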
66
67#endif /*_VXGE_ETHTOOL_H*/
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
new file mode 100644
index 000000000000..61ef16118157
--- /dev/null
+++ b/drivers/net/vxge/vxge-main.c
@@ -0,0 +1,4502 @@
1/******************************************************************************
2* This software may be used and distributed according to the terms of
3* the GNU General Public License (GPL), incorporated herein by reference.
4* Drivers based on or derived from this code fall under the GPL and must
5* retain the authorship, copyright and license notice. This file is not
6* a complete program and may only be used when the entire operating
7* system is licensed under the GPL.
8* See the file COPYING in this distribution for more information.
9*
10* vxge-main.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
11* Virtualized Server Adapter.
12* Copyright(c) 2002-2009 Neterion Inc.
13*
14* The module loadable parameters that are supported by the driver and a brief
15* explanation of all the variables:
16* vlan_tag_strip:
17* Strip VLAN Tag enable/disable. Instructs the device to remove
18* the VLAN tag from all received tagged frames that are not
19* replicated at the internal L2 switch.
20* 0 - Do not strip the VLAN tag.
21* 1 - Strip the VLAN tag.
22*
23* addr_learn_en:
24* Enable learning the mac address of the guest OS interface in
25* a virtualization environment.
26* 0 - DISABLE
27* 1 - ENABLE
28*
29* max_config_port:
30* Maximum number of ports to be supported.
31* MIN - 1 and MAX - 2
32*
33* max_config_vpath:
34* This configures the maximum number of vpaths for each
35* device function.
36* MIN - 1 and MAX - 17
37*
38* max_config_dev:
39* This configures the maximum number of device functions to be enabled.
40* MIN - 1 and MAX - 17
41*
42******************************************************************************/
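/*
 * A hypothetical module load exercising the parameters above (values
 * chosen purely for illustration):
 *
 *	modprobe vxge vlan_tag_strip=1 addr_learn_en=0 max_config_port=1
 */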
43
44#include <linux/if_vlan.h>
45#include <linux/pci.h>
46#include <net/ip.h>
47#include <linux/netdevice.h>
48#include <linux/etherdevice.h>
49#include "vxge-main.h"
50#include "vxge-reg.h"
51
52MODULE_LICENSE("Dual BSD/GPL");
53MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O "
54 "Virtualized Server Adapter");
55
56static struct pci_device_id vxge_id_table[] __devinitdata = {
57 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
58 PCI_ANY_ID},
59 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
60 PCI_ANY_ID},
61 {0}
62};
63
64MODULE_DEVICE_TABLE(pci, vxge_id_table);
65
66VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE);
67VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT);
68VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
69VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
70VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT);
71VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);
72
73static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] =
74 {0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
75static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
76 {[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};
77module_param_array(bw_percentage, uint, NULL, 0);
78
79static struct vxge_drv_config *driver_config;
80
81static inline int is_vxge_card_up(struct vxgedev *vdev)
82{
83 return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
84}
85
86static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
87{
88 unsigned long flags = 0;
89 struct sk_buff *skb_ptr = NULL;
90 struct sk_buff **temp, *head, *skb;
91
92 if (spin_trylock_irqsave(&fifo->tx_lock, flags)) {
93 vxge_hw_vpath_poll_tx(fifo->handle, (void **)&skb_ptr);
94 spin_unlock_irqrestore(&fifo->tx_lock, flags);
95 }
96 /* free SKBs */
97 head = skb_ptr;
98 while (head) {
99 skb = head;
100 temp = (struct sk_buff **)&skb->cb;
101 head = *temp;
102 *temp = NULL;
103 dev_kfree_skb_irq(skb);
104 }
105}
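/*
 * vxge_hw_vpath_poll_tx() returns the completed skbs as a singly linked
 * list threaded through each skb's cb[] scratch area (the link is
 * stored there by vxge_xmit_compl() below). Apart from clearing the
 * link, the free loop above is equivalent to this sketch:
 *
 *	for (skb = head; skb; skb = next) {
 *		next = *(struct sk_buff **)&skb->cb;
 *		dev_kfree_skb_irq(skb);
 *	}
 */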
106
107static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
108{
109 int i;
110
111 /* Complete all transmits */
112 for (i = 0; i < vdev->no_of_vpath; i++)
113 VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
114}
115
116static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
117{
118 int i;
119 struct vxge_ring *ring;
120
121 /* Complete all receives*/
122 for (i = 0; i < vdev->no_of_vpath; i++) {
123 ring = &vdev->vpaths[i].ring;
124 vxge_hw_vpath_poll_rx(ring->handle);
125 }
126}
127
128/*
129 * MultiQ manipulation helper functions
130 */
131void vxge_stop_all_tx_queue(struct vxgedev *vdev)
132{
133 int i;
134 struct net_device *dev = vdev->ndev;
135
136 if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
137 for (i = 0; i < vdev->no_of_vpath; i++)
138 vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_STOP;
139 }
140 netif_tx_stop_all_queues(dev);
141}
142
143void vxge_stop_tx_queue(struct vxge_fifo *fifo)
144{
145 struct net_device *dev = fifo->ndev;
146
147 struct netdev_queue *txq = NULL;
148 if (fifo->tx_steering_type == TX_MULTIQ_STEERING)
149 txq = netdev_get_tx_queue(dev, fifo->driver_id);
150 else {
151 txq = netdev_get_tx_queue(dev, 0);
152 fifo->queue_state = VPATH_QUEUE_STOP;
153 }
154
155 netif_tx_stop_queue(txq);
156}
157
158void vxge_start_all_tx_queue(struct vxgedev *vdev)
159{
160 int i;
161 struct net_device *dev = vdev->ndev;
162
163 if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
164 for (i = 0; i < vdev->no_of_vpath; i++)
165 vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_START;
166 }
167 netif_tx_start_all_queues(dev);
168}
169
170static void vxge_wake_all_tx_queue(struct vxgedev *vdev)
171{
172 int i;
173 struct net_device *dev = vdev->ndev;
174
175 if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
176 for (i = 0; i < vdev->no_of_vpath; i++)
177 vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_START;
178 }
179 netif_tx_wake_all_queues(dev);
180}
181
182void vxge_wake_tx_queue(struct vxge_fifo *fifo, struct sk_buff *skb)
183{
184 struct net_device *dev = fifo->ndev;
185
186 int vpath_no = fifo->driver_id;
187 struct netdev_queue *txq = NULL;
188 if (fifo->tx_steering_type == TX_MULTIQ_STEERING) {
189 txq = netdev_get_tx_queue(dev, vpath_no);
190 if (netif_tx_queue_stopped(txq))
191 netif_tx_wake_queue(txq);
192 } else {
193 txq = netdev_get_tx_queue(dev, 0);
194 if (fifo->queue_state == VPATH_QUEUE_STOP)
195 if (netif_tx_queue_stopped(txq)) {
196 fifo->queue_state = VPATH_QUEUE_START;
197 netif_tx_wake_queue(txq);
198 }
199 }
200}
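/*
 * In non-multiqueue mode all vpath fifos share netdev tx queue 0, so
 * the per-fifo queue_state (VPATH_QUEUE_STOP/VPATH_QUEUE_START) records
 * which vpath actually stopped the shared queue; only a completion on
 * that vpath wakes it again. With TX_MULTIQ_STEERING each fifo owns its
 * own netdev_queue and the shadow state is not consulted.
 */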
201
202/*
203 * vxge_callback_link_up
204 *
205 * This function is called during interrupt context to notify link up state
206 * change.
207 */
208void
209vxge_callback_link_up(struct __vxge_hw_device *hldev)
210{
211 struct net_device *dev = hldev->ndev;
212 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
213
214 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
215 vdev->ndev->name, __func__, __LINE__);
216 printk(KERN_NOTICE "%s: Link Up\n", vdev->ndev->name);
217 vdev->stats.link_up++;
218
219 netif_carrier_on(vdev->ndev);
220 vxge_wake_all_tx_queue(vdev);
221
222 vxge_debug_entryexit(VXGE_TRACE,
223 "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
224}
225
226/*
227 * vxge_callback_link_down
228 *
229 * This function is called during interrupt context to notify link down state
230 * change.
231 */
232void
233vxge_callback_link_down(struct __vxge_hw_device *hldev)
234{
235 struct net_device *dev = hldev->ndev;
236 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
237
238 vxge_debug_entryexit(VXGE_TRACE,
239 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
240 printk(KERN_NOTICE "%s: Link Down\n", vdev->ndev->name);
241
242 vdev->stats.link_down++;
243 netif_carrier_off(vdev->ndev);
244 vxge_stop_all_tx_queue(vdev);
245
246 vxge_debug_entryexit(VXGE_TRACE,
247 "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
248}
249
250/*
251 * vxge_rx_alloc
252 *
253 * Allocate SKB.
254 */
255static struct sk_buff*
256vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
257{
258 struct net_device *dev;
259 struct sk_buff *skb;
260 struct vxge_rx_priv *rx_priv;
261
262 dev = ring->ndev;
263 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
264 ring->ndev->name, __func__, __LINE__);
265
266 rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
267
268 /* try to allocate skb first. this one may fail */
269 skb = netdev_alloc_skb(dev, skb_size +
270 VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
271 if (skb == NULL) {
272 vxge_debug_mem(VXGE_ERR,
273 "%s: out of memory to allocate SKB", dev->name);
274 ring->stats.skb_alloc_fail++;
275 return NULL;
276 }
277
278 vxge_debug_mem(VXGE_TRACE,
279 "%s: %s:%d Skb : 0x%p", ring->ndev->name,
280 __func__, __LINE__, skb);
281
282 skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
283
284 rx_priv->skb = skb;
285 rx_priv->data_size = skb_size;
286 vxge_debug_entryexit(VXGE_TRACE,
287 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
288
289 return skb;
290}
291
292/*
293 * vxge_rx_map
294 */
295static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
296{
297 struct vxge_rx_priv *rx_priv;
298 dma_addr_t dma_addr;
299
300 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
301 ring->ndev->name, __func__, __LINE__);
302 rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
303
304 dma_addr = pci_map_single(ring->pdev, rx_priv->skb->data,
305 rx_priv->data_size, PCI_DMA_FROMDEVICE);
306
307	if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
308 ring->stats.pci_map_fail++;
309 return -EIO;
310 }
311 vxge_debug_mem(VXGE_TRACE,
312 "%s: %s:%d 1 buffer mode dma_addr = 0x%llx",
313 ring->ndev->name, __func__, __LINE__,
314 (unsigned long long)dma_addr);
315 vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size);
316
317 rx_priv->data_dma = dma_addr;
318 vxge_debug_entryexit(VXGE_TRACE,
319 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
320
321 return 0;
322}
323
324/*
325 * vxge_rx_initial_replenish
326 * Allocation of RxD as an initial replenish procedure.
327 */
328static enum vxge_hw_status
329vxge_rx_initial_replenish(void *dtrh, void *userdata)
330{
331 struct vxge_ring *ring = (struct vxge_ring *)userdata;
332 struct vxge_rx_priv *rx_priv;
333
334 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
335 ring->ndev->name, __func__, __LINE__);
336 if (vxge_rx_alloc(dtrh, ring,
337 VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL)
338 return VXGE_HW_FAIL;
339
340 if (vxge_rx_map(dtrh, ring)) {
341 rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
342 dev_kfree_skb(rx_priv->skb);
343
344 return VXGE_HW_FAIL;
345 }
346 vxge_debug_entryexit(VXGE_TRACE,
347 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
348
349 return VXGE_HW_OK;
350}
351
352static inline void
353vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
354 int pkt_length, struct vxge_hw_ring_rxd_info *ext_info)
355{
356
357 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
358 ring->ndev->name, __func__, __LINE__);
359 skb_record_rx_queue(skb, ring->driver_id);
360 skb->protocol = eth_type_trans(skb, ring->ndev);
361
362 ring->stats.rx_frms++;
363 ring->stats.rx_bytes += pkt_length;
364
365 if (skb->pkt_type == PACKET_MULTICAST)
366 ring->stats.rx_mcast++;
367
368 vxge_debug_rx(VXGE_TRACE,
369 "%s: %s:%d skb protocol = %d",
370 ring->ndev->name, __func__, __LINE__, skb->protocol);
371
372 if (ring->gro_enable) {
373 if (ring->vlgrp && ext_info->vlan &&
374 (ring->vlan_tag_strip ==
375 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
376 vlan_gro_receive(&ring->napi, ring->vlgrp,
377 ext_info->vlan, skb);
378 else
379 napi_gro_receive(&ring->napi, skb);
380 } else {
381 if (ring->vlgrp && vlan &&
382 (ring->vlan_tag_strip ==
383 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
384 vlan_hwaccel_receive_skb(skb, ring->vlgrp, vlan);
385 else
386 netif_receive_skb(skb);
387 }
388 vxge_debug_entryexit(VXGE_TRACE,
389 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
390}
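/*
 * Delivery is chosen per ring: with GRO enabled the frame goes through
 * napi_gro_receive() (or vlan_gro_receive() when hardware stripped a
 * VLAN tag that must be reinserted); otherwise it falls back to
 * netif_receive_skb() or vlan_hwaccel_receive_skb(). The tag is only
 * honoured when the vpath was configured to strip it in hardware.
 */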
391
392static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
393 struct vxge_rx_priv *rx_priv)
394{
395 pci_dma_sync_single_for_device(ring->pdev,
396 rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE);
397
398 vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
399 vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
400}
401
402static inline void vxge_post(int *dtr_cnt, void **first_dtr,
403 void *post_dtr, struct __vxge_hw_ring *ringh)
404{
405 int dtr_count = *dtr_cnt;
406 if ((*dtr_cnt % VXGE_HW_RXSYNC_FREQ_CNT) == 0) {
407 if (*first_dtr)
408 vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr);
409 *first_dtr = post_dtr;
410 } else
411 vxge_hw_ring_rxd_post_post(ringh, post_dtr);
412 dtr_count++;
413 *dtr_cnt = dtr_count;
414}
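/*
 * RxDs are published in batches: every VXGE_HW_RXSYNC_FREQ_CNT
 * descriptors the accumulated batch head is posted with the _wmb
 * variant, whose write barrier ensures the device observes fully
 * initialized descriptors, and the current descriptor becomes the new
 * batch head. The caller flushes the final head the same way after its
 * completion loop (see the end of vxge_rx_1b_compl()).
 */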
415
416/*
417 * vxge_rx_1b_compl
418 *
419 * If the interrupt is because of a received frame or if the receive ring
420 * contains fresh as yet un-processed frames, this function is called.
421 */
422enum vxge_hw_status
423vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
424 u8 t_code, void *userdata)
425{
426 struct vxge_ring *ring = (struct vxge_ring *)userdata;
427 struct net_device *dev = ring->ndev;
428 unsigned int dma_sizes;
429 void *first_dtr = NULL;
430 int dtr_cnt = 0;
431 int data_size;
432 dma_addr_t data_dma;
433 int pkt_length;
434 struct sk_buff *skb;
435 struct vxge_rx_priv *rx_priv;
436 struct vxge_hw_ring_rxd_info ext_info;
437 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
438 ring->ndev->name, __func__, __LINE__);
439 ring->pkts_processed = 0;
440
441 vxge_hw_ring_replenish(ringh, 0);
442
443 do {
444 rx_priv = vxge_hw_ring_rxd_private_get(dtr);
445 skb = rx_priv->skb;
446 data_size = rx_priv->data_size;
447 data_dma = rx_priv->data_dma;
448
449 vxge_debug_rx(VXGE_TRACE,
450 "%s: %s:%d skb = 0x%p",
451 ring->ndev->name, __func__, __LINE__, skb);
452
453 vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
454 pkt_length = dma_sizes;
455
456 vxge_debug_rx(VXGE_TRACE,
457 "%s: %s:%d Packet Length = %d",
458 ring->ndev->name, __func__, __LINE__, pkt_length);
459
460 vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info);
461
462 /* check skb validity */
463 vxge_assert(skb);
464
465 prefetch((char *)skb + L1_CACHE_BYTES);
466 if (unlikely(t_code)) {
467
468 if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
469 VXGE_HW_OK) {
470
471 ring->stats.rx_errors++;
472 vxge_debug_rx(VXGE_TRACE,
473 "%s: %s :%d Rx T_code is %d",
474 ring->ndev->name, __func__,
475 __LINE__, t_code);
476
477 /* If the t_code is not supported and is other
478 * than 0x5 (an unparseable packet, e.g. an
479 * unknown IPv6 header), drop the frame.
480 */
481 vxge_re_pre_post(dtr, ring, rx_priv);
482
483 vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
484 ring->stats.rx_dropped++;
485 continue;
486 }
487 }
488
489 if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
490
491 if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
492
493 if (!vxge_rx_map(dtr, ring)) {
494 skb_put(skb, pkt_length);
495
496 pci_unmap_single(ring->pdev, data_dma,
497 data_size, PCI_DMA_FROMDEVICE);
498
499 vxge_hw_ring_rxd_pre_post(ringh, dtr);
500 vxge_post(&dtr_cnt, &first_dtr, dtr,
501 ringh);
502 } else {
503 dev_kfree_skb(rx_priv->skb);
504 rx_priv->skb = skb;
505 rx_priv->data_size = data_size;
506 vxge_re_pre_post(dtr, ring, rx_priv);
507
508 vxge_post(&dtr_cnt, &first_dtr, dtr,
509 ringh);
510 ring->stats.rx_dropped++;
511 break;
512 }
513 } else {
514 vxge_re_pre_post(dtr, ring, rx_priv);
515
516 vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
517 ring->stats.rx_dropped++;
518 break;
519 }
520 } else {
521 struct sk_buff *skb_up;
522
523 skb_up = netdev_alloc_skb(dev, pkt_length +
524 VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
525 if (skb_up != NULL) {
526 skb_reserve(skb_up,
527 VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
528
529 pci_dma_sync_single_for_cpu(ring->pdev,
530 data_dma, data_size,
531 PCI_DMA_FROMDEVICE);
532
533 vxge_debug_mem(VXGE_TRACE,
534 "%s: %s:%d skb_up = %p",
535 ring->ndev->name, __func__,
536 __LINE__, skb);
537 memcpy(skb_up->data, skb->data, pkt_length);
538
539 vxge_re_pre_post(dtr, ring, rx_priv);
540
541 vxge_post(&dtr_cnt, &first_dtr, dtr,
542 ringh);
543 /* pass the copied small SKB up the stack instead */
544 skb = skb_up;
545 skb_put(skb, pkt_length);
546 } else {
547 vxge_re_pre_post(dtr, ring, rx_priv);
548
549 vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
550 vxge_debug_rx(VXGE_ERR,
551 "%s: vxge_rx_1b_compl: out of "
552 "memory", dev->name);
553 ring->stats.skb_alloc_fail++;
554 break;
555 }
556 }
557
558 if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
559 !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
560 ring->rx_csum && /* Offload Rx side CSUM */
561 ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
562 ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
563 skb->ip_summed = CHECKSUM_UNNECESSARY;
564 else
565 skb->ip_summed = CHECKSUM_NONE;
566
567 vxge_rx_complete(ring, skb, ext_info.vlan,
568 pkt_length, &ext_info);
569
570 ring->budget--;
571 ring->pkts_processed++;
572 if (!ring->budget)
573 break;
574
575 } while (vxge_hw_ring_rxd_next_completed(ringh, &dtr,
576 &t_code) == VXGE_HW_OK);
577
578 if (first_dtr)
579 vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);
580
581 dev->last_rx = jiffies;
582
583 vxge_debug_entryexit(VXGE_TRACE,
584 "%s:%d Exiting...",
585 __func__, __LINE__);
586 return VXGE_HW_OK;
587}
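/*
 * The loop above implements the usual copy-break scheme: frames longer
 * than VXGE_LL_RX_COPY_THRESHOLD hand their buffer straight up the
 * stack and the descriptor gets a freshly allocated, freshly mapped
 * replacement, while short frames are memcpy'd into a small new skb so
 * the large, already-mapped buffer can simply be re-posted. In outline:
 *
 *	if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD)
 *		replace the descriptor buffer, pass the old skb up;
 *	else
 *		copy into skb_up, re-post the old buffer, pass skb_up up;
 */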
588
589/*
590 * vxge_xmit_compl
591 *
592 * If an interrupt was raised to indicate DMA complete of the Tx packet,
593 * this function is called. It identifies the last TxD whose buffer was
594 * freed and frees all skbs whose data has already been DMA'ed into
595 * the NIC's internal memory.
596 */
597enum vxge_hw_status
598vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
599 enum vxge_hw_fifo_tcode t_code, void *userdata,
600 void **skb_ptr)
601{
602 struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
603 struct sk_buff *skb, *head = NULL;
604 struct sk_buff **temp;
605 int pkt_cnt = 0;
606
607 vxge_debug_entryexit(VXGE_TRACE,
608 "%s:%d Entered....", __func__, __LINE__);
609
610 do {
611 int frg_cnt;
612 skb_frag_t *frag;
613 int i = 0, j;
614 struct vxge_tx_priv *txd_priv =
615 vxge_hw_fifo_txdl_private_get(dtr);
616
617 skb = txd_priv->skb;
618 frg_cnt = skb_shinfo(skb)->nr_frags;
619 frag = &skb_shinfo(skb)->frags[0];
620
621 vxge_debug_tx(VXGE_TRACE,
622 "%s: %s:%d fifo_hw = %p dtr = %p "
623 "tcode = 0x%x", fifo->ndev->name, __func__,
624 __LINE__, fifo_hw, dtr, t_code);
625 /* check skb validity */
626 vxge_assert(skb);
627 vxge_debug_tx(VXGE_TRACE,
628 "%s: %s:%d skb = %p itxd_priv = %p frg_cnt = %d",
629 fifo->ndev->name, __func__, __LINE__,
630 skb, txd_priv, frg_cnt);
631 if (unlikely(t_code)) {
632 fifo->stats.tx_errors++;
633 vxge_debug_tx(VXGE_ERR,
634 "%s: tx: dtr %p completed due to "
635 "error t_code %01x", fifo->ndev->name,
636 dtr, t_code);
637 vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code);
638 }
639
640 /* for unfragmented skb */
641 pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
642 skb_headlen(skb), PCI_DMA_TODEVICE);
643
644 for (j = 0; j < frg_cnt; j++) {
645 pci_unmap_page(fifo->pdev,
646 txd_priv->dma_buffers[i++],
647 frag->size, PCI_DMA_TODEVICE);
648 frag += 1;
649 }
650
651 vxge_hw_fifo_txdl_free(fifo_hw, dtr);
652
653 /* Updating the statistics block */
654 fifo->stats.tx_frms++;
655 fifo->stats.tx_bytes += skb->len;
656
657 temp = (struct sk_buff **)&skb->cb;
658 *temp = head;
659 head = skb;
660
661 pkt_cnt++;
662 if (pkt_cnt > fifo->indicate_max_pkts)
663 break;
664
665 } while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
666 &dtr, &t_code) == VXGE_HW_OK);
667
668 vxge_wake_tx_queue(fifo, skb);
669
670 if (skb_ptr)
671 *skb_ptr = (void *) head;
672
673 vxge_debug_entryexit(VXGE_TRACE,
674 "%s: %s:%d Exiting...",
675 fifo->ndev->name, __func__, __LINE__);
676 return VXGE_HW_OK;
677}
678
679/* select a vpath to transmit the packet */
680static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb,
681 int *do_lock)
682{
683 u16 queue_len, counter = 0;
684 if (skb->protocol == htons(ETH_P_IP)) {
685 struct iphdr *ip;
686 struct tcphdr *th;
687
688 ip = ip_hdr(skb);
689
690 if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
691 th = (struct tcphdr *)(((unsigned char *)ip) +
692 ip->ihl*4);
693
694 queue_len = vdev->no_of_vpath;
695 counter = (ntohs(th->source) +
696 ntohs(th->dest)) &
697 vdev->vpath_selector[queue_len - 1];
698 if (counter >= queue_len)
699 counter = queue_len - 1;
700
701 if (ip->protocol == IPPROTO_UDP) {
702#ifdef NETIF_F_LLTX
703 *do_lock = 0;
704#endif
705 }
706 }
707 }
708 return counter;
709}
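/*
 * The steering hash folds the TCP/UDP port pair into a vpath index
 * through the vpath_selector[] mask table defined above. A worked
 * example with 4 vpaths (vpath_selector[3] == 3, a two-bit mask),
 * source port 1000 and destination port 80:
 *
 *	counter = (1000 + 80) & 3 = 1080 & 3 = 0  ->  vpath 0
 *
 * All segments of a flow thus map to the same fifo, preserving per-flow
 * ordering without shared state.
 */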
710
711static enum vxge_hw_status vxge_search_mac_addr_in_list(
712 struct vxge_vpath *vpath, u64 del_mac)
713{
714 struct list_head *entry, *next;
715 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
716 if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac)
717 return TRUE;
718 }
719 return FALSE;
720}
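/*
 * Note that list_for_each_safe() is only needed when entries may be
 * deleted during the walk; this lookup never deletes, so plain
 * list_for_each() would suffice. The safe variant is merely harmless.
 */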
721
722static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
723{
724 struct macInfo mac_info;
725 u8 *mac_address = NULL;
726 u64 mac_addr = 0, vpath_vector = 0;
727 int vpath_idx = 0;
728 enum vxge_hw_status status = VXGE_HW_OK;
729 struct vxge_vpath *vpath = NULL;
730 struct __vxge_hw_device *hldev;
731
732 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
733
734 mac_address = (u8 *)&mac_addr;
735 memcpy(mac_address, mac_header, ETH_ALEN);
736
737 /* Is this mac address already in the list? */
738 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
739 vpath = &vdev->vpaths[vpath_idx];
740 if (vxge_search_mac_addr_in_list(vpath, mac_addr))
741 return vpath_idx;
742 }
743
744 memset(&mac_info, 0, sizeof(struct macInfo));
745 memcpy(mac_info.macaddr, mac_header, ETH_ALEN);
746
747 /* Does any vpath have room to add this mac address to its DA table? */
748 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
749 vpath = &vdev->vpaths[vpath_idx];
750 if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) {
751 /* Add this mac address to this vpath */
752 mac_info.vpath_no = vpath_idx;
753 mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
754 status = vxge_add_mac_addr(vdev, &mac_info);
755 if (status != VXGE_HW_OK)
756 return -EPERM;
757 return vpath_idx;
758 }
759 }
760
761 mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST;
762 vpath_idx = 0;
763 mac_info.vpath_no = vpath_idx;
764 /* Is the first vpath already selected as a catch-basin? */
765 vpath = &vdev->vpaths[vpath_idx];
766 if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) {
767 /* Add this mac address to this vpath */
768 if (FALSE == vxge_mac_list_add(vpath, &mac_info))
769 return -EPERM;
770 return vpath_idx;
771 }
772
773 /* Select first vpath as catch-basin */
774 vpath_vector = vxge_mBIT(vpath->device_id);
775 status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
776 vxge_hw_mgmt_reg_type_mrpcim,
777 0,
778 (ulong)offsetof(
779 struct vxge_hw_mrpcim_reg,
780 rts_mgr_cbasin_cfg),
781 vpath_vector);
782 if (status != VXGE_HW_OK) {
783 vxge_debug_tx(VXGE_ERR,
784 "%s: Unable to set the vpath-%d in catch-basin mode",
785 VXGE_DRIVER_NAME, vpath->device_id);
786 return -EPERM;
787 }
788
789 if (FALSE == vxge_mac_list_add(vpath, &mac_info))
790 return -EPERM;
791
792 return vpath_idx;
793}
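/*
 * Catch-basin mode, as used above: once every vpath's DA table is full,
 * vpath 0 has its bit set in rts_mgr_cbasin_cfg so that frames whose
 * destination address the hardware cannot match are still steered
 * somewhere, and further learned addresses are tracked only in the
 * driver's software list.
 */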
794
795/**
796 * vxge_xmit
797 * @skb : the socket buffer containing the Tx data.
798 * @dev : device pointer.
799 *
800 * This function is the Tx entry point of the driver. The Neterion NIC
801 * supports protocol-assist features on the Tx side: CSO, S/G and LSO.
802 * NOTE: when the device cannot queue the packet, the trans_start
803 * variable will simply not be updated.
804*/
805static int
806vxge_xmit(struct sk_buff *skb, struct net_device *dev)
807{
808 struct vxge_fifo *fifo = NULL;
809 void *dtr_priv;
810 void *dtr = NULL;
811 struct vxgedev *vdev = NULL;
812 enum vxge_hw_status status;
813 int frg_cnt, first_frg_len;
814 skb_frag_t *frag;
815 int i = 0, j = 0, avail;
816 u64 dma_pointer;
817 struct vxge_tx_priv *txdl_priv = NULL;
818 struct __vxge_hw_fifo *fifo_hw;
819 u32 max_mss = 0x0;
820 int offload_type;
821 unsigned long flags = 0;
822 int vpath_no = 0;
823 int do_spin_tx_lock = 1;
824
825 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
826 dev->name, __func__, __LINE__);
827
828 /* A buffer with no data will be dropped */
829 if (unlikely(skb->len <= 0)) {
830 vxge_debug_tx(VXGE_ERR,
831 "%s: Buffer has no data..", dev->name);
832 dev_kfree_skb(skb);
833 return NETDEV_TX_OK;
834 }
835
836 vdev = (struct vxgedev *)netdev_priv(dev);
837
838 if (unlikely(!is_vxge_card_up(vdev))) {
839 vxge_debug_tx(VXGE_ERR,
840 "%s: vdev not initialized", dev->name);
841 dev_kfree_skb(skb);
842 return NETDEV_TX_OK;
843 }
844
845 if (vdev->config.addr_learn_en) {
846 vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
847 if (vpath_no == -EPERM) {
848 vxge_debug_tx(VXGE_ERR,
849 "%s: Failed to store the mac address",
850 dev->name);
851 dev_kfree_skb(skb);
852 return NETDEV_TX_OK;
853 }
854 }
855
856 if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
857 vpath_no = skb_get_queue_mapping(skb);
858 else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
859 vpath_no = vxge_get_vpath_no(vdev, skb, &do_spin_tx_lock);
860
861 vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);
862
863 if (vpath_no >= vdev->no_of_vpath)
864 vpath_no = 0;
865
866 fifo = &vdev->vpaths[vpath_no].fifo;
867 fifo_hw = fifo->handle;
868
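/*
 * With TX_PORT_STEERING several senders may presumably map to the
 * same fifo, so vxge_get_vpath_no can clear do_spin_tx_lock and we
 * only trylock here, returning NETDEV_TX_LOCKED so the stack retries
 * rather than spinning on the fifo lock.
 */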
869 if (do_spin_tx_lock)
870 spin_lock_irqsave(&fifo->tx_lock, flags);
871 else {
872 if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
873 return NETDEV_TX_LOCKED;
874 }
875
876 if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING) {
877 if (netif_subqueue_stopped(dev, skb)) {
878 spin_unlock_irqrestore(&fifo->tx_lock, flags);
879 return NETDEV_TX_BUSY;
880 }
881 } else if (unlikely(fifo->queue_state == VPATH_QUEUE_STOP)) {
882 if (netif_queue_stopped(dev)) {
883 spin_unlock_irqrestore(&fifo->tx_lock, flags);
884 return NETDEV_TX_BUSY;
885 }
886 }
887 avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
888 if (avail == 0) {
889 vxge_debug_tx(VXGE_ERR,
890 "%s: No free TXDs available", dev->name);
891 fifo->stats.txd_not_free++;
892 vxge_stop_tx_queue(fifo);
893 goto _exit2;
894 }
895
896 status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
897 if (unlikely(status != VXGE_HW_OK)) {
898 vxge_debug_tx(VXGE_ERR,
899 "%s: Out of descriptors .", dev->name);
900 fifo->stats.txd_out_of_desc++;
901 vxge_stop_tx_queue(fifo);
902 goto _exit2;
903 }
904
905 vxge_debug_tx(VXGE_TRACE,
906 "%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
907 dev->name, __func__, __LINE__,
908 fifo_hw, dtr, dtr_priv);
909
910 if (vdev->vlgrp && vlan_tx_tag_present(skb)) {
911 u16 vlan_tag = vlan_tx_tag_get(skb);
912 vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
913 }
914
915 first_frg_len = skb_headlen(skb);
916
917 dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len,
918 PCI_DMA_TODEVICE);
919
920 if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
921 vxge_hw_fifo_txdl_free(fifo_hw, dtr);
922 vxge_stop_tx_queue(fifo);
923 fifo->stats.pci_map_fail++;
924 goto _exit2;
925 }
926
927 txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
928 txdl_priv->skb = skb;
929 txdl_priv->dma_buffers[j] = dma_pointer;
930
931 frg_cnt = skb_shinfo(skb)->nr_frags;
932 vxge_debug_tx(VXGE_TRACE,
933 "%s: %s:%d skb = %p txdl_priv = %p "
934 "frag_cnt = %d dma_pointer = 0x%llx", dev->name,
935 __func__, __LINE__, skb, txdl_priv,
936 frg_cnt, (unsigned long long)dma_pointer);
937
938 vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
939 first_frg_len);
940
941 frag = &skb_shinfo(skb)->frags[0];
942 for (i = 0; i < frg_cnt; i++) {
943 /* ignore 0 length fragment */
944 if (!frag->size)
945 continue;
946
947 dma_pointer =
948 (u64)pci_map_page(fifo->pdev, frag->page,
949 frag->page_offset, frag->size,
950 PCI_DMA_TODEVICE);
951
952 if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer)))
953 goto _exit0;
954 vxge_debug_tx(VXGE_TRACE,
955 "%s: %s:%d frag = %d dma_pointer = 0x%llx",
956 dev->name, __func__, __LINE__, i,
957 (unsigned long long)dma_pointer);
958
959 txdl_priv->dma_buffers[j] = dma_pointer;
960 vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
961 frag->size);
962 frag += 1;
963 }
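/*
 * The TxDL now describes buffer 0 (the linear header mapped above)
 * followed by one buffer per non-empty page fragment; note that j
 * counts descriptor buffers while i counts skb fragments.
 */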
964
965 offload_type = vxge_offload_type(skb);
966
967 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
968
969 int mss = vxge_tcp_mss(skb);
970 if (mss) {
971 max_mss = dev->mtu + ETH_HLEN -
972 VXGE_HW_TCPIP_HEADER_MAX_SIZE;
973 if (mss > max_mss)
974 mss = max_mss;
975 vxge_debug_tx(VXGE_TRACE,
976 "%s: %s:%d mss = %d",
977 dev->name, __func__, __LINE__, mss);
978 vxge_hw_fifo_txdl_mss_set(dtr, mss);
979 } else {
980 vxge_assert(skb->len <=
981 dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE);
982 vxge_assert(0);
983 goto _exit1;
984 }
985 }
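/*
 * The MSS clamp above presumably keeps the advertised segment size
 * small enough that payload plus worst-case protocol headers
 * (VXGE_HW_TCPIP_HEADER_MAX_SIZE) still fits in an MTU-sized frame.
 */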
986
987 if (skb->ip_summed == CHECKSUM_PARTIAL)
988 vxge_hw_fifo_txdl_cksum_set_bits(dtr,
989 VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
990 VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN |
991 VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);
992
993 vxge_hw_fifo_txdl_post(fifo_hw, dtr);
994 dev->trans_start = jiffies;
995 spin_unlock_irqrestore(&fifo->tx_lock, flags);
996
997 VXGE_COMPLETE_VPATH_TX(fifo);
998 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
999 dev->name, __func__, __LINE__);
1000 return 0;
1001
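/*
 * Error unwind: _exit0 logs the fragment mapping failure, _exit1
 * unmaps everything mapped so far and frees the TxDL, and _exit2
 * drops the skb and releases the fifo lock; each label falls through
 * to the next.
 */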
1002_exit0:
1003 vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);
1004
1005_exit1:
1006 j = 0;
1007 frag = &skb_shinfo(skb)->frags[0];
1008
1009 pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++],
1010 skb_headlen(skb), PCI_DMA_TODEVICE);
1011
1012 for (; j < i; j++) {
1013 pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j],
1014 frag->size, PCI_DMA_TODEVICE);
1015 frag += 1;
1016 }
1017
1018 vxge_hw_fifo_txdl_free(fifo_hw, dtr);
1019_exit2:
1020 dev_kfree_skb(skb);
1021 spin_unlock_irqrestore(&fifo->tx_lock, flags);
1022 VXGE_COMPLETE_VPATH_TX(fifo);
1023
1024 return 0;
1025}
1026
1027/*
1028 * vxge_rx_term
1029 *
1030 * Function will be called by hw function to abort all outstanding receive
1031 * descriptors.
1032 */
1033static void
1034vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
1035{
1036 struct vxge_ring *ring = (struct vxge_ring *)userdata;
1037 struct vxge_rx_priv *rx_priv =
1038 vxge_hw_ring_rxd_private_get(dtrh);
1039
1040 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
1041 ring->ndev->name, __func__, __LINE__);
1042 if (state != VXGE_HW_RXD_STATE_POSTED)
1043 return;
1044
1045 pci_unmap_single(ring->pdev, rx_priv->data_dma,
1046 rx_priv->data_size, PCI_DMA_FROMDEVICE);
1047
1048 dev_kfree_skb(rx_priv->skb);
1049
1050 vxge_debug_entryexit(VXGE_TRACE,
1051 "%s: %s:%d Exiting...",
1052 ring->ndev->name, __func__, __LINE__);
1053}
1054
1055/*
1056 * vxge_tx_term
1057 *
1058 * Function will be called to abort all outstanding tx descriptors
1059 */
1060static void
1061vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
1062{
1063 struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
1064 skb_frag_t *frag;
1065 int i = 0, j, frg_cnt;
1066 struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);
1067 struct sk_buff *skb = txd_priv->skb;
1068
1069 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1070
1071 if (state != VXGE_HW_TXDL_STATE_POSTED)
1072 return;
1073
1074 /* check skb validity */
1075 vxge_assert(skb);
1076 frg_cnt = skb_shinfo(skb)->nr_frags;
1077 frag = &skb_shinfo(skb)->frags[0];
1078
1079 /* for unfragmented skb */
1080 pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
1081 skb_headlen(skb), PCI_DMA_TODEVICE);
1082
1083 for (j = 0; j < frg_cnt; j++) {
1084 pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
1085 frag->size, PCI_DMA_TODEVICE);
1086 frag += 1;
1087 }
1088
1089 dev_kfree_skb(skb);
1090
1091 vxge_debug_entryexit(VXGE_TRACE,
1092 "%s:%d Exiting...", __func__, __LINE__);
1093}
1094
1095/**
1096 * vxge_set_multicast
1097 * @dev: pointer to the device structure
1098 *
1099 * Entry point for multicast address enable/disable
1100 * This function is a driver entry point which gets called by the kernel
1101 * whenever multicast addresses must be enabled/disabled. This also gets
1102 * called to set/reset promiscuous mode. Depending on the device flags, we
1103 * determine whether multicast addresses must be enabled or promiscuous mode
1104 * is to be disabled, etc.
1105 */
1106static void vxge_set_multicast(struct net_device *dev)
1107{
1108 struct dev_mc_list *mclist;
1109 struct vxgedev *vdev;
1110 int i, mcast_cnt = 0;
1111 struct __vxge_hw_device *hldev;
1112 enum vxge_hw_status status = VXGE_HW_OK;
1113 struct macInfo mac_info;
1114 int vpath_idx = 0;
1115 struct vxge_mac_addrs *mac_entry;
1116 struct list_head *list_head;
1117 struct list_head *entry, *next;
1118 u8 *mac_address = NULL;
1119
1120 vxge_debug_entryexit(VXGE_TRACE,
1121 "%s:%d", __func__, __LINE__);
1122
1123 vdev = (struct vxgedev *)netdev_priv(dev);
1124 hldev = (struct __vxge_hw_device *)vdev->devh;
1125
1126 if (unlikely(!is_vxge_card_up(vdev)))
1127 return;
1128
1129 if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
1130 for (i = 0; i < vdev->no_of_vpath; i++) {
1131 vxge_assert(vdev->vpaths[i].is_open);
1132 status = vxge_hw_vpath_mcast_enable(
1133 vdev->vpaths[i].handle);
1134 vdev->all_multi_flg = 1;
1135 }
1136 } else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
1137 for (i = 0; i < vdev->no_of_vpath; i++) {
1138 vxge_assert(vdev->vpaths[i].is_open);
1139 status = vxge_hw_vpath_mcast_disable(
1140 vdev->vpaths[i].handle);
1141 vdev->all_multi_flg = 0;
1142 }
1143 }
1144
1145 if (status != VXGE_HW_OK)
1146 vxge_debug_init(VXGE_ERR,
1147 "failed to %s multicast, status %d",
1148 dev->flags & IFF_ALLMULTI ?
1149 "enable" : "disable", status);
1150
1151 if (!vdev->config.addr_learn_en) {
1152 if (dev->flags & IFF_PROMISC) {
1153 for (i = 0; i < vdev->no_of_vpath; i++) {
1154 vxge_assert(vdev->vpaths[i].is_open);
1155 status = vxge_hw_vpath_promisc_enable(
1156 vdev->vpaths[i].handle);
1157 }
1158 } else {
1159 for (i = 0; i < vdev->no_of_vpath; i++) {
1160 vxge_assert(vdev->vpaths[i].is_open);
1161 status = vxge_hw_vpath_promisc_disable(
1162 vdev->vpaths[i].handle);
1163 }
1164 }
1165 }
1166
1167 memset(&mac_info, 0, sizeof(struct macInfo));
1168 /* Update individual M_CAST address list */
1169 if ((!vdev->all_multi_flg) && dev->mc_count) {
1170
1171 mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
1172 list_head = &vdev->vpaths[0].mac_addr_list;
1173 if ((dev->mc_count +
1174 (vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
1175 vdev->vpaths[0].max_mac_addr_cnt)
1176 goto _set_all_mcast;
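/*
 * That is: if the new multicast entries plus the existing unicast
 * entries (mac_addr_cnt - mcast_addr_cnt) would overflow the DA
 * table, fall back to hardware all-multicast instead of tracking
 * each address individually.
 */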
1177
1178 /* Delete previous MC's */
1179 for (i = 0; i < mcast_cnt; i++) {
1180 if (!list_empty(list_head))
1181 mac_entry = (struct vxge_mac_addrs *)
1182 list_first_entry(list_head,
1183 struct vxge_mac_addrs,
1184 item);
1185
1186 list_for_each_safe(entry, next, list_head) {
1187
1188 mac_entry = (struct vxge_mac_addrs *) entry;
1189 /* Copy the mac address to delete */
1190 mac_address = (u8 *)&mac_entry->macaddr;
1191 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
1192
1193 /* Is this a multicast address */
1194 if (0x01 & mac_info.macaddr[0]) {
1195 for (vpath_idx = 0; vpath_idx <
1196 vdev->no_of_vpath;
1197 vpath_idx++) {
1198 mac_info.vpath_no = vpath_idx;
1199 status = vxge_del_mac_addr(
1200 vdev,
1201 &mac_info);
1202 }
1203 }
1204 }
1205 }
1206
1207 /* Add new ones */
1208 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
1209 i++, mclist = mclist->next) {
1210
1211 memcpy(mac_info.macaddr, mclist->dmi_addr, ETH_ALEN);
1212 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
1213 vpath_idx++) {
1214 mac_info.vpath_no = vpath_idx;
1215 mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
1216 status = vxge_add_mac_addr(vdev, &mac_info);
1217 if (status != VXGE_HW_OK) {
1218 vxge_debug_init(VXGE_ERR,
1219 "%s:%d Setting individual"
1220 "multicast address failed",
1221 __func__, __LINE__);
1222 goto _set_all_mcast;
1223 }
1224 }
1225 }
1226
1227 return;
1228_set_all_mcast:
1229 mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
1230 /* Delete previous MC's */
1231 for (i = 0; i < mcast_cnt; i++) {
1232
1233 list_for_each_safe(entry, next, list_head) {
1234
1235 mac_entry = (struct vxge_mac_addrs *) entry;
1236 /* Copy the mac address to delete */
1237 mac_address = (u8 *)&mac_entry->macaddr;
1238 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
1239
1240 /* Is this a multicast address */
1241 if (0x01 & mac_info.macaddr[0])
1242 break;
1243 }
1244
1245 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
1246 vpath_idx++) {
1247 mac_info.vpath_no = vpath_idx;
1248 status = vxge_del_mac_addr(vdev, &mac_info);
1249 }
1250 }
1251
1252 /* Enable all multicast */
1253 for (i = 0; i < vdev->no_of_vpath; i++) {
1254 vxge_assert(vdev->vpaths[i].is_open);
1255 status = vxge_hw_vpath_mcast_enable(
1256 vdev->vpaths[i].handle);
1257 if (status != VXGE_HW_OK) {
1258 vxge_debug_init(VXGE_ERR,
1259 "%s:%d Enabling all multicasts failed",
1260 __func__, __LINE__);
1261 }
1262 vdev->all_multi_flg = 1;
1263 }
1264 dev->flags |= IFF_ALLMULTI;
1265 }
1266
1267 vxge_debug_entryexit(VXGE_TRACE,
1268 "%s:%d Exiting...", __func__, __LINE__);
1269}
1270
1271/**
1272 * vxge_set_mac_addr
1273 * @dev: pointer to the device structure
1274 *
1275 * Update entry "0" (default MAC addr)
1276 */
1277static int vxge_set_mac_addr(struct net_device *dev, void *p)
1278{
1279 struct sockaddr *addr = p;
1280 struct vxgedev *vdev;
1281 struct __vxge_hw_device *hldev;
1282 enum vxge_hw_status status = VXGE_HW_OK;
1283 struct macInfo mac_info_new, mac_info_old;
1284 int vpath_idx = 0;
1285
1286 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1287
1288 vdev = (struct vxgedev *)netdev_priv(dev);
1289 hldev = vdev->devh;
1290
1291 if (!is_valid_ether_addr(addr->sa_data))
1292 return -EINVAL;
1293
1294 memset(&mac_info_new, 0, sizeof(struct macInfo));
1295 memset(&mac_info_old, 0, sizeof(struct macInfo));
1296
1297 vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...",
1298 __func__, __LINE__);
1299
1300 /* Get the old address */
1301 memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len);
1302
1303 /* Copy the new address */
1304 memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len);
1305
1306 /* First delete the old mac address from all the vpaths
1307 as we can't specify the index while adding new mac address */
1308 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
1309 struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx];
1310 if (!vpath->is_open) {
1311 /* This can happen when this interface is added/removed
1312 to the bonding interface. Delete this station address
1313 from the linked list */
1314 vxge_mac_list_del(vpath, &mac_info_old);
1315
1316 /* Add this new address to the linked list
1317 for later restoring */
1318 vxge_mac_list_add(vpath, &mac_info_new);
1319
1320 continue;
1321 }
1322 /* Delete the station address */
1323 mac_info_old.vpath_no = vpath_idx;
1324 status = vxge_del_mac_addr(vdev, &mac_info_old);
1325 }
1326
1327 if (unlikely(!is_vxge_card_up(vdev))) {
1328 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1329 return VXGE_HW_OK;
1330 }
1331
1332 /* Set this mac address to all the vpaths */
1333 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
1334 mac_info_new.vpath_no = vpath_idx;
1335 mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
1336 status = vxge_add_mac_addr(vdev, &mac_info_new);
1337 if (status != VXGE_HW_OK)
1338 return -EINVAL;
1339 }
1340
1341 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1342
1343 return status;
1344}
1345
1346/*
1347 * vxge_vpath_intr_enable
1348 * @vdev: pointer to vdev
1349 * @vp_id: vpath for which to enable the interrupts
1350 *
1351 * Enables the interrupts for the vpath
1352*/
1353void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
1354{
1355 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1356 int msix_id, alarm_msix_id;
1357 int tim_msix_id[4] = {[0 ...3] = 0};
1358
1359 vxge_hw_vpath_intr_enable(vpath->handle);
1360
1361 if (vdev->config.intr_type == INTA)
1362 vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
1363 else {
1364 msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE;
1365 alarm_msix_id =
1366 VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
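/*
 * Example layout (assuming VXGE_HW_VPATH_MSIX_ACTIVE == 4 and two
 * vpaths): vectors 0/1 are vpath 0's Tx/Rx TIM vectors, 4/5 are
 * vpath 1's, and the shared alarm vector is 4 * 2 - 2 = 6.
 */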
1367
1368 tim_msix_id[0] = msix_id;
1369 tim_msix_id[1] = msix_id + 1;
1370 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
1371 alarm_msix_id);
1372
1373 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
1374 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);
1375
1376 /* enable the alarm vector */
1377 vxge_hw_vpath_msix_unmask(vpath->handle, alarm_msix_id);
1378 }
1379}
1380
1381/*
1382 * vxge_vpath_intr_disable
1383 * @vdev: pointer to vdev
1384 * @vp_id: vpath for which to disable the interrupts
1385 *
1386 * Disables the interrupts for the vpath
1387*/
1388void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
1389{
1390 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1391 int msix_id;
1392
1393 vxge_hw_vpath_intr_disable(vpath->handle);
1394
1395 if (vdev->config.intr_type == INTA)
1396 vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
1397 else {
1398 msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE;
1399 vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
1400 vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);
1401
1402 /* disable the alarm vector */
1403 msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
1404 vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
1405 }
1406}
1407
1408/*
1409 * vxge_reset_vpath
1410 * @vdev: pointer to vdev
1411 * @vp_id: vpath to reset
1412 *
1413 * Resets the vpath
1414*/
1415static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
1416{
1417 enum vxge_hw_status status = VXGE_HW_OK;
1418 int ret = 0;
1419
1420 /* check if device is down already */
1421 if (unlikely(!is_vxge_card_up(vdev)))
1422 return 0;
1423
1424 /* is device reset already scheduled */
1425 if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
1426 return 0;
1427
1428 if (vdev->vpaths[vp_id].handle) {
1429 if (vxge_hw_vpath_reset(vdev->vpaths[vp_id].handle)
1430 == VXGE_HW_OK) {
1431 if (is_vxge_card_up(vdev) &&
1432 vxge_hw_vpath_recover_from_reset(
1433 vdev->vpaths[vp_id].handle)
1434 != VXGE_HW_OK) {
1435 vxge_debug_init(VXGE_ERR,
1436 "vxge_hw_vpath_recover_from_reset"
1437 "failed for vpath:%d", vp_id);
1438 return status;
1439 }
1440 } else {
1441 vxge_debug_init(VXGE_ERR,
1442 "vxge_hw_vpath_reset failed for"
1443 "vpath:%d", vp_id);
1444 return status;
1445 }
1446 } else
1447 return VXGE_HW_FAIL;
1448
1449 vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
1450 vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
1451
1452 /* Enable all broadcast */
1453 vxge_hw_vpath_bcast_enable(vdev->vpaths[vp_id].handle);
1454
1455 /* Enable the interrupts */
1456 vxge_vpath_intr_enable(vdev, vp_id);
1457
1458 smp_wmb();
1459
1460 /* Enable the flow of traffic through the vpath */
1461 vxge_hw_vpath_enable(vdev->vpaths[vp_id].handle);
1462
1463 smp_wmb();
1464 vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[vp_id].handle);
1465 vdev->vpaths[vp_id].ring.last_status = VXGE_HW_OK;
1466
1467 /* Vpath reset done */
1468 clear_bit(vp_id, &vdev->vp_reset);
1469
1470 /* Start the vpath queue */
1471 vxge_wake_tx_queue(&vdev->vpaths[vp_id].fifo, NULL);
1472
1473 return ret;
1474}
1475
1476static int do_vxge_reset(struct vxgedev *vdev, int event)
1477{
1478 enum vxge_hw_status status;
1479 int ret = 0, vp_id, i;
1480
1481 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1482
1483 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) {
1484 /* check if device is down already */
1485 if (unlikely(!is_vxge_card_up(vdev)))
1486 return 0;
1487
1488 /* is reset already scheduled */
1489 if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
1490 return 0;
1491 }
1492
1493 if (event == VXGE_LL_FULL_RESET) {
1494 /* wait for all the vpath reset to complete */
1495 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
1496 while (test_bit(vp_id, &vdev->vp_reset))
1497 msleep(50);
1498 }
1499
1500 /* if execution mode is set to debug, don't reset the adapter */
1501 if (unlikely(vdev->exec_mode)) {
1502 vxge_debug_init(VXGE_ERR,
1503 "%s: execution mode is debug, returning..",
1504 vdev->ndev->name);
1505 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
1506 vxge_stop_all_tx_queue(vdev);
1507 return 0;
1508 }
1509 }
1510
1511 if (event == VXGE_LL_FULL_RESET) {
1512 vxge_hw_device_intr_disable(vdev->devh);
1513
1514 switch (vdev->cric_err_event) {
1515 case VXGE_HW_EVENT_UNKNOWN:
1516 vxge_stop_all_tx_queue(vdev);
1517 vxge_debug_init(VXGE_ERR,
1518 "fatal: %s: Disabling device due to"
1519 "unknown error",
1520 vdev->ndev->name);
1521 ret = -EPERM;
1522 goto out;
1523 case VXGE_HW_EVENT_RESET_START:
1524 break;
1525 case VXGE_HW_EVENT_RESET_COMPLETE:
1526 case VXGE_HW_EVENT_LINK_DOWN:
1527 case VXGE_HW_EVENT_LINK_UP:
1528 case VXGE_HW_EVENT_ALARM_CLEARED:
1529 case VXGE_HW_EVENT_ECCERR:
1530 case VXGE_HW_EVENT_MRPCIM_ECCERR:
1531 ret = -EPERM;
1532 goto out;
1533 case VXGE_HW_EVENT_FIFO_ERR:
1534 case VXGE_HW_EVENT_VPATH_ERR:
1535 break;
1536 case VXGE_HW_EVENT_CRITICAL_ERR:
1537 vxge_stop_all_tx_queue(vdev);
1538 vxge_debug_init(VXGE_ERR,
1539 "fatal: %s: Disabling device due to"
1540 "serious error",
1541 vdev->ndev->name);
1542 /* SOP or device reset required */
1543 /* This event is not currently used */
1544 ret = -EPERM;
1545 goto out;
1546 case VXGE_HW_EVENT_SERR:
1547 vxge_stop_all_tx_queue(vdev);
1548 vxge_debug_init(VXGE_ERR,
1549 "fatal: %s: Disabling device due to"
1550 "serious error",
1551 vdev->ndev->name);
1552 ret = -EPERM;
1553 goto out;
1554 case VXGE_HW_EVENT_SRPCIM_SERR:
1555 case VXGE_HW_EVENT_MRPCIM_SERR:
1556 ret = -EPERM;
1557 goto out;
1558 case VXGE_HW_EVENT_SLOT_FREEZE:
1559 vxge_stop_all_tx_queue(vdev);
1560 vxge_debug_init(VXGE_ERR,
1561 "fatal: %s: Disabling device due to"
1562 "slot freeze",
1563 vdev->ndev->name);
1564 ret = -EPERM;
1565 goto out;
1566 default:
1567 break;
1568
1569 }
1570 }
1571
1572 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET))
1573 vxge_stop_all_tx_queue(vdev);
1574
1575 if (event == VXGE_LL_FULL_RESET) {
1576 status = vxge_reset_all_vpaths(vdev);
1577 if (status != VXGE_HW_OK) {
1578 vxge_debug_init(VXGE_ERR,
1579 "fatal: %s: can not reset vpaths",
1580 vdev->ndev->name);
1581 ret = -EPERM;
1582 goto out;
1583 }
1584 }
1585
1586 if (event == VXGE_LL_COMPL_RESET) {
1587 for (i = 0; i < vdev->no_of_vpath; i++)
1588 if (vdev->vpaths[i].handle) {
1589 if (vxge_hw_vpath_recover_from_reset(
1590 vdev->vpaths[i].handle)
1591 != VXGE_HW_OK) {
1592 vxge_debug_init(VXGE_ERR,
1593 "vxge_hw_vpath_recover_"
1594 "from_reset failed for vpath: "
1595 "%d", i);
1596 ret = -EPERM;
1597 goto out;
1598 }
1599 } else {
1600 vxge_debug_init(VXGE_ERR,
1601 "vxge_hw_vpath_reset failed for "
1602 "vpath:%d", i);
1603 ret = -EPERM;
1604 goto out;
1605 }
1606 }
1607
1608 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) {
1609 /* Reprogram the DA table with populated mac addresses */
1610 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
1611 vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
1612 vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
1613 }
1614
1615 /* enable vpath interrupts */
1616 for (i = 0; i < vdev->no_of_vpath; i++)
1617 vxge_vpath_intr_enable(vdev, i);
1618
1619 vxge_hw_device_intr_enable(vdev->devh);
1620
1621 smp_wmb();
1622
1623 /* Indicate card up */
1624 set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
1625
1626 /* Get the traffic to flow through the vpaths */
1627 for (i = 0; i < vdev->no_of_vpath; i++) {
1628 vxge_hw_vpath_enable(vdev->vpaths[i].handle);
1629 smp_wmb();
1630 vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
1631 }
1632
1633 vxge_wake_all_tx_queue(vdev);
1634 }
1635
1636out:
1637 vxge_debug_entryexit(VXGE_TRACE,
1638 "%s:%d Exiting...", __func__, __LINE__);
1639
1640 /* Indicate reset done */
1641 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET))
1642 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
1643 return ret;
1644}
1645
1646/*
1647 * vxge_reset
1648 * @vdev: pointer to ll device
1649 *
1650 * driver may reset the chip on events of serr, eccerr, etc
1651 */
1652int vxge_reset(struct vxgedev *vdev)
1653{
1654 do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
1655 return 0;
1656}
1657
1658/**
1659 * vxge_poll_msix - Receive handler when Receive Polling is used.
1660 * @napi: pointer to the napi structure.
1661 * @budget: Number of packets budgeted to be processed in this iteration.
1662 *
1663 * This function comes into the picture only if the receive side is being
1664 * handled through polling (called NAPI in Linux). It mostly does what the
1665 * normal Rx interrupt handler does in terms of descriptor and packet processing
1666 * but not in an interrupt context. Also it will process a specified number
1667 * of packets at most in one iteration. This value is passed down by the
1668 * kernel as the function argument 'budget'.
1669 */
1670static int vxge_poll_msix(struct napi_struct *napi, int budget)
1671{
1672 struct vxge_ring *ring =
1673 container_of(napi, struct vxge_ring, napi);
1674 int budget_org = budget;
1675 ring->budget = budget;
1676
1677 vxge_hw_vpath_poll_rx(ring->handle);
1678
1679 if (ring->pkts_processed < budget_org) {
1680 napi_complete(napi);
1681 /* Re enable the Rx interrupts for the vpath */
1682 vxge_hw_channel_msix_unmask(
1683 (struct __vxge_hw_channel *)ring->handle,
1684 ring->rx_vector_no);
1685 }
1686
1687 return ring->pkts_processed;
1688}
1689
1690static int vxge_poll_inta(struct napi_struct *napi, int budget)
1691{
1692 struct vxgedev *vdev = container_of(napi, struct vxgedev, napi);
1693 int pkts_processed = 0;
1694 int i;
1695 int budget_org = budget;
1696 struct vxge_ring *ring;
1697
1698 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
1699 pci_get_drvdata(vdev->pdev);
1700
1701 for (i = 0; i < vdev->no_of_vpath; i++) {
1702 ring = &vdev->vpaths[i].ring;
1703 ring->budget = budget;
1704 vxge_hw_vpath_poll_rx(ring->handle);
1705 pkts_processed += ring->pkts_processed;
1706 budget -= ring->pkts_processed;
1707 if (budget <= 0)
1708 break;
1709 }
1710
1711 VXGE_COMPLETE_ALL_TX(vdev);
1712
1713 if (pkts_processed < budget_org) {
1714 napi_complete(napi);
1715 /* Re enable the Rx interrupts for the ring */
1716 vxge_hw_device_unmask_all(hldev);
1717 vxge_hw_device_flush_io(hldev);
1718 }
1719
1720 return pkts_processed;
1721}
1722
1723#ifdef CONFIG_NET_POLL_CONTROLLER
1724/**
1725 * vxge_netpoll - netpoll event handler entry point
1726 * @dev : pointer to the device structure.
1727 * Description:
1728 * This function will be called by upper layer to check for events on the
1729 * interface in situations where interrupts are disabled. It is used for
1730 * specific in-kernel networking tasks, such as remote consoles and kernel
1731 * debugging over the network (e.g. netdump in Red Hat).
1732 */
1733static void vxge_netpoll(struct net_device *dev)
1734{
1735 struct __vxge_hw_device *hldev;
1736 struct vxgedev *vdev;
1737
1738 vdev = (struct vxgedev *)netdev_priv(dev);
1739 hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
1740
1741 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1742
1743 if (pci_channel_offline(vdev->pdev))
1744 return;
1745
1746 disable_irq(dev->irq);
1747 vxge_hw_device_clear_tx_rx(hldev);
1748
1750 VXGE_COMPLETE_ALL_RX(vdev);
1751 VXGE_COMPLETE_ALL_TX(vdev);
1752
1753 enable_irq(dev->irq);
1754
1755 vxge_debug_entryexit(VXGE_TRACE,
1756 "%s:%d Exiting...", __func__, __LINE__);
1757 return;
1758}
1759#endif
1760
1761/* RTH configuration */
1762static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
1763{
1764 enum vxge_hw_status status = VXGE_HW_OK;
1765 struct vxge_hw_rth_hash_types hash_types;
1766 u8 itable[256] = {0}; /* indirection table */
1767 u8 mtable[256] = {0}; /* CPU to vpath mapping */
1768 int index;
1769
1770 /*
1771 * Filling
1772 * - itable with bucket numbers
1773 * - mtable with bucket-to-vpath mapping
1774 */
1775 for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) {
1776 itable[index] = index;
1777 mtable[index] = index % vdev->no_of_vpath;
1778 }
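/*
 * For instance, with a hypothetical rth_bkt_sz of 2 and two vpaths
 * this yields itable = {0, 1, 2, 3} and mtable = {0, 1, 0, 1}:
 * bucket n is steered to vpath n % no_of_vpath.
 */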
1779
1780 /* Fill RTH hash types */
1781 hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
1782 hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
1783 hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
1784 hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
1785 hash_types.hash_type_tcpipv6ex_en =
1786 vdev->config.rth_hash_type_tcpipv6ex;
1787 hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
1788
1789 /* set indirection table, bucket-to-vpath mapping */
1790 status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
1791 vdev->no_of_vpath,
1792 mtable, itable,
1793 vdev->config.rth_bkt_sz);
1794 if (status != VXGE_HW_OK) {
1795 vxge_debug_init(VXGE_ERR,
1796 "RTH indirection table configuration failed "
1797 "for vpath:%d", vdev->vpaths[0].device_id);
1798 return status;
1799 }
1800
1801 /*
1802 * Because the itable_set() method uses the active_table field
1803 * for the target virtual path the RTH config should be updated
1804 * for all VPATHs. The h/w only uses the lowest numbered VPATH
1805 * when steering frames.
1806 */
1807 for (index = 0; index < vdev->no_of_vpath; index++) {
1808 status = vxge_hw_vpath_rts_rth_set(
1809 vdev->vpaths[index].handle,
1810 vdev->config.rth_algorithm,
1811 &hash_types,
1812 vdev->config.rth_bkt_sz);
1813
1814 if (status != VXGE_HW_OK) {
1815 vxge_debug_init(VXGE_ERR,
1816 "RTH configuration failed for vpath:%d",
1817 vdev->vpaths[index].device_id);
1818 return status;
1819 }
1820 }
1821
1822 return status;
1823}
1824
1825int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
1826{
1827 struct vxge_mac_addrs *new_mac_entry;
1828 u8 *mac_address = NULL;
1829
1830 if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
1831 return TRUE;
1832
1833 new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
1834 if (!new_mac_entry) {
1835 vxge_debug_mem(VXGE_ERR,
1836 "%s: memory allocation failed",
1837 VXGE_DRIVER_NAME);
1838 return FALSE;
1839 }
1840
1841 list_add(&new_mac_entry->item, &vpath->mac_addr_list);
1842
1843 /* Copy the new mac address to the list */
1844 mac_address = (u8 *)&new_mac_entry->macaddr;
1845 memcpy(mac_address, mac->macaddr, ETH_ALEN);
1846
1847 new_mac_entry->state = mac->state;
1848 vpath->mac_addr_cnt++;
1849
1850 /* Is this a multicast address */
1851 if (0x01 & mac->macaddr[0])
1852 vpath->mcast_addr_cnt++;
1853
1854 return TRUE;
1855}
1856
1857/* Add a mac address to DA table */
1858enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
1859{
1860 enum vxge_hw_status status = VXGE_HW_OK;
1861 struct vxge_vpath *vpath;
1862 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
1863
1864 if (0x01 & mac->macaddr[0]) /* multicast address */
1865 duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
1866 else
1867 duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;
1868
1869 vpath = &vdev->vpaths[mac->vpath_no];
1870 status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
1871 mac->macmask, duplicate_mode);
1872 if (status != VXGE_HW_OK) {
1873 vxge_debug_init(VXGE_ERR,
1874 "DA config add entry failed for vpath:%d",
1875 vpath->device_id);
1876 } else
1877 if (FALSE == vxge_mac_list_add(vpath, mac))
1878 status = -EPERM;
1879
1880 return status;
1881}
1882
1883int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
1884{
1885 struct list_head *entry, *next;
1886 u64 del_mac = 0;
1887 u8 *mac_address = (u8 *) (&del_mac);
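/*
 * The 6-byte address is copied into a zeroed u64 so that each list
 * entry can be matched with a single integer compare below; entries
 * store macaddr in the same layout.
 */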
1888
1889 /* Copy the mac address to delete from the list */
1890 memcpy(mac_address, mac->macaddr, ETH_ALEN);
1891
1892 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
1893 if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
1894 list_del(entry);
1895 kfree((struct vxge_mac_addrs *)entry);
1896 vpath->mac_addr_cnt--;
1897
1898 /* Is this a multicast address */
1899 if (0x01 & mac->macaddr[0])
1900 vpath->mcast_addr_cnt--;
1901 return TRUE;
1902 }
1903 }
1904
1905 return FALSE;
1906}
1907/* delete a mac address from DA table */
1908enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
1909{
1910 enum vxge_hw_status status = VXGE_HW_OK;
1911 struct vxge_vpath *vpath;
1912
1913 vpath = &vdev->vpaths[mac->vpath_no];
1914 status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
1915 mac->macmask);
1916 if (status != VXGE_HW_OK) {
1917 vxge_debug_init(VXGE_ERR,
1918 "DA config delete entry failed for vpath:%d",
1919 vpath->device_id);
1920 } else
1921 vxge_mac_list_del(vpath, mac);
1922 return status;
1923}
1924
1925 /* Search for a given mac address in the DA table */
1926 static enum vxge_hw_status
1927 vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath,
1928 struct macInfo *mac)
1929{
1930 enum vxge_hw_status status = VXGE_HW_OK;
1931 unsigned char macmask[ETH_ALEN];
1932 unsigned char macaddr[ETH_ALEN];
1933
1934 status = vxge_hw_vpath_mac_addr_get(vpath->handle,
1935 macaddr, macmask);
1936 if (status != VXGE_HW_OK) {
1937 vxge_debug_init(VXGE_ERR,
1938 "DA config list entry failed for vpath:%d",
1939 vpath->device_id);
1940 return status;
1941 }
1942
1943 while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
1944
1945 status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
1946 macaddr, macmask);
1947 if (status != VXGE_HW_OK)
1948 break;
1949 }
1950
1951 return status;
1952}
1953
1954/* Store all vlan ids from the list to the vid table */
1955enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
1956{
1957 enum vxge_hw_status status = VXGE_HW_OK;
1958 struct vxgedev *vdev = vpath->vdev;
1959 u16 vid;
1960
1961 if (vdev->vlgrp && vpath->is_open) {
1962
1963 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
1964 if (!vlan_group_get_device(vdev->vlgrp, vid))
1965 continue;
1966 /* Add these vlan to the vid table */
1967 status = vxge_hw_vpath_vid_add(vpath->handle, vid);
1968 }
1969 }
1970
1971 return status;
1972}
1973
1974/* Store all mac addresses from the list to the DA table */
1975enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
1976{
1977 enum vxge_hw_status status = VXGE_HW_OK;
1978 struct macInfo mac_info;
1979 u8 *mac_address = NULL;
1980 struct list_head *entry, *next;
1981
1982 memset(&mac_info, 0, sizeof(struct macInfo));
1983
1984 if (vpath->is_open) {
1985
1986 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
1987 mac_address =
1988 (u8 *)&((struct vxge_mac_addrs *)entry)->macaddr;
1990 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
1991 ((struct vxge_mac_addrs *)entry)->state =
1992 VXGE_LL_MAC_ADDR_IN_DA_TABLE;
1993 /* does this mac address already exist in da table? */
1994 status = vxge_search_mac_addr_in_da_table(vpath,
1995 &mac_info);
1996 if (status != VXGE_HW_OK) {
1997 /* Add this mac address to the DA table */
1998 status = vxge_hw_vpath_mac_addr_add(
1999 vpath->handle, mac_info.macaddr,
2000 mac_info.macmask,
2001 VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
2002 if (status != VXGE_HW_OK) {
2003 vxge_debug_init(VXGE_ERR,
2004 "DA add entry failed for vpath:%d",
2005 vpath->device_id);
2006 ((struct vxge_mac_addrs *)entry)->state
2007 = VXGE_LL_MAC_ADDR_IN_LIST;
2008 }
2009 }
2010 }
2011 }
2012
2013 return status;
2014}
2015
2016/* reset vpaths */
2017enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
2018{
2019 int i;
2020 enum vxge_hw_status status = VXGE_HW_OK;
2021
2022 for (i = 0; i < vdev->no_of_vpath; i++)
2023 if (vdev->vpaths[i].handle) {
2024 if (vxge_hw_vpath_reset(vdev->vpaths[i].handle)
2025 == VXGE_HW_OK) {
2026 if (is_vxge_card_up(vdev) &&
2027 vxge_hw_vpath_recover_from_reset(
2028 vdev->vpaths[i].handle)
2029 != VXGE_HW_OK) {
2030 vxge_debug_init(VXGE_ERR,
2031 "vxge_hw_vpath_recover_"
2032 "from_reset failed for vpath: "
2033 "%d", i);
2034 return status;
2035 }
2036 } else {
2037 vxge_debug_init(VXGE_ERR,
2038 "vxge_hw_vpath_reset failed for "
2039 "vpath:%d", i);
2040 return status;
2041 }
2042 }
2043 return status;
2044}
2045
2046/* close vpaths */
2047void vxge_close_vpaths(struct vxgedev *vdev, int index)
2048{
2049 int i;
2050 for (i = index; i < vdev->no_of_vpath; i++) {
2051 if (vdev->vpaths[i].handle && vdev->vpaths[i].is_open) {
2052 vxge_hw_vpath_close(vdev->vpaths[i].handle);
2053 vdev->stats.vpaths_open--;
2054 }
2055 vdev->vpaths[i].is_open = 0;
2056 vdev->vpaths[i].handle = NULL;
2057 }
2058}
2059
2060/* open vpaths */
2061int vxge_open_vpaths(struct vxgedev *vdev)
2062{
2063 enum vxge_hw_status status;
2064 int i;
2065 u32 vp_id = 0;
2066 struct vxge_hw_vpath_attr attr;
2067
2068 for (i = 0; i < vdev->no_of_vpath; i++) {
2069 vxge_assert(vdev->vpaths[i].is_configured);
2070 attr.vp_id = vdev->vpaths[i].device_id;
2071 attr.fifo_attr.callback = vxge_xmit_compl;
2072 attr.fifo_attr.txdl_term = vxge_tx_term;
2073 attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv);
2074 attr.fifo_attr.userdata = (void *)&vdev->vpaths[i].fifo;
2075
2076 attr.ring_attr.callback = vxge_rx_1b_compl;
2077 attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
2078 attr.ring_attr.rxd_term = vxge_rx_term;
2079 attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv);
2080 attr.ring_attr.userdata = (void *)&vdev->vpaths[i].ring;
2081
2082 vdev->vpaths[i].ring.ndev = vdev->ndev;
2083 vdev->vpaths[i].ring.pdev = vdev->pdev;
2084 status = vxge_hw_vpath_open(vdev->devh, &attr,
2085 &(vdev->vpaths[i].handle));
2086 if (status == VXGE_HW_OK) {
2087 vdev->vpaths[i].fifo.handle =
2088 (struct __vxge_hw_fifo *)attr.fifo_attr.userdata;
2089 vdev->vpaths[i].ring.handle =
2090 (struct __vxge_hw_ring *)attr.ring_attr.userdata;
2091 vdev->vpaths[i].fifo.tx_steering_type =
2092 vdev->config.tx_steering_type;
2093 vdev->vpaths[i].fifo.ndev = vdev->ndev;
2094 vdev->vpaths[i].fifo.pdev = vdev->pdev;
2095 vdev->vpaths[i].fifo.indicate_max_pkts =
2096 vdev->config.fifo_indicate_max_pkts;
2097 vdev->vpaths[i].ring.rx_vector_no = 0;
2098 vdev->vpaths[i].ring.rx_csum = vdev->rx_csum;
2099 vdev->vpaths[i].is_open = 1;
2100 vdev->vp_handles[i] = vdev->vpaths[i].handle;
2101 vdev->vpaths[i].ring.gro_enable =
2102 vdev->config.gro_enable;
2103 vdev->vpaths[i].ring.vlan_tag_strip =
2104 vdev->vlan_tag_strip;
2105 vdev->stats.vpaths_open++;
2106 } else {
2107 vdev->stats.vpath_open_fail++;
2108 vxge_debug_init(VXGE_ERR,
2109 "%s: vpath: %d failed to open "
2110 "with status: %d",
2111 vdev->ndev->name, vdev->vpaths[i].device_id,
2112 status);
2113 vxge_close_vpaths(vdev, 0);
2114 return -EPERM;
2115 }
2116
2117 vp_id =
2118 ((struct __vxge_hw_vpath_handle *)vdev->vpaths[i].handle)->
2119 vpath->vp_id;
2120 vdev->vpaths_deployed |= vxge_mBIT(vp_id);
2121 }
2122 return VXGE_HW_OK;
2123}
2124
2125/*
2126 * vxge_isr_napi
2127 * @irq: the irq of the device.
2128 * @dev_id: a void pointer to the hldev structure of the Titan device
2129 * @ptregs: pointer to the registers pushed on the stack.
2130 *
2131 * This function is the ISR handler of the device when napi is enabled. It
2132 * identifies the reason for the interrupt and calls the relevant service
2133 * routines.
2134 */
2135static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
2136{
2137 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)dev_id;
2138 struct vxgedev *vdev;
2139 struct net_device *dev;
2140 u64 reason;
2141 enum vxge_hw_status status;
2142
2143 vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
2144
2145 dev = hldev->ndev;
2146 vdev = netdev_priv(dev);
2147
2148 if (pci_channel_offline(vdev->pdev))
2149 return IRQ_NONE;
2150
2151 if (unlikely(!is_vxge_card_up(vdev)))
2152 return IRQ_NONE;
2153
2154 status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode,
2155 &reason);
2156 if (status == VXGE_HW_OK) {
2157 vxge_hw_device_mask_all(hldev);
2158
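/*
 * vpaths_deployed is kept mBIT-style (vpath 0 in the most
 * significant bit), so it is shifted down before being matched
 * against the per-vpath traffic bits in 'reason'.
 */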
2159 if (reason &
2160 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
2161 vdev->vpaths_deployed >>
2162 (64 - VXGE_HW_MAX_VIRTUAL_PATHS))) {
2163
2164 vxge_hw_device_clear_tx_rx(hldev);
2165 napi_schedule(&vdev->napi);
2166 vxge_debug_intr(VXGE_TRACE,
2167 "%s:%d Exiting...", __func__, __LINE__);
2168 return IRQ_HANDLED;
2169 } else
2170 vxge_hw_device_unmask_all(hldev);
2171 } else if (unlikely((status == VXGE_HW_ERR_VPATH) ||
2172 (status == VXGE_HW_ERR_CRITICAL) ||
2173 (status == VXGE_HW_ERR_FIFO))) {
2174 vxge_hw_device_mask_all(hldev);
2175 vxge_hw_device_flush_io(hldev);
2176 return IRQ_HANDLED;
2177 } else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE))
2178 return IRQ_HANDLED;
2179
2180 vxge_debug_intr(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__);
2181 return IRQ_NONE;
2182}
2183
2184#ifdef CONFIG_PCI_MSI
2185
2186static irqreturn_t
2187vxge_tx_msix_handle(int irq, void *dev_id)
2188{
2189 struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
2190
2191 VXGE_COMPLETE_VPATH_TX(fifo);
2192
2193 return IRQ_HANDLED;
2194}
2195
2196static irqreturn_t
2197vxge_rx_msix_napi_handle(int irq, void *dev_id)
2198{
2199 struct vxge_ring *ring = (struct vxge_ring *)dev_id;
2200
2201 /* MSIX_IDX for Rx is 1 */
2202 vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
2203 ring->rx_vector_no);
2204
2205 napi_schedule(&ring->napi);
2206 return IRQ_HANDLED;
2207}
2208
2209static irqreturn_t
2210vxge_alarm_msix_handle(int irq, void *dev_id)
2211{
2212 int i;
2213 enum vxge_hw_status status;
2214 struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
2215 struct vxgedev *vdev = vpath->vdev;
2216 int alarm_msix_id =
2217 VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
2218
2219 for (i = 0; i < vdev->no_of_vpath; i++) {
2220 vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle,
2221 alarm_msix_id);
2222
2223 status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
2224 vdev->exec_mode);
2225 if (status == VXGE_HW_OK) {
2226
2227 vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
2228 alarm_msix_id);
2229 continue;
2230 }
2231 vxge_debug_intr(VXGE_ERR,
2232 "%s: vxge_hw_vpath_alarm_process failed %x ",
2233 VXGE_DRIVER_NAME, status);
2234 }
2235 return IRQ_HANDLED;
2236}
2237
2238static int vxge_alloc_msix(struct vxgedev *vdev)
2239{
2240 int j, i, ret = 0;
2241 int intr_cnt = 0;
2242 int alarm_msix_id = 0, msix_intr_vect = 0;
2243 vdev->intr_cnt = 0;
2244
2245 /* Tx/Rx MSIX Vectors count */
2246 vdev->intr_cnt = vdev->no_of_vpath * 2;
2247
2248 /* Alarm MSIX Vectors count */
2249 vdev->intr_cnt++;
2250
2251 intr_cnt = (vdev->max_vpath_supported * 2) + 1;
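/*
 * Vectors are requested for max_vpath_supported (two per vpath plus
 * one alarm) even though only no_of_vpath are currently in use; the
 * fallback below can then shrink the request if the host offers
 * fewer vectors.
 */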
2252 vdev->entries = kzalloc(intr_cnt * sizeof(struct msix_entry),
2253 GFP_KERNEL);
2254 if (!vdev->entries) {
2255 vxge_debug_init(VXGE_ERR,
2256 "%s: memory allocation failed",
2257 VXGE_DRIVER_NAME);
2258 return -ENOMEM;
2259 }
2260
2261 vdev->vxge_entries = kzalloc(intr_cnt * sizeof(struct vxge_msix_entry),
2262 GFP_KERNEL);
2263 if (!vdev->vxge_entries) {
2264 vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
2265 VXGE_DRIVER_NAME);
2266 kfree(vdev->entries);
2267 return -ENOMEM;
2268 }
2269
2270 /* Last vector in the list is used for alarm */
2271 alarm_msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
2272 for (i = 0, j = 0; i < vdev->max_vpath_supported; i++) {
2273
2274 msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
2275
2276 /* Initialize the fifo vector */
2277 vdev->entries[j].entry = msix_intr_vect;
2278 vdev->vxge_entries[j].entry = msix_intr_vect;
2279 vdev->vxge_entries[j].in_use = 0;
2280 j++;
2281
2282 /* Initialize the ring vector */
2283 vdev->entries[j].entry = msix_intr_vect + 1;
2284 vdev->vxge_entries[j].entry = msix_intr_vect + 1;
2285 vdev->vxge_entries[j].in_use = 0;
2286 j++;
2287 }
2288
2289 /* Initialize the alarm vector */
2290 vdev->entries[j].entry = alarm_msix_id;
2291 vdev->vxge_entries[j].entry = alarm_msix_id;
2292 vdev->vxge_entries[j].in_use = 0;
2293
2294 ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt);
2295 /* if the driver's request exceeds the available irqs, retry the
2296 * request with a smaller number.
2297 */
2298 if (ret > 0) {
2299 vxge_debug_init(VXGE_ERR,
2300 "%s: MSI-X enable failed for %d vectors, available: %d",
2301 VXGE_DRIVER_NAME, intr_cnt, ret);
2302 vdev->max_vpath_supported = vdev->no_of_vpath;
2303 intr_cnt = (vdev->max_vpath_supported * 2) + 1;
2304
2305 /* Reset the alarm vector setting */
2306 vdev->entries[j].entry = 0;
2307 vdev->vxge_entries[j].entry = 0;
2308
2309 /* Initialize the alarm vector with new setting */
2310 vdev->entries[intr_cnt - 1].entry = alarm_msix_id;
2311 vdev->vxge_entries[intr_cnt - 1].entry = alarm_msix_id;
2312 vdev->vxge_entries[intr_cnt - 1].in_use = 0;
2313
2314 ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt);
2315 if (!ret)
2316 vxge_debug_init(VXGE_ERR,
2317 "%s: MSI-X enabled for %d vectors",
2318 VXGE_DRIVER_NAME, intr_cnt);
2319 }
2320
2321 if (ret) {
2322 vxge_debug_init(VXGE_ERR,
2323 "%s: MSI-X enable failed for %d vectors, ret: %d",
2324 VXGE_DRIVER_NAME, intr_cnt, ret);
2325 kfree(vdev->entries);
2326 kfree(vdev->vxge_entries);
2327 vdev->entries = NULL;
2328 vdev->vxge_entries = NULL;
2329 return -ENODEV;
2330 }
2331 return 0;
2332}
2333
2334static int vxge_enable_msix(struct vxgedev *vdev)
2335{
2336
2337 int i, ret = 0;
2338 enum vxge_hw_status status;
2339 /* 0 - Tx, 1 - Rx */
2340 int tim_msix_id[4];
2341 int alarm_msix_id = 0, msix_intr_vect = 0;
2342 vdev->intr_cnt = 0;
2343
2344 /* allocate msix vectors */
2345 ret = vxge_alloc_msix(vdev);
2346 if (!ret) {
2347 /* Last vector in the list is used for alarm */
2348 alarm_msix_id =
2349 VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
2350 for (i = 0; i < vdev->no_of_vpath; i++) {
2351
2352 /* If the fifo or ring is not enabled,
2353 the MSIX vector for it should be set to 0.
2354 Hence initializing this array to all 0s.
2355 */
2356 memset(tim_msix_id, 0, sizeof(tim_msix_id));
2357 msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
2358 tim_msix_id[0] = msix_intr_vect;
2359
2360 tim_msix_id[1] = msix_intr_vect + 1;
2361 vdev->vpaths[i].ring.rx_vector_no = tim_msix_id[1];
2362
2363 status = vxge_hw_vpath_msix_set(
2364 vdev->vpaths[i].handle,
2365 tim_msix_id, alarm_msix_id);
2366 if (status != VXGE_HW_OK) {
2367 vxge_debug_init(VXGE_ERR,
2368 "vxge_hw_vpath_msix_set "
2369 "failed with status : %x", status);
2370 kfree(vdev->entries);
2371 kfree(vdev->vxge_entries);
2372 pci_disable_msix(vdev->pdev);
2373 return -ENODEV;
2374 }
2375 }
2376 }
2377
2378 return ret;
2379}
2380
2381static void vxge_rem_msix_isr(struct vxgedev *vdev)
2382{
2383 int intr_cnt;
2384
2385 for (intr_cnt = 0; intr_cnt < (vdev->max_vpath_supported * 2 + 1);
2386 intr_cnt++) {
2387 if (vdev->vxge_entries[intr_cnt].in_use) {
2388 synchronize_irq(vdev->entries[intr_cnt].vector);
2389 free_irq(vdev->entries[intr_cnt].vector,
2390 vdev->vxge_entries[intr_cnt].arg);
2391 vdev->vxge_entries[intr_cnt].in_use = 0;
2392 }
2393 }
2394
2395 kfree(vdev->entries);
2396 kfree(vdev->vxge_entries);
2397 vdev->entries = NULL;
2398 vdev->vxge_entries = NULL;
2399
2400 if (vdev->config.intr_type == MSI_X)
2401 pci_disable_msix(vdev->pdev);
2402}
2403#endif
2404
2405static void vxge_rem_isr(struct vxgedev *vdev)
2406{
2407 struct __vxge_hw_device *hldev;
2408 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
2409
2410#ifdef CONFIG_PCI_MSI
2411 if (vdev->config.intr_type == MSI_X) {
2412 vxge_rem_msix_isr(vdev);
2413 } else
2414#endif
2415 if (vdev->config.intr_type == INTA) {
2416 synchronize_irq(vdev->pdev->irq);
2417 free_irq(vdev->pdev->irq, hldev);
2418 }
2419}
2420
2421static int vxge_add_isr(struct vxgedev *vdev)
2422{
2423 int ret = 0;
2424 struct __vxge_hw_device *hldev =
2425 (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
2426#ifdef CONFIG_PCI_MSI
2427 int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
2428 u64 function_mode = vdev->config.device_hw_info.function_mode;
2429 int pci_fun = PCI_FUNC(vdev->pdev->devfn);
2430
2431 if (vdev->config.intr_type == MSI_X)
2432 ret = vxge_enable_msix(vdev);
2433
2434 if (ret) {
2435 vxge_debug_init(VXGE_ERR,
2436 "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME);
2437 if ((function_mode == VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) &&
2438 test_and_set_bit(__VXGE_STATE_CARD_UP,
2439 &driver_config->inta_dev_open))
2440 return VXGE_HW_FAIL;
2441 else {
2442 vxge_debug_init(VXGE_ERR,
2443 "%s: Defaulting to INTA", VXGE_DRIVER_NAME);
2444 vdev->config.intr_type = INTA;
2445 vxge_hw_device_set_intr_type(vdev->devh,
2446 VXGE_HW_INTR_MODE_IRQLINE);
2447 vxge_close_vpaths(vdev, 1);
2448 vdev->no_of_vpath = 1;
2449 vdev->stats.vpaths_open = 1;
2450 }
2451 }
2452
2453 if (vdev->config.intr_type == MSI_X) {
2454 for (intr_idx = 0;
2455 intr_idx < (vdev->no_of_vpath *
2456 VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {
2457
2458 msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE;
2459 irq_req = 0;
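/*
 * Slot within the vpath's vector block: slot 0 is the Tx fifo
 * interrupt, slot 1 the Rx ring interrupt; the remaining slots of
 * VXGE_HW_VPATH_MSIX_ACTIVE are left unused here.
 */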
2460
2461 switch (msix_idx) {
2462 case 0:
2463 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2464 "%s:vxge fn: %d vpath: %d Tx MSI-X: %d",
2465 vdev->ndev->name, pci_fun, vp_idx,
2466 vdev->entries[intr_cnt].entry);
2467 ret = request_irq(
2468 vdev->entries[intr_cnt].vector,
2469 vxge_tx_msix_handle, 0,
2470 vdev->desc[intr_cnt],
2471 &vdev->vpaths[vp_idx].fifo);
2472 vdev->vxge_entries[intr_cnt].arg =
2473 &vdev->vpaths[vp_idx].fifo;
2474 irq_req = 1;
2475 break;
2476 case 1:
2477 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2478 "%s:vxge fn: %d vpath: %d Rx MSI-X: %d",
2479 vdev->ndev->name, pci_fun, vp_idx,
2480 vdev->entries[intr_cnt].entry);
2481 ret = request_irq(
2482 vdev->entries[intr_cnt].vector,
2483 vxge_rx_msix_napi_handle,
2484 0,
2485 vdev->desc[intr_cnt],
2486 &vdev->vpaths[vp_idx].ring);
2487 vdev->vxge_entries[intr_cnt].arg =
2488 &vdev->vpaths[vp_idx].ring;
2489 irq_req = 1;
2490 break;
2491 }
2492
2493 if (ret) {
2494 vxge_debug_init(VXGE_ERR,
2495 "%s: MSIX - %d Registration failed",
2496 vdev->ndev->name, intr_cnt);
2497 vxge_rem_msix_isr(vdev);
2498 if ((function_mode ==
2499 VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) &&
2500 test_and_set_bit(__VXGE_STATE_CARD_UP,
2501 &driver_config->inta_dev_open))
2502 return VXGE_HW_FAIL;
2503 else {
2504 vxge_hw_device_set_intr_type(
2505 vdev->devh,
2506 VXGE_HW_INTR_MODE_IRQLINE);
2507 vdev->config.intr_type = INTA;
2508 vxge_debug_init(VXGE_ERR,
2509 "%s: Defaulting to INTA"
2510 , vdev->ndev->name);
2511 vxge_close_vpaths(vdev, 1);
2512 vdev->no_of_vpath = 1;
2513 vdev->stats.vpaths_open = 1;
2514 goto INTA_MODE;
2515 }
2516 }
2517
2518 if (irq_req) {
2519 /* We requested this msix interrupt */
2520 vdev->vxge_entries[intr_cnt].in_use = 1;
2521 vxge_hw_vpath_msix_unmask(
2522 vdev->vpaths[vp_idx].handle,
2523 intr_idx);
2524 intr_cnt++;
2525 }
2526
2527 /* Point to next vpath handler */
2528 if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0)
2529 && (vp_idx < (vdev->no_of_vpath - 1)))
2530 vp_idx++;
2531 }
2532
2533 intr_cnt = vdev->max_vpath_supported * 2;
2534 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2535 "%s:vxge Alarm fn: %d MSI-X: %d",
2536 vdev->ndev->name, pci_fun,
2537 vdev->entries[intr_cnt].entry);
2538 /* For Alarm interrupts */
2539 ret = request_irq(vdev->entries[intr_cnt].vector,
2540 vxge_alarm_msix_handle, 0,
2541 vdev->desc[intr_cnt],
2542 &vdev->vpaths[vp_idx]);
2543 if (ret) {
2544 vxge_debug_init(VXGE_ERR,
2545 "%s: MSIX - %d Registration failed",
2546 vdev->ndev->name, intr_cnt);
2547 vxge_rem_msix_isr(vdev);
2548 if ((function_mode ==
2549 VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) &&
2550 test_and_set_bit(__VXGE_STATE_CARD_UP,
2551 &driver_config->inta_dev_open))
2552 return VXGE_HW_FAIL;
2553 else {
2554 vxge_hw_device_set_intr_type(vdev->devh,
2555 VXGE_HW_INTR_MODE_IRQLINE);
2556 vdev->config.intr_type = INTA;
2557 vxge_debug_init(VXGE_ERR,
2558 "%s: Defaulting to INTA",
2559 vdev->ndev->name);
2560 vxge_close_vpaths(vdev, 1);
2561 vdev->no_of_vpath = 1;
2562 vdev->stats.vpaths_open = 1;
2563 goto INTA_MODE;
2564 }
2565 }
2566
2567 vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
2568 intr_idx - 2);
2569 vdev->vxge_entries[intr_cnt].in_use = 1;
2570 vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[vp_idx];
2571 }
2572INTA_MODE:
2573#endif
2574 snprintf(vdev->desc[0], VXGE_INTR_STRLEN, "%s:vxge", vdev->ndev->name);
2575
2576 if (vdev->config.intr_type == INTA) {
2577 ret = request_irq((int) vdev->pdev->irq,
2578 vxge_isr_napi,
2579 IRQF_SHARED, vdev->desc[0], hldev);
2580 if (ret) {
2581 vxge_debug_init(VXGE_ERR,
2582 "%s %s-%d: ISR registration failed",
2583 VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq);
2584 return -ENODEV;
2585 }
2586 vxge_debug_init(VXGE_TRACE,
2587 "new %s-%d line allocated",
2588 "IRQ", vdev->pdev->irq);
2589 }
2590
2591 return VXGE_HW_OK;
2592}
2593
2594static void vxge_poll_vp_reset(unsigned long data)
2595{
2596 struct vxgedev *vdev = (struct vxgedev *)data;
2597 int i, j = 0;
2598
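/*
 * vp_reset is a bitmask of vpaths flagged for reset (e.g. by the
 * lockup detector below); this timer services any pending bits
 * outside interrupt context and re-arms itself every HZ/2.
 */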
2599 for (i = 0; i < vdev->no_of_vpath; i++) {
2600 if (test_bit(i, &vdev->vp_reset)) {
2601 vxge_reset_vpath(vdev, i);
2602 j++;
2603 }
2604 }
2605 if (j && (vdev->config.intr_type != MSI_X)) {
2606 vxge_hw_device_unmask_all(vdev->devh);
2607 vxge_hw_device_flush_io(vdev->devh);
2608 }
2609
2610 mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
2611}
2612
2613static void vxge_poll_vp_lockup(unsigned long data)
2614{
2615 struct vxgedev *vdev = (struct vxgedev *)data;
2616 int i;
2617 struct vxge_ring *ring;
2618 enum vxge_hw_status status = VXGE_HW_OK;
2619
2620 for (i = 0; i < vdev->no_of_vpath; i++) {
2621 ring = &vdev->vpaths[i].ring;
2622 /* Did this vpath receive any packets? */
2623 if (ring->stats.prev_rx_frms == ring->stats.rx_frms) {
2624 status = vxge_hw_vpath_check_leak(ring->handle);
2625
2626 /* Did it receive any packets last time? */
2627 if ((VXGE_HW_FAIL == status) &&
2628 (VXGE_HW_FAIL == ring->last_status)) {
2629
2630 /* schedule vpath reset */
2631 if (!test_and_set_bit(i, &vdev->vp_reset)) {
2632
2633 /* disable interrupts for this vpath */
2634 vxge_vpath_intr_disable(vdev, i);
2635
2636 /* stop the queue for this vpath */
2637 vxge_stop_tx_queue(&vdev->vpaths[i].fifo);
2639 continue;
2640 }
2641 }
2642 }
2643 ring->stats.prev_rx_frms = ring->stats.rx_frms;
2644 ring->last_status = status;
2645 }
2646
2647 /* Check every millisecond */
2648 mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
2649}
2650
2651/**
2652 * vxge_open
2653 * @dev: pointer to the device structure.
2654 *
2655 * This function is the open entry point of the driver. It mainly calls a
2656 * function to allocate Rx buffers and inserts them into the buffer
2657 * descriptors and then enables the Rx part of the NIC.
2658 * Return value: '0' on success and an appropriate (-)ve integer as
2659 * defined in errno.h file on failure.
2660 */
2661int
2662vxge_open(struct net_device *dev)
2663{
2664 enum vxge_hw_status status;
2665 struct vxgedev *vdev;
2666 struct __vxge_hw_device *hldev;
2667 int ret = 0;
2668 int i;
2669 u64 val64, function_mode;
2670 vxge_debug_entryexit(VXGE_TRACE,
2671 "%s: %s:%d", dev->name, __func__, __LINE__);
2672
2673 vdev = (struct vxgedev *)netdev_priv(dev);
2674 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
2675 function_mode = vdev->config.device_hw_info.function_mode;
2676
2677 /* make sure the link is off by default every time the NIC is
2678 * initialized */
2679 netif_carrier_off(dev);
2680
2681 /* Check for another device already open with INTA */
2682 if ((function_mode == VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) &&
2683 test_bit(__VXGE_STATE_CARD_UP, &driver_config->inta_dev_open)) {
2684 ret = -EPERM;
2685 goto out0;
2686 }
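/*
 * The inta_dev_open bit above presumably serializes INTA usage: in
 * multi-function mode only one function may run on the shared INTA
 * line at a time, so a second open is refused with -EPERM.
 */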
2687
2688 /* Open VPATHs */
2689 status = vxge_open_vpaths(vdev);
2690 if (status != VXGE_HW_OK) {
2691 vxge_debug_init(VXGE_ERR,
2692 "%s: fatal: Vpath open failed", vdev->ndev->name);
2693 ret = -EPERM;
2694 goto out0;
2695 }
2696
2697 vdev->mtu = dev->mtu;
2698
2699 status = vxge_add_isr(vdev);
2700 if (status != VXGE_HW_OK) {
2701 vxge_debug_init(VXGE_ERR,
2702 "%s: fatal: ISR add failed", dev->name);
2703 ret = -EPERM;
2704 goto out1;
2705 }
2706
2707
2708 if (vdev->config.intr_type != MSI_X) {
2709 netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
2710 vdev->config.napi_weight);
2711 napi_enable(&vdev->napi);
2712 } else {
2713 for (i = 0; i < vdev->no_of_vpath; i++) {
2714 netif_napi_add(dev, &vdev->vpaths[i].ring.napi,
2715 vxge_poll_msix, vdev->config.napi_weight);
2716 napi_enable(&vdev->vpaths[i].ring.napi);
2717 }
2718 }
2719
2720 /* configure RTH */
2721 if (vdev->config.rth_steering) {
2722 status = vxge_rth_configure(vdev);
2723 if (status != VXGE_HW_OK) {
2724 vxge_debug_init(VXGE_ERR,
2725 "%s: fatal: RTH configuration failed",
2726 dev->name);
2727 ret = -EPERM;
2728 goto out2;
2729 }
2730 }
2731
2732 for (i = 0; i < vdev->no_of_vpath; i++) {
2733 /* set initial mtu before enabling the device */
2734 status = vxge_hw_vpath_mtu_set(vdev->vpaths[i].handle,
2735 vdev->mtu);
2736 if (status != VXGE_HW_OK) {
2737 vxge_debug_init(VXGE_ERR,
2738 "%s: fatal: can not set new MTU", dev->name);
2739 ret = -EPERM;
2740 goto out2;
2741 }
2742 }
2743
2744 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev);
2745 vxge_debug_init(vdev->level_trace,
2746 "%s: MTU is %d", vdev->ndev->name, vdev->mtu);
2747 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);
2748
2749 /* Reprogram the DA table with populated mac addresses */
2750 for (i = 0; i < vdev->no_of_vpath; i++) {
2751 vxge_restore_vpath_mac_addr(&vdev->vpaths[i]);
2752 vxge_restore_vpath_vid_table(&vdev->vpaths[i]);
2753 }
2754
2755 /* Enable vpaths to sniff all unicast/multicast traffic that is not
2756 * addressed to them. We allow promiscuous mode for the PF only.
2757 */
2758
2759 val64 = 0;
2760 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
2761 val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i);
2762
2763 vxge_hw_mgmt_reg_write(vdev->devh,
2764 vxge_hw_mgmt_reg_type_mrpcim,
2765 0,
2766 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2767 rxmac_authorize_all_addr),
2768 val64);
2769
2770 vxge_hw_mgmt_reg_write(vdev->devh,
2771 vxge_hw_mgmt_reg_type_mrpcim,
2772 0,
2773 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2774 rxmac_authorize_all_vid),
2775 val64);
2776
2777 vxge_set_multicast(dev);
2778
2779	/* Enable broadcast and multicast on all vpaths */
2780 for (i = 0; i < vdev->no_of_vpath; i++) {
2781 status = vxge_hw_vpath_bcast_enable(vdev->vpaths[i].handle);
2782 if (status != VXGE_HW_OK)
2783 vxge_debug_init(VXGE_ERR,
2784				"%s : Cannot enable bcast for vpath "
2785 "id %d", dev->name, i);
2786 if (vdev->config.addr_learn_en) {
2787 status =
2788 vxge_hw_vpath_mcast_enable(vdev->vpaths[i].handle);
2789 if (status != VXGE_HW_OK)
2790 vxge_debug_init(VXGE_ERR,
2791					"%s : Cannot enable mcast for vpath "
2792 "id %d", dev->name, i);
2793 }
2794 }
2795
2796 vxge_hw_device_setpause_data(vdev->devh, 0,
2797 vdev->config.tx_pause_enable,
2798 vdev->config.rx_pause_enable);
2799
2800 if (vdev->vp_reset_timer.function == NULL)
2801 vxge_os_timer(vdev->vp_reset_timer,
2802 vxge_poll_vp_reset, vdev, (HZ/2));
2803
2804 if (vdev->vp_lockup_timer.function == NULL)
2805 vxge_os_timer(vdev->vp_lockup_timer,
2806 vxge_poll_vp_lockup, vdev, (HZ/2));
2807
2808 set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2809
2810 smp_wmb();
2811
2812 if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
2813 netif_carrier_on(vdev->ndev);
2814 printk(KERN_NOTICE "%s: Link Up\n", vdev->ndev->name);
2815 vdev->stats.link_up++;
2816 }
2817
2818 vxge_hw_device_intr_enable(vdev->devh);
2819
2820 smp_wmb();
2821
2822 for (i = 0; i < vdev->no_of_vpath; i++) {
2823 vxge_hw_vpath_enable(vdev->vpaths[i].handle);
2824 smp_wmb();
2825 vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
2826 }
2827
2828 vxge_start_all_tx_queue(vdev);
2829 goto out0;
2830
2831out2:
2832 vxge_rem_isr(vdev);
2833
2834 /* Disable napi */
2835 if (vdev->config.intr_type != MSI_X)
2836 napi_disable(&vdev->napi);
2837 else {
2838 for (i = 0; i < vdev->no_of_vpath; i++)
2839 napi_disable(&vdev->vpaths[i].ring.napi);
2840 }
2841
2842out1:
2843 vxge_close_vpaths(vdev, 0);
2844out0:
2845 vxge_debug_entryexit(VXGE_TRACE,
2846 "%s: %s:%d Exiting...",
2847 dev->name, __func__, __LINE__);
2848 return ret;
2849}
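
A note on the error-handling shape above: vxge_open() acquires its resources in a fixed order (vpaths, then the ISR, then the NAPI contexts and per-vpath programming) and unwinds them in reverse through the out2/out1/out0 labels, so every failure site jumps to the label that releases exactly what has been acquired so far. Below is a minimal, self-contained userspace sketch of the same goto-unwind idiom; the three resource names are hypothetical stand-ins, not the driver's functions.

	#include <stdio.h>

	/* Pretend-acquire helpers; return 0 on success, like kernel code. */
	static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
	static void release(const char *what) { printf("release %s\n", what); }

	static int open_device(void)
	{
		int ret;

		ret = acquire("vpaths");
		if (ret)
			goto out0;
		ret = acquire("isr");
		if (ret)
			goto out1;
		ret = acquire("napi");
		if (ret)
			goto out2;
		return 0;		/* fully up: nothing to unwind */
	out2:
		release("isr");		/* undo in reverse order of acquisition */
	out1:
		release("vpaths");
	out0:
		return ret;
	}

	int main(void) { return open_device(); }
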
2850
2851/* Loop through the MAC address list and delete all the entries */
2852void vxge_free_mac_add_list(struct vxge_vpath *vpath)
2853{
2854
2855 struct list_head *entry, *next;
2856 if (list_empty(&vpath->mac_addr_list))
2857 return;
2858
2859 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
2860 list_del(entry);
2861 kfree((struct vxge_mac_addrs *)entry);
2862 }
2863}
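
The kfree() above casts the bare list_head pointer straight to struct vxge_mac_addrs, which is only correct because item is the first member of that struct (see the definition later in vxge-main.h). The usual kernel spelling is list_entry()/container_of(), which recovers the enclosing object no matter where the embedded member sits. A small userspace sketch of that mechanism, with a hand-rolled container_of:

	#include <stddef.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct list_node { struct list_node *next, *prev; };

	struct mac_entry {
		unsigned long macaddr;	/* payload deliberately placed first */
		struct list_node item;	/* embedded list linkage */
	};

	int main(void)
	{
		struct mac_entry *e = calloc(1, sizeof(*e));
		struct list_node *node;

		if (!e)
			return 1;
		node = &e->item;

		/* Recover the enclosing object from the embedded node; a plain
		 * cast of 'node' would point into the middle of the struct. */
		struct mac_entry *back = container_of(node, struct mac_entry, item);

		printf("%s\n", back == e ? "recovered" : "broken");
		free(back);
		return 0;
	}
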
2864
2865static void vxge_napi_del_all(struct vxgedev *vdev)
2866{
2867 int i;
2868 if (vdev->config.intr_type != MSI_X)
2869 netif_napi_del(&vdev->napi);
2870 else {
2871 for (i = 0; i < vdev->no_of_vpath; i++)
2872 netif_napi_del(&vdev->vpaths[i].ring.napi);
2873 }
2874 return;
2875}
2876
2877int do_vxge_close(struct net_device *dev, int do_io)
2878{
2879 enum vxge_hw_status status;
2880 struct vxgedev *vdev;
2881 struct __vxge_hw_device *hldev;
2882 int i;
2883 u64 val64, vpath_vector;
2884 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
2885 dev->name, __func__, __LINE__);
2886
2887 vdev = (struct vxgedev *)netdev_priv(dev);
2888 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
2889
2890 /* If vxge_handle_crit_err task is executing,
2891 * wait till it completes. */
2892 while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
2893 msleep(50);
2894
2895 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2896 if (do_io) {
2897 /* Put the vpath back in normal mode */
2898 vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
2899 status = vxge_hw_mgmt_reg_read(vdev->devh,
2900 vxge_hw_mgmt_reg_type_mrpcim,
2901 0,
2902 (ulong)offsetof(
2903 struct vxge_hw_mrpcim_reg,
2904 rts_mgr_cbasin_cfg),
2905 &val64);
2906
2907 if (status == VXGE_HW_OK) {
2908 val64 &= ~vpath_vector;
2909 status = vxge_hw_mgmt_reg_write(vdev->devh,
2910 vxge_hw_mgmt_reg_type_mrpcim,
2911 0,
2912 (ulong)offsetof(
2913 struct vxge_hw_mrpcim_reg,
2914 rts_mgr_cbasin_cfg),
2915 val64);
2916 }
2917
2918		/* Remove function 0 from promiscuous mode */
2919 vxge_hw_mgmt_reg_write(vdev->devh,
2920 vxge_hw_mgmt_reg_type_mrpcim,
2921 0,
2922 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2923 rxmac_authorize_all_addr),
2924 0);
2925
2926 vxge_hw_mgmt_reg_write(vdev->devh,
2927 vxge_hw_mgmt_reg_type_mrpcim,
2928 0,
2929 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2930 rxmac_authorize_all_vid),
2931 0);
2932
2933 smp_wmb();
2934 }
2935 del_timer_sync(&vdev->vp_lockup_timer);
2936
2937 del_timer_sync(&vdev->vp_reset_timer);
2938
2939 /* Disable napi */
2940 if (vdev->config.intr_type != MSI_X)
2941 napi_disable(&vdev->napi);
2942 else {
2943 for (i = 0; i < vdev->no_of_vpath; i++)
2944 napi_disable(&vdev->vpaths[i].ring.napi);
2945 }
2946
2947 netif_carrier_off(vdev->ndev);
2948 printk(KERN_NOTICE "%s: Link Down\n", vdev->ndev->name);
2949 vxge_stop_all_tx_queue(vdev);
2950
2951 /* Note that at this point xmit() is stopped by upper layer */
2952 if (do_io)
2953 vxge_hw_device_intr_disable(vdev->devh);
2954
2955	msleep(1000);	/* let in-flight traffic drain; do not busy-wait */
2956
2957 vxge_rem_isr(vdev);
2958
2959 vxge_napi_del_all(vdev);
2960
2961 if (do_io)
2962 vxge_reset_all_vpaths(vdev);
2963
2964 vxge_close_vpaths(vdev, 0);
2965
2966 vxge_debug_entryexit(VXGE_TRACE,
2967 "%s: %s:%d Exiting...", dev->name, __func__, __LINE__);
2968
2969 clear_bit(__VXGE_STATE_CARD_UP, &driver_config->inta_dev_open);
2970 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
2971
2972 return 0;
2973}
2974
2975/**
2976 * vxge_close
2977 * @dev: device pointer.
2978 *
2979 * This is the stop entry point of the driver. It needs to undo exactly
2980 * whatever was done by the open entry point; thus it is usually referred
2981 * to as the close function. Among other things, this function stops the
2982 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
2983 * Return value: '0' on success and an appropriate negative errno
2984 * value (as defined in errno.h) on failure.
2985 */
2986int
2987vxge_close(struct net_device *dev)
2988{
2989 do_vxge_close(dev, 1);
2990 return 0;
2991}
2992
2993/**
2994 * vxge_change_mtu
2995 * @dev: net device pointer.
2996 * @new_mtu: the new MTU size for the device.
2997 *
2998 * A driver entry point to change MTU size for the device. Before changing
2999 * the MTU the device must be stopped.
3000 */
3001static int vxge_change_mtu(struct net_device *dev, int new_mtu)
3002{
3003 struct vxgedev *vdev = netdev_priv(dev);
3004
3005 vxge_debug_entryexit(vdev->level_trace,
3006 "%s:%d", __func__, __LINE__);
3007 if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > VXGE_HW_MAX_MTU)) {
3008 vxge_debug_init(vdev->level_err,
3009 "%s: mtu size is invalid", dev->name);
3010		return -EINVAL;
3011 }
3012
3013 /* check if device is down already */
3014 if (unlikely(!is_vxge_card_up(vdev))) {
3015 /* just store new value, will use later on open() */
3016 dev->mtu = new_mtu;
3017 vxge_debug_init(vdev->level_err,
3018 "%s", "device is down on MTU change");
3019 return 0;
3020 }
3021
3022 vxge_debug_init(vdev->level_trace,
3023 "trying to apply new MTU %d", new_mtu);
3024
3025 if (vxge_close(dev))
3026 return -EIO;
3027
3028 dev->mtu = new_mtu;
3029 vdev->mtu = new_mtu;
3030
3031 if (vxge_open(dev))
3032 return -EIO;
3033
3034 vxge_debug_init(vdev->level_trace,
3035 "%s: MTU changed to %d", vdev->ndev->name, new_mtu);
3036
3037 vxge_debug_entryexit(vdev->level_trace,
3038 "%s:%d Exiting...", __func__, __LINE__);
3039
3040 return 0;
3041}
3042
3043/**
3044 * vxge_get_stats
3045 * @dev: pointer to the device structure
3046 *
3047 * Updates the device statistics structure. This function updates the device
3048 * statistics structure in the net_device structure and returns a pointer
3049 * to the same.
3050 */
3051static struct net_device_stats *
3052vxge_get_stats(struct net_device *dev)
3053{
3054 struct vxgedev *vdev;
3055 struct net_device_stats *net_stats;
3056 int k;
3057
3058 vdev = netdev_priv(dev);
3059
3060 net_stats = &vdev->stats.net_stats;
3061
3062 memset(net_stats, 0, sizeof(struct net_device_stats));
3063
3064 for (k = 0; k < vdev->no_of_vpath; k++) {
3065 net_stats->rx_packets += vdev->vpaths[k].ring.stats.rx_frms;
3066 net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes;
3067 net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors;
3068 net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast;
3069 net_stats->rx_dropped +=
3070 vdev->vpaths[k].ring.stats.rx_dropped;
3071
3072 net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms;
3073 net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes;
3074 net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors;
3075 }
3076
3077 return net_stats;
3078}
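
vxge_get_stats() zeroes the aggregate and re-sums every vpath's Rx-ring and Tx-fifo counters on each call. Note that the counters are read without locking while the datapath keeps updating them, so the totals are a best-effort snapshot, the usual trade-off for this style of per-queue statistics. A compact, runnable sketch of the same aggregation pattern (the struct and field names here are illustrative):

	#include <stdio.h>

	struct ring_stats { unsigned long rx_frms, rx_bytes; };
	struct fifo_stats { unsigned long tx_frms, tx_bytes; };
	struct path { struct ring_stats ring; struct fifo_stats fifo; };

	int main(void)
	{
		struct path paths[3] = {
			{ { 10, 1000 }, { 5, 500 } },
			{ { 20, 2000 }, { 6, 600 } },
			{ { 30, 3000 }, { 7, 700 } },
		};
		unsigned long rx_packets = 0, rx_bytes = 0;
		unsigned long tx_packets = 0, tx_bytes = 0;
		int k;

		/* Zeroed totals, then one pass over every path's counters. */
		for (k = 0; k < 3; k++) {
			rx_packets += paths[k].ring.rx_frms;
			rx_bytes   += paths[k].ring.rx_bytes;
			tx_packets += paths[k].fifo.tx_frms;
			tx_bytes   += paths[k].fifo.tx_bytes;
		}
		printf("rx %lu pkts/%lu bytes, tx %lu pkts/%lu bytes\n",
		       rx_packets, rx_bytes, tx_packets, tx_bytes);
		return 0;
	}
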
3079
3080/**
3081 * vxge_ioctl
3082 * @dev: Device pointer.
3083 * @ifr: An IOCTL specific structure, that can contain a pointer to
3084 * a proprietary structure used to pass information to the driver.
3085 * @cmd: This is used to distinguish between the different commands that
3086 * can be passed to the IOCTL functions.
3087 *
3088 * Entry point for the Ioctl.
3089 */
3090static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3091{
3092 return -EOPNOTSUPP;
3093}
3094
3095/**
3096 * vxge_tx_watchdog
3097 * @dev: pointer to net device structure
3098 *
3099 * Watchdog for transmit side.
3100 * This function is triggered if the Tx Queue is stopped
3101 * for a pre-defined amount of time when the Interface is still up.
3102 */
3103static void
3104vxge_tx_watchdog(struct net_device *dev)
3105{
3106 struct vxgedev *vdev;
3107
3108 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3109
3110 vdev = (struct vxgedev *)netdev_priv(dev);
3111
3112 vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;
3113
3114 vxge_reset(vdev);
3115 vxge_debug_entryexit(VXGE_TRACE,
3116 "%s:%d Exiting...", __func__, __LINE__);
3117}
3118
3119/**
3120 * vxge_vlan_rx_register
3121 * @dev: net device pointer.
3122 * @grp: vlan group
3123 *
3124 * Vlan group registration
3125 */
3126static void
3127vxge_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
3128{
3129 struct vxgedev *vdev;
3130 struct vxge_vpath *vpath;
3131 int vp;
3132 u64 vid;
3133 enum vxge_hw_status status;
3134 int i;
3135
3136 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3137
3138 vdev = (struct vxgedev *)netdev_priv(dev);
3139
3140 vpath = &vdev->vpaths[0];
3141 if ((NULL == grp) && (vpath->is_open)) {
3142 /* Get the first vlan */
3143 status = vxge_hw_vpath_vid_get(vpath->handle, &vid);
3144
3145 while (status == VXGE_HW_OK) {
3146
3147 /* Delete this vlan from the vid table */
3148 for (vp = 0; vp < vdev->no_of_vpath; vp++) {
3149 vpath = &vdev->vpaths[vp];
3150 if (!vpath->is_open)
3151 continue;
3152
3153 vxge_hw_vpath_vid_delete(vpath->handle, vid);
3154 }
3155
3156 /* Get the next vlan to be deleted */
3157 vpath = &vdev->vpaths[0];
3158 status = vxge_hw_vpath_vid_get(vpath->handle, &vid);
3159 }
3160 }
3161
3162 vdev->vlgrp = grp;
3163
3164 for (i = 0; i < vdev->no_of_vpath; i++) {
3165 if (vdev->vpaths[i].is_configured)
3166 vdev->vpaths[i].ring.vlgrp = grp;
3167 }
3168
3169 vxge_debug_entryexit(VXGE_TRACE,
3170 "%s:%d Exiting...", __func__, __LINE__);
3171}
3172
3173/**
3174 * vxge_vlan_rx_add_vid
3175 * @dev: net device pointer.
3176 * @vid: vid
3177 *
3178 * Add the vlan id to the device's vlan id table
3179 */
3180static void
3181vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
3182{
3183 struct vxgedev *vdev;
3184 struct vxge_vpath *vpath;
3185 int vp_id;
3186
3187 vdev = (struct vxgedev *)netdev_priv(dev);
3188
3189	/* Add this vlan id to each open vpath's vid table */
3190 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
3191 vpath = &vdev->vpaths[vp_id];
3192 if (!vpath->is_open)
3193 continue;
3194 vxge_hw_vpath_vid_add(vpath->handle, vid);
3195 }
3196}
3197
3198/**
3199 * vxge_vlan_rx_kill_vid
3200 * @dev: net device pointer.
3201 * @vid: vid
3202 *
3203 * Remove the vlan id from the device's vlan id table
3204 */
3205static void
3206vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
3207{
3208 struct vxgedev *vdev;
3209 struct vxge_vpath *vpath;
3210 int vp_id;
3211
3212 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3213
3214 vdev = (struct vxgedev *)netdev_priv(dev);
3215
3216 vlan_group_set_device(vdev->vlgrp, vid, NULL);
3217
3218 /* Delete this vlan from the vid table */
3219 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
3220 vpath = &vdev->vpaths[vp_id];
3221 if (!vpath->is_open)
3222 continue;
3223 vxge_hw_vpath_vid_delete(vpath->handle, vid);
3224 }
3225 vxge_debug_entryexit(VXGE_TRACE,
3226 "%s:%d Exiting...", __func__, __LINE__);
3227}
3228
3229static const struct net_device_ops vxge_netdev_ops = {
3230 .ndo_open = vxge_open,
3231 .ndo_stop = vxge_close,
3232 .ndo_get_stats = vxge_get_stats,
3233 .ndo_start_xmit = vxge_xmit,
3234 .ndo_validate_addr = eth_validate_addr,
3235 .ndo_set_multicast_list = vxge_set_multicast,
3236
3237 .ndo_do_ioctl = vxge_ioctl,
3238
3239 .ndo_set_mac_address = vxge_set_mac_addr,
3240 .ndo_change_mtu = vxge_change_mtu,
3241 .ndo_vlan_rx_register = vxge_vlan_rx_register,
3242 .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid,
3243 .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid,
3244
3245 .ndo_tx_timeout = vxge_tx_watchdog,
3246#ifdef CONFIG_NET_POLL_CONTROLLER
3247 .ndo_poll_controller = vxge_netpoll,
3248#endif
3249};
3250
3251int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3252 struct vxge_config *config,
3253 int high_dma, int no_of_vpath,
3254 struct vxgedev **vdev_out)
3255{
3256 struct net_device *ndev;
3257 enum vxge_hw_status status = VXGE_HW_OK;
3258 struct vxgedev *vdev;
3259 int i, ret = 0, no_of_queue = 1;
3260 u64 stat;
3261
3262 *vdev_out = NULL;
3263 if (config->tx_steering_type == TX_MULTIQ_STEERING)
3264 no_of_queue = no_of_vpath;
3265
3266 ndev = alloc_etherdev_mq(sizeof(struct vxgedev),
3267 no_of_queue);
3268 if (ndev == NULL) {
3269 vxge_debug_init(
3270 vxge_hw_device_trace_level_get(hldev),
3271 "%s : device allocation failed", __func__);
3272 ret = -ENODEV;
3273 goto _out0;
3274 }
3275
3276 vxge_debug_entryexit(
3277 vxge_hw_device_trace_level_get(hldev),
3278 "%s: %s:%d Entering...",
3279 ndev->name, __func__, __LINE__);
3280
3281 vdev = netdev_priv(ndev);
3282 memset(vdev, 0, sizeof(struct vxgedev));
3283
3284 vdev->ndev = ndev;
3285 vdev->devh = hldev;
3286 vdev->pdev = hldev->pdev;
3287 memcpy(&vdev->config, config, sizeof(struct vxge_config));
3288 vdev->rx_csum = 1; /* Enable Rx CSUM by default. */
3289
3290 SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
3291
3292 ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
3293 NETIF_F_HW_VLAN_FILTER;
3294 /* Driver entry points */
3295 ndev->irq = vdev->pdev->irq;
3296 ndev->base_addr = (unsigned long) hldev->bar0;
3297
3298 ndev->netdev_ops = &vxge_netdev_ops;
3299
3300 ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
3301
3302 initialize_ethtool_ops(ndev);
3303
3304 /* Allocate memory for vpath */
3305	vdev->vpaths = kcalloc(no_of_vpath, sizeof(struct vxge_vpath),
3306			GFP_KERNEL);
3307 if (!vdev->vpaths) {
3308 vxge_debug_init(VXGE_ERR,
3309 "%s: vpath memory allocation failed",
3310 vdev->ndev->name);
3311 ret = -ENODEV;
3312 goto _out1;
3313 }
3314
3315 ndev->features |= NETIF_F_SG;
3316
3317 ndev->features |= NETIF_F_HW_CSUM;
3318 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3319		"%s : checksumming enabled", __func__);
3320
3321 if (high_dma) {
3322 ndev->features |= NETIF_F_HIGHDMA;
3323 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3324 "%s : using High DMA", __func__);
3325 }
3326
3327 ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
3328
3329 if (vdev->config.gro_enable)
3330 ndev->features |= NETIF_F_GRO;
3331
3332 if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
3333 ndev->real_num_tx_queues = no_of_vpath;
3334
3335#ifdef NETIF_F_LLTX
3336 ndev->features |= NETIF_F_LLTX;
3337#endif
3338
3339 for (i = 0; i < no_of_vpath; i++)
3340 spin_lock_init(&vdev->vpaths[i].fifo.tx_lock);
3341
3342 if (register_netdev(ndev)) {
3343 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3344 "%s: %s : device registration failed!",
3345 ndev->name, __func__);
3346 ret = -ENODEV;
3347 goto _out2;
3348 }
3349
3350 /* Set the factory defined MAC address initially */
3351 ndev->addr_len = ETH_ALEN;
3352
3353	/* Leave the link state off at this point; when the link-change
3354	 * interrupt arrives, the state will automatically be updated to
3355	 * the right value.
3356	 */
3357 netif_carrier_off(ndev);
3358
3359 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3360 "%s: Ethernet device registered",
3361 ndev->name);
3362
3363 *vdev_out = vdev;
3364
3365 /* Resetting the Device stats */
3366 status = vxge_hw_mrpcim_stats_access(
3367 hldev,
3368 VXGE_HW_STATS_OP_CLEAR_ALL_STATS,
3369 0,
3370 0,
3371 &stat);
3372
3373 if (status == VXGE_HW_ERR_PRIVILAGED_OPEARATION)
3374 vxge_debug_init(
3375 vxge_hw_device_trace_level_get(hldev),
3376			"%s: device stats clear returns "
3377			"VXGE_HW_ERR_PRIVILAGED_OPEARATION", ndev->name);
3378
3379 vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev),
3380 "%s: %s:%d Exiting...",
3381 ndev->name, __func__, __LINE__);
3382
3383 return ret;
3384_out2:
3385 kfree(vdev->vpaths);
3386_out1:
3387 free_netdev(ndev);
3388_out0:
3389 return ret;
3390}
3391
3392/*
3393 * vxge_device_unregister
3394 *
3395 * This function unregisters and frees the network device.
3396 */
3397void
3398vxge_device_unregister(struct __vxge_hw_device *hldev)
3399{
3400 struct vxgedev *vdev;
3401 struct net_device *dev;
3402 char buf[IFNAMSIZ];
3403#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
3404 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
3405 u32 level_trace;
3406#endif
3407
3408 dev = hldev->ndev;
3409 vdev = netdev_priv(dev);
3410#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
3411 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
3412 level_trace = vdev->level_trace;
3413#endif
3414 vxge_debug_entryexit(level_trace,
3415 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
3416
3417 memcpy(buf, vdev->ndev->name, IFNAMSIZ);
3418
3419	/* unregister_netdev() will call ndo_stop() if the device is up */
3420 unregister_netdev(dev);
3421
3422 flush_scheduled_work();
3423
3424 vxge_debug_init(level_trace, "%s: ethernet device unregistered", buf);
3425 vxge_debug_entryexit(level_trace,
3426 "%s: %s:%d Exiting...", buf, __func__, __LINE__);
3427}
3428
3429/*
3430 * vxge_callback_crit_err
3431 *
3432 * This function is called by the alarm handler in interrupt context.
3433 * Driver must analyze it based on the event type.
3434 */
3435static void
3436vxge_callback_crit_err(struct __vxge_hw_device *hldev,
3437 enum vxge_hw_event type, u64 vp_id)
3438{
3439 struct net_device *dev = hldev->ndev;
3440 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
3441 int vpath_idx;
3442
3443 vxge_debug_entryexit(vdev->level_trace,
3444 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
3445
3446 /* Note: This event type should be used for device wide
3447 * indications only - Serious errors, Slot freeze and critical errors
3448 */
3449 vdev->cric_err_event = type;
3450
3451 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++)
3452 if (vdev->vpaths[vpath_idx].device_id == vp_id)
3453 break;
3454
3455 if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
3456 if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
3457 vxge_debug_init(VXGE_ERR,
3458 "%s: Slot is frozen", vdev->ndev->name);
3459 } else if (type == VXGE_HW_EVENT_SERR) {
3460 vxge_debug_init(VXGE_ERR,
3461 "%s: Encountered Serious Error",
3462 vdev->ndev->name);
3463 } else if (type == VXGE_HW_EVENT_CRITICAL_ERR)
3464 vxge_debug_init(VXGE_ERR,
3465 "%s: Encountered Critical Error",
3466 vdev->ndev->name);
3467 }
3468
3469 if ((type == VXGE_HW_EVENT_SERR) ||
3470 (type == VXGE_HW_EVENT_SLOT_FREEZE)) {
3471 if (unlikely(vdev->exec_mode))
3472 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3473 } else if (type == VXGE_HW_EVENT_CRITICAL_ERR) {
3474 vxge_hw_device_mask_all(hldev);
3475 if (unlikely(vdev->exec_mode))
3476 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3477 } else if ((type == VXGE_HW_EVENT_FIFO_ERR) ||
3478 (type == VXGE_HW_EVENT_VPATH_ERR)) {
3479
3480 if (unlikely(vdev->exec_mode))
3481 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3482 else {
3483 /* check if this vpath is already set for reset */
3484 if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) {
3485
3486 /* disable interrupts for this vpath */
3487 vxge_vpath_intr_disable(vdev, vpath_idx);
3488
3489 /* stop the queue for this vpath */
3490 vxge_stop_tx_queue(&vdev->vpaths[vpath_idx].
3491 fifo);
3492 }
3493 }
3494 }
3495
3496 vxge_debug_entryexit(vdev->level_trace,
3497 "%s: %s:%d Exiting...",
3498 vdev->ndev->name, __func__, __LINE__);
3499}
3500
3501static void verify_bandwidth(void)
3502{
3503 int i, band_width, total = 0, equal_priority = 0;
3504
3505 /* 1. If user enters 0 for some fifo, give equal priority to all */
3506 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3507 if (bw_percentage[i] == 0) {
3508 equal_priority = 1;
3509 break;
3510 }
3511 }
3512
3513 if (!equal_priority) {
3514 /* 2. If sum exceeds 100, give equal priority to all */
3515 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3516 if (bw_percentage[i] == 0xFF)
3517 break;
3518
3519 total += bw_percentage[i];
3520 if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) {
3521 equal_priority = 1;
3522 break;
3523 }
3524 }
3525 }
3526
3527 if (!equal_priority) {
3528 /* Is all the bandwidth consumed? */
3529 if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) {
3530 if (i < VXGE_HW_MAX_VIRTUAL_PATHS) {
3531 /* Split rest of bw equally among next VPs*/
3532 band_width =
3533 (VXGE_HW_VPATH_BANDWIDTH_MAX - total) /
3534 (VXGE_HW_MAX_VIRTUAL_PATHS - i);
3535 if (band_width < 2) /* min of 2% */
3536 equal_priority = 1;
3537 else {
3538 for (; i < VXGE_HW_MAX_VIRTUAL_PATHS;
3539 i++)
3540 bw_percentage[i] =
3541 band_width;
3542 }
3543 }
3544 } else if (i < VXGE_HW_MAX_VIRTUAL_PATHS)
3545 equal_priority = 1;
3546 }
3547
3548 if (equal_priority) {
3549 vxge_debug_init(VXGE_ERR,
3550 "%s: Assigning equal bandwidth to all the vpaths",
3551 VXGE_DRIVER_NAME);
3552 bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX /
3553 VXGE_HW_MAX_VIRTUAL_PATHS;
3554 for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3555 bw_percentage[i] = bw_percentage[0];
3556 }
3557
3558 return;
3559}
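
A worked example of the split in the "rest of bw" branch, assuming VXGE_HW_VPATH_BANDWIDTH_MAX is 100 and VXGE_HW_MAX_VIRTUAL_PATHS is 17 (assumptions for illustration; the real constants live in the vxge headers): if the user supplies 40% and 30% for the first two vpaths and leaves the rest unset (0xFF), the loop stops at i == 2 with total == 70, and the remaining 30% is divided over the 17 - 2 = 15 unset vpaths, giving 2% each, exactly at the 2% floor. Had the per-vpath remainder come out below 2%, or had the supplied values summed past 100, the function would instead fall back to equal shares (100 / 17 = 5% each, integer division). A tiny harness for the arithmetic:

	#include <stdio.h>

	#define MAX_VPATHS 17	/* assumed VXGE_HW_MAX_VIRTUAL_PATHS */
	#define BW_MAX     100	/* assumed VXGE_HW_VPATH_BANDWIDTH_MAX */

	int main(void)
	{
		int supplied = 2, total = 40 + 30;	/* user set vpaths 0 and 1 */
		int per_path = (BW_MAX - total) / (MAX_VPATHS - supplied);

		printf("leftover split: %d%% per unset vpath (floor is 2%%)\n",
		       per_path);
		printf("equal-share fallback: %d%% per vpath\n",
		       BW_MAX / MAX_VPATHS);
		return 0;
	}
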
3560
3561/*
3562 * Vpath configuration
3563 */
3564static int __devinit vxge_config_vpaths(
3565 struct vxge_hw_device_config *device_config,
3566 u64 vpath_mask, struct vxge_config *config_param)
3567{
3568 int i, no_of_vpaths = 0, default_no_vpath = 0, temp;
3569 u32 txdl_size, txdl_per_memblock;
3570
3571 temp = driver_config->vpath_per_dev;
3572 if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) &&
3573 (max_config_dev == VXGE_MAX_CONFIG_DEV)) {
3574		/* No more CPUs available. Return the vpath count as zero. */
3575 if (driver_config->g_no_cpus == -1)
3576 return 0;
3577
3578 if (!driver_config->g_no_cpus)
3579 driver_config->g_no_cpus = num_online_cpus();
3580
3581 driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
3582 if (!driver_config->vpath_per_dev)
3583 driver_config->vpath_per_dev = 1;
3584
3585 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3586 if (!vxge_bVALn(vpath_mask, i, 1))
3587 continue;
3588 else
3589 default_no_vpath++;
3590 if (default_no_vpath < driver_config->vpath_per_dev)
3591 driver_config->vpath_per_dev = default_no_vpath;
3592
3593 driver_config->g_no_cpus = driver_config->g_no_cpus -
3594 (driver_config->vpath_per_dev * 2);
3595 if (driver_config->g_no_cpus <= 0)
3596 driver_config->g_no_cpus = -1;
3597 }
3598
3599 if (driver_config->vpath_per_dev == 1) {
3600 vxge_debug_ll_config(VXGE_TRACE,
3601 "%s: Disable tx and rx steering, "
3602 "as single vpath is configured", VXGE_DRIVER_NAME);
3603 config_param->rth_steering = NO_STEERING;
3604 config_param->tx_steering_type = NO_STEERING;
3605 device_config->rth_en = 0;
3606 }
3607
3608 /* configure bandwidth */
3609 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3610 device_config->vp_config[i].min_bandwidth = bw_percentage[i];
3611
3612 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3613 device_config->vp_config[i].vp_id = i;
3614 device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU;
3615 if (no_of_vpaths < driver_config->vpath_per_dev) {
3616 if (!vxge_bVALn(vpath_mask, i, 1)) {
3617 vxge_debug_ll_config(VXGE_TRACE,
3618 "%s: vpath: %d is not available",
3619 VXGE_DRIVER_NAME, i);
3620 continue;
3621 } else {
3622 vxge_debug_ll_config(VXGE_TRACE,
3623 "%s: vpath: %d available",
3624 VXGE_DRIVER_NAME, i);
3625 no_of_vpaths++;
3626 }
3627 } else {
3628 vxge_debug_ll_config(VXGE_TRACE,
3629 "%s: vpath: %d is not configured, "
3630 "max_config_vpath exceeded",
3631 VXGE_DRIVER_NAME, i);
3632 break;
3633 }
3634
3635 /* Configure Tx fifo's */
3636 device_config->vp_config[i].fifo.enable =
3637 VXGE_HW_FIFO_ENABLE;
3638 device_config->vp_config[i].fifo.max_frags =
3639 MAX_SKB_FRAGS;
3640 device_config->vp_config[i].fifo.memblock_size =
3641 VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;
3642
3643 txdl_size = MAX_SKB_FRAGS * sizeof(struct vxge_hw_fifo_txd);
3644 txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;
3645
3646 device_config->vp_config[i].fifo.fifo_blocks =
3647 ((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1;
3648
3649 device_config->vp_config[i].fifo.intr =
3650 VXGE_HW_FIFO_QUEUE_INTR_DISABLE;
3651
3652 /* Configure tti properties */
3653 device_config->vp_config[i].tti.intr_enable =
3654 VXGE_HW_TIM_INTR_ENABLE;
3655
3656 device_config->vp_config[i].tti.btimer_val =
3657 (VXGE_TTI_BTIMER_VAL * 1000) / 272;
3658
3659 device_config->vp_config[i].tti.timer_ac_en =
3660 VXGE_HW_TIM_TIMER_AC_ENABLE;
3661
3662 /* For msi-x with napi (each vector
3663 has a handler of its own) -
3664 Set CI to OFF for all vpaths */
3665 device_config->vp_config[i].tti.timer_ci_en =
3666 VXGE_HW_TIM_TIMER_CI_DISABLE;
3667
3668 device_config->vp_config[i].tti.timer_ri_en =
3669 VXGE_HW_TIM_TIMER_RI_DISABLE;
3670
3671 device_config->vp_config[i].tti.util_sel =
3672 VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;
3673
3674 device_config->vp_config[i].tti.ltimer_val =
3675 (VXGE_TTI_LTIMER_VAL * 1000) / 272;
3676
3677 device_config->vp_config[i].tti.rtimer_val =
3678 (VXGE_TTI_RTIMER_VAL * 1000) / 272;
3679
3680 device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A;
3681 device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B;
3682 device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C;
3683 device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A;
3684 device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B;
3685 device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C;
3686 device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D;
3687
3688 /* Configure Rx rings */
3689 device_config->vp_config[i].ring.enable =
3690 VXGE_HW_RING_ENABLE;
3691
3692 device_config->vp_config[i].ring.ring_blocks =
3693 VXGE_HW_DEF_RING_BLOCKS;
3694 device_config->vp_config[i].ring.buffer_mode =
3695 VXGE_HW_RING_RXD_BUFFER_MODE_1;
3696 device_config->vp_config[i].ring.rxds_limit =
3697 VXGE_HW_DEF_RING_RXDS_LIMIT;
3698 device_config->vp_config[i].ring.scatter_mode =
3699 VXGE_HW_RING_SCATTER_MODE_A;
3700
3701 /* Configure rti properties */
3702 device_config->vp_config[i].rti.intr_enable =
3703 VXGE_HW_TIM_INTR_ENABLE;
3704
3705 device_config->vp_config[i].rti.btimer_val =
3706 (VXGE_RTI_BTIMER_VAL * 1000)/272;
3707
3708 device_config->vp_config[i].rti.timer_ac_en =
3709 VXGE_HW_TIM_TIMER_AC_ENABLE;
3710
3711 device_config->vp_config[i].rti.timer_ci_en =
3712 VXGE_HW_TIM_TIMER_CI_DISABLE;
3713
3714 device_config->vp_config[i].rti.timer_ri_en =
3715 VXGE_HW_TIM_TIMER_RI_DISABLE;
3716
3717 device_config->vp_config[i].rti.util_sel =
3718 VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;
3719
3720 device_config->vp_config[i].rti.urange_a =
3721 RTI_RX_URANGE_A;
3722 device_config->vp_config[i].rti.urange_b =
3723 RTI_RX_URANGE_B;
3724 device_config->vp_config[i].rti.urange_c =
3725 RTI_RX_URANGE_C;
3726 device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A;
3727 device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B;
3728 device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C;
3729 device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D;
3730
3731 device_config->vp_config[i].rti.rtimer_val =
3732 (VXGE_RTI_RTIMER_VAL * 1000) / 272;
3733
3734 device_config->vp_config[i].rti.ltimer_val =
3735 (VXGE_RTI_LTIMER_VAL * 1000) / 272;
3736
3737 device_config->vp_config[i].rpa_strip_vlan_tag =
3738 vlan_tag_strip;
3739 }
3740
3741 driver_config->vpath_per_dev = temp;
3742 return no_of_vpaths;
3743}
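
The fifo_blocks expression above is an integer ceiling: ((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1 rounds the 84-descriptor fifo (VXGE_DEF_FIFO_LENGTH, defined in vxge-main.h) up to whole memory blocks. A worked sketch of the arithmetic; the 32-byte TxD, 18 frags and 4 KiB memblock below are assumed values for illustration, not the hardware's real sizes:

	#include <stdio.h>

	int main(void)
	{
		unsigned int txd_size = 32, max_frags = 18;	/* assumed sizes */
		unsigned int memblock = 4096;			/* assumed block size */
		unsigned int fifo_len = 84;			/* VXGE_DEF_FIFO_LENGTH */

		unsigned int txdl_size = max_frags * txd_size;	/* 576 bytes per TxDL */
		unsigned int per_block = memblock / txdl_size;	/* 7 TxDLs per block */

		/* ((n - 1) / d) + 1 is ceil(n / d) in integer arithmetic: */
		unsigned int fifo_blocks = ((fifo_len - 1) / per_block) + 1;

		printf("txdl_size=%u per_block=%u fifo_blocks=%u\n",
		       txdl_size, per_block, fifo_blocks);	/* 576 7 12 */
		return 0;
	}
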
3744
3745/* initialize the device configuration */
3746static void __devinit vxge_device_config_init(
3747 struct vxge_hw_device_config *device_config,
3748 int *intr_type)
3749{
3750 /* Used for CQRQ/SRQ. */
3751 device_config->dma_blockpool_initial =
3752 VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
3753
3754 device_config->dma_blockpool_max =
3755 VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
3756
3757 if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
3758 max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;
3759
3760#ifndef CONFIG_PCI_MSI
3761 vxge_debug_init(VXGE_ERR,
3762 "%s: This Kernel does not support "
3763 "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
3764 *intr_type = INTA;
3765#endif
3766
3767	/* Configure the interrupt mode: MSI-X or INTA (IRQ line). */
3768 switch (*intr_type) {
3769 case INTA:
3770 device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
3771 break;
3772
3773 case MSI_X:
3774 device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX;
3775 break;
3776 }
3777 /* Timer period between device poll */
3778 device_config->device_poll_millis = VXGE_TIMER_DELAY;
3779
3780 /* Configure mac based steering. */
3781 device_config->rts_mac_en = addr_learn_en;
3782
3783 /* Configure Vpaths */
3784 device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT;
3785
3786 vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
3787 __func__);
3788 vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_initial : %d",
3789 device_config->dma_blockpool_initial);
3790 vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_max : %d",
3791 device_config->dma_blockpool_max);
3792 vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
3793 device_config->intr_mode);
3794 vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
3795 device_config->device_poll_millis);
3796 vxge_debug_ll_config(VXGE_TRACE, "rts_mac_en : %d",
3797 device_config->rts_mac_en);
3798 vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
3799 device_config->rth_en);
3800 vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
3801 device_config->rth_it_type);
3802}
3803
3804static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
3805{
3806 int i;
3807
3808 vxge_debug_init(VXGE_TRACE,
3809 "%s: %d Vpath(s) opened",
3810 vdev->ndev->name, vdev->no_of_vpath);
3811
3812 switch (vdev->config.intr_type) {
3813 case INTA:
3814 vxge_debug_init(VXGE_TRACE,
3815 "%s: Interrupt type INTA", vdev->ndev->name);
3816 break;
3817
3818 case MSI_X:
3819 vxge_debug_init(VXGE_TRACE,
3820 "%s: Interrupt type MSI-X", vdev->ndev->name);
3821 break;
3822 }
3823
3824 if (vdev->config.rth_steering) {
3825 vxge_debug_init(VXGE_TRACE,
3826 "%s: RTH steering enabled for TCP_IPV4",
3827 vdev->ndev->name);
3828 } else {
3829 vxge_debug_init(VXGE_TRACE,
3830 "%s: RTH steering disabled", vdev->ndev->name);
3831 }
3832
3833 switch (vdev->config.tx_steering_type) {
3834 case NO_STEERING:
3835 vxge_debug_init(VXGE_TRACE,
3836 "%s: Tx steering disabled", vdev->ndev->name);
3837 break;
3838 case TX_PRIORITY_STEERING:
3839 vxge_debug_init(VXGE_TRACE,
3840 "%s: Unsupported tx steering option",
3841 vdev->ndev->name);
3842 vxge_debug_init(VXGE_TRACE,
3843 "%s: Tx steering disabled", vdev->ndev->name);
3844 vdev->config.tx_steering_type = 0;
3845 break;
3846 case TX_VLAN_STEERING:
3847 vxge_debug_init(VXGE_TRACE,
3848 "%s: Unsupported tx steering option",
3849 vdev->ndev->name);
3850 vxge_debug_init(VXGE_TRACE,
3851 "%s: Tx steering disabled", vdev->ndev->name);
3852 vdev->config.tx_steering_type = 0;
3853 break;
3854 case TX_MULTIQ_STEERING:
3855 vxge_debug_init(VXGE_TRACE,
3856 "%s: Tx multiqueue steering enabled",
3857 vdev->ndev->name);
3858 break;
3859 case TX_PORT_STEERING:
3860 vxge_debug_init(VXGE_TRACE,
3861 "%s: Tx port steering enabled",
3862 vdev->ndev->name);
3863 break;
3864 default:
3865 vxge_debug_init(VXGE_ERR,
3866 "%s: Unsupported tx steering type",
3867 vdev->ndev->name);
3868 vxge_debug_init(VXGE_TRACE,
3869 "%s: Tx steering disabled", vdev->ndev->name);
3870 vdev->config.tx_steering_type = 0;
3871 }
3872
3873 if (vdev->config.gro_enable) {
3874 vxge_debug_init(VXGE_ERR,
3875 "%s: Generic receive offload enabled",
3876 vdev->ndev->name);
3877 } else
3878 vxge_debug_init(VXGE_TRACE,
3879 "%s: Generic receive offload disabled",
3880 vdev->ndev->name);
3881
3882 if (vdev->config.addr_learn_en)
3883 vxge_debug_init(VXGE_TRACE,
3884 "%s: MAC Address learning enabled", vdev->ndev->name);
3885
3886 vxge_debug_init(VXGE_TRACE,
3887 "%s: Rx doorbell mode enabled", vdev->ndev->name);
3888
3889 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3890 if (!vxge_bVALn(vpath_mask, i, 1))
3891 continue;
3892 vxge_debug_ll_config(VXGE_TRACE,
3893 "%s: MTU size - %d", vdev->ndev->name,
3894 ((struct __vxge_hw_device *)(vdev->devh))->
3895 config.vp_config[i].mtu);
3896 vxge_debug_init(VXGE_TRACE,
3897 "%s: VLAN tag stripping %s", vdev->ndev->name,
3898 ((struct __vxge_hw_device *)(vdev->devh))->
3899 config.vp_config[i].rpa_strip_vlan_tag
3900 ? "Enabled" : "Disabled");
3901 vxge_debug_init(VXGE_TRACE,
3902 "%s: Ring blocks : %d", vdev->ndev->name,
3903 ((struct __vxge_hw_device *)(vdev->devh))->
3904 config.vp_config[i].ring.ring_blocks);
3905 vxge_debug_init(VXGE_TRACE,
3906 "%s: Fifo blocks : %d", vdev->ndev->name,
3907 ((struct __vxge_hw_device *)(vdev->devh))->
3908 config.vp_config[i].fifo.fifo_blocks);
3909 vxge_debug_ll_config(VXGE_TRACE,
3910 "%s: Max frags : %d", vdev->ndev->name,
3911 ((struct __vxge_hw_device *)(vdev->devh))->
3912 config.vp_config[i].fifo.max_frags);
3913 break;
3914 }
3915}
3916
3917#ifdef CONFIG_PM
3918/**
3919 * vxge_pm_suspend - vxge power management suspend entry point
3920 *
3921 */
3922static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state)
3923{
3924 return -ENOSYS;
3925}
3926/**
3927 * vxge_pm_resume - vxge power management resume entry point
3928 *
3929 */
3930static int vxge_pm_resume(struct pci_dev *pdev)
3931{
3932 return -ENOSYS;
3933}
3934
3935#endif
3936
3937/**
3938 * vxge_io_error_detected - called when PCI error is detected
3939 * @pdev: Pointer to PCI device
3940 * @state: The current pci connection state
3941 *
3942 * This function is called after a PCI bus error affecting
3943 * this device has been detected.
3944 */
3945static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
3946 pci_channel_state_t state)
3947{
3948 struct __vxge_hw_device *hldev =
3949 (struct __vxge_hw_device *) pci_get_drvdata(pdev);
3950 struct net_device *netdev = hldev->ndev;
3951
3952 netif_device_detach(netdev);
3953
3954 if (netif_running(netdev)) {
3955 /* Bring down the card, while avoiding PCI I/O */
3956 do_vxge_close(netdev, 0);
3957 }
3958
3959 pci_disable_device(pdev);
3960
3961 return PCI_ERS_RESULT_NEED_RESET;
3962}
3963
3964/**
3965 * vxge_io_slot_reset - called after the pci bus has been reset.
3966 * @pdev: Pointer to PCI device
3967 *
3968 * Restart the card from scratch, as if from a cold-boot.
3969 * At this point, the card has experienced a hard reset,
3970 * followed by fixups by BIOS, and has its config space
3971 * set up identically to what it was at cold boot.
3972 */
3973static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
3974{
3975 struct __vxge_hw_device *hldev =
3976 (struct __vxge_hw_device *) pci_get_drvdata(pdev);
3977 struct net_device *netdev = hldev->ndev;
3978
3979 struct vxgedev *vdev = netdev_priv(netdev);
3980
3981 if (pci_enable_device(pdev)) {
3982 printk(KERN_ERR "%s: "
3983 "Cannot re-enable device after reset\n",
3984 VXGE_DRIVER_NAME);
3985 return PCI_ERS_RESULT_DISCONNECT;
3986 }
3987
3988 pci_set_master(pdev);
3989 vxge_reset(vdev);
3990
3991 return PCI_ERS_RESULT_RECOVERED;
3992}
3993
3994/**
3995 * vxge_io_resume - called when traffic can start flowing again.
3996 * @pdev: Pointer to PCI device
3997 *
3998 * This callback is called when the error recovery driver tells
3999 * us that it is OK to resume normal operation.
4000 */
4001static void vxge_io_resume(struct pci_dev *pdev)
4002{
4003 struct __vxge_hw_device *hldev =
4004 (struct __vxge_hw_device *) pci_get_drvdata(pdev);
4005 struct net_device *netdev = hldev->ndev;
4006
4007 if (netif_running(netdev)) {
4008 if (vxge_open(netdev)) {
4009 printk(KERN_ERR "%s: "
4010 "Can't bring device back up after reset\n",
4011 VXGE_DRIVER_NAME);
4012 return;
4013 }
4014 }
4015
4016 netif_device_attach(netdev);
4017}
4018
4019/**
4020 * vxge_probe
4021 * @pdev : structure containing the PCI related information of the device.
4022 * @pre: the matched entry from vxge_id_table, the list of PCI devices
 *       supported by the driver.
4023 * Description:
4024 * This function is called when a new PCI device gets detected and initializes
4025 * it.
4026 * Return value:
4027 * returns 0 on success and negative on failure.
4028 *
4029 */
4030static int __devinit
4031vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4032{
4033 struct __vxge_hw_device *hldev;
4034 enum vxge_hw_status status;
4035 int ret;
4036 int high_dma = 0;
4037 u64 vpath_mask = 0;
4038 struct vxgedev *vdev;
4039 struct vxge_config ll_config;
4040 struct vxge_hw_device_config *device_config = NULL;
4041 struct vxge_hw_device_attr attr;
4042 int i, j, no_of_vpath = 0, max_vpath_supported = 0;
4043 u8 *macaddr;
4044 struct vxge_mac_addrs *entry;
4045 static int bus = -1, device = -1;
4046 u8 new_device = 0;
4047
4048 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
4049 attr.pdev = pdev;
4050
4051 if (bus != pdev->bus->number)
4052 new_device = 1;
4053 if (device != PCI_SLOT(pdev->devfn))
4054 new_device = 1;
4055
4056 bus = pdev->bus->number;
4057 device = PCI_SLOT(pdev->devfn);
4058
4059 if (new_device) {
4060 if (driver_config->config_dev_cnt &&
4061 (driver_config->config_dev_cnt !=
4062 driver_config->total_dev_cnt))
4063 vxge_debug_init(VXGE_ERR,
4064 "%s: Configured %d of %d devices",
4065 VXGE_DRIVER_NAME,
4066 driver_config->config_dev_cnt,
4067 driver_config->total_dev_cnt);
4068 driver_config->config_dev_cnt = 0;
4069 driver_config->total_dev_cnt = 0;
4070 driver_config->g_no_cpus = 0;
4071 driver_config->vpath_per_dev = max_config_vpath;
4072 }
4073
4074 driver_config->total_dev_cnt++;
4075 if (++driver_config->config_dev_cnt > max_config_dev) {
4076 ret = 0;
4077 goto _exit0;
4078 }
4079
4080 device_config = kzalloc(sizeof(struct vxge_hw_device_config),
4081 GFP_KERNEL);
4082 if (!device_config) {
4083 ret = -ENOMEM;
4084 vxge_debug_init(VXGE_ERR,
4085 "device_config : malloc failed %s %d",
4086 __FILE__, __LINE__);
4087 goto _exit0;
4088 }
4089
4090 memset(&ll_config, 0, sizeof(struct vxge_config));
4091 ll_config.tx_steering_type = TX_MULTIQ_STEERING;
4092 ll_config.intr_type = MSI_X;
4093 ll_config.napi_weight = NEW_NAPI_WEIGHT;
4094 ll_config.rth_steering = RTH_STEERING;
4095
4096 /* get the default configuration parameters */
4097 vxge_hw_device_config_default_get(device_config);
4098
4099 /* initialize configuration parameters */
4100 vxge_device_config_init(device_config, &ll_config.intr_type);
4101
4102 ret = pci_enable_device(pdev);
4103 if (ret) {
4104 vxge_debug_init(VXGE_ERR,
4105			"%s : cannot enable PCI device", __func__);
4106 goto _exit0;
4107 }
4108
4109	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4110 vxge_debug_ll_config(VXGE_TRACE,
4111 "%s : using 64bit DMA", __func__);
4112
4113 high_dma = 1;
4114
4115		if (pci_set_consistent_dma_mask(pdev,
4116						DMA_BIT_MASK(64))) {
4117 vxge_debug_init(VXGE_ERR,
4118 "%s : unable to obtain 64bit DMA for "
4119 "consistent allocations", __func__);
4120 ret = -ENOMEM;
4121 goto _exit1;
4122 }
4123	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
4124 vxge_debug_ll_config(VXGE_TRACE,
4125 "%s : using 32bit DMA", __func__);
4126 } else {
4127 ret = -ENOMEM;
4128 goto _exit1;
4129 }
4130
4131 if (pci_request_regions(pdev, VXGE_DRIVER_NAME)) {
4132 vxge_debug_init(VXGE_ERR,
4133 "%s : request regions failed", __func__);
4134 ret = -ENODEV;
4135 goto _exit1;
4136 }
4137
4138 pci_set_master(pdev);
4139
4140 attr.bar0 = pci_ioremap_bar(pdev, 0);
4141 if (!attr.bar0) {
4142 vxge_debug_init(VXGE_ERR,
4143 "%s : cannot remap io memory bar0", __func__);
4144 ret = -ENODEV;
4145 goto _exit2;
4146 }
4147 vxge_debug_ll_config(VXGE_TRACE,
4148 "pci ioremap bar0: %p:0x%llx",
4149 attr.bar0,
4150 (unsigned long long)pci_resource_start(pdev, 0));
4151
4152 attr.bar1 = pci_ioremap_bar(pdev, 2);
4153 if (!attr.bar1) {
4154 vxge_debug_init(VXGE_ERR,
4155 "%s : cannot remap io memory bar2", __func__);
4156 ret = -ENODEV;
4157 goto _exit3;
4158 }
4159 vxge_debug_ll_config(VXGE_TRACE,
4160 "pci ioremap bar1: %p:0x%llx",
4161 attr.bar1,
4162 (unsigned long long)pci_resource_start(pdev, 2));
4163
4164 status = vxge_hw_device_hw_info_get(attr.bar0,
4165 &ll_config.device_hw_info);
4166 if (status != VXGE_HW_OK) {
4167 vxge_debug_init(VXGE_ERR,
4168			"%s: Reading of hardware info failed. "
4169			"Please try upgrading the firmware.", VXGE_DRIVER_NAME);
4170 ret = -EINVAL;
4171 goto _exit4;
4172 }
4173
4174 if (ll_config.device_hw_info.fw_version.major !=
4175 VXGE_DRIVER_VERSION_MAJOR) {
4176 vxge_debug_init(VXGE_ERR,
4177 "FW Ver.(maj): %d not driver's expected version: %d",
4178 ll_config.device_hw_info.fw_version.major,
4179 VXGE_DRIVER_VERSION_MAJOR);
4180 ret = -EINVAL;
4181 goto _exit4;
4182 }
4183
4184 vpath_mask = ll_config.device_hw_info.vpath_mask;
4185 if (vpath_mask == 0) {
4186 vxge_debug_ll_config(VXGE_TRACE,
4187 "%s: No vpaths available in device", VXGE_DRIVER_NAME);
4188 ret = -EINVAL;
4189 goto _exit4;
4190 }
4191
4192 vxge_debug_ll_config(VXGE_TRACE,
4193 "%s:%d Vpath mask = %llx", __func__, __LINE__,
4194 (unsigned long long)vpath_mask);
4195
4196 /* Check how many vpaths are available */
4197 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4198 if (!((vpath_mask) & vxge_mBIT(i)))
4199 continue;
4200 max_vpath_supported++;
4201 }
4202
4203 /*
4204 * Configure vpaths and get driver configured number of vpaths
4205 * which is less than or equal to the maximum vpaths per function.
4206 */
4207 no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, &ll_config);
4208 if (!no_of_vpath) {
4209 vxge_debug_ll_config(VXGE_ERR,
4210 "%s: No more vpaths to configure", VXGE_DRIVER_NAME);
4211 ret = 0;
4212 goto _exit4;
4213 }
4214
4215 /* Setting driver callbacks */
4216 attr.uld_callbacks.link_up = vxge_callback_link_up;
4217 attr.uld_callbacks.link_down = vxge_callback_link_down;
4218 attr.uld_callbacks.crit_err = vxge_callback_crit_err;
4219
4220 status = vxge_hw_device_initialize(&hldev, &attr, device_config);
4221 if (status != VXGE_HW_OK) {
4222 vxge_debug_init(VXGE_ERR,
4223 "Failed to initialize device (%d)", status);
4224 ret = -EINVAL;
4225 goto _exit4;
4226 }
4227
4228 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
4229
4230 /* set private device info */
4231 pci_set_drvdata(pdev, hldev);
4232
4233 ll_config.gro_enable = VXGE_GRO_ALWAYS_AGGREGATE;
4234 ll_config.fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
4235 ll_config.addr_learn_en = addr_learn_en;
4236 ll_config.rth_algorithm = RTH_ALG_JENKINS;
4237 ll_config.rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4;
4238 ll_config.rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE;
4239 ll_config.rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
4240 ll_config.rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
4241 ll_config.rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
4242 ll_config.rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
4243 ll_config.rth_bkt_sz = RTH_BUCKET_SIZE;
4244 ll_config.tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4245 ll_config.rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4246
4247 if (vxge_device_register(hldev, &ll_config, high_dma, no_of_vpath,
4248 &vdev)) {
4249 ret = -EINVAL;
4250 goto _exit5;
4251 }
4252
4253 vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
4254 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4255 vxge_hw_device_trace_level_get(hldev));
4256
4257 /* set private HW device info */
4258 hldev->ndev = vdev->ndev;
4259 vdev->mtu = VXGE_HW_DEFAULT_MTU;
4260 vdev->bar0 = attr.bar0;
4261 vdev->bar1 = attr.bar1;
4262 vdev->max_vpath_supported = max_vpath_supported;
4263 vdev->no_of_vpath = no_of_vpath;
4264
4265 /* Virtual Path count */
4266 for (i = 0, j = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4267 if (!vxge_bVALn(vpath_mask, i, 1))
4268 continue;
4269 if (j >= vdev->no_of_vpath)
4270 break;
4271
4272 vdev->vpaths[j].is_configured = 1;
4273 vdev->vpaths[j].device_id = i;
4274 vdev->vpaths[j].fifo.driver_id = j;
4275 vdev->vpaths[j].ring.driver_id = j;
4276 vdev->vpaths[j].vdev = vdev;
4277 vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
4278 memcpy((u8 *)vdev->vpaths[j].macaddr,
4279 (u8 *)ll_config.device_hw_info.mac_addrs[i],
4280 ETH_ALEN);
4281
4282 /* Initialize the mac address list header */
4283 INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list);
4284
4285 vdev->vpaths[j].mac_addr_cnt = 0;
4286 vdev->vpaths[j].mcast_addr_cnt = 0;
4287 j++;
4288 }
4289 vdev->exec_mode = VXGE_EXEC_MODE_DISABLE;
4290 vdev->max_config_port = max_config_port;
4291
4292 vdev->vlan_tag_strip = vlan_tag_strip;
4293
4294 /* map the hashing selector table to the configured vpaths */
4295 for (i = 0; i < vdev->no_of_vpath; i++)
4296 vdev->vpath_selector[i] = vpath_selector[i];
4297
4298 macaddr = (u8 *)vdev->vpaths[0].macaddr;
4299
4300 ll_config.device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
4301 ll_config.device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
4302 ll_config.device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
4303
4304 vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
4305 vdev->ndev->name, ll_config.device_hw_info.serial_number);
4306
4307 vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
4308 vdev->ndev->name, ll_config.device_hw_info.part_number);
4309
4310 vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
4311 vdev->ndev->name, ll_config.device_hw_info.product_desc);
4312
4313 vxge_debug_init(VXGE_TRACE,
4314 "%s: MAC ADDR: %02X:%02X:%02X:%02X:%02X:%02X",
4315 vdev->ndev->name, macaddr[0], macaddr[1], macaddr[2],
4316 macaddr[3], macaddr[4], macaddr[5]);
4317
4318 vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
4319 vdev->ndev->name, vxge_hw_device_link_width_get(hldev));
4320
4321 vxge_debug_init(VXGE_TRACE,
4322 "%s: Firmware version : %s Date : %s", vdev->ndev->name,
4323 ll_config.device_hw_info.fw_version.version,
4324 ll_config.device_hw_info.fw_date.date);
4325
4326 vxge_print_parm(vdev, vpath_mask);
4327
4328	/* Store the fw version for the ethtool option */
4329 strcpy(vdev->fw_version, ll_config.device_hw_info.fw_version.version);
4330 memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
4331 memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN);
4332
4333 /* Copy the station mac address to the list */
4334 for (i = 0; i < vdev->no_of_vpath; i++) {
4335 entry = (struct vxge_mac_addrs *)
4336 kzalloc(sizeof(struct vxge_mac_addrs),
4337 GFP_KERNEL);
4338 if (NULL == entry) {
4339 vxge_debug_init(VXGE_ERR,
4340 "%s: mac_addr_list : memory allocation failed",
4341 vdev->ndev->name);
4342 ret = -EPERM;
4343 goto _exit6;
4344 }
4345 macaddr = (u8 *)&entry->macaddr;
4346 memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
4347 list_add(&entry->item, &vdev->vpaths[i].mac_addr_list);
4348 vdev->vpaths[i].mac_addr_cnt = 1;
4349 }
4350
4351 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
4352 vdev->ndev->name, __func__, __LINE__);
4353
4354 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
4355 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4356 vxge_hw_device_trace_level_get(hldev));
4357
4358 return 0;
4359
4360_exit6:
4361 for (i = 0; i < vdev->no_of_vpath; i++)
4362 vxge_free_mac_add_list(&vdev->vpaths[i]);
4363
4364 vxge_device_unregister(hldev);
4365_exit5:
4366 vxge_hw_device_terminate(hldev);
4367_exit4:
4368 iounmap(attr.bar1);
4369_exit3:
4370 iounmap(attr.bar0);
4371_exit2:
4372 pci_release_regions(pdev);
4373_exit1:
4374 pci_disable_device(pdev);
4375_exit0:
4376 kfree(device_config);
4377 driver_config->config_dev_cnt--;
4378 pci_set_drvdata(pdev, NULL);
4379 return ret;
4380}
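
The DMA-mask negotiation near the top of vxge_probe() follows the standard 64-then-32-bit fallback: try the full 64-bit mask, remember the result in high_dma, and only fail the probe if even 32-bit addressing is refused. A self-contained sketch of that decision logic; try_set_dma_mask() is a stand-in for pci_set_dma_mask(), which returns 0 on success:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in: pretend the platform only supports 32-bit DMA. */
	static int try_set_dma_mask(uint64_t mask)
	{
		return mask <= 0xffffffffULL ? 0 : -1;	/* 0 == success */
	}

	int main(void)
	{
		bool high_dma = false;

		if (!try_set_dma_mask(~0ULL))
			high_dma = true;		/* 64-bit DMA available */
		else if (try_set_dma_mask(0xffffffffULL))
			return 1;			/* no usable DMA: fail probe */

		printf("high_dma = %d\n", high_dma);
		return 0;
	}
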
4381
4382/**
4383 * vxge_remove - Free the PCI device
4384 * @pdev: structure containing the PCI related information of the device.
4385 * Description: This function is called by the PCI subsystem to release a
4386 * PCI device and free up all resources held by the device.
4387 */
4388static void __devexit
4389vxge_remove(struct pci_dev *pdev)
4390{
4391 struct __vxge_hw_device *hldev;
4392 struct vxgedev *vdev = NULL;
4393 struct net_device *dev;
4394 int i = 0;
4395#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
4396 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
4397 u32 level_trace;
4398#endif
4399
4400 hldev = (struct __vxge_hw_device *) pci_get_drvdata(pdev);
4401
4402 if (hldev == NULL)
4403 return;
4404 dev = hldev->ndev;
4405 vdev = netdev_priv(dev);
4406
4407#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
4408 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
4409 level_trace = vdev->level_trace;
4410#endif
4411 vxge_debug_entryexit(level_trace,
4412 "%s:%d", __func__, __LINE__);
4413
4414 vxge_debug_init(level_trace,
4415 "%s : removing PCI device...", __func__);
4416 vxge_device_unregister(hldev);
4417
4418 for (i = 0; i < vdev->no_of_vpath; i++) {
4419 vxge_free_mac_add_list(&vdev->vpaths[i]);
4420 vdev->vpaths[i].mcast_addr_cnt = 0;
4421 vdev->vpaths[i].mac_addr_cnt = 0;
4422 }
4423
4424 kfree(vdev->vpaths);
4425
4426 iounmap(vdev->bar0);
4427 iounmap(vdev->bar1);
4428
4429 /* we are safe to free it now */
4430 free_netdev(dev);
4431
4432 vxge_debug_init(level_trace,
4433 "%s:%d Device unregistered", __func__, __LINE__);
4434
4435 vxge_hw_device_terminate(hldev);
4436
4437 pci_disable_device(pdev);
4438 pci_release_regions(pdev);
4439 pci_set_drvdata(pdev, NULL);
4440 vxge_debug_entryexit(level_trace,
4441 "%s:%d Exiting...", __func__, __LINE__);
4442}
4443
4444static struct pci_error_handlers vxge_err_handler = {
4445 .error_detected = vxge_io_error_detected,
4446 .slot_reset = vxge_io_slot_reset,
4447 .resume = vxge_io_resume,
4448};
4449
4450static struct pci_driver vxge_driver = {
4451 .name = VXGE_DRIVER_NAME,
4452 .id_table = vxge_id_table,
4453 .probe = vxge_probe,
4454 .remove = __devexit_p(vxge_remove),
4455#ifdef CONFIG_PM
4456 .suspend = vxge_pm_suspend,
4457 .resume = vxge_pm_resume,
4458#endif
4459 .err_handler = &vxge_err_handler,
4460};
4461
4462static int __init
4463vxge_starter(void)
4464{
4465 int ret = 0;
4466 char version[32];
4467 snprintf(version, 32, "%s", DRV_VERSION);
4468
4469	printk(KERN_INFO "%s: Copyright(c) 2002-2009 Neterion Inc\n",
4470 VXGE_DRIVER_NAME);
4471	printk(KERN_INFO "%s: Driver version: %s\n",
4472 VXGE_DRIVER_NAME, version);
4473
4474 verify_bandwidth();
4475
4476 driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL);
4477 if (!driver_config)
4478 return -ENOMEM;
4479
4480 ret = pci_register_driver(&vxge_driver);
4481
4482 if (driver_config->config_dev_cnt &&
4483 (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
4484 vxge_debug_init(VXGE_ERR,
4485 "%s: Configured %d of %d devices",
4486 VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
4487 driver_config->total_dev_cnt);
4488
4489 if (ret)
4490 kfree(driver_config);
4491
4492 return ret;
4493}
4494
4495static void __exit
4496vxge_closer(void)
4497{
4498 pci_unregister_driver(&vxge_driver);
4499 kfree(driver_config);
4500}
4501module_init(vxge_starter);
4502module_exit(vxge_closer);
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
new file mode 100644
index 000000000000..9704b2bd4320
--- /dev/null
+++ b/drivers/net/vxge/vxge-main.h
@@ -0,0 +1,557 @@
1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-main.h: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2009 Neterion Inc.
13 ******************************************************************************/
14#ifndef VXGE_MAIN_H
15#define VXGE_MAIN_H
16
17#include "vxge-traffic.h"
18#include "vxge-config.h"
19#include "vxge-version.h"
20#include <linux/list.h>
21
22#define VXGE_DRIVER_NAME "vxge"
23#define VXGE_DRIVER_VENDOR "Neterion, Inc"
24#define VXGE_DRIVER_VERSION_MAJOR 0
25
26#define DRV_VERSION VXGE_VERSION_MAJOR"."VXGE_VERSION_MINOR"."\
27 VXGE_VERSION_FIX"."VXGE_VERSION_BUILD"-"\
28 VXGE_VERSION_FOR
29
30#define PCI_DEVICE_ID_TITAN_WIN 0x5733
31#define PCI_DEVICE_ID_TITAN_UNI 0x5833
32#define VXGE_USE_DEFAULT 0xffffffff
33#define VXGE_HW_VPATH_MSIX_ACTIVE 4
34#define VXGE_HW_RXSYNC_FREQ_CNT 4
35#define VXGE_LL_WATCH_DOG_TIMEOUT (15 * HZ)
36#define VXGE_LL_RX_COPY_THRESHOLD 256
37#define VXGE_DEF_FIFO_LENGTH 84
38
39#define NO_STEERING 0
40#define PORT_STEERING 0x1
41#define RTH_STEERING 0x2
42#define RX_TOS_STEERING 0x3
43#define RX_VLAN_STEERING 0x4
44#define RTH_BUCKET_SIZE 4
45
46#define TX_PRIORITY_STEERING 1
47#define TX_VLAN_STEERING 2
48#define TX_PORT_STEERING 3
49#define TX_MULTIQ_STEERING 4
50
51#define VXGE_HW_MAC_ADDR_LEARN_DEFAULT VXGE_HW_RTS_MAC_DISABLE
52
53#define VXGE_TTI_BTIMER_VAL 250000
54
55#define VXGE_TTI_LTIMER_VAL 1000
56#define VXGE_TTI_RTIMER_VAL 0
57#define VXGE_RTI_BTIMER_VAL 250
58#define VXGE_RTI_LTIMER_VAL 100
59#define VXGE_RTI_RTIMER_VAL 0
60#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
61#define VXGE_ISR_POLLING_CNT 8
62#define VXGE_MAX_CONFIG_DEV 0xFF
63#define VXGE_EXEC_MODE_DISABLE 0
64#define VXGE_EXEC_MODE_ENABLE 1
65#define VXGE_MAX_CONFIG_PORT 1
66#define VXGE_ALL_VID_DISABLE 0
67#define VXGE_ALL_VID_ENABLE 1
68#define VXGE_PAUSE_CTRL_DISABLE 0
69#define VXGE_PAUSE_CTRL_ENABLE 1
70
71#define TTI_TX_URANGE_A 5
72#define TTI_TX_URANGE_B 15
73#define TTI_TX_URANGE_C 40
74#define TTI_TX_UFC_A 5
75#define TTI_TX_UFC_B 40
76#define TTI_TX_UFC_C 60
77#define TTI_TX_UFC_D 100
78
79#define RTI_RX_URANGE_A 5
80#define RTI_RX_URANGE_B 15
81#define RTI_RX_URANGE_C 40
82#define RTI_RX_UFC_A 1
83#define RTI_RX_UFC_B 5
84#define RTI_RX_UFC_C 10
85#define RTI_RX_UFC_D 15
86
87/* Timer period in milliseconds */
88#define VXGE_TIMER_DELAY 10000
89
90#define VXGE_LL_MAX_FRAME_SIZE(dev) ((dev)->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE)
91
92enum vxge_reset_event {
93 /* reset events */
94 VXGE_LL_VPATH_RESET = 0,
95 VXGE_LL_DEVICE_RESET = 1,
96 VXGE_LL_FULL_RESET = 2,
97 VXGE_LL_START_RESET = 3,
98 VXGE_LL_COMPL_RESET = 4
99};
100/* These flags represent the device's temporary state */
101enum vxge_device_state_t {
102__VXGE_STATE_RESET_CARD = 0,
103__VXGE_STATE_CARD_UP
104};
105
106enum vxge_mac_addr_state {
107 /* mac address states */
108 VXGE_LL_MAC_ADDR_IN_LIST = 0,
109 VXGE_LL_MAC_ADDR_IN_DA_TABLE = 1
110};
111
112struct vxge_drv_config {
113 int config_dev_cnt;
114 int total_dev_cnt;
115 unsigned long inta_dev_open;
116 int g_no_cpus;
117 unsigned int vpath_per_dev;
118};
119
120struct macInfo {
121 unsigned char macaddr[ETH_ALEN];
122 unsigned char macmask[ETH_ALEN];
123 unsigned int vpath_no;
124 enum vxge_mac_addr_state state;
125};
126
127struct vxge_config {
128 int tx_pause_enable;
129 int rx_pause_enable;
130
131#define NEW_NAPI_WEIGHT 64
132 int napi_weight;
133#define VXGE_GRO_DONOT_AGGREGATE 0
134#define VXGE_GRO_ALWAYS_AGGREGATE 1
135 int gro_enable;
136 int intr_type;
137#define INTA 0
138#define MSI 1
139#define MSI_X 2
140
141 int addr_learn_en;
142
143 int rth_steering;
144 int rth_algorithm;
145 int rth_hash_type_tcpipv4;
146 int rth_hash_type_ipv4;
147 int rth_hash_type_tcpipv6;
148 int rth_hash_type_ipv6;
149 int rth_hash_type_tcpipv6ex;
150 int rth_hash_type_ipv6ex;
151 int rth_bkt_sz;
152 int rth_jhash_golden_ratio;
153 int tx_steering_type;
154 int fifo_indicate_max_pkts;
155 struct vxge_hw_device_hw_info device_hw_info;
156};
157
158struct vxge_msix_entry {
159	/* Mimicking the kernel's msix_entry struct. */
160 u16 vector;
161 u16 entry;
162 u16 in_use;
163 void *arg;
164};
165
166/* Software Statistics */
167
168struct vxge_sw_stats {
169 /* Network Stats (interface stats) */
170 struct net_device_stats net_stats;
171
172 /* Tx */
173 u64 tx_frms;
174 u64 tx_errors;
175 u64 tx_bytes;
176 u64 txd_not_free;
177 u64 txd_out_of_desc;
178
179 /* Virtual Path */
180 u64 vpaths_open;
181 u64 vpath_open_fail;
182
183 /* Rx */
184 u64 rx_frms;
185 u64 rx_errors;
186 u64 rx_bytes;
187 u64 rx_mcast;
188
189 /* Misc. */
190 u64 link_up;
191 u64 link_down;
192 u64 pci_map_fail;
193 u64 skb_alloc_fail;
194};
195
196struct vxge_mac_addrs {
197 struct list_head item;
198 u64 macaddr;
199 u64 macmask;
200 enum vxge_mac_addr_state state;
201};
202
203struct vxgedev;
204
205struct vxge_fifo_stats {
206 u64 tx_frms;
207 u64 tx_errors;
208 u64 tx_bytes;
209 u64 txd_not_free;
210 u64 txd_out_of_desc;
211 u64 pci_map_fail;
212};
213
214struct vxge_fifo {
215 struct net_device *ndev;
216 struct pci_dev *pdev;
217 struct __vxge_hw_fifo *handle;
218
219 /* The vpath id maintained in the driver -
220 * 0 to 'maximum_vpaths_in_function - 1'
221 */
222 int driver_id;
223 int tx_steering_type;
224 int indicate_max_pkts;
225 spinlock_t tx_lock;
226 /* flag used to maintain queue state when MULTIQ is not enabled */
227#define VPATH_QUEUE_START 0
228#define VPATH_QUEUE_STOP 1
229 int queue_state;
230
231 /* Tx stats */
232 struct vxge_fifo_stats stats;
233} ____cacheline_aligned;
234
235struct vxge_ring_stats {
236 u64 prev_rx_frms;
237 u64 rx_frms;
238 u64 rx_errors;
239 u64 rx_dropped;
240 u64 rx_bytes;
241 u64 rx_mcast;
242 u64 pci_map_fail;
243 u64 skb_alloc_fail;
244};
245
246struct vxge_ring {
247 struct net_device *ndev;
248 struct pci_dev *pdev;
249 struct __vxge_hw_ring *handle;
250 /* The vpath id maintained in the driver -
251 * 0 to 'maximum_vpaths_in_function - 1'
252 */
253 int driver_id;
254
255 /* copy of the flag indicating whether rx_csum is to be used */
256 u32 rx_csum;
257
258 int pkts_processed;
259 int budget;
260 int gro_enable;
261
262 struct napi_struct napi;
263
264#define VXGE_MAX_MAC_ADDR_COUNT 30
265
266 int vlan_tag_strip;
267 struct vlan_group *vlgrp;
268 int rx_vector_no;
269 enum vxge_hw_status last_status;
270
271 /* Rx stats */
272 struct vxge_ring_stats stats;
273} ____cacheline_aligned;
274
275struct vxge_vpath {
276
277 struct vxge_fifo fifo;
278 struct vxge_ring ring;
279
280 struct __vxge_hw_vpath_handle *handle;
281
282 /* Actual vpath id for this vpath in the device - 0 to 16 */
283 int device_id;
284 int max_mac_addr_cnt;
285 int is_configured;
286 int is_open;
287 struct vxgedev *vdev;
 288	u8 macaddr[ETH_ALEN];
 289	u8 macmask[ETH_ALEN];
290
291#define VXGE_MAX_LEARN_MAC_ADDR_CNT 2048
292 /* mac addresses currently programmed into NIC */
293 u16 mac_addr_cnt;
294 u16 mcast_addr_cnt;
295 struct list_head mac_addr_list;
296
297 u32 level_err;
298 u32 level_trace;
299};
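/* Note: the macro below deliberately expands in the caller's scope and
 * assumes a local loop variable 'i' is already declared there.
 */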
 300#define VXGE_COPY_DEBUG_INFO_TO_LL(vdev, err, trace) do {	\
 301	for (i = 0; i < vdev->no_of_vpath; i++) {		\
 302		vdev->vpaths[i].level_err = err;		\
 303		vdev->vpaths[i].level_trace = trace;		\
 304	}							\
 305	vdev->level_err = err;					\
 306	vdev->level_trace = trace;				\
 307} while (0)
308
309struct vxgedev {
310 struct net_device *ndev;
311 struct pci_dev *pdev;
312 struct __vxge_hw_device *devh;
313 struct vlan_group *vlgrp;
314 int vlan_tag_strip;
315 struct vxge_config config;
316 unsigned long state;
317
318 /* Indicates which vpath to reset */
319 unsigned long vp_reset;
320
321 /* Timer used for polling vpath resets */
322 struct timer_list vp_reset_timer;
323
324 /* Timer used for polling vpath lockup */
325 struct timer_list vp_lockup_timer;
326
327 /*
 328	 * Flags to track whether the device is in all-multicast
 329	 * or promiscuous mode.
330 */
331 u16 all_multi_flg;
332
333 /* A flag indicating whether rx_csum is to be used or not. */
334 u32 rx_csum;
335
336 struct vxge_msix_entry *vxge_entries;
337 struct msix_entry *entries;
338 /*
339 * 4 for each vpath * 17;
340 * total is 68
341 */
342#define VXGE_MAX_REQUESTED_MSIX 68
343#define VXGE_INTR_STRLEN 80
344 char desc[VXGE_MAX_REQUESTED_MSIX][VXGE_INTR_STRLEN];
345
346 enum vxge_hw_event cric_err_event;
347
348 int max_vpath_supported;
349 int no_of_vpath;
350
351 struct napi_struct napi;
 352	/* A debug option: when enabled, and if an error condition occurs,
 353	 * the driver will take the following steps:
 354	 * - mask all interrupts
 355	 * - not clear the source of the alarm
 356	 * - gracefully stop all I/O
 357	 * A diagnostic dump of registers and stats at this point
 358	 * reveals very useful information.
359 */
360 int exec_mode;
361 int max_config_port;
362 struct vxge_vpath *vpaths;
363
364 struct __vxge_hw_vpath_handle *vp_handles[VXGE_HW_MAX_VIRTUAL_PATHS];
365 void __iomem *bar0;
366 void __iomem *bar1;
367 struct vxge_sw_stats stats;
368 int mtu;
 369	/* The variables below are used to select a vpath for transmitting a packet */
370 u8 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS];
371 u64 vpaths_deployed;
372
373 u32 intr_cnt;
374 u32 level_err;
375 u32 level_trace;
376 char fw_version[VXGE_HW_FW_STRLEN];
377};
378
379struct vxge_rx_priv {
380 struct sk_buff *skb;
381 dma_addr_t data_dma;
382 dma_addr_t data_size;
383};
384
385struct vxge_tx_priv {
386 struct sk_buff *skb;
387 dma_addr_t dma_buffers[MAX_SKB_FRAGS+1];
388};
389
390#define VXGE_MODULE_PARAM_INT(p, val) \
391 static int p = val; \
392 module_param(p, int, 0)
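/* An illustrative use of the helper above (the parameter name and default
 * shown are only an example, not necessarily one this driver defines):
 *
 *	VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
 *
 * expands to 'static int max_config_port = VXGE_MAX_CONFIG_PORT;' and
 * registers it as a module parameter with no sysfs visibility (perm 0).
 */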
393
 394#define vxge_os_bug(fmt...) do { printk(fmt); BUG(); } while (0)
395
396#define vxge_os_timer(timer, handle, arg, exp) do { \
397 init_timer(&timer); \
398 timer.function = handle; \
399 timer.data = (unsigned long) arg; \
400 mod_timer(&timer, (jiffies + exp)); \
 401	} while (0)
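/* An illustrative use (the handler name and period are assumptions made
 * for the example, not taken from this header):
 *
 *	vxge_os_timer(vdev->vp_reset_timer, vxge_poll_vp_reset, vdev, HZ / 2);
 *
 * initializes the timer, points it at the handler with 'vdev' as the
 * data argument, and arms it to fire half a second from now.
 */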
402
403int __devinit vxge_device_register(struct __vxge_hw_device *devh,
404 struct vxge_config *config,
405 int high_dma, int no_of_vpath,
406 struct vxgedev **vdev);
407
408void vxge_device_unregister(struct __vxge_hw_device *devh);
409
410void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id);
411
412void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id);
413
414void vxge_callback_link_up(struct __vxge_hw_device *devh);
415
416void vxge_callback_link_down(struct __vxge_hw_device *devh);
417
418enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
419 struct macInfo *mac);
420
421int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
422
423int vxge_reset(struct vxgedev *vdev);
424
425enum vxge_hw_status
426vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
427 u8 t_code, void *userdata);
428
429enum vxge_hw_status
430vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
431 enum vxge_hw_fifo_tcode t_code, void *userdata, void **skb_ptr);
432
433int vxge_close(struct net_device *dev);
434
435int vxge_open(struct net_device *dev);
436
437void vxge_close_vpaths(struct vxgedev *vdev, int index);
438
439int vxge_open_vpaths(struct vxgedev *vdev);
440
441enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
442
443void vxge_stop_all_tx_queue(struct vxgedev *vdev);
444
445void vxge_stop_tx_queue(struct vxge_fifo *fifo);
446
447void vxge_start_all_tx_queue(struct vxgedev *vdev);
448
449void vxge_wake_tx_queue(struct vxge_fifo *fifo, struct sk_buff *skb);
450
454enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
455 struct macInfo *mac);
456
457int vxge_mac_list_add(struct vxge_vpath *vpath,
458 struct macInfo *mac);
459
460void vxge_free_mac_add_list(struct vxge_vpath *vpath);
461
462enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
463
464enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
465
466int do_vxge_close(struct net_device *dev, int do_io);
467extern void initialize_ethtool_ops(struct net_device *ndev);
468/**
469 * #define VXGE_DEBUG_INIT: debug for initialization functions
470 * #define VXGE_DEBUG_TX : debug transmit related functions
 471 * #define VXGE_DEBUG_RX : debug receive related functions
472 * #define VXGE_DEBUG_MEM : debug memory module
473 * #define VXGE_DEBUG_LOCK: debug locks
474 * #define VXGE_DEBUG_SEM : debug semaphore
475 * #define VXGE_DEBUG_ENTRYEXIT: debug functions by adding entry exit statements
 476 */
477#define VXGE_DEBUG_INIT 0x00000001
478#define VXGE_DEBUG_TX 0x00000002
479#define VXGE_DEBUG_RX 0x00000004
480#define VXGE_DEBUG_MEM 0x00000008
481#define VXGE_DEBUG_LOCK 0x00000010
482#define VXGE_DEBUG_SEM 0x00000020
483#define VXGE_DEBUG_ENTRYEXIT 0x00000040
484#define VXGE_DEBUG_INTR 0x00000080
485#define VXGE_DEBUG_LL_CONFIG 0x00000100
486
487/* Debug tracing for VXGE driver */
488#ifndef VXGE_DEBUG_MASK
489#define VXGE_DEBUG_MASK 0x0
490#endif
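/* The vxge_debug_*() helpers below compile to nothing unless the
 * corresponding bit is set in VXGE_DEBUG_MASK at build time. For example
 * (an assumed build-time override, not a default shipped here):
 *
 *	#define VXGE_DEBUG_MASK	(VXGE_DEBUG_INIT | VXGE_DEBUG_TX)
 *
 * would enable only the init- and transmit-path tracing macros.
 */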
491
492#if (VXGE_DEBUG_LL_CONFIG & VXGE_DEBUG_MASK)
493#define vxge_debug_ll_config(level, fmt, ...) \
494 vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, __VA_ARGS__)
495#else
496#define vxge_debug_ll_config(level, fmt, ...)
497#endif
498
499#if (VXGE_DEBUG_INIT & VXGE_DEBUG_MASK)
500#define vxge_debug_init(level, fmt, ...) \
501 vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, __VA_ARGS__)
502#else
503#define vxge_debug_init(level, fmt, ...)
504#endif
505
506#if (VXGE_DEBUG_TX & VXGE_DEBUG_MASK)
507#define vxge_debug_tx(level, fmt, ...) \
508 vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, __VA_ARGS__)
509#else
510#define vxge_debug_tx(level, fmt, ...)
511#endif
512
513#if (VXGE_DEBUG_RX & VXGE_DEBUG_MASK)
514#define vxge_debug_rx(level, fmt, ...) \
515 vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, __VA_ARGS__)
516#else
517#define vxge_debug_rx(level, fmt, ...)
518#endif
519
520#if (VXGE_DEBUG_MEM & VXGE_DEBUG_MASK)
521#define vxge_debug_mem(level, fmt, ...) \
522 vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, __VA_ARGS__)
523#else
524#define vxge_debug_mem(level, fmt, ...)
525#endif
526
527#if (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)
528#define vxge_debug_entryexit(level, fmt, ...) \
529 vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, __VA_ARGS__)
530#else
531#define vxge_debug_entryexit(level, fmt, ...)
532#endif
533
534#if (VXGE_DEBUG_INTR & VXGE_DEBUG_MASK)
535#define vxge_debug_intr(level, fmt, ...) \
536 vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, __VA_ARGS__)
537#else
538#define vxge_debug_intr(level, fmt, ...)
539#endif
540
 541#define VXGE_DEVICE_DEBUG_LEVEL_SET(level, mask, vdev) do {	\
 542	vxge_hw_device_debug_set((struct __vxge_hw_device *)vdev->devh, \
 543		level, mask);					\
 544	VXGE_COPY_DEBUG_INFO_TO_LL(vdev,			\
 545		vxge_hw_device_error_level_get((struct __vxge_hw_device *) \
 546			vdev->devh),				\
 547		vxge_hw_device_trace_level_get((struct __vxge_hw_device *) \
 548			vdev->devh));				\
 549} while (0)
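/* Like VXGE_COPY_DEBUG_INFO_TO_LL, which it expands, the macro above
 * relies on a local variable 'i' in the calling function.
 */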
550
551#ifdef NETIF_F_GSO
552#define vxge_tcp_mss(skb) (skb_shinfo(skb)->gso_size)
553#define vxge_udp_mss(skb) (skb_shinfo(skb)->gso_size)
554#define vxge_offload_type(skb) (skb_shinfo(skb)->gso_type)
555#endif
556
557#endif
diff --git a/drivers/net/vxge/vxge-reg.h b/drivers/net/vxge/vxge-reg.h
new file mode 100644
index 000000000000..10f4da32929f
--- /dev/null
+++ b/drivers/net/vxge/vxge-reg.h
@@ -0,0 +1,4608 @@
1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-reg.h: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O Virtualized
11 * Server Adapter.
12 * Copyright(c) 2002-2009 Neterion Inc.
13 ******************************************************************************/
14#ifndef VXGE_REG_H
15#define VXGE_REG_H
16
17/*
18 * vxge_mBIT(loc) - set bit at offset
19 */
20#define vxge_mBIT(loc) (0x8000000000000000ULL >> (loc))
21
22/*
23 * vxge_vBIT(val, loc, sz) - set bits at offset
24 */
25#define vxge_vBIT(val, loc, sz) (((u64)(val)) << (64-(loc)-(sz)))
26#define vxge_vBIT32(val, loc, sz) (((u32)(val)) << (32-(loc)-(sz)))
27
28/*
29 * vxge_bVALn(bits, loc, n) - Get the value of n bits at location
30 */
 31#define vxge_bVALn(bits, loc, n) \
 32	((((u64)(bits)) >> (64 - ((loc) + (n)))) & ((0x1ULL << (n)) - 1))
33
34#define VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(bits) \
35 vxge_bVALn(bits, 0, 16)
36#define VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(bits) \
37 vxge_bVALn(bits, 48, 8)
38#define VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(bits) \
39 vxge_bVALn(bits, 56, 8)
40
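/* A quick worked example of the accessors above. Fields sit in a 64-bit
 * big-endian register word with bit 0 as the most significant bit, so,
 * given a value read from the titan_asic_id register ('asic_id' is just
 * a local u64 used for illustration):
 *
 *	u16 dev_id = VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(asic_id);
 *	u8 major = VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(asic_id);
 *
 * the first expands to vxge_bVALn(asic_id, 0, 16), the top 16 bits, and
 * the second to vxge_bVALn(asic_id, 48, 8), bits 48-55 from the MSB.
 */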
41#define VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(bits) \
42 vxge_bVALn(bits, 3, 5)
43#define VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(bits) \
44 vxge_bVALn(bits, 5, 3)
45#define VXGE_HW_PF_SW_RESET_COMMAND 0xA5
46
47#define VXGE_HW_TITAN_PCICFGMGMT_REG_SPACES 17
48#define VXGE_HW_TITAN_SRPCIM_REG_SPACES 17
49#define VXGE_HW_TITAN_VPMGMT_REG_SPACES 17
50#define VXGE_HW_TITAN_VPATH_REG_SPACES 17
51
52#define VXGE_HW_ASIC_MODE_RESERVED 0
53#define VXGE_HW_ASIC_MODE_NO_IOV 1
54#define VXGE_HW_ASIC_MODE_SR_IOV 2
55#define VXGE_HW_ASIC_MODE_MR_IOV 3
56
57#define VXGE_HW_TXMAC_GEN_CFG1_TMAC_PERMA_STOP_EN vxge_mBIT(3)
58#define VXGE_HW_TXMAC_GEN_CFG1_BLOCK_BCAST_TO_WIRE vxge_mBIT(19)
59#define VXGE_HW_TXMAC_GEN_CFG1_BLOCK_BCAST_TO_SWITCH vxge_mBIT(23)
60#define VXGE_HW_TXMAC_GEN_CFG1_HOST_APPEND_FCS vxge_mBIT(31)
61
62#define VXGE_HW_VPATH_IS_FIRST_GET_VPATH_IS_FIRST(bits) vxge_bVALn(bits, 3, 1)
63
64#define VXGE_HW_TIM_VPATH_ASSIGNMENT_GET_BMAP_ROOT(bits) \
65 vxge_bVALn(bits, 0, 32)
66
67#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN(bits) \
68 vxge_bVALn(bits, 50, 14)
69
70#define VXGE_HW_XMAC_VSPORT_CHOICES_VP_GET_VSPORT_VECTOR(bits) \
71 vxge_bVALn(bits, 0, 17)
72
73#define VXGE_HW_XMAC_VPATH_TO_VSPORT_VPMGMT_CLONE_GET_VSPORT_NUMBER(bits) \
74 vxge_bVALn(bits, 3, 5)
75
76#define VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(bits) \
77 vxge_bVALn(bits, 17, 15)
78
79#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_LEGACY_MODE 0
80#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY 1
81#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_MULTI_OP_MODE 2
82
83#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_MODE_MESSAGES_ONLY 0
84#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_MODE_MULTI_OP_MODE 1
85
86#define VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val) \
 87	((val) & ~VXGE_HW_TOC_KDFC_INITIAL_BIR(7))
88#define VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val) \
89 vxge_bVALn(val, 61, 3)
90#define VXGE_HW_TOC_GET_USDC_INITIAL_OFFSET(val) \
 91	((val) & ~VXGE_HW_TOC_USDC_INITIAL_BIR(7))
92#define VXGE_HW_TOC_GET_USDC_INITIAL_BIR(val) \
93 vxge_bVALn(val, 61, 3)
94
 95#define VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(bits) (bits)
 96#define VXGE_HW_TOC_KDFC_FIFO_STRIDE_GET_TOC_KDFC_FIFO_STRIDE(bits) (bits)
97
98#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_GET_KDFC_RCTR0(bits) \
99 vxge_bVALn(bits, 1, 15)
100#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_GET_KDFC_RCTR1(bits) \
101 vxge_bVALn(bits, 17, 15)
102#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_GET_KDFC_RCTR2(bits) \
103 vxge_bVALn(bits, 33, 15)
104
105#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_VAPTH_NUM(val) vxge_vBIT(val, 42, 5)
106#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_FIFO_NUM(val) vxge_vBIT(val, 47, 2)
107#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_FIFO_OFFSET(val) \
108 vxge_vBIT(val, 49, 15)
109
110#define VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER 0
111#define VXGE_HW_PRC_CFG4_RING_MODE_THREE_BUFFER 1
112#define VXGE_HW_PRC_CFG4_RING_MODE_FIVE_BUFFER 2
113
114#define VXGE_HW_PRC_CFG7_SCATTER_MODE_A 0
115#define VXGE_HW_PRC_CFG7_SCATTER_MODE_B 2
116#define VXGE_HW_PRC_CFG7_SCATTER_MODE_C 1
117
118#define VXGE_HW_RTS_MGR_STEER_CTRL_WE_READ 0
119#define VXGE_HW_RTS_MGR_STEER_CTRL_WE_WRITE 1
120
121#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_DA 0
122#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_VID 1
123#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2
124#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_PN 3
125#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RANGE_PN 4
126#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5
127#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6
128#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7
129#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8
130#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9
131#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_QOS 10
132#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_DS 11
133#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12
134#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_FW_VERSION 13
135
136#define VXGE_HW_RTS_MGR_STEER_DATA0_GET_DA_MAC_ADDR(bits) \
137 vxge_bVALn(bits, 0, 48)
138#define VXGE_HW_RTS_MGR_STEER_DATA0_DA_MAC_ADDR(val) vxge_vBIT(val, 0, 48)
139
140#define VXGE_HW_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_MASK(bits) \
141 vxge_bVALn(bits, 0, 48)
142#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_MASK(val) vxge_vBIT(val, 0, 48)
143#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_ADD_PRIVILEGED_MODE \
144 vxge_mBIT(54)
145#define VXGE_HW_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_ADD_VPATH(bits) \
146 vxge_bVALn(bits, 55, 5)
147#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_ADD_VPATH(val) \
148 vxge_vBIT(val, 55, 5)
149#define VXGE_HW_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_ADD_MODE(bits) \
150 vxge_bVALn(bits, 62, 2)
151#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_MODE(val) vxge_vBIT(val, 62, 2)
152
153#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY 0
154#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY 1
155#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY 2
156#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY 3
157#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY 0
158#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY 1
159#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY 3
160#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL 4
161#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ALL_CLEAR 172
162
163#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA 0
164#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID 1
165#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2
166#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_PN 3
167#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5
168#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6
169#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7
170#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8
171#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9
172#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_QOS 10
173#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DS 11
174#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12
175#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO 13
176
177#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(bits) \
178 vxge_bVALn(bits, 0, 48)
179#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(val) vxge_vBIT(val, 0, 48)
180
181#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(bits) vxge_bVALn(bits, 0, 12)
182#define VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(val) vxge_vBIT(val, 0, 12)
183
184#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_ETYPE(bits) vxge_bVALn(bits, 0, 11)
185#define VXGE_HW_RTS_ACCESS_STEER_DATA0_ETYPE(val) vxge_vBIT(val, 0, 16)
186
187#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_PN_SRC_DEST_SEL(bits) \
188 vxge_bVALn(bits, 3, 1)
189#define VXGE_HW_RTS_ACCESS_STEER_DATA0_PN_SRC_DEST_SEL vxge_mBIT(3)
190#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_PN_TCP_UDP_SEL(bits) \
191 vxge_bVALn(bits, 7, 1)
192#define VXGE_HW_RTS_ACCESS_STEER_DATA0_PN_TCP_UDP_SEL vxge_mBIT(7)
193#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_PN_PORT_NUM(bits) \
194 vxge_bVALn(bits, 8, 16)
195#define VXGE_HW_RTS_ACCESS_STEER_DATA0_PN_PORT_NUM(val) vxge_vBIT(val, 8, 16)
196
197#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_EN(bits) \
198 vxge_bVALn(bits, 3, 1)
199#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN vxge_mBIT(3)
200#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_BUCKET_SIZE(bits) \
201 vxge_bVALn(bits, 4, 4)
202#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(val) \
203 vxge_vBIT(val, 4, 4)
204#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ALG_SEL(bits) \
205 vxge_bVALn(bits, 10, 2)
206#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(val) \
207 vxge_vBIT(val, 10, 2)
208#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL_JENKINS 0
209#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL_MS_RSS 1
210#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL_CRC32C 2
211#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_TCP_IPV4_EN(bits) \
212 vxge_bVALn(bits, 15, 1)
213#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN vxge_mBIT(15)
214#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_IPV4_EN(bits) \
215 vxge_bVALn(bits, 19, 1)
216#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN vxge_mBIT(19)
217#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_TCP_IPV6_EN(bits) \
218 vxge_bVALn(bits, 23, 1)
219#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN vxge_mBIT(23)
220#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_IPV6_EN(bits) \
221 vxge_bVALn(bits, 27, 1)
222#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN vxge_mBIT(27)
223#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_TCP_IPV6_EX_EN(bits) \
224 vxge_bVALn(bits, 31, 1)
225#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN vxge_mBIT(31)
226#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_IPV6_EX_EN(bits) \
227 vxge_bVALn(bits, 35, 1)
228#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN vxge_mBIT(35)
229#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(bits) \
230 vxge_bVALn(bits, 39, 1)
231#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE vxge_mBIT(39)
232#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_REPL_ENTRY_EN(bits) \
233 vxge_bVALn(bits, 43, 1)
234#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_REPL_ENTRY_EN vxge_mBIT(43)
235
236#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_SOLO_IT_ENTRY_EN(bits) \
237 vxge_bVALn(bits, 3, 1)
238#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN vxge_mBIT(3)
239#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_SOLO_IT_BUCKET_DATA(bits) \
240 vxge_bVALn(bits, 9, 7)
241#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(val) \
242 vxge_vBIT(val, 9, 7)
243
244#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM0_BUCKET_NUM(bits) \
245 vxge_bVALn(bits, 0, 8)
246#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(val) \
247 vxge_vBIT(val, 0, 8)
248#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM0_ENTRY_EN(bits) \
249 vxge_bVALn(bits, 8, 1)
250#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN vxge_mBIT(8)
251#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM0_BUCKET_DATA(bits) \
252 vxge_bVALn(bits, 9, 7)
253#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(val) \
254 vxge_vBIT(val, 9, 7)
255#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM1_BUCKET_NUM(bits) \
256 vxge_bVALn(bits, 16, 8)
257#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(val) \
258 vxge_vBIT(val, 16, 8)
259#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM1_ENTRY_EN(bits) \
260 vxge_bVALn(bits, 24, 1)
261#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN vxge_mBIT(24)
262#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM1_BUCKET_DATA(bits) \
263 vxge_bVALn(bits, 25, 7)
264#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(val) \
265 vxge_vBIT(val, 25, 7)
266#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM0_BUCKET_NUM(bits) \
267 vxge_bVALn(bits, 0, 8)
268#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(val) \
269 vxge_vBIT(val, 0, 8)
270#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM0_ENTRY_EN(bits) \
271 vxge_bVALn(bits, 8, 1)
272#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN vxge_mBIT(8)
273#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM0_BUCKET_DATA(bits) \
274 vxge_bVALn(bits, 9, 7)
275#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(val) \
276 vxge_vBIT(val, 9, 7)
277#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM1_BUCKET_NUM(bits) \
278 vxge_bVALn(bits, 16, 8)
279#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(val) \
280 vxge_vBIT(val, 16, 8)
281#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM1_ENTRY_EN(bits) \
282 vxge_bVALn(bits, 24, 1)
283#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN vxge_mBIT(24)
284#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM1_BUCKET_DATA(bits) \
285 vxge_bVALn(bits, 25, 7)
286#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(val) \
287 vxge_vBIT(val, 25, 7)
288
289#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_JHASH_CFG_GOLDEN_RATIO(bits) \
290 vxge_bVALn(bits, 0, 32)
291#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_JHASH_CFG_GOLDEN_RATIO(val) \
292 vxge_vBIT(val, 0, 32)
293#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_JHASH_CFG_INIT_VALUE(bits) \
294 vxge_bVALn(bits, 32, 32)
295#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_JHASH_CFG_INIT_VALUE(val) \
296 vxge_vBIT(val, 32, 32)
297
298#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV6_SA_MASK(bits) \
299 vxge_bVALn(bits, 0, 16)
300#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV6_SA_MASK(val) \
301 vxge_vBIT(val, 0, 16)
302#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV6_DA_MASK(bits) \
303 vxge_bVALn(bits, 16, 16)
304#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV6_DA_MASK(val) \
305 vxge_vBIT(val, 16, 16)
306#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV4_SA_MASK(bits) \
307 vxge_bVALn(bits, 32, 4)
308#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV4_SA_MASK(val) \
309 vxge_vBIT(val, 32, 4)
310#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV4_DA_MASK(bits) \
311 vxge_bVALn(bits, 36, 4)
312#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV4_DA_MASK(val) \
313 vxge_vBIT(val, 36, 4)
314#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_L4SP_MASK(bits) \
315 vxge_bVALn(bits, 40, 2)
316#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_L4SP_MASK(val) \
317 vxge_vBIT(val, 40, 2)
318#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_L4DP_MASK(bits) \
319 vxge_bVALn(bits, 42, 2)
320#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_L4DP_MASK(val) \
321 vxge_vBIT(val, 42, 2)
322
323#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_KEY_KEY(bits) \
324 vxge_bVALn(bits, 0, 64)
 325#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_KEY_KEY(val) vxge_vBIT(val, 0, 64)
326
327#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_QOS_ENTRY_EN(bits) \
328 vxge_bVALn(bits, 3, 1)
329#define VXGE_HW_RTS_ACCESS_STEER_DATA0_QOS_ENTRY_EN vxge_mBIT(3)
330
331#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DS_ENTRY_EN(bits) \
332 vxge_bVALn(bits, 3, 1)
333#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DS_ENTRY_EN vxge_mBIT(3)
334
335#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(bits) \
336 vxge_bVALn(bits, 0, 48)
337#define VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(val) \
338 vxge_vBIT(val, 0, 48)
339#define VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(val) \
340 vxge_vBIT(val, 62, 2)
341
342#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM4_BUCKET_NUM(bits) \
343 vxge_bVALn(bits, 0, 8)
344#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM4_BUCKET_NUM(val) \
345 vxge_vBIT(val, 0, 8)
346#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM4_ENTRY_EN(bits) \
347 vxge_bVALn(bits, 8, 1)
348#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM4_ENTRY_EN vxge_mBIT(8)
349#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM4_BUCKET_DATA(bits) \
350 vxge_bVALn(bits, 9, 7)
351#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM4_BUCKET_DATA(val) \
352 vxge_vBIT(val, 9, 7)
353#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM5_BUCKET_NUM(bits) \
354 vxge_bVALn(bits, 16, 8)
355#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM5_BUCKET_NUM(val) \
356 vxge_vBIT(val, 16, 8)
357#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM5_ENTRY_EN(bits) \
358 vxge_bVALn(bits, 24, 1)
359#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM5_ENTRY_EN vxge_mBIT(24)
360#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM5_BUCKET_DATA(bits) \
361 vxge_bVALn(bits, 25, 7)
362#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM5_BUCKET_DATA(val) \
363 vxge_vBIT(val, 25, 7)
364#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM6_BUCKET_NUM(bits) \
365 vxge_bVALn(bits, 32, 8)
366#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM6_BUCKET_NUM(val) \
367 vxge_vBIT(val, 32, 8)
368#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM6_ENTRY_EN(bits) \
369 vxge_bVALn(bits, 40, 1)
370#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM6_ENTRY_EN vxge_mBIT(40)
371#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM6_BUCKET_DATA(bits) \
372 vxge_bVALn(bits, 41, 7)
373#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM6_BUCKET_DATA(val) \
374 vxge_vBIT(val, 41, 7)
375#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM7_BUCKET_NUM(bits) \
376 vxge_bVALn(bits, 48, 8)
377#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM7_BUCKET_NUM(val) \
378 vxge_vBIT(val, 48, 8)
379#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM7_ENTRY_EN(bits) \
380 vxge_bVALn(bits, 56, 1)
381#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM7_ENTRY_EN vxge_mBIT(56)
382#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM7_BUCKET_DATA(bits) \
383 vxge_bVALn(bits, 57, 7)
384#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM7_BUCKET_DATA(val) \
385 vxge_vBIT(val, 57, 7)
386
387#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER 0
388#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER 1
389#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_VERSION 2
390#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE 3
391#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0 4
392#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_1 5
393#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_2 6
394#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3 7
395
396#define VXGE_HW_RTS_ACCESS_STEER_DATA0_LED_CONTROL_ON 1
397#define VXGE_HW_RTS_ACCESS_STEER_DATA0_LED_CONTROL_OFF 0
398
399#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(bits) \
400 vxge_bVALn(bits, 0, 8)
401#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_DAY(val) vxge_vBIT(val, 0, 8)
402#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(bits) \
403 vxge_bVALn(bits, 8, 8)
404#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_MONTH(val) vxge_vBIT(val, 8, 8)
405#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(bits) \
406 vxge_bVALn(bits, 16, 16)
407#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_YEAR(val) \
408 vxge_vBIT(val, 16, 16)
409
410#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(bits) \
411 vxge_bVALn(bits, 32, 8)
 412#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_MAJOR(val) vxge_vBIT(val, 32, 8)
413#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(bits) \
414 vxge_bVALn(bits, 40, 8)
 415#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_MINOR(val) vxge_vBIT(val, 40, 8)
416#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(bits) \
417 vxge_bVALn(bits, 48, 16)
 418#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_BUILD(val) vxge_vBIT(val, 48, 16)
419
420#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(bits) \
421 vxge_bVALn(bits, 0, 8)
422#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_DAY(val) vxge_vBIT(val, 0, 8)
423#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(bits) \
424 vxge_bVALn(bits, 8, 8)
425#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_MONTH(val) vxge_vBIT(val, 8, 8)
426#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(bits) \
427 vxge_bVALn(bits, 16, 16)
428#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_YEAR(val) \
429 vxge_vBIT(val, 16, 16)
430
431#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(bits) \
432 vxge_bVALn(bits, 32, 8)
 433#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_MAJOR(val) vxge_vBIT(val, 32, 8)
434#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(bits) \
435 vxge_bVALn(bits, 40, 8)
 436#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_MINOR(val) vxge_vBIT(val, 40, 8)
437#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(bits) \
438 vxge_bVALn(bits, 48, 16)
 439#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_BUILD(val) vxge_vBIT(val, 48, 16)
440
441#define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_GET_PPIF_SRPCIM_TO_VPATH_ALARM(bits)\
442 vxge_bVALn(bits, 0, 18)
443
444#define VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(bits) \
445 vxge_bVALn(bits, 48, 16)
446#define VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(bits) \
447 vxge_bVALn(bits, 32, 32)
448#define VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(bits) vxge_bVALn(bits, 48, 16)
449#define VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(bits) \
450 vxge_bVALn(bits, 0, 32)
451#define VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(bits) \
452 vxge_bVALn(bits, 0, 32)
453#define VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(bits) \
454 vxge_bVALn(bits, 0, 32)
455#define VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(bits) (bits)
456#define VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(bits) (bits)
457#define VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(bits) \
458 vxge_bVALn(bits, 32, 32)
459#define VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(bits) \
460 vxge_bVALn(bits, 32, 32)
461#define VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(bits) \
462 vxge_bVALn(bits, 0, 32)
463#define VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(bits) \
464 vxge_bVALn(bits, 32, 32)
465#define VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(bits) \
466 vxge_bVALn(bits, 0, 32)
467#define VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(bits) \
468 vxge_bVALn(bits, 32, 32)
469#define VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(bits) \
470 vxge_bVALn(bits, 0, 32)
471#define VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(bits) \
472 vxge_bVALn(bits, 32, 32)
473#define VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(bits\
474) vxge_bVALn(bits, 48, 16)
475#define VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(bits) vxge_bVALn(bits, 0, 16)
476#define VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(bits) \
477 vxge_bVALn(bits, 16, 16)
478#define VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(bits) \
479 vxge_bVALn(bits, 32, 16)
480#define VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(bits) vxge_bVALn(bits, 0, 16)
481#define VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(bits) \
482 vxge_bVALn(bits, 16, 16)
483#define VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(bits) \
484 vxge_bVALn(bits, 32, 16)
485
486#define VXGE_HW_MRPCIM_DEBUG_STATS0_GET_INI_WR_DROP(bits) \
487 vxge_bVALn(bits, 0, 32)
488#define VXGE_HW_MRPCIM_DEBUG_STATS0_GET_INI_RD_DROP(bits) \
489 vxge_bVALn(bits, 32, 32)
490#define VXGE_HW_MRPCIM_DEBUG_STATS1_GET_VPLANE_WRCRDTARB_PH_CRDT_DEPLETED(bits\
491) vxge_bVALn(bits, 32, 32)
492#define VXGE_HW_MRPCIM_DEBUG_STATS2_GET_VPLANE_WRCRDTARB_PD_CRDT_DEPLETED(bits\
493) vxge_bVALn(bits, 32, 32)
494#define \
495VXGE_HW_MRPCIM_DEBUG_STATS3_GET_VPLANE_RDCRDTARB_NPH_CRDT_DEPLETED(bits) \
496 vxge_bVALn(bits, 32, 32)
497#define VXGE_HW_MRPCIM_DEBUG_STATS4_GET_INI_WR_VPIN_DROP(bits) \
498 vxge_bVALn(bits, 0, 32)
499#define VXGE_HW_MRPCIM_DEBUG_STATS4_GET_INI_RD_VPIN_DROP(bits) \
500 vxge_bVALn(bits, 32, 32)
501#define VXGE_HW_GENSTATS_COUNT01_GET_GENSTATS_COUNT1(bits) \
502 vxge_bVALn(bits, 0, 32)
503#define VXGE_HW_GENSTATS_COUNT01_GET_GENSTATS_COUNT0(bits) \
504 vxge_bVALn(bits, 32, 32)
505#define VXGE_HW_GENSTATS_COUNT23_GET_GENSTATS_COUNT3(bits) \
506 vxge_bVALn(bits, 0, 32)
507#define VXGE_HW_GENSTATS_COUNT23_GET_GENSTATS_COUNT2(bits) \
508 vxge_bVALn(bits, 32, 32)
509#define VXGE_HW_GENSTATS_COUNT4_GET_GENSTATS_COUNT4(bits) \
510 vxge_bVALn(bits, 32, 32)
511#define VXGE_HW_GENSTATS_COUNT5_GET_GENSTATS_COUNT5(bits) \
512 vxge_bVALn(bits, 32, 32)
513
514#define VXGE_HW_DEBUG_STATS0_GET_RSTDROP_MSG(bits) vxge_bVALn(bits, 0, 32)
515#define VXGE_HW_DEBUG_STATS0_GET_RSTDROP_CPL(bits) vxge_bVALn(bits, 32, 32)
516#define VXGE_HW_DEBUG_STATS1_GET_RSTDROP_CLIENT0(bits) vxge_bVALn(bits, 0, 32)
517#define VXGE_HW_DEBUG_STATS1_GET_RSTDROP_CLIENT1(bits) vxge_bVALn(bits, 32, 32)
518#define VXGE_HW_DEBUG_STATS2_GET_RSTDROP_CLIENT2(bits) vxge_bVALn(bits, 0, 32)
519#define VXGE_HW_DEBUG_STATS3_GET_VPLANE_DEPL_PH(bits) vxge_bVALn(bits, 0, 16)
520#define VXGE_HW_DEBUG_STATS3_GET_VPLANE_DEPL_NPH(bits) vxge_bVALn(bits, 16, 16)
521#define VXGE_HW_DEBUG_STATS3_GET_VPLANE_DEPL_CPLH(bits) vxge_bVALn(bits, 32, 16)
522#define VXGE_HW_DEBUG_STATS4_GET_VPLANE_DEPL_PD(bits) vxge_bVALn(bits, 0, 16)
 523#define VXGE_HW_DEBUG_STATS4_GET_VPLANE_DEPL_NPD(bits) vxge_bVALn(bits, 16, 16)
524#define VXGE_HW_DEBUG_STATS4_GET_VPLANE_DEPL_CPLD(bits) vxge_bVALn(bits, 32, 16)
525
526#define VXGE_HW_DBG_STATS_TPA_TX_PATH_GET_TX_PERMITTED_FRMS(bits) \
527 vxge_bVALn(bits, 32, 32)
528
529#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_GET_PORT0_TX_ANY_FRMS(bits) \
530 vxge_bVALn(bits, 0, 8)
531#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_GET_PORT1_TX_ANY_FRMS(bits) \
532 vxge_bVALn(bits, 8, 8)
533#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_GET_PORT2_TX_ANY_FRMS(bits) \
534 vxge_bVALn(bits, 16, 8)
535
536#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_GET_PORT0_RX_ANY_FRMS(bits) \
537 vxge_bVALn(bits, 0, 8)
538#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_GET_PORT1_RX_ANY_FRMS(bits) \
539 vxge_bVALn(bits, 8, 8)
540#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_GET_PORT2_RX_ANY_FRMS(bits) \
541 vxge_bVALn(bits, 16, 8)
542
543#define VXGE_HW_CONFIG_PRIV_H
544
545#define VXGE_HW_SWAPPER_INITIAL_VALUE 0x0123456789abcdefULL
546#define VXGE_HW_SWAPPER_BYTE_SWAPPED 0xefcdab8967452301ULL
547#define VXGE_HW_SWAPPER_BIT_FLIPPED 0x80c4a2e691d5b3f7ULL
548#define VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED 0xf7b3d591e6a2c480ULL
549
550#define VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
551#define VXGE_HW_SWAPPER_READ_BYTE_SWAP_DISABLE 0x0000000000000000ULL
552
553#define VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
554#define VXGE_HW_SWAPPER_READ_BIT_FLAP_DISABLE 0x0000000000000000ULL
555
556#define VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
557#define VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_DISABLE 0x0000000000000000ULL
558
559#define VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
560#define VXGE_HW_SWAPPER_WRITE_BIT_FLAP_DISABLE 0x0000000000000000ULL
561
562/*
 563 * The registers are memory mapped and use native big-endian byte order.
 564 * Little-endian hosts are handled by enabling hardware byte-swapping for
 565 * register and DMA operations.
566 */
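/* A sketch of that setup, assuming the usual pattern of writing the
 * all-ones enable masks defined above (the exact bring-up sequence lives
 * in the HW-config code, not in this header): on a little-endian host the
 * driver would write VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE to
 * pifm_rd_swap_en and VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE to
 * pifm_wr_swap_en, then verify the swapper by checking that
 * toc_swapper_fb reads back as VXGE_HW_SWAPPER_INITIAL_VALUE.
 */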
567struct vxge_hw_legacy_reg {
568
569 u8 unused00010[0x00010];
570
571/*0x00010*/ u64 toc_swapper_fb;
572#define VXGE_HW_TOC_SWAPPER_FB_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
573/*0x00018*/ u64 pifm_rd_swap_en;
574#define VXGE_HW_PIFM_RD_SWAP_EN_PIFM_RD_SWAP_EN(val) vxge_vBIT(val, 0, 64)
575/*0x00020*/ u64 pifm_rd_flip_en;
576#define VXGE_HW_PIFM_RD_FLIP_EN_PIFM_RD_FLIP_EN(val) vxge_vBIT(val, 0, 64)
577/*0x00028*/ u64 pifm_wr_swap_en;
578#define VXGE_HW_PIFM_WR_SWAP_EN_PIFM_WR_SWAP_EN(val) vxge_vBIT(val, 0, 64)
579/*0x00030*/ u64 pifm_wr_flip_en;
580#define VXGE_HW_PIFM_WR_FLIP_EN_PIFM_WR_FLIP_EN(val) vxge_vBIT(val, 0, 64)
581/*0x00038*/ u64 toc_first_pointer;
582#define VXGE_HW_TOC_FIRST_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
583/*0x00040*/ u64 host_access_en;
584#define VXGE_HW_HOST_ACCESS_EN_HOST_ACCESS_EN(val) vxge_vBIT(val, 0, 64)
585
586} __packed;
587
588struct vxge_hw_toc_reg {
589
590 u8 unused00050[0x00050];
591
592/*0x00050*/ u64 toc_common_pointer;
593#define VXGE_HW_TOC_COMMON_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
594/*0x00058*/ u64 toc_memrepair_pointer;
595#define VXGE_HW_TOC_MEMREPAIR_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
596/*0x00060*/ u64 toc_pcicfgmgmt_pointer[17];
597#define VXGE_HW_TOC_PCICFGMGMT_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
598 u8 unused001e0[0x001e0-0x000e8];
599
600/*0x001e0*/ u64 toc_mrpcim_pointer;
601#define VXGE_HW_TOC_MRPCIM_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
602/*0x001e8*/ u64 toc_srpcim_pointer[17];
603#define VXGE_HW_TOC_SRPCIM_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
604 u8 unused00278[0x00278-0x00270];
605
606/*0x00278*/ u64 toc_vpmgmt_pointer[17];
607#define VXGE_HW_TOC_VPMGMT_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
608 u8 unused00390[0x00390-0x00300];
609
610/*0x00390*/ u64 toc_vpath_pointer[17];
611#define VXGE_HW_TOC_VPATH_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
612 u8 unused004a0[0x004a0-0x00418];
613
614/*0x004a0*/ u64 toc_kdfc;
615#define VXGE_HW_TOC_KDFC_INITIAL_OFFSET(val) vxge_vBIT(val, 0, 61)
616#define VXGE_HW_TOC_KDFC_INITIAL_BIR(val) vxge_vBIT(val, 61, 3)
617/*0x004a8*/ u64 toc_usdc;
618#define VXGE_HW_TOC_USDC_INITIAL_OFFSET(val) vxge_vBIT(val, 0, 61)
619#define VXGE_HW_TOC_USDC_INITIAL_BIR(val) vxge_vBIT(val, 61, 3)
620/*0x004b0*/ u64 toc_kdfc_vpath_stride;
621#define VXGE_HW_TOC_KDFC_VPATH_STRIDE_INITIAL_TOC_KDFC_VPATH_STRIDE(val) \
622 vxge_vBIT(val, 0, 64)
623/*0x004b8*/ u64 toc_kdfc_fifo_stride;
624#define VXGE_HW_TOC_KDFC_FIFO_STRIDE_INITIAL_TOC_KDFC_FIFO_STRIDE(val) \
625 vxge_vBIT(val, 0, 64)
626
627} __packed;
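/* How the table of contents is presumably used (an interpretation of the
 * layout, not documented in this header): the legacy register space
 * supplies toc_first_pointer, which locates this structure in BAR0; each
 * toc_*_pointer entry above then holds the offset of the corresponding
 * register block, e.g. toc_vpath_pointer[i] for the i-th of the 17 vpath
 * register spaces.
 */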
628
629struct vxge_hw_common_reg {
630
631 u8 unused00a00[0x00a00];
632
633/*0x00a00*/ u64 prc_status1;
634#define VXGE_HW_PRC_STATUS1_PRC_VP_QUIESCENT(n) vxge_mBIT(n)
635/*0x00a08*/ u64 rxdcm_reset_in_progress;
636#define VXGE_HW_RXDCM_RESET_IN_PROGRESS_PRC_VP(n) vxge_mBIT(n)
637/*0x00a10*/ u64 replicq_flush_in_progress;
638#define VXGE_HW_REPLICQ_FLUSH_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
639/*0x00a18*/ u64 rxpe_cmds_reset_in_progress;
640#define VXGE_HW_RXPE_CMDS_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
641/*0x00a20*/ u64 mxp_cmds_reset_in_progress;
642#define VXGE_HW_MXP_CMDS_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
643/*0x00a28*/ u64 noffload_reset_in_progress;
644#define VXGE_HW_NOFFLOAD_RESET_IN_PROGRESS_PRC_VP(n) vxge_mBIT(n)
645/*0x00a30*/ u64 rd_req_in_progress;
646#define VXGE_HW_RD_REQ_IN_PROGRESS_VP(n) vxge_mBIT(n)
647/*0x00a38*/ u64 rd_req_outstanding;
648#define VXGE_HW_RD_REQ_OUTSTANDING_VP(n) vxge_mBIT(n)
649/*0x00a40*/ u64 kdfc_reset_in_progress;
650#define VXGE_HW_KDFC_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
651 u8 unused00b00[0x00b00-0x00a48];
652
653/*0x00b00*/ u64 one_cfg_vp;
654#define VXGE_HW_ONE_CFG_VP_RDY(n) vxge_mBIT(n)
655/*0x00b08*/ u64 one_common;
656#define VXGE_HW_ONE_COMMON_PET_VPATH_RESET_IN_PROGRESS(n) vxge_mBIT(n)
657 u8 unused00b80[0x00b80-0x00b10];
658
659/*0x00b80*/ u64 tim_int_en;
660#define VXGE_HW_TIM_INT_EN_TIM_VP(n) vxge_mBIT(n)
661/*0x00b88*/ u64 tim_set_int_en;
662#define VXGE_HW_TIM_SET_INT_EN_VP(n) vxge_mBIT(n)
663/*0x00b90*/ u64 tim_clr_int_en;
664#define VXGE_HW_TIM_CLR_INT_EN_VP(n) vxge_mBIT(n)
665/*0x00b98*/ u64 tim_mask_int_during_reset;
666#define VXGE_HW_TIM_MASK_INT_DURING_RESET_VPATH(n) vxge_mBIT(n)
667/*0x00ba0*/ u64 tim_reset_in_progress;
668#define VXGE_HW_TIM_RESET_IN_PROGRESS_TIM_VPATH(n) vxge_mBIT(n)
669/*0x00ba8*/ u64 tim_outstanding_bmap;
670#define VXGE_HW_TIM_OUTSTANDING_BMAP_TIM_VPATH(n) vxge_mBIT(n)
671 u8 unused00c00[0x00c00-0x00bb0];
672
673/*0x00c00*/ u64 msg_reset_in_progress;
674#define VXGE_HW_MSG_RESET_IN_PROGRESS_MSG_COMPOSITE(val) vxge_vBIT(val, 0, 17)
675/*0x00c08*/ u64 msg_mxp_mr_ready;
676#define VXGE_HW_MSG_MXP_MR_READY_MP_BOOTED(n) vxge_mBIT(n)
677/*0x00c10*/ u64 msg_uxp_mr_ready;
678#define VXGE_HW_MSG_UXP_MR_READY_UP_BOOTED(n) vxge_mBIT(n)
679/*0x00c18*/ u64 msg_dmq_noni_rtl_prefetch;
680#define VXGE_HW_MSG_DMQ_NONI_RTL_PREFETCH_BYPASS_ENABLE(n) vxge_mBIT(n)
681/*0x00c20*/ u64 msg_umq_rtl_bwr;
682#define VXGE_HW_MSG_UMQ_RTL_BWR_PREFETCH_DISABLE(n) vxge_mBIT(n)
683 u8 unused00d00[0x00d00-0x00c28];
684
685/*0x00d00*/ u64 cmn_rsthdlr_cfg0;
686#define VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(val) vxge_vBIT(val, 0, 17)
687/*0x00d08*/ u64 cmn_rsthdlr_cfg1;
688#define VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(val) vxge_vBIT(val, 0, 17)
689/*0x00d10*/ u64 cmn_rsthdlr_cfg2;
690#define VXGE_HW_CMN_RSTHDLR_CFG2_SW_RESET_FIFO0(val) vxge_vBIT(val, 0, 17)
691/*0x00d18*/ u64 cmn_rsthdlr_cfg3;
692#define VXGE_HW_CMN_RSTHDLR_CFG3_SW_RESET_FIFO1(val) vxge_vBIT(val, 0, 17)
693/*0x00d20*/ u64 cmn_rsthdlr_cfg4;
694#define VXGE_HW_CMN_RSTHDLR_CFG4_SW_RESET_FIFO2(val) vxge_vBIT(val, 0, 17)
695 u8 unused00d40[0x00d40-0x00d28];
696
697/*0x00d40*/ u64 cmn_rsthdlr_cfg8;
698#define VXGE_HW_CMN_RSTHDLR_CFG8_INCR_VPATH_INST_NUM(val) vxge_vBIT(val, 0, 17)
699/*0x00d48*/ u64 stats_cfg0;
700#define VXGE_HW_STATS_CFG0_STATS_ENABLE(val) vxge_vBIT(val, 0, 17)
701 u8 unused00da8[0x00da8-0x00d50];
702
703/*0x00da8*/ u64 clear_msix_mask_vect[4];
704#define VXGE_HW_CLEAR_MSIX_MASK_VECT_CLEAR_MSIX_MASK_VECT(val) \
705 vxge_vBIT(val, 0, 17)
706/*0x00dc8*/ u64 set_msix_mask_vect[4];
707#define VXGE_HW_SET_MSIX_MASK_VECT_SET_MSIX_MASK_VECT(val) vxge_vBIT(val, 0, 17)
708/*0x00de8*/ u64 clear_msix_mask_all_vect;
709#define VXGE_HW_CLEAR_MSIX_MASK_ALL_VECT_CLEAR_MSIX_MASK_ALL_VECT(val) \
710 vxge_vBIT(val, 0, 17)
711/*0x00df0*/ u64 set_msix_mask_all_vect;
712#define VXGE_HW_SET_MSIX_MASK_ALL_VECT_SET_MSIX_MASK_ALL_VECT(val) \
713 vxge_vBIT(val, 0, 17)
714/*0x00df8*/ u64 mask_vector[4];
715#define VXGE_HW_MASK_VECTOR_MASK_VECTOR(val) vxge_vBIT(val, 0, 17)
716/*0x00e18*/ u64 msix_pending_vector[4];
717#define VXGE_HW_MSIX_PENDING_VECTOR_MSIX_PENDING_VECTOR(val) \
718 vxge_vBIT(val, 0, 17)
719/*0x00e38*/ u64 clr_msix_one_shot_vec[4];
720#define VXGE_HW_CLR_MSIX_ONE_SHOT_VEC_CLR_MSIX_ONE_SHOT_VEC(val) \
721 vxge_vBIT(val, 0, 17)
722/*0x00e58*/ u64 titan_asic_id;
723#define VXGE_HW_TITAN_ASIC_ID_INITIAL_DEVICE_ID(val) vxge_vBIT(val, 0, 16)
724#define VXGE_HW_TITAN_ASIC_ID_INITIAL_MAJOR_REVISION(val) vxge_vBIT(val, 48, 8)
725#define VXGE_HW_TITAN_ASIC_ID_INITIAL_MINOR_REVISION(val) vxge_vBIT(val, 56, 8)
726/*0x00e60*/ u64 titan_general_int_status;
727#define VXGE_HW_TITAN_GENERAL_INT_STATUS_MRPCIM_ALARM_INT vxge_mBIT(0)
728#define VXGE_HW_TITAN_GENERAL_INT_STATUS_SRPCIM_ALARM_INT vxge_mBIT(1)
729#define VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT vxge_mBIT(2)
730#define VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(val) \
731 vxge_vBIT(val, 3, 17)
732 u8 unused00e70[0x00e70-0x00e68];
733
734/*0x00e70*/ u64 titan_mask_all_int;
735#define VXGE_HW_TITAN_MASK_ALL_INT_ALARM vxge_mBIT(7)
736#define VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC vxge_mBIT(15)
737 u8 unused00e80[0x00e80-0x00e78];
738
739/*0x00e80*/ u64 tim_int_status0;
740#define VXGE_HW_TIM_INT_STATUS0_TIM_INT_STATUS0(val) vxge_vBIT(val, 0, 64)
741/*0x00e88*/ u64 tim_int_mask0;
742#define VXGE_HW_TIM_INT_MASK0_TIM_INT_MASK0(val) vxge_vBIT(val, 0, 64)
743/*0x00e90*/ u64 tim_int_status1;
744#define VXGE_HW_TIM_INT_STATUS1_TIM_INT_STATUS1(val) vxge_vBIT(val, 0, 4)
745/*0x00e98*/ u64 tim_int_mask1;
746#define VXGE_HW_TIM_INT_MASK1_TIM_INT_MASK1(val) vxge_vBIT(val, 0, 4)
747/*0x00ea0*/ u64 rti_int_status;
748#define VXGE_HW_RTI_INT_STATUS_RTI_INT_STATUS(val) vxge_vBIT(val, 0, 17)
749/*0x00ea8*/ u64 rti_int_mask;
750#define VXGE_HW_RTI_INT_MASK_RTI_INT_MASK(val) vxge_vBIT(val, 0, 17)
751/*0x00eb0*/ u64 adapter_status;
752#define VXGE_HW_ADAPTER_STATUS_RTDMA_RTDMA_READY vxge_mBIT(0)
753#define VXGE_HW_ADAPTER_STATUS_WRDMA_WRDMA_READY vxge_mBIT(1)
754#define VXGE_HW_ADAPTER_STATUS_KDFC_KDFC_READY vxge_mBIT(2)
755#define VXGE_HW_ADAPTER_STATUS_TPA_TMAC_BUF_EMPTY vxge_mBIT(3)
756#define VXGE_HW_ADAPTER_STATUS_RDCTL_PIC_QUIESCENT vxge_mBIT(4)
757#define VXGE_HW_ADAPTER_STATUS_XGMAC_NETWORK_FAULT vxge_mBIT(5)
758#define VXGE_HW_ADAPTER_STATUS_ROCRC_OFFLOAD_QUIESCENT vxge_mBIT(6)
759#define VXGE_HW_ADAPTER_STATUS_G3IF_FB_G3IF_FB_GDDR3_READY vxge_mBIT(7)
760#define VXGE_HW_ADAPTER_STATUS_G3IF_CM_G3IF_CM_GDDR3_READY vxge_mBIT(8)
761#define VXGE_HW_ADAPTER_STATUS_RIC_RIC_RUNNING vxge_mBIT(9)
762#define VXGE_HW_ADAPTER_STATUS_CMG_C_PLL_IN_LOCK vxge_mBIT(10)
763#define VXGE_HW_ADAPTER_STATUS_XGMAC_X_PLL_IN_LOCK vxge_mBIT(11)
764#define VXGE_HW_ADAPTER_STATUS_FBIF_M_PLL_IN_LOCK vxge_mBIT(12)
765#define VXGE_HW_ADAPTER_STATUS_PCC_PCC_IDLE(val) vxge_vBIT(val, 24, 8)
766#define VXGE_HW_ADAPTER_STATUS_ROCRC_RC_PRC_QUIESCENT(val) vxge_vBIT(val, 44, 8)
767/*0x00eb8*/ u64 gen_ctrl;
768#define VXGE_HW_GEN_CTRL_SPI_MRPCIM_WR_DIS vxge_mBIT(0)
769#define VXGE_HW_GEN_CTRL_SPI_MRPCIM_RD_DIS vxge_mBIT(1)
770#define VXGE_HW_GEN_CTRL_SPI_SRPCIM_WR_DIS vxge_mBIT(2)
771#define VXGE_HW_GEN_CTRL_SPI_SRPCIM_RD_DIS vxge_mBIT(3)
772#define VXGE_HW_GEN_CTRL_SPI_DEBUG_DIS vxge_mBIT(4)
773#define VXGE_HW_GEN_CTRL_SPI_APP_LTSSM_TIMER_DIS vxge_mBIT(5)
774#define VXGE_HW_GEN_CTRL_SPI_NOT_USED(val) vxge_vBIT(val, 6, 4)
775 u8 unused00ed0[0x00ed0-0x00ec0];
776
777/*0x00ed0*/ u64 adapter_ready;
778#define VXGE_HW_ADAPTER_READY_ADAPTER_READY vxge_mBIT(63)
779/*0x00ed8*/ u64 outstanding_read;
780#define VXGE_HW_OUTSTANDING_READ_OUTSTANDING_READ(val) vxge_vBIT(val, 0, 17)
781/*0x00ee0*/ u64 vpath_rst_in_prog;
782#define VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(val) vxge_vBIT(val, 0, 17)
783/*0x00ee8*/ u64 vpath_reg_modified;
784#define VXGE_HW_VPATH_REG_MODIFIED_VPATH_REG_MODIFIED(val) vxge_vBIT(val, 0, 17)
785 u8 unused00fc0[0x00fc0-0x00ef0];
786
787/*0x00fc0*/ u64 cp_reset_in_progress;
788#define VXGE_HW_CP_RESET_IN_PROGRESS_CP_VPATH(n) vxge_mBIT(n)
789 u8 unused01080[0x01080-0x00fc8];
790
791/*0x01080*/ u64 xgmac_ready;
792#define VXGE_HW_XGMAC_READY_XMACJ_READY(val) vxge_vBIT(val, 0, 17)
793 u8 unused010c0[0x010c0-0x01088];
794
795/*0x010c0*/ u64 fbif_ready;
796#define VXGE_HW_FBIF_READY_FAU_READY(val) vxge_vBIT(val, 0, 17)
797 u8 unused01100[0x01100-0x010c8];
798
799/*0x01100*/ u64 vplane_assignments;
800#define VXGE_HW_VPLANE_ASSIGNMENTS_VPLANE_ASSIGNMENTS(val) vxge_vBIT(val, 3, 5)
801/*0x01108*/ u64 vpath_assignments;
802#define VXGE_HW_VPATH_ASSIGNMENTS_VPATH_ASSIGNMENTS(val) vxge_vBIT(val, 0, 17)
803/*0x01110*/ u64 resource_assignments;
804#define VXGE_HW_RESOURCE_ASSIGNMENTS_RESOURCE_ASSIGNMENTS(val) \
805 vxge_vBIT(val, 0, 17)
806/*0x01118*/ u64 host_type_assignments;
807#define VXGE_HW_HOST_TYPE_ASSIGNMENTS_HOST_TYPE_ASSIGNMENTS(val) \
808 vxge_vBIT(val, 5, 3)
809 u8 unused01128[0x01128-0x01120];
810
811/*0x01128*/ u64 max_resource_assignments;
812#define VXGE_HW_MAX_RESOURCE_ASSIGNMENTS_PCI_MAX_VPLANE(val) \
813 vxge_vBIT(val, 3, 5)
814#define VXGE_HW_MAX_RESOURCE_ASSIGNMENTS_PCI_MAX_VPATHS(val) \
815 vxge_vBIT(val, 11, 5)
816/*0x01130*/ u64 pf_vpath_assignments;
817#define VXGE_HW_PF_VPATH_ASSIGNMENTS_PF_VPATH_ASSIGNMENTS(val) \
818 vxge_vBIT(val, 0, 17)
819 u8 unused01200[0x01200-0x01138];
820
821/*0x01200*/ u64 rts_access_icmp;
822#define VXGE_HW_RTS_ACCESS_ICMP_EN(val) vxge_vBIT(val, 0, 17)
823/*0x01208*/ u64 rts_access_tcpsyn;
824#define VXGE_HW_RTS_ACCESS_TCPSYN_EN(val) vxge_vBIT(val, 0, 17)
825/*0x01210*/ u64 rts_access_zl4pyld;
826#define VXGE_HW_RTS_ACCESS_ZL4PYLD_EN(val) vxge_vBIT(val, 0, 17)
827/*0x01218*/ u64 rts_access_l4prtcl_tcp;
828#define VXGE_HW_RTS_ACCESS_L4PRTCL_TCP_EN(val) vxge_vBIT(val, 0, 17)
829/*0x01220*/ u64 rts_access_l4prtcl_udp;
830#define VXGE_HW_RTS_ACCESS_L4PRTCL_UDP_EN(val) vxge_vBIT(val, 0, 17)
831/*0x01228*/ u64 rts_access_l4prtcl_flex;
832#define VXGE_HW_RTS_ACCESS_L4PRTCL_FLEX_EN(val) vxge_vBIT(val, 0, 17)
833/*0x01230*/ u64 rts_access_ipfrag;
834#define VXGE_HW_RTS_ACCESS_IPFRAG_EN(val) vxge_vBIT(val, 0, 17)
835
836} __packed;
837
838struct vxge_hw_memrepair_reg {
839 u64 unused1;
840 u64 unused2;
841} __packed;
842
843struct vxge_hw_pcicfgmgmt_reg {
844
845/*0x00000*/ u64 resource_no;
846#define VXGE_HW_RESOURCE_NO_PFN_OR_VF BIT(3)
847/*0x00008*/ u64 bargrp_pf_or_vf_bar0_mask;
848#define VXGE_HW_BARGRP_PF_OR_VF_BAR0_MASK_BARGRP_PF_OR_VF_BAR0_MASK(val) \
849 vxge_vBIT(val, 2, 6)
850/*0x00010*/ u64 bargrp_pf_or_vf_bar1_mask;
851#define VXGE_HW_BARGRP_PF_OR_VF_BAR1_MASK_BARGRP_PF_OR_VF_BAR1_MASK(val) \
852 vxge_vBIT(val, 2, 6)
853/*0x00018*/ u64 bargrp_pf_or_vf_bar2_mask;
854#define VXGE_HW_BARGRP_PF_OR_VF_BAR2_MASK_BARGRP_PF_OR_VF_BAR2_MASK(val) \
855 vxge_vBIT(val, 2, 6)
856/*0x00020*/ u64 msixgrp_no;
857#define VXGE_HW_MSIXGRP_NO_TABLE_SIZE(val) vxge_vBIT(val, 5, 11)
858
859} __packed;
860
861struct vxge_hw_mrpcim_reg {
862/*0x00000*/ u64 g3fbct_int_status;
863#define VXGE_HW_G3FBCT_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
864/*0x00008*/ u64 g3fbct_int_mask;
865/*0x00010*/ u64 g3fbct_err_reg;
866#define VXGE_HW_G3FBCT_ERR_REG_G3IF_SM_ERR vxge_mBIT(4)
867#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_DECC vxge_mBIT(5)
868#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_U_DECC vxge_mBIT(6)
869#define VXGE_HW_G3FBCT_ERR_REG_G3IF_CTRL_FIFO_DECC vxge_mBIT(7)
870#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_SECC vxge_mBIT(29)
871#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_U_SECC vxge_mBIT(30)
872#define VXGE_HW_G3FBCT_ERR_REG_G3IF_CTRL_FIFO_SECC vxge_mBIT(31)
873/*0x00018*/ u64 g3fbct_err_mask;
874/*0x00020*/ u64 g3fbct_err_alarm;
875
876 u8 unused00a00[0x00a00-0x00028];
877
878/*0x00a00*/ u64 wrdma_int_status;
879#define VXGE_HW_WRDMA_INT_STATUS_RC_ALARM_RC_INT vxge_mBIT(0)
880#define VXGE_HW_WRDMA_INT_STATUS_RXDRM_SM_ERR_RXDRM_INT vxge_mBIT(1)
881#define VXGE_HW_WRDMA_INT_STATUS_RXDCM_SM_ERR_RXDCM_SM_INT vxge_mBIT(2)
882#define VXGE_HW_WRDMA_INT_STATUS_RXDWM_SM_ERR_RXDWM_INT vxge_mBIT(3)
883#define VXGE_HW_WRDMA_INT_STATUS_RDA_ERR_RDA_INT vxge_mBIT(6)
884#define VXGE_HW_WRDMA_INT_STATUS_RDA_ECC_DB_RDA_ECC_DB_INT vxge_mBIT(8)
885#define VXGE_HW_WRDMA_INT_STATUS_RDA_ECC_SG_RDA_ECC_SG_INT vxge_mBIT(9)
886#define VXGE_HW_WRDMA_INT_STATUS_FRF_ALARM_FRF_INT vxge_mBIT(12)
887#define VXGE_HW_WRDMA_INT_STATUS_ROCRC_ALARM_ROCRC_INT vxge_mBIT(13)
888#define VXGE_HW_WRDMA_INT_STATUS_WDE0_ALARM_WDE0_INT vxge_mBIT(14)
889#define VXGE_HW_WRDMA_INT_STATUS_WDE1_ALARM_WDE1_INT vxge_mBIT(15)
890#define VXGE_HW_WRDMA_INT_STATUS_WDE2_ALARM_WDE2_INT vxge_mBIT(16)
891#define VXGE_HW_WRDMA_INT_STATUS_WDE3_ALARM_WDE3_INT vxge_mBIT(17)
892/*0x00a08*/ u64 wrdma_int_mask;
893/*0x00a10*/ u64 rc_alarm_reg;
894#define VXGE_HW_RC_ALARM_REG_FTC_SM_ERR vxge_mBIT(0)
895#define VXGE_HW_RC_ALARM_REG_FTC_SM_PHASE_ERR vxge_mBIT(1)
896#define VXGE_HW_RC_ALARM_REG_BTDWM_SM_ERR vxge_mBIT(2)
897#define VXGE_HW_RC_ALARM_REG_BTC_SM_ERR vxge_mBIT(3)
898#define VXGE_HW_RC_ALARM_REG_BTDCM_SM_ERR vxge_mBIT(4)
899#define VXGE_HW_RC_ALARM_REG_BTDRM_SM_ERR vxge_mBIT(5)
900#define VXGE_HW_RC_ALARM_REG_RMM_RXD_RC_ECC_DB_ERR vxge_mBIT(6)
901#define VXGE_HW_RC_ALARM_REG_RMM_RXD_RC_ECC_SG_ERR vxge_mBIT(7)
902#define VXGE_HW_RC_ALARM_REG_RHS_RXD_RHS_ECC_DB_ERR vxge_mBIT(8)
903#define VXGE_HW_RC_ALARM_REG_RHS_RXD_RHS_ECC_SG_ERR vxge_mBIT(9)
904#define VXGE_HW_RC_ALARM_REG_RMM_SM_ERR vxge_mBIT(10)
905#define VXGE_HW_RC_ALARM_REG_BTC_VPATH_MISMATCH_ERR vxge_mBIT(12)
906/*0x00a18*/ u64 rc_alarm_mask;
907/*0x00a20*/ u64 rc_alarm_alarm;
/*0x00a28*/ u64 rxdrm_sm_err_reg;
#define VXGE_HW_RXDRM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n)
/*0x00a30*/ u64 rxdrm_sm_err_mask;
/*0x00a38*/ u64 rxdrm_sm_err_alarm;
/*0x00a40*/ u64 rxdcm_sm_err_reg;
#define VXGE_HW_RXDCM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n)
/*0x00a48*/ u64 rxdcm_sm_err_mask;
/*0x00a50*/ u64 rxdcm_sm_err_alarm;
/*0x00a58*/ u64 rxdwm_sm_err_reg;
#define VXGE_HW_RXDWM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n)
/*0x00a60*/ u64 rxdwm_sm_err_mask;
/*0x00a68*/ u64 rxdwm_sm_err_alarm;
/*0x00a70*/ u64 rda_err_reg;
#define VXGE_HW_RDA_ERR_REG_RDA_SM0_ERR_ALARM vxge_mBIT(0)
#define VXGE_HW_RDA_ERR_REG_RDA_MISC_ERR vxge_mBIT(1)
#define VXGE_HW_RDA_ERR_REG_RDA_PCIX_ERR vxge_mBIT(2)
#define VXGE_HW_RDA_ERR_REG_RDA_RXD_ECC_DB_ERR vxge_mBIT(3)
#define VXGE_HW_RDA_ERR_REG_RDA_FRM_ECC_DB_ERR vxge_mBIT(4)
#define VXGE_HW_RDA_ERR_REG_RDA_UQM_ECC_DB_ERR vxge_mBIT(5)
#define VXGE_HW_RDA_ERR_REG_RDA_IMM_ECC_DB_ERR vxge_mBIT(6)
#define VXGE_HW_RDA_ERR_REG_RDA_TIM_ECC_DB_ERR vxge_mBIT(7)
/*0x00a78*/ u64 rda_err_mask;
/*0x00a80*/ u64 rda_err_alarm;
/*0x00a88*/ u64 rda_ecc_db_reg;
#define VXGE_HW_RDA_ECC_DB_REG_RDA_RXD_ERR(n) vxge_mBIT(n)
/*0x00a90*/ u64 rda_ecc_db_mask;
/*0x00a98*/ u64 rda_ecc_db_alarm;
/*0x00aa0*/ u64 rda_ecc_sg_reg;
#define VXGE_HW_RDA_ECC_SG_REG_RDA_RXD_ERR(n) vxge_mBIT(n)
/*0x00aa8*/ u64 rda_ecc_sg_mask;
/*0x00ab0*/ u64 rda_ecc_sg_alarm;
/*0x00ab8*/ u64 rqa_err_reg;
#define VXGE_HW_RQA_ERR_REG_RQA_SM_ERR_ALARM vxge_mBIT(0)
/*0x00ac0*/ u64 rqa_err_mask;
/*0x00ac8*/ u64 rqa_err_alarm;
/*0x00ad0*/ u64 frf_alarm_reg;
#define VXGE_HW_FRF_ALARM_REG_PRC_VP_FRF_SM_ERR(n) vxge_mBIT(n)
/*0x00ad8*/ u64 frf_alarm_mask;
/*0x00ae0*/ u64 frf_alarm_alarm;
/*0x00ae8*/ u64 rocrc_alarm_reg;
#define VXGE_HW_ROCRC_ALARM_REG_QCQ_QCC_BYP_ECC_DB vxge_mBIT(0)
#define VXGE_HW_ROCRC_ALARM_REG_QCQ_QCC_BYP_ECC_SG vxge_mBIT(1)
#define VXGE_HW_ROCRC_ALARM_REG_NOA_NMA_SM_ERR vxge_mBIT(2)
#define VXGE_HW_ROCRC_ALARM_REG_NOA_IMMM_ECC_DB vxge_mBIT(3)
#define VXGE_HW_ROCRC_ALARM_REG_NOA_IMMM_ECC_SG vxge_mBIT(4)
#define VXGE_HW_ROCRC_ALARM_REG_UDQ_UMQM_ECC_DB vxge_mBIT(5)
#define VXGE_HW_ROCRC_ALARM_REG_UDQ_UMQM_ECC_SG vxge_mBIT(6)
#define VXGE_HW_ROCRC_ALARM_REG_NOA_RCBM_ECC_DB vxge_mBIT(11)
#define VXGE_HW_ROCRC_ALARM_REG_NOA_RCBM_ECC_SG vxge_mBIT(12)
#define VXGE_HW_ROCRC_ALARM_REG_QCQ_MULTI_EGB_RSVD_ERR vxge_mBIT(13)
#define VXGE_HW_ROCRC_ALARM_REG_QCQ_MULTI_EGB_OWN_ERR vxge_mBIT(14)
#define VXGE_HW_ROCRC_ALARM_REG_QCQ_MULTI_BYP_OWN_ERR vxge_mBIT(15)
#define VXGE_HW_ROCRC_ALARM_REG_QCQ_OWN_NOT_ASSIGNED_ERR vxge_mBIT(16)
#define VXGE_HW_ROCRC_ALARM_REG_QCQ_OWN_RSVD_SYNC_ERR vxge_mBIT(17)
#define VXGE_HW_ROCRC_ALARM_REG_QCQ_LOST_EGB_ERR vxge_mBIT(18)
#define VXGE_HW_ROCRC_ALARM_REG_RCQ_BYPQ0_OVERFLOW vxge_mBIT(19)
#define VXGE_HW_ROCRC_ALARM_REG_RCQ_BYPQ1_OVERFLOW vxge_mBIT(20)
#define VXGE_HW_ROCRC_ALARM_REG_RCQ_BYPQ2_OVERFLOW vxge_mBIT(21)
#define VXGE_HW_ROCRC_ALARM_REG_NOA_WCT_CMD_FIFO_ERR vxge_mBIT(22)
/*0x00af0*/ u64 rocrc_alarm_mask;
/*0x00af8*/ u64 rocrc_alarm_alarm;
/*0x00b00*/ u64 wde0_alarm_reg;
#define VXGE_HW_WDE0_ALARM_REG_WDE0_DCC_SM_ERR vxge_mBIT(0)
#define VXGE_HW_WDE0_ALARM_REG_WDE0_PRM_SM_ERR vxge_mBIT(1)
#define VXGE_HW_WDE0_ALARM_REG_WDE0_CP_SM_ERR vxge_mBIT(2)
#define VXGE_HW_WDE0_ALARM_REG_WDE0_CP_CMD_ERR vxge_mBIT(3)
#define VXGE_HW_WDE0_ALARM_REG_WDE0_PCR_SM_ERR vxge_mBIT(4)
/*0x00b08*/ u64 wde0_alarm_mask;
/*0x00b10*/ u64 wde0_alarm_alarm;
/*0x00b18*/ u64 wde1_alarm_reg;
#define VXGE_HW_WDE1_ALARM_REG_WDE1_DCC_SM_ERR vxge_mBIT(0)
#define VXGE_HW_WDE1_ALARM_REG_WDE1_PRM_SM_ERR vxge_mBIT(1)
#define VXGE_HW_WDE1_ALARM_REG_WDE1_CP_SM_ERR vxge_mBIT(2)
#define VXGE_HW_WDE1_ALARM_REG_WDE1_CP_CMD_ERR vxge_mBIT(3)
#define VXGE_HW_WDE1_ALARM_REG_WDE1_PCR_SM_ERR vxge_mBIT(4)
/*0x00b20*/ u64 wde1_alarm_mask;
/*0x00b28*/ u64 wde1_alarm_alarm;
/*0x00b30*/ u64 wde2_alarm_reg;
#define VXGE_HW_WDE2_ALARM_REG_WDE2_DCC_SM_ERR vxge_mBIT(0)
#define VXGE_HW_WDE2_ALARM_REG_WDE2_PRM_SM_ERR vxge_mBIT(1)
#define VXGE_HW_WDE2_ALARM_REG_WDE2_CP_SM_ERR vxge_mBIT(2)
#define VXGE_HW_WDE2_ALARM_REG_WDE2_CP_CMD_ERR vxge_mBIT(3)
#define VXGE_HW_WDE2_ALARM_REG_WDE2_PCR_SM_ERR vxge_mBIT(4)
/*0x00b38*/ u64 wde2_alarm_mask;
/*0x00b40*/ u64 wde2_alarm_alarm;
/*0x00b48*/ u64 wde3_alarm_reg;
#define VXGE_HW_WDE3_ALARM_REG_WDE3_DCC_SM_ERR vxge_mBIT(0)
#define VXGE_HW_WDE3_ALARM_REG_WDE3_PRM_SM_ERR vxge_mBIT(1)
#define VXGE_HW_WDE3_ALARM_REG_WDE3_CP_SM_ERR vxge_mBIT(2)
#define VXGE_HW_WDE3_ALARM_REG_WDE3_CP_CMD_ERR vxge_mBIT(3)
#define VXGE_HW_WDE3_ALARM_REG_WDE3_PCR_SM_ERR vxge_mBIT(4)
/*0x00b50*/ u64 wde3_alarm_mask;
/*0x00b58*/ u64 wde3_alarm_alarm;

        u8 unused00be8[0x00be8-0x00b60];

/*0x00be8*/ u64 rx_w_round_robin_0;
#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_0(val) vxge_vBIT(val, 3, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_1(val) vxge_vBIT(val, 11, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_2(val) vxge_vBIT(val, 19, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_3(val) vxge_vBIT(val, 27, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_4(val) vxge_vBIT(val, 35, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_5(val) vxge_vBIT(val, 43, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_6(val) vxge_vBIT(val, 51, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_7(val) vxge_vBIT(val, 59, 5)
/*0x00bf0*/ u64 rx_w_round_robin_1;
#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_8(val) vxge_vBIT(val, 3, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_9(val) vxge_vBIT(val, 11, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_10(val) \
        vxge_vBIT(val, 19, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_11(val) \
        vxge_vBIT(val, 27, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_12(val) \
        vxge_vBIT(val, 35, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_13(val) \
        vxge_vBIT(val, 43, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_14(val) \
        vxge_vBIT(val, 51, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_15(val) \
        vxge_vBIT(val, 59, 5)
/*0x00bf8*/ u64 rx_w_round_robin_2;
#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_16(val) vxge_vBIT(val, 3, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_17(val) \
        vxge_vBIT(val, 11, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_18(val) \
        vxge_vBIT(val, 19, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_19(val) \
        vxge_vBIT(val, 27, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_20(val) \
        vxge_vBIT(val, 35, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_21(val) \
        vxge_vBIT(val, 43, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_22(val) \
        vxge_vBIT(val, 51, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_23(val) \
        vxge_vBIT(val, 59, 5)
/*0x00c00*/ u64 rx_w_round_robin_3;
#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_24(val) vxge_vBIT(val, 3, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_25(val) \
        vxge_vBIT(val, 11, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_26(val) \
        vxge_vBIT(val, 19, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_27(val) \
        vxge_vBIT(val, 27, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_28(val) \
        vxge_vBIT(val, 35, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_29(val) \
        vxge_vBIT(val, 43, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_30(val) \
        vxge_vBIT(val, 51, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_31(val) \
        vxge_vBIT(val, 59, 5)
/*0x00c08*/ u64 rx_w_round_robin_4;
#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_32(val) vxge_vBIT(val, 3, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_33(val) \
        vxge_vBIT(val, 11, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_34(val) \
        vxge_vBIT(val, 19, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_35(val) \
        vxge_vBIT(val, 27, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_36(val) \
        vxge_vBIT(val, 35, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_37(val) \
        vxge_vBIT(val, 43, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_38(val) \
        vxge_vBIT(val, 51, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_39(val) \
        vxge_vBIT(val, 59, 5)
/*0x00c10*/ u64 rx_w_round_robin_5;
#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_40(val) vxge_vBIT(val, 3, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_41(val) \
        vxge_vBIT(val, 11, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_42(val) \
        vxge_vBIT(val, 19, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_43(val) \
        vxge_vBIT(val, 27, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_44(val) \
        vxge_vBIT(val, 35, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_45(val) \
        vxge_vBIT(val, 43, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_46(val) \
        vxge_vBIT(val, 51, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_47(val) \
        vxge_vBIT(val, 59, 5)
/*0x00c18*/ u64 rx_w_round_robin_6;
#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_48(val) vxge_vBIT(val, 3, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_49(val) \
        vxge_vBIT(val, 11, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_50(val) \
        vxge_vBIT(val, 19, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_51(val) \
        vxge_vBIT(val, 27, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_52(val) \
        vxge_vBIT(val, 35, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_53(val) \
        vxge_vBIT(val, 43, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_54(val) \
        vxge_vBIT(val, 51, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_55(val) \
        vxge_vBIT(val, 59, 5)
/*0x00c20*/ u64 rx_w_round_robin_7;
#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_56(val) vxge_vBIT(val, 3, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_57(val) \
        vxge_vBIT(val, 11, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_58(val) \
        vxge_vBIT(val, 19, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_59(val) \
        vxge_vBIT(val, 27, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_60(val) \
        vxge_vBIT(val, 35, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_61(val) \
        vxge_vBIT(val, 43, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_62(val) \
        vxge_vBIT(val, 51, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_63(val) \
        vxge_vBIT(val, 59, 5)
/*0x00c28*/ u64 rx_w_round_robin_8;
#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_64(val) vxge_vBIT(val, 3, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_65(val) \
        vxge_vBIT(val, 11, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_66(val) \
        vxge_vBIT(val, 19, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_67(val) \
        vxge_vBIT(val, 27, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_68(val) \
        vxge_vBIT(val, 35, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_69(val) \
        vxge_vBIT(val, 43, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_70(val) \
        vxge_vBIT(val, 51, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_71(val) \
        vxge_vBIT(val, 59, 5)
/*0x00c30*/ u64 rx_w_round_robin_9;
#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_72(val) vxge_vBIT(val, 3, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_73(val) \
        vxge_vBIT(val, 11, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_74(val) \
        vxge_vBIT(val, 19, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_75(val) \
        vxge_vBIT(val, 27, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_76(val) \
        vxge_vBIT(val, 35, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_77(val) \
        vxge_vBIT(val, 43, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_78(val) \
        vxge_vBIT(val, 51, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_79(val) \
        vxge_vBIT(val, 59, 5)
/*0x00c38*/ u64 rx_w_round_robin_10;
#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_80(val) \
        vxge_vBIT(val, 3, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_81(val) \
        vxge_vBIT(val, 11, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_82(val) \
        vxge_vBIT(val, 19, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_83(val) \
        vxge_vBIT(val, 27, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_84(val) \
        vxge_vBIT(val, 35, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_85(val) \
        vxge_vBIT(val, 43, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_86(val) \
        vxge_vBIT(val, 51, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_87(val) \
        vxge_vBIT(val, 59, 5)
/*0x00c40*/ u64 rx_w_round_robin_11;
#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_88(val) \
        vxge_vBIT(val, 3, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_89(val) \
        vxge_vBIT(val, 11, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_90(val) \
        vxge_vBIT(val, 19, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_91(val) \
        vxge_vBIT(val, 27, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_92(val) \
        vxge_vBIT(val, 35, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_93(val) \
        vxge_vBIT(val, 43, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_94(val) \
        vxge_vBIT(val, 51, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_95(val) \
        vxge_vBIT(val, 59, 5)
/*0x00c48*/ u64 rx_w_round_robin_12;
#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_96(val) \
        vxge_vBIT(val, 3, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_97(val) \
        vxge_vBIT(val, 11, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_98(val) \
        vxge_vBIT(val, 19, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_99(val) \
        vxge_vBIT(val, 27, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_100(val) \
        vxge_vBIT(val, 35, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_101(val) \
        vxge_vBIT(val, 43, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_102(val) \
        vxge_vBIT(val, 51, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_103(val) \
        vxge_vBIT(val, 59, 5)
/*0x00c50*/ u64 rx_w_round_robin_13;
#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_104(val) \
        vxge_vBIT(val, 3, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_105(val) \
        vxge_vBIT(val, 11, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_106(val) \
        vxge_vBIT(val, 19, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_107(val) \
        vxge_vBIT(val, 27, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_108(val) \
        vxge_vBIT(val, 35, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_109(val) \
        vxge_vBIT(val, 43, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_110(val) \
        vxge_vBIT(val, 51, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_111(val) \
        vxge_vBIT(val, 59, 5)
/*0x00c58*/ u64 rx_w_round_robin_14;
#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_112(val) \
        vxge_vBIT(val, 3, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_113(val) \
        vxge_vBIT(val, 11, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_114(val) \
        vxge_vBIT(val, 19, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_115(val) \
        vxge_vBIT(val, 27, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_116(val) \
        vxge_vBIT(val, 35, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_117(val) \
        vxge_vBIT(val, 43, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_118(val) \
        vxge_vBIT(val, 51, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_119(val) \
        vxge_vBIT(val, 59, 5)
/*0x00c60*/ u64 rx_w_round_robin_15;
#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_120(val) \
        vxge_vBIT(val, 3, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_121(val) \
        vxge_vBIT(val, 11, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_122(val) \
        vxge_vBIT(val, 19, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_123(val) \
        vxge_vBIT(val, 27, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_124(val) \
        vxge_vBIT(val, 35, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_125(val) \
        vxge_vBIT(val, 43, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_126(val) \
        vxge_vBIT(val, 51, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_127(val) \
        vxge_vBIT(val, 59, 5)
/*0x00c68*/ u64 rx_w_round_robin_16;
#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_128(val) \
        vxge_vBIT(val, 3, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_129(val) \
        vxge_vBIT(val, 11, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_130(val) \
        vxge_vBIT(val, 19, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_131(val) \
        vxge_vBIT(val, 27, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_132(val) \
        vxge_vBIT(val, 35, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_133(val) \
        vxge_vBIT(val, 43, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_134(val) \
        vxge_vBIT(val, 51, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_135(val) \
        vxge_vBIT(val, 59, 5)
/*0x00c70*/ u64 rx_w_round_robin_17;
#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_136(val) \
        vxge_vBIT(val, 3, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_137(val) \
        vxge_vBIT(val, 11, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_138(val) \
        vxge_vBIT(val, 19, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_139(val) \
        vxge_vBIT(val, 27, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_140(val) \
        vxge_vBIT(val, 35, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_141(val) \
        vxge_vBIT(val, 43, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_142(val) \
        vxge_vBIT(val, 51, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_143(val) \
        vxge_vBIT(val, 59, 5)
/*0x00c78*/ u64 rx_w_round_robin_18;
#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_144(val) \
        vxge_vBIT(val, 3, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_145(val) \
        vxge_vBIT(val, 11, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_146(val) \
        vxge_vBIT(val, 19, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_147(val) \
        vxge_vBIT(val, 27, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_148(val) \
        vxge_vBIT(val, 35, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_149(val) \
        vxge_vBIT(val, 43, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_150(val) \
        vxge_vBIT(val, 51, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_151(val) \
        vxge_vBIT(val, 59, 5)
/*0x00c80*/ u64 rx_w_round_robin_19;
#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_152(val) \
        vxge_vBIT(val, 3, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_153(val) \
        vxge_vBIT(val, 11, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_154(val) \
        vxge_vBIT(val, 19, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_155(val) \
        vxge_vBIT(val, 27, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_156(val) \
        vxge_vBIT(val, 35, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_157(val) \
        vxge_vBIT(val, 43, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_158(val) \
        vxge_vBIT(val, 51, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_159(val) \
        vxge_vBIT(val, 59, 5)
/*0x00c88*/ u64 rx_w_round_robin_20;
#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_160(val) \
        vxge_vBIT(val, 3, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_161(val) \
        vxge_vBIT(val, 11, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_162(val) \
        vxge_vBIT(val, 19, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_163(val) \
        vxge_vBIT(val, 27, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_164(val) \
        vxge_vBIT(val, 35, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_165(val) \
        vxge_vBIT(val, 43, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_166(val) \
        vxge_vBIT(val, 51, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_167(val) \
        vxge_vBIT(val, 59, 5)
/*0x00c90*/ u64 rx_w_round_robin_21;
#define VXGE_HW_RX_W_ROUND_ROBIN_21_RX_W_PRIORITY_SS_168(val) \
        vxge_vBIT(val, 3, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_21_RX_W_PRIORITY_SS_169(val) \
        vxge_vBIT(val, 11, 5)
#define VXGE_HW_RX_W_ROUND_ROBIN_21_RX_W_PRIORITY_SS_170(val) \
        vxge_vBIT(val, 19, 5)

#define VXGE_HW_WRR_RING_SERVICE_STATES 171
#define VXGE_HW_WRR_RING_COUNT 22

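/*
 * The rx_w_round_robin_0..21 registers above hold 171 five-bit service
 * slots (VXGE_HW_WRR_RING_SERVICE_STATES) spread across 22 registers
 * (VXGE_HW_WRR_RING_COUNT), eight slots per register with the last one
 * only partially used.  A hedged sketch of filling the first calendar
 * register so slot i services ring (i % num_rings) -- "regs" and
 * "num_rings" are illustrative names, not part of the driver:
 *
 *	u64 val = 0;
 *	val |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_0(0 % num_rings);
 *	val |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_1(1 % num_rings);
 *	// ... SS_2 through SS_7 likewise ...
 *	writeq(val, &regs->rx_w_round_robin_0);
 */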
/*0x00c98*/ u64 rx_queue_priority_0;
#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_0(val) vxge_vBIT(val, 3, 5)
#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_1(val) vxge_vBIT(val, 11, 5)
#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_2(val) vxge_vBIT(val, 19, 5)
#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_3(val) vxge_vBIT(val, 27, 5)
#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_4(val) vxge_vBIT(val, 35, 5)
#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_5(val) vxge_vBIT(val, 43, 5)
#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_6(val) vxge_vBIT(val, 51, 5)
#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_7(val) vxge_vBIT(val, 59, 5)
/*0x00ca0*/ u64 rx_queue_priority_1;
#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_8(val) vxge_vBIT(val, 3, 5)
#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_9(val) vxge_vBIT(val, 11, 5)
#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_10(val) vxge_vBIT(val, 19, 5)
#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_11(val) vxge_vBIT(val, 27, 5)
#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_12(val) vxge_vBIT(val, 35, 5)
#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_13(val) vxge_vBIT(val, 43, 5)
#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_14(val) vxge_vBIT(val, 51, 5)
#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_15(val) vxge_vBIT(val, 59, 5)
/*0x00ca8*/ u64 rx_queue_priority_2;
#define VXGE_HW_RX_QUEUE_PRIORITY_2_RX_Q_NUMBER_16(val) vxge_vBIT(val, 3, 5)
        u8 unused00cc8[0x00cc8-0x00cb0];

/*0x00cc8*/ u64 replication_queue_priority;
#define VXGE_HW_REPLICATION_QUEUE_PRIORITY_REPLICATION_QUEUE_PRIORITY(val) \
        vxge_vBIT(val, 59, 5)
/*0x00cd0*/ u64 rx_queue_select;
#define VXGE_HW_RX_QUEUE_SELECT_NUMBER(n) vxge_mBIT(n)
#define VXGE_HW_RX_QUEUE_SELECT_ENABLE_CODE vxge_mBIT(15)
#define VXGE_HW_RX_QUEUE_SELECT_ENABLE_HIERARCHICAL_PRTY vxge_mBIT(23)
/*0x00cd8*/ u64 rqa_vpbp_ctrl;
#define VXGE_HW_RQA_VPBP_CTRL_WR_XON_DIS vxge_mBIT(15)
#define VXGE_HW_RQA_VPBP_CTRL_ROCRC_DIS vxge_mBIT(23)
#define VXGE_HW_RQA_VPBP_CTRL_TXPE_DIS vxge_mBIT(31)
/*0x00ce0*/ u64 rx_multi_cast_ctrl;
#define VXGE_HW_RX_MULTI_CAST_CTRL_TIME_OUT_DIS vxge_mBIT(0)
#define VXGE_HW_RX_MULTI_CAST_CTRL_FRM_DROP_DIS vxge_mBIT(1)
#define VXGE_HW_RX_MULTI_CAST_CTRL_NO_RXD_TIME_OUT_CNT(val) \
        vxge_vBIT(val, 2, 30)
#define VXGE_HW_RX_MULTI_CAST_CTRL_TIME_OUT_CNT(val) vxge_vBIT(val, 32, 32)
/*0x00ce8*/ u64 wde_prm_ctrl;
#define VXGE_HW_WDE_PRM_CTRL_SPAV_THRESHOLD(val) vxge_vBIT(val, 2, 10)
#define VXGE_HW_WDE_PRM_CTRL_SPLIT_THRESHOLD(val) vxge_vBIT(val, 18, 14)
#define VXGE_HW_WDE_PRM_CTRL_SPLIT_ON_1ST_ROW vxge_mBIT(32)
#define VXGE_HW_WDE_PRM_CTRL_SPLIT_ON_ROW_BNDRY vxge_mBIT(33)
#define VXGE_HW_WDE_PRM_CTRL_FB_ROW_SIZE(val) vxge_vBIT(val, 46, 2)
/*0x00cf0*/ u64 noa_ctrl;
#define VXGE_HW_NOA_CTRL_FRM_PRTY_QUOTA(val) vxge_vBIT(val, 3, 5)
#define VXGE_HW_NOA_CTRL_NON_FRM_PRTY_QUOTA(val) vxge_vBIT(val, 11, 5)
#define VXGE_HW_NOA_CTRL_IGNORE_KDFC_IF_STATUS vxge_mBIT(16)
#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE0(val) vxge_vBIT(val, 37, 4)
#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE1(val) vxge_vBIT(val, 45, 4)
#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE2(val) vxge_vBIT(val, 53, 4)
#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE3(val) vxge_vBIT(val, 60, 4)
/*0x00cf8*/ u64 phase_cfg;
#define VXGE_HW_PHASE_CFG_QCC_WR_PHASE_EN vxge_mBIT(0)
#define VXGE_HW_PHASE_CFG_QCC_RD_PHASE_EN vxge_mBIT(3)
#define VXGE_HW_PHASE_CFG_IMMM_WR_PHASE_EN vxge_mBIT(7)
#define VXGE_HW_PHASE_CFG_IMMM_RD_PHASE_EN vxge_mBIT(11)
#define VXGE_HW_PHASE_CFG_UMQM_WR_PHASE_EN vxge_mBIT(15)
#define VXGE_HW_PHASE_CFG_UMQM_RD_PHASE_EN vxge_mBIT(19)
#define VXGE_HW_PHASE_CFG_RCBM_WR_PHASE_EN vxge_mBIT(23)
#define VXGE_HW_PHASE_CFG_RCBM_RD_PHASE_EN vxge_mBIT(27)
#define VXGE_HW_PHASE_CFG_RXD_RC_WR_PHASE_EN vxge_mBIT(31)
#define VXGE_HW_PHASE_CFG_RXD_RC_RD_PHASE_EN vxge_mBIT(35)
#define VXGE_HW_PHASE_CFG_RXD_RHS_WR_PHASE_EN vxge_mBIT(39)
#define VXGE_HW_PHASE_CFG_RXD_RHS_RD_PHASE_EN vxge_mBIT(43)
/*0x00d00*/ u64 rcq_bypq_cfg;
#define VXGE_HW_RCQ_BYPQ_CFG_OVERFLOW_THRESHOLD(val) vxge_vBIT(val, 10, 22)
#define VXGE_HW_RCQ_BYPQ_CFG_BYP_ON_THRESHOLD(val) vxge_vBIT(val, 39, 9)
#define VXGE_HW_RCQ_BYPQ_CFG_BYP_OFF_THRESHOLD(val) vxge_vBIT(val, 55, 9)
        u8 unused00e00[0x00e00-0x00d08];

/*0x00e00*/ u64 doorbell_int_status;
#define VXGE_HW_DOORBELL_INT_STATUS_KDFC_ERR_REG_TXDMA_KDFC_INT vxge_mBIT(7)
#define VXGE_HW_DOORBELL_INT_STATUS_USDC_ERR_REG_TXDMA_USDC_INT vxge_mBIT(15)
/*0x00e08*/ u64 doorbell_int_mask;
/*0x00e10*/ u64 kdfc_err_reg;
#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_ECC_SG_ERR vxge_mBIT(7)
#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_ECC_DB_ERR vxge_mBIT(15)
#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_SM_ERR_ALARM vxge_mBIT(23)
#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_MISC_ERR_1 vxge_mBIT(32)
#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_PCIX_ERR vxge_mBIT(39)
/*0x00e18*/ u64 kdfc_err_mask;
/*0x00e20*/ u64 kdfc_err_reg_alarm;
#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_ECC_SG_ERR vxge_mBIT(7)
#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_ECC_DB_ERR vxge_mBIT(15)
#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_SM_ERR_ALARM vxge_mBIT(23)
#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_MISC_ERR_1 vxge_mBIT(32)
#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_PCIX_ERR vxge_mBIT(39)
        u8 unused00e40[0x00e40-0x00e28];
/*0x00e40*/ u64 kdfc_vp_partition_0;
#define VXGE_HW_KDFC_VP_PARTITION_0_ENABLE vxge_mBIT(0)
#define VXGE_HW_KDFC_VP_PARTITION_0_NUMBER_0(val) vxge_vBIT(val, 5, 3)
#define VXGE_HW_KDFC_VP_PARTITION_0_LENGTH_0(val) vxge_vBIT(val, 17, 15)
#define VXGE_HW_KDFC_VP_PARTITION_0_NUMBER_1(val) vxge_vBIT(val, 37, 3)
#define VXGE_HW_KDFC_VP_PARTITION_0_LENGTH_1(val) vxge_vBIT(val, 49, 15)
/*0x00e48*/ u64 kdfc_vp_partition_1;
#define VXGE_HW_KDFC_VP_PARTITION_1_NUMBER_2(val) vxge_vBIT(val, 5, 3)
#define VXGE_HW_KDFC_VP_PARTITION_1_LENGTH_2(val) vxge_vBIT(val, 17, 15)
#define VXGE_HW_KDFC_VP_PARTITION_1_NUMBER_3(val) vxge_vBIT(val, 37, 3)
#define VXGE_HW_KDFC_VP_PARTITION_1_LENGTH_3(val) vxge_vBIT(val, 49, 15)
/*0x00e50*/ u64 kdfc_vp_partition_2;
#define VXGE_HW_KDFC_VP_PARTITION_2_NUMBER_4(val) vxge_vBIT(val, 5, 3)
#define VXGE_HW_KDFC_VP_PARTITION_2_LENGTH_4(val) vxge_vBIT(val, 17, 15)
#define VXGE_HW_KDFC_VP_PARTITION_2_NUMBER_5(val) vxge_vBIT(val, 37, 3)
#define VXGE_HW_KDFC_VP_PARTITION_2_LENGTH_5(val) vxge_vBIT(val, 49, 15)
/*0x00e58*/ u64 kdfc_vp_partition_3;
#define VXGE_HW_KDFC_VP_PARTITION_3_NUMBER_6(val) vxge_vBIT(val, 5, 3)
#define VXGE_HW_KDFC_VP_PARTITION_3_LENGTH_6(val) vxge_vBIT(val, 17, 15)
#define VXGE_HW_KDFC_VP_PARTITION_3_NUMBER_7(val) vxge_vBIT(val, 37, 3)
#define VXGE_HW_KDFC_VP_PARTITION_3_LENGTH_7(val) vxge_vBIT(val, 49, 15)
/*0x00e60*/ u64 kdfc_vp_partition_4;
#define VXGE_HW_KDFC_VP_PARTITION_4_LENGTH_8(val) vxge_vBIT(val, 17, 15)
#define VXGE_HW_KDFC_VP_PARTITION_4_LENGTH_9(val) vxge_vBIT(val, 49, 15)
/*0x00e68*/ u64 kdfc_vp_partition_5;
#define VXGE_HW_KDFC_VP_PARTITION_5_LENGTH_10(val) vxge_vBIT(val, 17, 15)
#define VXGE_HW_KDFC_VP_PARTITION_5_LENGTH_11(val) vxge_vBIT(val, 49, 15)
/*0x00e70*/ u64 kdfc_vp_partition_6;
#define VXGE_HW_KDFC_VP_PARTITION_6_LENGTH_12(val) vxge_vBIT(val, 17, 15)
#define VXGE_HW_KDFC_VP_PARTITION_6_LENGTH_13(val) vxge_vBIT(val, 49, 15)
/*0x00e78*/ u64 kdfc_vp_partition_7;
#define VXGE_HW_KDFC_VP_PARTITION_7_LENGTH_14(val) vxge_vBIT(val, 17, 15)
#define VXGE_HW_KDFC_VP_PARTITION_7_LENGTH_15(val) vxge_vBIT(val, 49, 15)
/*0x00e80*/ u64 kdfc_vp_partition_8;
#define VXGE_HW_KDFC_VP_PARTITION_8_LENGTH_16(val) vxge_vBIT(val, 17, 15)
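/*
 * kdfc_vp_partition_0..8 carve the doorbell FIFO space into per-vpath
 * regions, two (number, length) pairs per register.  A hedged sketch of
 * programming the first pair; the field meanings are inferred from the
 * macro names, and "regs", "fifo_no" and "fifo_len" are illustrative:
 *
 *	u64 val = VXGE_HW_KDFC_VP_PARTITION_0_ENABLE;
 *	val |= VXGE_HW_KDFC_VP_PARTITION_0_NUMBER_0(fifo_no);
 *	val |= VXGE_HW_KDFC_VP_PARTITION_0_LENGTH_0(fifo_len);
 *	writeq(val, &regs->kdfc_vp_partition_0);
 */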
/*0x00e88*/ u64 kdfc_w_round_robin_0;
#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_0(val) vxge_vBIT(val, 3, 5)
#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_1(val) vxge_vBIT(val, 11, 5)
#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_2(val) vxge_vBIT(val, 19, 5)
#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_3(val) vxge_vBIT(val, 27, 5)
#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_4(val) vxge_vBIT(val, 35, 5)
#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_5(val) vxge_vBIT(val, 43, 5)
#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_6(val) vxge_vBIT(val, 51, 5)
#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_7(val) vxge_vBIT(val, 59, 5)

        u8 unused0f28[0x0f28-0x0e90];

/*0x00f28*/ u64 kdfc_w_round_robin_20;
#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_0(val) vxge_vBIT(val, 3, 5)
#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_1(val) vxge_vBIT(val, 11, 5)
#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_2(val) vxge_vBIT(val, 19, 5)
#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_3(val) vxge_vBIT(val, 27, 5)
#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_4(val) vxge_vBIT(val, 35, 5)
#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_5(val) vxge_vBIT(val, 43, 5)
#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_6(val) vxge_vBIT(val, 51, 5)
#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_7(val) vxge_vBIT(val, 59, 5)

#define VXGE_HW_WRR_FIFO_COUNT 20

        u8 unused0fc8[0x0fc8-0x0f30];

/*0x00fc8*/ u64 kdfc_w_round_robin_40;
#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_0(val) vxge_vBIT(val, 3, 5)
#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_1(val) vxge_vBIT(val, 11, 5)
#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_2(val) vxge_vBIT(val, 19, 5)
#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_3(val) vxge_vBIT(val, 27, 5)
#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_4(val) vxge_vBIT(val, 35, 5)
#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_5(val) vxge_vBIT(val, 43, 5)
#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_6(val) vxge_vBIT(val, 51, 5)
#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_7(val) vxge_vBIT(val, 59, 5)

        u8 unused1068[0x01068-0x0fd0];

/*0x01068*/ u64 kdfc_entry_type_sel_0;
#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_0(val) vxge_vBIT(val, 6, 2)
#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_1(val) vxge_vBIT(val, 14, 2)
#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_2(val) vxge_vBIT(val, 22, 2)
#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_3(val) vxge_vBIT(val, 30, 2)
#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_4(val) vxge_vBIT(val, 38, 2)
#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_5(val) vxge_vBIT(val, 46, 2)
#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_6(val) vxge_vBIT(val, 54, 2)
#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_7(val) vxge_vBIT(val, 62, 2)
/*0x01070*/ u64 kdfc_entry_type_sel_1;
#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_1_NUMBER_8(val) vxge_vBIT(val, 6, 2)
/*0x01078*/ u64 kdfc_fifo_0_ctrl;
#define VXGE_HW_KDFC_FIFO_0_CTRL_WRR_NUMBER(val) vxge_vBIT(val, 3, 5)
#define VXGE_HW_WEIGHTED_RR_SERVICE_STATES 176
#define VXGE_HW_WRR_FIFO_SERVICE_STATES 153

        u8 unused1100[0x01100-0x1080];

/*0x01100*/ u64 kdfc_fifo_17_ctrl;
#define VXGE_HW_KDFC_FIFO_17_CTRL_WRR_NUMBER(val) vxge_vBIT(val, 3, 5)

        u8 unused1600[0x01600-0x1108];

/*0x01600*/ u64 rxmac_int_status;
#define VXGE_HW_RXMAC_INT_STATUS_RXMAC_GEN_ERR_RXMAC_GEN_INT vxge_mBIT(3)
#define VXGE_HW_RXMAC_INT_STATUS_RXMAC_ECC_ERR_RXMAC_ECC_INT vxge_mBIT(7)
#define VXGE_HW_RXMAC_INT_STATUS_RXMAC_VARIOUS_ERR_RXMAC_VARIOUS_INT \
        vxge_mBIT(11)
/*0x01608*/ u64 rxmac_int_mask;
        u8 unused01618[0x01618-0x01610];

/*0x01618*/ u64 rxmac_gen_err_reg;
/*0x01620*/ u64 rxmac_gen_err_mask;
/*0x01628*/ u64 rxmac_gen_err_alarm;
/*0x01630*/ u64 rxmac_ecc_err_reg;
#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT0_RMAC_RTS_PART_SG_ERR(val) \
        vxge_vBIT(val, 0, 4)
#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT0_RMAC_RTS_PART_DB_ERR(val) \
        vxge_vBIT(val, 4, 4)
#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT1_RMAC_RTS_PART_SG_ERR(val) \
        vxge_vBIT(val, 8, 4)
#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT1_RMAC_RTS_PART_DB_ERR(val) \
        vxge_vBIT(val, 12, 4)
#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT2_RMAC_RTS_PART_SG_ERR(val) \
        vxge_vBIT(val, 16, 4)
#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT2_RMAC_RTS_PART_DB_ERR(val) \
        vxge_vBIT(val, 20, 4)
#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT0_SG_ERR(val) \
        vxge_vBIT(val, 24, 2)
#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT0_DB_ERR(val) \
        vxge_vBIT(val, 26, 2)
#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT1_SG_ERR(val) \
        vxge_vBIT(val, 28, 2)
#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT1_DB_ERR(val) \
        vxge_vBIT(val, 30, 2)
#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_VID_LKP_SG_ERR vxge_mBIT(32)
#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_VID_LKP_DB_ERR vxge_mBIT(33)
#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT0_SG_ERR vxge_mBIT(34)
#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT0_DB_ERR vxge_mBIT(35)
#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT1_SG_ERR vxge_mBIT(36)
#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT1_DB_ERR vxge_mBIT(37)
#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT2_SG_ERR vxge_mBIT(38)
#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT2_DB_ERR vxge_mBIT(39)
#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_MASK_SG_ERR(val) \
        vxge_vBIT(val, 40, 7)
#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_MASK_DB_ERR(val) \
        vxge_vBIT(val, 47, 7)
#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_LKP_SG_ERR(val) \
        vxge_vBIT(val, 54, 3)
#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_LKP_DB_ERR(val) \
        vxge_vBIT(val, 57, 3)
#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DS_LKP_SG_ERR \
        vxge_mBIT(60)
#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DS_LKP_DB_ERR \
        vxge_mBIT(61)
/*0x01638*/ u64 rxmac_ecc_err_mask;
/*0x01640*/ u64 rxmac_ecc_err_alarm;
/*0x01648*/ u64 rxmac_various_err_reg;
#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT0_FSM_ERR vxge_mBIT(0)
#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT1_FSM_ERR vxge_mBIT(1)
#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT2_FSM_ERR vxge_mBIT(2)
#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMACJ_RMACJ_FSM_ERR vxge_mBIT(3)
/*0x01650*/ u64 rxmac_various_err_mask;
/*0x01658*/ u64 rxmac_various_err_alarm;
/*0x01660*/ u64 rxmac_gen_cfg;
#define VXGE_HW_RXMAC_GEN_CFG_SCALE_RMAC_UTIL vxge_mBIT(11)
/*0x01668*/ u64 rxmac_authorize_all_addr;
#define VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(n) vxge_mBIT(n)
/*0x01670*/ u64 rxmac_authorize_all_vid;
#define VXGE_HW_RXMAC_AUTHORIZE_ALL_VID_VP(n) vxge_mBIT(n)
        u8 unused016c0[0x016c0-0x01678];

/*0x016c0*/ u64 rxmac_red_rate_repl_queue;
#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR0(val) vxge_vBIT(val, 0, 4)
#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR1(val) vxge_vBIT(val, 4, 4)
#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR2(val) vxge_vBIT(val, 8, 4)
#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR3(val) vxge_vBIT(val, 12, 4)
#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR0(val) vxge_vBIT(val, 16, 4)
#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR1(val) vxge_vBIT(val, 20, 4)
#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR2(val) vxge_vBIT(val, 24, 4)
#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR3(val) vxge_vBIT(val, 28, 4)
#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_TRICKLE_EN vxge_mBIT(35)
        u8 unused016e0[0x016e0-0x016c8];

/*0x016e0*/ u64 rxmac_cfg0_port[3];
#define VXGE_HW_RXMAC_CFG0_PORT_RMAC_EN vxge_mBIT(3)
#define VXGE_HW_RXMAC_CFG0_PORT_STRIP_FCS vxge_mBIT(7)
#define VXGE_HW_RXMAC_CFG0_PORT_DISCARD_PFRM vxge_mBIT(11)
#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_FCS_ERR vxge_mBIT(15)
#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_LONG_ERR vxge_mBIT(19)
#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_USIZED_ERR vxge_mBIT(23)
#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_LEN_MISMATCH vxge_mBIT(27)
#define VXGE_HW_RXMAC_CFG0_PORT_MAX_PYLD_LEN(val) vxge_vBIT(val, 50, 14)
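/*
 * A hedged sketch of bringing up a receive MAC port with rxmac_cfg0_port:
 * enable the RMAC, strip the FCS, and set the maximum payload length.
 * Field semantics are inferred from the macro names; "regs", "port" and
 * "max_pyld_len" are illustrative:
 *
 *	u64 val = VXGE_HW_RXMAC_CFG0_PORT_RMAC_EN |
 *		  VXGE_HW_RXMAC_CFG0_PORT_STRIP_FCS;
 *	val |= VXGE_HW_RXMAC_CFG0_PORT_MAX_PYLD_LEN(max_pyld_len);
 *	writeq(val, &regs->rxmac_cfg0_port[port]);
 */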
        u8 unused01710[0x01710-0x016f8];

/*0x01710*/ u64 rxmac_cfg2_port[3];
#define VXGE_HW_RXMAC_CFG2_PORT_PROM_EN vxge_mBIT(3)
/*0x01728*/ u64 rxmac_pause_cfg_port[3];
#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN vxge_mBIT(3)
#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN vxge_mBIT(7)
#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_ACCEL_SEND(val) vxge_vBIT(val, 9, 3)
#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_DUAL_THR vxge_mBIT(15)
#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_HIGH_PTIME(val) vxge_vBIT(val, 20, 16)
#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_IGNORE_PF_FCS_ERR vxge_mBIT(39)
#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_IGNORE_PF_LEN_ERR vxge_mBIT(43)
#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_LIMITER_EN vxge_mBIT(47)
#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_MAX_LIMIT(val) vxge_vBIT(val, 48, 8)
#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_PERMIT_RATEMGMT_CTRL vxge_mBIT(59)
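/*
 * A hedged sketch of enabling 802.3x pause on a port via
 * rxmac_pause_cfg_port: generation and reception enabled and a high pause
 * time programmed.  Semantics are inferred from the macro names; "regs"
 * and "port" are illustrative:
 *
 *	u64 val = VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN |
 *		  VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
 *	val |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_HIGH_PTIME(0xffff);
 *	writeq(val, &regs->rxmac_pause_cfg_port[port]);
 */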
        u8 unused01758[0x01758-0x01740];

/*0x01758*/ u64 rxmac_red_cfg0_port[3];
#define VXGE_HW_RXMAC_RED_CFG0_PORT_RED_EN_VP(n) vxge_mBIT(n)
/*0x01770*/ u64 rxmac_red_cfg1_port[3];
#define VXGE_HW_RXMAC_RED_CFG1_PORT_FINE_EN vxge_mBIT(3)
#define VXGE_HW_RXMAC_RED_CFG1_PORT_RED_EN_REPL_QUEUE vxge_mBIT(11)
/*0x01788*/ u64 rxmac_red_cfg2_port[3];
#define VXGE_HW_RXMAC_RED_CFG2_PORT_TRICKLE_EN_VP(n) vxge_mBIT(n)
/*0x017a0*/ u64 rxmac_link_util_port[3];
#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_RMAC_UTILIZATION(val) \
        vxge_vBIT(val, 1, 7)
#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_UTIL_CFG(val) vxge_vBIT(val, 8, 4)
#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_RMAC_FRAC_UTIL(val) \
        vxge_vBIT(val, 12, 4)
#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_PKT_WEIGHT(val) vxge_vBIT(val, 16, 4)
#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_RMAC_SCALE_FACTOR vxge_mBIT(23)
        u8 unused017d0[0x017d0-0x017b8];

/*0x017d0*/ u64 rxmac_status_port[3];
#define VXGE_HW_RXMAC_STATUS_PORT_RMAC_RX_FRM_RCVD vxge_mBIT(3)
        u8 unused01800[0x01800-0x017e8];

/*0x01800*/ u64 rxmac_rx_pa_cfg0;
#define VXGE_HW_RXMAC_RX_PA_CFG0_IGNORE_FRAME_ERR vxge_mBIT(3)
#define VXGE_HW_RXMAC_RX_PA_CFG0_SUPPORT_SNAP_AB_N vxge_mBIT(7)
#define VXGE_HW_RXMAC_RX_PA_CFG0_SEARCH_FOR_HAO vxge_mBIT(18)
#define VXGE_HW_RXMAC_RX_PA_CFG0_SUPPORT_MOBILE_IPV6_HDRS vxge_mBIT(19)
#define VXGE_HW_RXMAC_RX_PA_CFG0_IPV6_STOP_SEARCHING vxge_mBIT(23)
#define VXGE_HW_RXMAC_RX_PA_CFG0_NO_PS_IF_UNKNOWN vxge_mBIT(27)
#define VXGE_HW_RXMAC_RX_PA_CFG0_SEARCH_FOR_ETYPE vxge_mBIT(35)
#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_L3_CSUM_ERR vxge_mBIT(39)
#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_OFFLD_FRM_IF_L3_CSUM_ERR vxge_mBIT(43)
#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_L4_CSUM_ERR vxge_mBIT(47)
#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_OFFLD_FRM_IF_L4_CSUM_ERR vxge_mBIT(51)
#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_RPA_ERR vxge_mBIT(55)
#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_OFFLD_FRM_IF_RPA_ERR vxge_mBIT(59)
#define VXGE_HW_RXMAC_RX_PA_CFG0_JUMBO_SNAP_EN vxge_mBIT(63)
/*0x01808*/ u64 rxmac_rx_pa_cfg1;
#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV4_TCP_INCL_PH vxge_mBIT(3)
#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV6_TCP_INCL_PH vxge_mBIT(7)
#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV4_UDP_INCL_PH vxge_mBIT(11)
#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV6_UDP_INCL_PH vxge_mBIT(15)
#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_L4_INCL_CF vxge_mBIT(19)
#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_STRIP_VLAN_TAG vxge_mBIT(23)
        u8 unused01828[0x01828-0x01810];

/*0x01828*/ u64 rts_mgr_cfg0;
#define VXGE_HW_RTS_MGR_CFG0_RTS_DP_SP_PRIORITY vxge_mBIT(3)
#define VXGE_HW_RTS_MGR_CFG0_FLEX_L4PRTCL_VALUE(val) vxge_vBIT(val, 24, 8)
#define VXGE_HW_RTS_MGR_CFG0_ICMP_TRASH vxge_mBIT(35)
#define VXGE_HW_RTS_MGR_CFG0_TCPSYN_TRASH vxge_mBIT(39)
#define VXGE_HW_RTS_MGR_CFG0_ZL4PYLD_TRASH vxge_mBIT(43)
#define VXGE_HW_RTS_MGR_CFG0_L4PRTCL_TCP_TRASH vxge_mBIT(47)
#define VXGE_HW_RTS_MGR_CFG0_L4PRTCL_UDP_TRASH vxge_mBIT(51)
#define VXGE_HW_RTS_MGR_CFG0_L4PRTCL_FLEX_TRASH vxge_mBIT(55)
#define VXGE_HW_RTS_MGR_CFG0_IPFRAG_TRASH vxge_mBIT(59)
/*0x01830*/ u64 rts_mgr_cfg1;
#define VXGE_HW_RTS_MGR_CFG1_DA_ACTIVE_TABLE vxge_mBIT(3)
#define VXGE_HW_RTS_MGR_CFG1_PN_ACTIVE_TABLE vxge_mBIT(7)
/*0x01838*/ u64 rts_mgr_criteria_priority;
#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ETYPE(val) vxge_vBIT(val, 5, 3)
#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ICMP_TCPSYN(val) vxge_vBIT(val, 9, 3)
#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_L4PN(val) vxge_vBIT(val, 13, 3)
#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_RANGE_L4PN(val) vxge_vBIT(val, 17, 3)
#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_RTH_IT(val) vxge_vBIT(val, 21, 3)
#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_DS(val) vxge_vBIT(val, 25, 3)
#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_QOS(val) vxge_vBIT(val, 29, 3)
#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ZL4PYLD(val) vxge_vBIT(val, 33, 3)
#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_L4PRTCL(val) vxge_vBIT(val, 37, 3)
/*0x01840*/ u64 rts_mgr_da_pause_cfg;
#define VXGE_HW_RTS_MGR_DA_PAUSE_CFG_VPATH_VECTOR(val) vxge_vBIT(val, 0, 17)
/*0x01848*/ u64 rts_mgr_da_slow_proto_cfg;
#define VXGE_HW_RTS_MGR_DA_SLOW_PROTO_CFG_VPATH_VECTOR(val) \
        vxge_vBIT(val, 0, 17)
        u8 unused01890[0x01890-0x01850];
/*0x01890*/ u64 rts_mgr_cbasin_cfg;
        u8 unused01968[0x01968-0x01898];

/*0x01968*/ u64 dbg_stat_rx_any_frms;
#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_PORT0_RX_ANY_FRMS(val) vxge_vBIT(val, 0, 8)
#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_PORT1_RX_ANY_FRMS(val) vxge_vBIT(val, 8, 8)
#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_PORT2_RX_ANY_FRMS(val) \
        vxge_vBIT(val, 16, 8)
        u8 unused01a00[0x01a00-0x01970];

/*0x01a00*/ u64 rxmac_red_rate_vp[17];
#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR0(val) vxge_vBIT(val, 0, 4)
#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR1(val) vxge_vBIT(val, 4, 4)
#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR2(val) vxge_vBIT(val, 8, 4)
#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR3(val) vxge_vBIT(val, 12, 4)
#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR0(val) vxge_vBIT(val, 16, 4)
#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR1(val) vxge_vBIT(val, 20, 4)
#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR2(val) vxge_vBIT(val, 24, 4)
#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR3(val) vxge_vBIT(val, 28, 4)
        u8 unused01e00[0x01e00-0x01a88];

/*0x01e00*/ u64 xgmac_int_status;
#define VXGE_HW_XGMAC_INT_STATUS_XMAC_GEN_ERR_XMAC_GEN_INT vxge_mBIT(3)
#define VXGE_HW_XGMAC_INT_STATUS_XMAC_LINK_ERR_PORT0_XMAC_LINK_INT_PORT0 \
        vxge_mBIT(7)
#define VXGE_HW_XGMAC_INT_STATUS_XMAC_LINK_ERR_PORT1_XMAC_LINK_INT_PORT1 \
        vxge_mBIT(11)
#define VXGE_HW_XGMAC_INT_STATUS_XGXS_GEN_ERR_XGXS_GEN_INT vxge_mBIT(15)
#define VXGE_HW_XGMAC_INT_STATUS_ASIC_NTWK_ERR_ASIC_NTWK_INT vxge_mBIT(19)
#define VXGE_HW_XGMAC_INT_STATUS_ASIC_GPIO_ERR_ASIC_GPIO_INT vxge_mBIT(23)
/*0x01e08*/ u64 xgmac_int_mask;
/*0x01e10*/ u64 xmac_gen_err_reg;
#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT0_ACTOR_CHURN_DETECTED \
        vxge_mBIT(7)
#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT0_PARTNER_CHURN_DETECTED \
        vxge_mBIT(11)
#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT0_RECEIVED_LACPDU vxge_mBIT(15)
#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT1_ACTOR_CHURN_DETECTED \
        vxge_mBIT(19)
#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT1_PARTNER_CHURN_DETECTED \
        vxge_mBIT(23)
#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT1_RECEIVED_LACPDU vxge_mBIT(27)
#define VXGE_HW_XMAC_GEN_ERR_REG_XLCM_LAG_FAILOVER_DETECTED vxge_mBIT(31)
#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE0_SG_ERR(val) \
        vxge_vBIT(val, 40, 2)
#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE0_DB_ERR(val) \
        vxge_vBIT(val, 42, 2)
#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE1_SG_ERR(val) \
        vxge_vBIT(val, 44, 2)
#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE1_DB_ERR(val) \
        vxge_vBIT(val, 46, 2)
#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE2_SG_ERR(val) \
        vxge_vBIT(val, 48, 2)
#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE2_DB_ERR(val) \
        vxge_vBIT(val, 50, 2)
#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE3_SG_ERR(val) \
        vxge_vBIT(val, 52, 2)
#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE3_DB_ERR(val) \
        vxge_vBIT(val, 54, 2)
#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE4_SG_ERR(val) \
        vxge_vBIT(val, 56, 2)
#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE4_DB_ERR(val) \
        vxge_vBIT(val, 58, 2)
#define VXGE_HW_XMAC_GEN_ERR_REG_XMACJ_XMAC_FSM_ERR vxge_mBIT(63)
/*0x01e18*/ u64 xmac_gen_err_mask;
/*0x01e20*/ u64 xmac_gen_err_alarm;
/*0x01e28*/ u64 xmac_link_err_port_reg[2];
#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_DOWN vxge_mBIT(3)
#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_UP vxge_mBIT(7)
#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_WENT_DOWN vxge_mBIT(11)
#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_WENT_UP vxge_mBIT(15)
#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_REAFFIRMED_FAULT \
        vxge_mBIT(19)
#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_REAFFIRMED_OK vxge_mBIT(23)
#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_LINK_DOWN vxge_mBIT(27)
#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_LINK_UP vxge_mBIT(31)
#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_RATEMGMT_RATE_CHANGE vxge_mBIT(35)
#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_RATEMGMT_LASI_INV vxge_mBIT(39)
#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMDIO_MDIO_MGR_ACCESS_COMPLETE \
        vxge_mBIT(47)
/*0x01e30*/ u64 xmac_link_err_port_mask[2];
/*0x01e38*/ u64 xmac_link_err_port_alarm[2];
/*0x01e58*/ u64 xgxs_gen_err_reg;
#define VXGE_HW_XGXS_GEN_ERR_REG_XGXS_XGXS_FSM_ERR vxge_mBIT(63)
/*0x01e60*/ u64 xgxs_gen_err_mask;
/*0x01e68*/ u64 xgxs_gen_err_alarm;
/*0x01e70*/ u64 asic_ntwk_err_reg;
#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_DOWN vxge_mBIT(3)
#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_UP vxge_mBIT(7)
#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_WENT_DOWN vxge_mBIT(11)
#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_WENT_UP vxge_mBIT(15)
#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT vxge_mBIT(19)
#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK vxge_mBIT(23)
/*0x01e78*/ u64 asic_ntwk_err_mask;
/*0x01e80*/ u64 asic_ntwk_err_alarm;
/*0x01e88*/ u64 asic_gpio_err_reg;
#define VXGE_HW_ASIC_GPIO_ERR_REG_XMACJ_GPIO_INT(n) vxge_mBIT(n)
/*0x01e90*/ u64 asic_gpio_err_mask;
/*0x01e98*/ u64 asic_gpio_err_alarm;
/*0x01ea0*/ u64 xgmac_gen_status;
#define VXGE_HW_XGMAC_GEN_STATUS_XMACJ_NTWK_OK vxge_mBIT(3)
#define VXGE_HW_XGMAC_GEN_STATUS_XMACJ_NTWK_DATA_RATE vxge_mBIT(11)
/*0x01ea8*/ u64 xgmac_gen_fw_memo_status;
#define VXGE_HW_XGMAC_GEN_FW_MEMO_STATUS_XMACJ_EVENTS_PENDING(val) \
        vxge_vBIT(val, 0, 17)
/*0x01eb0*/ u64 xgmac_gen_fw_memo_mask;
#define VXGE_HW_XGMAC_GEN_FW_MEMO_MASK_MASK(val) vxge_vBIT(val, 0, 64)
/*0x01eb8*/ u64 xgmac_gen_fw_vpath_to_vsport_status;
#define VXGE_HW_XGMAC_GEN_FW_VPATH_TO_VSPORT_STATUS_XMACJ_EVENTS_PENDING(val) \
        vxge_vBIT(val, 0, 17)
/*0x01ec0*/ u64 xgmac_main_cfg_port[2];
#define VXGE_HW_XGMAC_MAIN_CFG_PORT_PORT_EN vxge_mBIT(3)
        u8 unused01f40[0x01f40-0x01ed0];

/*0x01f40*/ u64 xmac_gen_cfg;
#define VXGE_HW_XMAC_GEN_CFG_RATEMGMT_MAC_RATE_SEL(val) vxge_vBIT(val, 2, 2)
#define VXGE_HW_XMAC_GEN_CFG_TX_HEAD_DROP_WHEN_FAULT vxge_mBIT(7)
#define VXGE_HW_XMAC_GEN_CFG_FAULT_BEHAVIOUR vxge_mBIT(27)
#define VXGE_HW_XMAC_GEN_CFG_PERIOD_NTWK_UP(val) vxge_vBIT(val, 28, 4)
#define VXGE_HW_XMAC_GEN_CFG_PERIOD_NTWK_DOWN(val) vxge_vBIT(val, 32, 4)
/*0x01f48*/ u64 xmac_timestamp;
#define VXGE_HW_XMAC_TIMESTAMP_EN vxge_mBIT(3)
#define VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(val) vxge_vBIT(val, 6, 2)
#define VXGE_HW_XMAC_TIMESTAMP_INTERVAL(val) vxge_vBIT(val, 12, 4)
#define VXGE_HW_XMAC_TIMESTAMP_TIMER_RESTART vxge_mBIT(19)
#define VXGE_HW_XMAC_TIMESTAMP_XMACJ_ROLLOVER_CNT(val) vxge_vBIT(val, 32, 16)
/*0x01f50*/ u64 xmac_stats_gen_cfg;
#define VXGE_HW_XMAC_STATS_GEN_CFG_PRTAGGR_CUM_TIMER(val) vxge_vBIT(val, 4, 4)
#define VXGE_HW_XMAC_STATS_GEN_CFG_VPATH_CUM_TIMER(val) vxge_vBIT(val, 8, 4)
#define VXGE_HW_XMAC_STATS_GEN_CFG_VLAN_HANDLING vxge_mBIT(15)
/*0x01f58*/ u64 xmac_stats_sys_cmd;
#define VXGE_HW_XMAC_STATS_SYS_CMD_OP(val) vxge_vBIT(val, 5, 3)
#define VXGE_HW_XMAC_STATS_SYS_CMD_STROBE vxge_mBIT(15)
#define VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(val) vxge_vBIT(val, 27, 5)
#define VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(val) vxge_vBIT(val, 32, 8)
/*0x01f60*/ u64 xmac_stats_sys_data;
#define VXGE_HW_XMAC_STATS_SYS_DATA_XSMGR_DATA(val) vxge_vBIT(val, 0, 64)
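/*
 * xmac_stats_sys_cmd/xmac_stats_sys_data look like an indirect access
 * pair: write an operation, location and offset with the strobe bit set,
 * wait for the strobe to clear, then read the data register.  This is a
 * hedged sketch of that presumed sequence, not the driver's actual
 * routine ("regs", "op", "loc" and "offset" are illustrative):
 *
 *	u64 cmd = VXGE_HW_XMAC_STATS_SYS_CMD_OP(op) |
 *		  VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(loc) |
 *		  VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset) |
 *		  VXGE_HW_XMAC_STATS_SYS_CMD_STROBE;
 *	writeq(cmd, &regs->xmac_stats_sys_cmd);
 *	while (readq(&regs->xmac_stats_sys_cmd) &
 *	       VXGE_HW_XMAC_STATS_SYS_CMD_STROBE)
 *		cpu_relax();
 *	stat = readq(&regs->xmac_stats_sys_data);
 */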
        u8 unused01f80[0x01f80-0x01f68];

/*0x01f80*/ u64 asic_ntwk_ctrl;
#define VXGE_HW_ASIC_NTWK_CTRL_REQ_TEST_NTWK vxge_mBIT(3)
#define VXGE_HW_ASIC_NTWK_CTRL_PORT0_REQ_TEST_PORT vxge_mBIT(11)
#define VXGE_HW_ASIC_NTWK_CTRL_PORT1_REQ_TEST_PORT vxge_mBIT(15)
/*0x01f88*/ u64 asic_ntwk_cfg_show_port_info;
#define VXGE_HW_ASIC_NTWK_CFG_SHOW_PORT_INFO_VP(n) vxge_mBIT(n)
/*0x01f90*/ u64 asic_ntwk_cfg_port_num;
#define VXGE_HW_ASIC_NTWK_CFG_PORT_NUM_VP(n) vxge_mBIT(n)
/*0x01f98*/ u64 xmac_cfg_port[3];
#define VXGE_HW_XMAC_CFG_PORT_XGMII_LOOPBACK vxge_mBIT(3)
#define VXGE_HW_XMAC_CFG_PORT_XGMII_REVERSE_LOOPBACK vxge_mBIT(7)
#define VXGE_HW_XMAC_CFG_PORT_XGMII_TX_BEHAV vxge_mBIT(11)
#define VXGE_HW_XMAC_CFG_PORT_XGMII_RX_BEHAV vxge_mBIT(15)
/*0x01fb0*/ u64 xmac_station_addr_port[2];
#define VXGE_HW_XMAC_STATION_ADDR_PORT_MAC_ADDR(val) vxge_vBIT(val, 0, 48)
        u8 unused02020[0x02020-0x01fc0];

/*0x02020*/ u64 lag_cfg;
#define VXGE_HW_LAG_CFG_EN vxge_mBIT(3)
#define VXGE_HW_LAG_CFG_MODE(val) vxge_vBIT(val, 6, 2)
#define VXGE_HW_LAG_CFG_TX_DISCARD_BEHAV vxge_mBIT(11)
#define VXGE_HW_LAG_CFG_RX_DISCARD_BEHAV vxge_mBIT(15)
#define VXGE_HW_LAG_CFG_PREF_INDIV_PORT_NUM vxge_mBIT(19)
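/*
 * A hedged sketch of turning on link aggregation with lag_cfg: enable the
 * LAG block and select a mode.  The 2-bit mode encoding is not spelled
 * out in this header, so "lag_mode" below is purely illustrative, as is
 * "regs":
 *
 *	u64 val = VXGE_HW_LAG_CFG_EN;
 *	val |= VXGE_HW_LAG_CFG_MODE(lag_mode);
 *	writeq(val, &regs->lag_cfg);
 */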
1883/*0x02028*/ u64 lag_status;
1884#define VXGE_HW_LAG_STATUS_XLCM_WAITING_TO_FAILBACK vxge_mBIT(3)
1885#define VXGE_HW_LAG_STATUS_XLCM_TIMER_VAL_COLD_FAILOVER(val) \
1886 vxge_vBIT(val, 8, 8)
1887/*0x02030*/ u64 lag_active_passive_cfg;
1888#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_HOT_STANDBY vxge_mBIT(3)
1889#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_LACP_DECIDES vxge_mBIT(7)
1890#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_PREF_ACTIVE_PORT_NUM vxge_mBIT(11)
1891#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_AUTO_FAILBACK vxge_mBIT(15)
1892#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_FAILBACK_EN vxge_mBIT(19)
1893#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_COLD_FAILOVER_TIMEOUT(val) \
1894 vxge_vBIT(val, 32, 16)
1895 u8 unused02040[0x02040-0x02038];
1896
1897/*0x02040*/ u64 lag_lacp_cfg;
1898#define VXGE_HW_LAG_LACP_CFG_EN vxge_mBIT(3)
1899#define VXGE_HW_LAG_LACP_CFG_LACP_BEGIN vxge_mBIT(7)
1900#define VXGE_HW_LAG_LACP_CFG_DISCARD_LACP vxge_mBIT(11)
1901#define VXGE_HW_LAG_LACP_CFG_LIBERAL_LEN_CHK vxge_mBIT(15)
1902/*0x02048*/ u64 lag_timer_cfg_1;
1903#define VXGE_HW_LAG_TIMER_CFG_1_FAST_PER(val) vxge_vBIT(val, 0, 16)
1904#define VXGE_HW_LAG_TIMER_CFG_1_SLOW_PER(val) vxge_vBIT(val, 16, 16)
1905#define VXGE_HW_LAG_TIMER_CFG_1_SHORT_TIMEOUT(val) vxge_vBIT(val, 32, 16)
1906#define VXGE_HW_LAG_TIMER_CFG_1_LONG_TIMEOUT(val) vxge_vBIT(val, 48, 16)
1907/*0x02050*/ u64 lag_timer_cfg_2;
1908#define VXGE_HW_LAG_TIMER_CFG_2_CHURN_DET(val) vxge_vBIT(val, 0, 16)
1909#define VXGE_HW_LAG_TIMER_CFG_2_AGGR_WAIT(val) vxge_vBIT(val, 16, 16)
1910#define VXGE_HW_LAG_TIMER_CFG_2_SHORT_TIMER_SCALE(val) vxge_vBIT(val, 32, 16)
1911#define VXGE_HW_LAG_TIMER_CFG_2_LONG_TIMER_SCALE(val) vxge_vBIT(val, 48, 16)
1912/*0x02058*/ u64 lag_sys_id;
1913#define VXGE_HW_LAG_SYS_ID_ADDR(val) vxge_vBIT(val, 0, 48)
1914#define VXGE_HW_LAG_SYS_ID_USE_PORT_ADDR vxge_mBIT(51)
1915#define VXGE_HW_LAG_SYS_ID_ADDR_SEL vxge_mBIT(55)
1916/*0x02060*/ u64 lag_sys_cfg;
1917#define VXGE_HW_LAG_SYS_CFG_SYS_PRI(val) vxge_vBIT(val, 0, 16)
1918 u8 unused02070[0x02070-0x02068];
1919
1920/*0x02070*/ u64 lag_aggr_addr_cfg[2];
1921#define VXGE_HW_LAG_AGGR_ADDR_CFG_ADDR(val) vxge_vBIT(val, 0, 48)
1922#define VXGE_HW_LAG_AGGR_ADDR_CFG_USE_PORT_ADDR vxge_mBIT(51)
1923#define VXGE_HW_LAG_AGGR_ADDR_CFG_ADDR_SEL vxge_mBIT(55)
1924/*0x02080*/ u64 lag_aggr_id_cfg[2];
1925#define VXGE_HW_LAG_AGGR_ID_CFG_ID(val) vxge_vBIT(val, 0, 16)
1926/*0x02090*/ u64 lag_aggr_admin_key[2];
1927#define VXGE_HW_LAG_AGGR_ADMIN_KEY_KEY(val) vxge_vBIT(val, 0, 16)
1928/*0x020a0*/ u64 lag_aggr_alt_admin_key;
1929#define VXGE_HW_LAG_AGGR_ALT_ADMIN_KEY_KEY(val) vxge_vBIT(val, 0, 16)
1930#define VXGE_HW_LAG_AGGR_ALT_ADMIN_KEY_ALT_AGGR vxge_mBIT(19)
1931/*0x020a8*/ u64 lag_aggr_oper_key[2];
1932#define VXGE_HW_LAG_AGGR_OPER_KEY_LAGC_KEY(val) vxge_vBIT(val, 0, 16)
1933/*0x020b8*/ u64 lag_aggr_partner_sys_id[2];
1934#define VXGE_HW_LAG_AGGR_PARTNER_SYS_ID_LAGC_ADDR(val) vxge_vBIT(val, 0, 48)
1935/*0x020c8*/ u64 lag_aggr_partner_info[2];
1936#define VXGE_HW_LAG_AGGR_PARTNER_INFO_LAGC_SYS_PRI(val) vxge_vBIT(val, 0, 16)
1937#define VXGE_HW_LAG_AGGR_PARTNER_INFO_LAGC_OPER_KEY(val) \
1938 vxge_vBIT(val, 16, 16)
1939/*0x020d8*/ u64 lag_aggr_state[2];
1940#define VXGE_HW_LAG_AGGR_STATE_LAGC_TX vxge_mBIT(3)
1941#define VXGE_HW_LAG_AGGR_STATE_LAGC_RX vxge_mBIT(7)
1942#define VXGE_HW_LAG_AGGR_STATE_LAGC_READY vxge_mBIT(11)
1943#define VXGE_HW_LAG_AGGR_STATE_LAGC_INDIVIDUAL vxge_mBIT(15)
1944 u8 unused020f0[0x020f0-0x020e8];
1945
1946/*0x020f0*/ u64 lag_port_cfg[2];
1947#define VXGE_HW_LAG_PORT_CFG_EN vxge_mBIT(3)
1948#define VXGE_HW_LAG_PORT_CFG_DISCARD_SLOW_PROTO vxge_mBIT(7)
1949#define VXGE_HW_LAG_PORT_CFG_HOST_CHOSEN_AGGR vxge_mBIT(11)
1950#define VXGE_HW_LAG_PORT_CFG_DISCARD_UNKNOWN_SLOW_PROTO vxge_mBIT(15)
1951/*0x02100*/ u64 lag_port_actor_admin_cfg[2];
1952#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_PORT_NUM(val) vxge_vBIT(val, 0, 16)
1953#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_PORT_PRI(val) vxge_vBIT(val, 16, 16)
1954#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_KEY_10G(val) vxge_vBIT(val, 32, 16)
1955#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_KEY_1G(val) vxge_vBIT(val, 48, 16)
1956/*0x02110*/ u64 lag_port_actor_admin_state[2];
1957#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_LACP_ACTIVITY vxge_mBIT(3)
1958#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_LACP_TIMEOUT vxge_mBIT(7)
1959#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_AGGREGATION vxge_mBIT(11)
1960#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_SYNCHRONIZATION vxge_mBIT(15)
1961#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_COLLECTING vxge_mBIT(19)
1962#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_DISTRIBUTING vxge_mBIT(23)
1963#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_DEFAULTED vxge_mBIT(27)
1964#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_EXPIRED vxge_mBIT(31)
/*0x02120*/ u64 lag_port_partner_admin_sys_id[2];
#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_SYS_ID_ADDR(val) vxge_vBIT(val, 0, 48)
/*0x02130*/ u64 lag_port_partner_admin_cfg[2];
#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_SYS_PRI(val) vxge_vBIT(val, 0, 16)
#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_KEY(val) vxge_vBIT(val, 16, 16)
#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_PORT_NUM(val) \
 vxge_vBIT(val, 32, 16)
#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_PORT_PRI(val) \
 vxge_vBIT(val, 48, 16)
/*0x02140*/ u64 lag_port_partner_admin_state[2];
#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_LACP_ACTIVITY vxge_mBIT(3)
#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_LACP_TIMEOUT vxge_mBIT(7)
#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_AGGREGATION vxge_mBIT(11)
#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_SYNCHRONIZATION vxge_mBIT(15)
#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_COLLECTING vxge_mBIT(19)
#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_DISTRIBUTING vxge_mBIT(23)
#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_DEFAULTED vxge_mBIT(27)
#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_EXPIRED vxge_mBIT(31)
/*0x02150*/ u64 lag_port_to_aggr[2];
#define VXGE_HW_LAG_PORT_TO_AGGR_LAGC_AGGR_ID(val) vxge_vBIT(val, 0, 16)
#define VXGE_HW_LAG_PORT_TO_AGGR_LAGC_AGGR_VLD_ID vxge_mBIT(19)
/*0x02160*/ u64 lag_port_actor_oper_key[2];
#define VXGE_HW_LAG_PORT_ACTOR_OPER_KEY_LAGC_KEY(val) vxge_vBIT(val, 0, 16)
/*0x02170*/ u64 lag_port_actor_oper_state[2];
#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_LACP_ACTIVITY vxge_mBIT(3)
#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_LACP_TIMEOUT vxge_mBIT(7)
#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_AGGREGATION vxge_mBIT(11)
#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_SYNCHRONIZATION vxge_mBIT(15)
#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_COLLECTING vxge_mBIT(19)
#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_DISTRIBUTING vxge_mBIT(23)
#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_DEFAULTED vxge_mBIT(27)
#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_EXPIRED vxge_mBIT(31)
/*0x02180*/ u64 lag_port_partner_oper_sys_id[2];
#define VXGE_HW_LAG_PORT_PARTNER_OPER_SYS_ID_LAGC_ADDR(val) \
 vxge_vBIT(val, 0, 48)
/*0x02190*/ u64 lag_port_partner_oper_info[2];
#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_SYS_PRI(val) \
 vxge_vBIT(val, 0, 16)
#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_KEY(val) \
 vxge_vBIT(val, 16, 16)
#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_PORT_NUM(val) \
 vxge_vBIT(val, 32, 16)
#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_PORT_PRI(val) \
 vxge_vBIT(val, 48, 16)
/*0x021a0*/ u64 lag_port_partner_oper_state[2];
#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_LACP_ACTIVITY vxge_mBIT(3)
#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_LACP_TIMEOUT vxge_mBIT(7)
#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_AGGREGATION vxge_mBIT(11)
#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_SYNCHRONIZATION \
 vxge_mBIT(15)
#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_COLLECTING vxge_mBIT(19)
#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_DISTRIBUTING vxge_mBIT(23)
#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_DEFAULTED vxge_mBIT(27)
#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_EXPIRED vxge_mBIT(31)
/*0x021b0*/ u64 lag_port_state_vars[2];
#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_READY vxge_mBIT(3)
#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_SELECTED(val) vxge_vBIT(val, 6, 2)
#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_AGGR_NUM vxge_mBIT(11)
#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PORT_MOVED vxge_mBIT(15)
#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PORT_ENABLED vxge_mBIT(18)
#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PORT_DISABLED vxge_mBIT(19)
#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_NTT vxge_mBIT(23)
#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_CHURN vxge_mBIT(27)
#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_CHURN vxge_mBIT(31)
#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_INFO_LEN_MISMATCH \
 vxge_mBIT(32)
#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_INFO_LEN_MISMATCH \
 vxge_mBIT(33)
#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_COLL_INFO_LEN_MISMATCH vxge_mBIT(34)
#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_TERM_INFO_LEN_MISMATCH vxge_mBIT(35)
#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_RX_FSM_STATE(val) vxge_vBIT(val, 37, 3)
#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_MUX_FSM_STATE(val) \
 vxge_vBIT(val, 41, 3)
#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_MUX_REASON(val) vxge_vBIT(val, 44, 4)
#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_CHURN_STATE vxge_mBIT(54)
#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_CHURN_STATE vxge_mBIT(55)
#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_CHURN_COUNT(val) \
 vxge_vBIT(val, 56, 4)
#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_CHURN_COUNT(val) \
 vxge_vBIT(val, 60, 4)
/*0x021c0*/ u64 lag_port_timer_cntr[2];
#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_CURRENT_WHILE(val) vxge_vBIT(val, 0, 8)
#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_PERIODIC_WHILE(val) \
 vxge_vBIT(val, 8, 8)
#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_WAIT_WHILE(val) vxge_vBIT(val, 16, 8)
#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_TX_LACP(val) vxge_vBIT(val, 24, 8)
#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_ACTOR_SYNC_TRANSITION_COUNT(val) \
 vxge_vBIT(val, 32, 8)
#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_PARTNER_SYNC_TRANSITION_COUNT(val) \
 vxge_vBIT(val, 40, 8)
#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_ACTOR_CHANGE_COUNT(val) \
 vxge_vBIT(val, 48, 8)
#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_PARTNER_CHANGE_COUNT(val) \
 vxge_vBIT(val, 56, 8)
 u8 unused02208[0x02700-0x021d0];

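/*
 * Layout convention for the fault blocks that follow: each unit exposes a
 * *_reg register that latches individual error events, a *_mask register
 * that gates those events out of the unit's *_int_status summary, and a
 * *_alarm register for the alarm interrupt path. A minimal handler sketch
 * under that assumption (vp_reg being a hypothetical mapped pointer to
 * this structure, and write-1-to-clear semantics assumed):
 *
 *	u64 val64 = readq(&vp_reg->pda_alarm_reg);
 *	if (val64 & VXGE_HW_PDA_ALARM_REG_PDA_SM_ERR)
 *		handle_pda_sm_err();	(hypothetical helper)
 *	writeq(val64, &vp_reg->pda_alarm_reg);
 */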
/*0x02700*/ u64 rtdma_int_status;
#define VXGE_HW_RTDMA_INT_STATUS_PDA_ALARM_PDA_INT vxge_mBIT(1)
#define VXGE_HW_RTDMA_INT_STATUS_PCC_ERROR_PCC_INT vxge_mBIT(2)
#define VXGE_HW_RTDMA_INT_STATUS_LSO_ERROR_LSO_INT vxge_mBIT(4)
#define VXGE_HW_RTDMA_INT_STATUS_SM_ERROR_SM_INT vxge_mBIT(5)
/*0x02708*/ u64 rtdma_int_mask;
/*0x02710*/ u64 pda_alarm_reg;
#define VXGE_HW_PDA_ALARM_REG_PDA_HSC_FIFO_ERR vxge_mBIT(0)
#define VXGE_HW_PDA_ALARM_REG_PDA_SM_ERR vxge_mBIT(1)
/*0x02718*/ u64 pda_alarm_mask;
/*0x02720*/ u64 pda_alarm_alarm;
/*0x02728*/ u64 pcc_error_reg;
#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FRM_BUF_SBE(n) vxge_mBIT(n)
#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_TXDO_SBE(n) vxge_mBIT(n)
#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FRM_BUF_DBE(n) vxge_mBIT(n)
#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_TXDO_DBE(n) vxge_mBIT(n)
#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FSM_ERR_ALARM(n) vxge_mBIT(n)
#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_SERR(n) vxge_mBIT(n)
/*0x02730*/ u64 pcc_error_mask;
/*0x02738*/ u64 pcc_error_alarm;
/*0x02740*/ u64 lso_error_reg;
#define VXGE_HW_LSO_ERROR_REG_PCC_LSO_ABORT(n) vxge_mBIT(n)
#define VXGE_HW_LSO_ERROR_REG_PCC_LSO_FSM_ERR_ALARM(n) vxge_mBIT(n)
/*0x02748*/ u64 lso_error_mask;
/*0x02750*/ u64 lso_error_alarm;
/*0x02758*/ u64 sm_error_reg;
#define VXGE_HW_SM_ERROR_REG_SM_FSM_ERR_ALARM vxge_mBIT(15)
/*0x02760*/ u64 sm_error_mask;
/*0x02768*/ u64 sm_error_alarm;

 u8 unused027a8[0x027a8-0x02770];

/*0x027a8*/ u64 txd_ownership_ctrl;
#define VXGE_HW_TXD_OWNERSHIP_CTRL_KEEP_OWNERSHIP vxge_mBIT(7)
/*0x027b0*/ u64 pcc_cfg;
#define VXGE_HW_PCC_CFG_PCC_ENABLE(n) vxge_mBIT(n)
#define VXGE_HW_PCC_CFG_PCC_ECC_ENABLE_N(n) vxge_mBIT(n)
/*0x027b8*/ u64 pcc_control;
#define VXGE_HW_PCC_CONTROL_FE_ENABLE(val) vxge_vBIT(val, 6, 2)
#define VXGE_HW_PCC_CONTROL_EARLY_ASSIGN_EN vxge_mBIT(15)
#define VXGE_HW_PCC_CONTROL_UNBLOCK_DB_ERR vxge_mBIT(31)
/*0x027c0*/ u64 pda_status1;
#define VXGE_HW_PDA_STATUS1_PDA_WRAP_0_CTR(val) vxge_vBIT(val, 4, 4)
#define VXGE_HW_PDA_STATUS1_PDA_WRAP_1_CTR(val) vxge_vBIT(val, 12, 4)
#define VXGE_HW_PDA_STATUS1_PDA_WRAP_2_CTR(val) vxge_vBIT(val, 20, 4)
#define VXGE_HW_PDA_STATUS1_PDA_WRAP_3_CTR(val) vxge_vBIT(val, 28, 4)
#define VXGE_HW_PDA_STATUS1_PDA_WRAP_4_CTR(val) vxge_vBIT(val, 36, 4)
#define VXGE_HW_PDA_STATUS1_PDA_WRAP_5_CTR(val) vxge_vBIT(val, 44, 4)
#define VXGE_HW_PDA_STATUS1_PDA_WRAP_6_CTR(val) vxge_vBIT(val, 52, 4)
#define VXGE_HW_PDA_STATUS1_PDA_WRAP_7_CTR(val) vxge_vBIT(val, 60, 4)
/*0x027c8*/ u64 rtdma_bw_timer;
#define VXGE_HW_RTDMA_BW_TIMER_TIMER_CTRL(val) vxge_vBIT(val, 12, 4)

 u8 unused02900[0x02900-0x027d0];
/*0x02900*/ u64 g3cmct_int_status;
#define VXGE_HW_G3CMCT_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
/*0x02908*/ u64 g3cmct_int_mask;
/*0x02910*/ u64 g3cmct_err_reg;
#define VXGE_HW_G3CMCT_ERR_REG_G3IF_SM_ERR vxge_mBIT(4)
#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_DECC vxge_mBIT(5)
#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_U_DECC vxge_mBIT(6)
#define VXGE_HW_G3CMCT_ERR_REG_G3IF_CTRL_FIFO_DECC vxge_mBIT(7)
#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_SECC vxge_mBIT(29)
#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_U_SECC vxge_mBIT(30)
#define VXGE_HW_G3CMCT_ERR_REG_G3IF_CTRL_FIFO_SECC vxge_mBIT(31)
/*0x02918*/ u64 g3cmct_err_mask;
/*0x02920*/ u64 g3cmct_err_alarm;
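/*
 * ECC naming in this block and below: SECC/*_SG_ERR bits flag single-bit
 * (correctable) events and DECC/*_DB_ERR bits double-bit (uncorrectable)
 * ones; g3cmct presumably is the GDDR3 context-memory controller, with
 * the _U_ variants apparently covering the upper memory bank.
 */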
 u8 unused03000[0x03000-0x02928];

/*0x03000*/ u64 mc_int_status;
#define VXGE_HW_MC_INT_STATUS_MC_ERR_MC_INT vxge_mBIT(3)
#define VXGE_HW_MC_INT_STATUS_GROCRC_ALARM_ROCRC_INT vxge_mBIT(7)
#define VXGE_HW_MC_INT_STATUS_FAU_GEN_ERR_FAU_GEN_INT vxge_mBIT(11)
#define VXGE_HW_MC_INT_STATUS_FAU_ECC_ERR_FAU_ECC_INT vxge_mBIT(15)
/*0x03008*/ u64 mc_int_mask;
/*0x03010*/ u64 mc_err_reg;
#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_SG_ERR_A vxge_mBIT(3)
#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_SG_ERR_B vxge_mBIT(4)
#define VXGE_HW_MC_ERR_REG_MC_G3IF_RD_FIFO_ECC_SG_ERR vxge_mBIT(5)
#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_SG_ERR_0 vxge_mBIT(6)
#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_SG_ERR_1 vxge_mBIT(7)
#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_DB_ERR_A vxge_mBIT(10)
#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_DB_ERR_B vxge_mBIT(11)
#define VXGE_HW_MC_ERR_REG_MC_G3IF_RD_FIFO_ECC_DB_ERR vxge_mBIT(12)
#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_DB_ERR_0 vxge_mBIT(13)
#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_DB_ERR_1 vxge_mBIT(14)
#define VXGE_HW_MC_ERR_REG_MC_SM_ERR vxge_mBIT(15)
/*0x03018*/ u64 mc_err_mask;
/*0x03020*/ u64 mc_err_alarm;
/*0x03028*/ u64 grocrc_alarm_reg;
#define VXGE_HW_GROCRC_ALARM_REG_XFMD_WR_FIFO_ERR vxge_mBIT(3)
#define VXGE_HW_GROCRC_ALARM_REG_WDE2MSR_RD_FIFO_ERR vxge_mBIT(7)
/*0x03030*/ u64 grocrc_alarm_mask;
/*0x03038*/ u64 grocrc_alarm_alarm;
 u8 unused03100[0x03100-0x03040];

/*0x03100*/ u64 rx_thresh_cfg_repl;
#define VXGE_HW_RX_THRESH_CFG_REPL_PAUSE_LOW_THR(val) vxge_vBIT(val, 0, 8)
#define VXGE_HW_RX_THRESH_CFG_REPL_PAUSE_HIGH_THR(val) vxge_vBIT(val, 8, 8)
#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_0(val) vxge_vBIT(val, 16, 8)
#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_1(val) vxge_vBIT(val, 24, 8)
#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_2(val) vxge_vBIT(val, 32, 8)
#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_3(val) vxge_vBIT(val, 40, 8)
#define VXGE_HW_RX_THRESH_CFG_REPL_GLOBAL_WOL_EN vxge_mBIT(62)
#define VXGE_HW_RX_THRESH_CFG_REPL_EXACT_VP_MATCH_REQ vxge_mBIT(63)
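/*
 * The fields above appear to drive replication-path flow control: the
 * PAUSE_LOW/HIGH pair sets the fill levels at which 802.3x pause is
 * deasserted and asserted, and RED_THR_0..3 read like four staged
 * random-early-discard thresholds; this is inferred from the names, not
 * documented here.
 */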
 u8 unused033b8[0x033b8-0x03108];

/*0x033b8*/ u64 fbmc_ecc_cfg;
#define VXGE_HW_FBMC_ECC_CFG_ENABLE(val) vxge_vBIT(val, 3, 5)
 u8 unused03400[0x03400-0x033c0];

/*0x03400*/ u64 pcipif_int_status;
#define VXGE_HW_PCIPIF_INT_STATUS_DBECC_ERR_DBECC_ERR_INT vxge_mBIT(3)
#define VXGE_HW_PCIPIF_INT_STATUS_SBECC_ERR_SBECC_ERR_INT vxge_mBIT(7)
#define VXGE_HW_PCIPIF_INT_STATUS_GENERAL_ERR_GENERAL_ERR_INT vxge_mBIT(11)
#define VXGE_HW_PCIPIF_INT_STATUS_SRPCIM_MSG_SRPCIM_MSG_INT vxge_mBIT(15)
#define VXGE_HW_PCIPIF_INT_STATUS_MRPCIM_SPARE_R1_MRPCIM_SPARE_R1_INT \
 vxge_mBIT(19)
/*0x03408*/ u64 pcipif_int_mask;
/*0x03410*/ u64 dbecc_err_reg;
#define VXGE_HW_DBECC_ERR_REG_PCI_RETRY_BUF_DB_ERR vxge_mBIT(3)
#define VXGE_HW_DBECC_ERR_REG_PCI_RETRY_SOT_DB_ERR vxge_mBIT(7)
#define VXGE_HW_DBECC_ERR_REG_PCI_P_HDR_DB_ERR vxge_mBIT(11)
#define VXGE_HW_DBECC_ERR_REG_PCI_P_DATA_DB_ERR vxge_mBIT(15)
#define VXGE_HW_DBECC_ERR_REG_PCI_NP_HDR_DB_ERR vxge_mBIT(19)
#define VXGE_HW_DBECC_ERR_REG_PCI_NP_DATA_DB_ERR vxge_mBIT(23)
/*0x03418*/ u64 dbecc_err_mask;
/*0x03420*/ u64 dbecc_err_alarm;
/*0x03428*/ u64 sbecc_err_reg;
#define VXGE_HW_SBECC_ERR_REG_PCI_RETRY_BUF_SG_ERR vxge_mBIT(3)
#define VXGE_HW_SBECC_ERR_REG_PCI_RETRY_SOT_SG_ERR vxge_mBIT(7)
#define VXGE_HW_SBECC_ERR_REG_PCI_P_HDR_SG_ERR vxge_mBIT(11)
#define VXGE_HW_SBECC_ERR_REG_PCI_P_DATA_SG_ERR vxge_mBIT(15)
#define VXGE_HW_SBECC_ERR_REG_PCI_NP_HDR_SG_ERR vxge_mBIT(19)
#define VXGE_HW_SBECC_ERR_REG_PCI_NP_DATA_SG_ERR vxge_mBIT(23)
/*0x03430*/ u64 sbecc_err_mask;
/*0x03438*/ u64 sbecc_err_alarm;
/*0x03440*/ u64 general_err_reg;
#define VXGE_HW_GENERAL_ERR_REG_PCI_DROPPED_ILLEGAL_CFG vxge_mBIT(3)
#define VXGE_HW_GENERAL_ERR_REG_PCI_ILLEGAL_MEM_MAP_PROG vxge_mBIT(7)
#define VXGE_HW_GENERAL_ERR_REG_PCI_LINK_RST_FSM_ERR vxge_mBIT(11)
#define VXGE_HW_GENERAL_ERR_REG_PCI_RX_ILLEGAL_TLP_VPLANE vxge_mBIT(15)
#define VXGE_HW_GENERAL_ERR_REG_PCI_TRAINING_RESET_DET vxge_mBIT(19)
#define VXGE_HW_GENERAL_ERR_REG_PCI_PCI_LINK_DOWN_DET vxge_mBIT(23)
#define VXGE_HW_GENERAL_ERR_REG_PCI_RESET_ACK_DLLP vxge_mBIT(27)
/*0x03448*/ u64 general_err_mask;
/*0x03450*/ u64 general_err_alarm;
/*0x03458*/ u64 srpcim_msg_reg;
#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE0_RMSG_INT \
 vxge_mBIT(0)
#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE1_RMSG_INT \
 vxge_mBIT(1)
#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE2_RMSG_INT \
 vxge_mBIT(2)
#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE3_RMSG_INT \
 vxge_mBIT(3)
#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE4_RMSG_INT \
 vxge_mBIT(4)
#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE5_RMSG_INT \
 vxge_mBIT(5)
#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE6_RMSG_INT \
 vxge_mBIT(6)
#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE7_RMSG_INT \
 vxge_mBIT(7)
#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE8_RMSG_INT \
 vxge_mBIT(8)
#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE9_RMSG_INT \
 vxge_mBIT(9)
#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE10_RMSG_INT \
 vxge_mBIT(10)
#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE11_RMSG_INT \
 vxge_mBIT(11)
#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE12_RMSG_INT \
 vxge_mBIT(12)
#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE13_RMSG_INT \
 vxge_mBIT(13)
#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE14_RMSG_INT \
 vxge_mBIT(14)
#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE15_RMSG_INT \
 vxge_mBIT(15)
#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE16_RMSG_INT \
 vxge_mBIT(16)
/*0x03460*/ u64 srpcim_msg_mask;
/*0x03468*/ u64 srpcim_msg_alarm;
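/*
 * The VPLANE0..VPLANE16 bits above, like the [17]-element arrays elsewhere
 * in this file, track one entry per virtual plane; the device apparently
 * provisions seventeen such planes, hence the index range 0..16 rather
 * than a power-of-two count.
 */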
 u8 unused03600[0x03600-0x03470];

/*0x03600*/ u64 gcmg1_int_status;
#define VXGE_HW_GCMG1_INT_STATUS_GSSCC_ERR_GSSCC_INT vxge_mBIT(0)
#define VXGE_HW_GCMG1_INT_STATUS_GSSC0_ERR0_GSSC0_0_INT vxge_mBIT(1)
#define VXGE_HW_GCMG1_INT_STATUS_GSSC0_ERR1_GSSC0_1_INT vxge_mBIT(2)
#define VXGE_HW_GCMG1_INT_STATUS_GSSC1_ERR0_GSSC1_0_INT vxge_mBIT(3)
#define VXGE_HW_GCMG1_INT_STATUS_GSSC1_ERR1_GSSC1_1_INT vxge_mBIT(4)
#define VXGE_HW_GCMG1_INT_STATUS_GSSC2_ERR0_GSSC2_0_INT vxge_mBIT(5)
#define VXGE_HW_GCMG1_INT_STATUS_GSSC2_ERR1_GSSC2_1_INT vxge_mBIT(6)
#define VXGE_HW_GCMG1_INT_STATUS_UQM_ERR_UQM_INT vxge_mBIT(7)
#define VXGE_HW_GCMG1_INT_STATUS_GQCC_ERR_GQCC_INT vxge_mBIT(8)
/*0x03608*/ u64 gcmg1_int_mask;
 u8 unused03a00[0x03a00-0x03610];

/*0x03a00*/ u64 pcmg1_int_status;
#define VXGE_HW_PCMG1_INT_STATUS_PSSCC_ERR_PSSCC_INT vxge_mBIT(0)
#define VXGE_HW_PCMG1_INT_STATUS_PQCC_ERR_PQCC_INT vxge_mBIT(1)
#define VXGE_HW_PCMG1_INT_STATUS_PQCC_CQM_ERR_PQCC_CQM_INT vxge_mBIT(2)
#define VXGE_HW_PCMG1_INT_STATUS_PQCC_SQM_ERR_PQCC_SQM_INT vxge_mBIT(3)
/*0x03a08*/ u64 pcmg1_int_mask;
 u8 unused04000[0x04000-0x03a10];

/*0x04000*/ u64 one_int_status;
#define VXGE_HW_ONE_INT_STATUS_RXPE_ERR_RXPE_INT vxge_mBIT(7)
#define VXGE_HW_ONE_INT_STATUS_TXPE_BCC_MEM_SG_ECC_ERR_TXPE_BCC_MEM_SG_ECC_INT \
 vxge_mBIT(13)
#define VXGE_HW_ONE_INT_STATUS_TXPE_BCC_MEM_DB_ECC_ERR_TXPE_BCC_MEM_DB_ECC_INT \
 vxge_mBIT(14)
#define VXGE_HW_ONE_INT_STATUS_TXPE_ERR_TXPE_INT vxge_mBIT(15)
#define VXGE_HW_ONE_INT_STATUS_DLM_ERR_DLM_INT vxge_mBIT(23)
#define VXGE_HW_ONE_INT_STATUS_PE_ERR_PE_INT vxge_mBIT(31)
#define VXGE_HW_ONE_INT_STATUS_RPE_ERR_RPE_INT vxge_mBIT(39)
#define VXGE_HW_ONE_INT_STATUS_RPE_FSM_ERR_RPE_FSM_INT vxge_mBIT(47)
#define VXGE_HW_ONE_INT_STATUS_OES_ERR_OES_INT vxge_mBIT(55)
/*0x04008*/ u64 one_int_mask;
 u8 unused04818[0x04818-0x04010];

/*0x04818*/ u64 noa_wct_ctrl;
#define VXGE_HW_NOA_WCT_CTRL_VP_INT_NUM vxge_mBIT(0)
/*0x04820*/ u64 rc_cfg2;
#define VXGE_HW_RC_CFG2_BUFF1_SIZE(val) vxge_vBIT(val, 0, 16)
#define VXGE_HW_RC_CFG2_BUFF2_SIZE(val) vxge_vBIT(val, 16, 16)
#define VXGE_HW_RC_CFG2_BUFF3_SIZE(val) vxge_vBIT(val, 32, 16)
#define VXGE_HW_RC_CFG2_BUFF4_SIZE(val) vxge_vBIT(val, 48, 16)
/*0x04828*/ u64 rc_cfg3;
#define VXGE_HW_RC_CFG3_BUFF5_SIZE(val) vxge_vBIT(val, 0, 16)
/*0x04830*/ u64 rx_multi_cast_ctrl1;
#define VXGE_HW_RX_MULTI_CAST_CTRL1_ENABLE vxge_mBIT(7)
#define VXGE_HW_RX_MULTI_CAST_CTRL1_DELAY_COUNT(val) vxge_vBIT(val, 11, 5)
/*0x04838*/ u64 rxdm_dbg_rd;
#define VXGE_HW_RXDM_DBG_RD_ADDR(val) vxge_vBIT(val, 0, 12)
#define VXGE_HW_RXDM_DBG_RD_ENABLE vxge_mBIT(31)
/*0x04840*/ u64 rxdm_dbg_rd_data;
#define VXGE_HW_RXDM_DBG_RD_DATA_RMC_RXDM_DBG_RD_DATA(val) vxge_vBIT(val, 0, 64)
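/*
 * rxdm_dbg_rd/rxdm_dbg_rd_data look like the usual indirect debug-read
 * pair: program the word address and set ENABLE in rxdm_dbg_rd, then read
 * the captured word back from rxdm_dbg_rd_data. A sketch, with mrpcim_reg
 * a hypothetical mapped pointer to this structure:
 *
 *	writeq(VXGE_HW_RXDM_DBG_RD_ADDR(addr) | VXGE_HW_RXDM_DBG_RD_ENABLE,
 *	       &mrpcim_reg->rxdm_dbg_rd);
 *	data = readq(&mrpcim_reg->rxdm_dbg_rd_data);
 */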
/*0x04848*/ u64 rqa_top_prty_for_vh[17];
#define VXGE_HW_RQA_TOP_PRTY_FOR_VH_RQA_TOP_PRTY_FOR_VH(val) \
 vxge_vBIT(val, 59, 5)
 u8 unused04900[0x04900-0x048d0];

/*0x04900*/ u64 tim_status;
#define VXGE_HW_TIM_STATUS_TIM_RESET_IN_PROGRESS vxge_mBIT(0)
/*0x04908*/ u64 tim_ecc_enable;
#define VXGE_HW_TIM_ECC_ENABLE_VBLS_N vxge_mBIT(7)
#define VXGE_HW_TIM_ECC_ENABLE_BMAP_N vxge_mBIT(15)
#define VXGE_HW_TIM_ECC_ENABLE_BMAP_MSG_N vxge_mBIT(23)
/*0x04910*/ u64 tim_bp_ctrl;
#define VXGE_HW_TIM_BP_CTRL_RD_XON vxge_mBIT(7)
#define VXGE_HW_TIM_BP_CTRL_WR_XON vxge_mBIT(15)
#define VXGE_HW_TIM_BP_CTRL_ROCRC_BYP vxge_mBIT(23)
/*0x04918*/ u64 tim_resource_assignment_vh[17];
#define VXGE_HW_TIM_RESOURCE_ASSIGNMENT_VH_BMAP_ROOT(val) vxge_vBIT(val, 0, 32)
/*0x049a0*/ u64 tim_bmap_mapping_vp_err[17];
#define VXGE_HW_TIM_BMAP_MAPPING_VP_ERR_TIM_DEST_VPATH(val) vxge_vBIT(val, 3, 5)
 u8 unused04b00[0x04b00-0x04a28];

/*0x04b00*/ u64 gcmg2_int_status;
#define VXGE_HW_GCMG2_INT_STATUS_GXTMC_ERR_GXTMC_INT vxge_mBIT(7)
#define VXGE_HW_GCMG2_INT_STATUS_GCP_ERR_GCP_INT vxge_mBIT(15)
#define VXGE_HW_GCMG2_INT_STATUS_CMC_ERR_CMC_INT vxge_mBIT(23)
/*0x04b08*/ u64 gcmg2_int_mask;
/*0x04b10*/ u64 gxtmc_err_reg;
#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_MEM_DB_ERR(val) vxge_vBIT(val, 0, 4)
#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_MEM_SG_ERR(val) vxge_vBIT(val, 4, 4)
#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMC_RD_DATA_DB_ERR vxge_mBIT(8)
#define VXGE_HW_GXTMC_ERR_REG_XTMC_REQ_FIFO_ERR vxge_mBIT(9)
#define VXGE_HW_GXTMC_ERR_REG_XTMC_REQ_DATA_FIFO_ERR vxge_mBIT(10)
#define VXGE_HW_GXTMC_ERR_REG_XTMC_WR_RSP_FIFO_ERR vxge_mBIT(11)
#define VXGE_HW_GXTMC_ERR_REG_XTMC_RD_RSP_FIFO_ERR vxge_mBIT(12)
#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_WRP_FIFO_ERR vxge_mBIT(13)
#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_WRP_ERR vxge_mBIT(14)
#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_RRP_FIFO_ERR vxge_mBIT(15)
#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_RRP_ERR vxge_mBIT(16)
#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_DATA_SM_ERR vxge_mBIT(17)
#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_CMC0_IF_ERR vxge_mBIT(18)
#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_ARB_SM_ERR vxge_mBIT(19)
#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_CFC_SM_ERR vxge_mBIT(20)
#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_CREDIT_OVERFLOW \
 vxge_mBIT(21)
#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_CREDIT_UNDERFLOW \
 vxge_mBIT(22)
#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_SM_ERR vxge_mBIT(23)
#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_CREDIT_OVERFLOW \
 vxge_mBIT(24)
#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_CREDIT_UNDERFLOW \
 vxge_mBIT(25)
#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_SM_ERR vxge_mBIT(26)
#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WCOMPL_SM_ERR vxge_mBIT(27)
#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WCOMPL_TAG_ERR vxge_mBIT(28)
#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WREQ_SM_ERR vxge_mBIT(29)
#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WREQ_FIFO_ERR vxge_mBIT(30)
#define VXGE_HW_GXTMC_ERR_REG_XTMC_CP2BDT_RFIFO_POP_ERR vxge_mBIT(31)
#define VXGE_HW_GXTMC_ERR_REG_XTMC_XTMC_BDT_CMI_OP_ERR vxge_mBIT(32)
#define VXGE_HW_GXTMC_ERR_REG_XTMC_XTMC_BDT_DFETCH_OP_ERR vxge_mBIT(33)
#define VXGE_HW_GXTMC_ERR_REG_XTMC_XTMC_BDT_DFIFO_ERR vxge_mBIT(34)
#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_ARB_SM_ERR vxge_mBIT(35)
/*0x04b18*/ u64 gxtmc_err_mask;
/*0x04b20*/ u64 gxtmc_err_alarm;
/*0x04b28*/ u64 cmc_err_reg;
#define VXGE_HW_CMC_ERR_REG_CMC_CMC_SM_ERR vxge_mBIT(0)
/*0x04b30*/ u64 cmc_err_mask;
/*0x04b38*/ u64 cmc_err_alarm;
/*0x04b40*/ u64 gcp_err_reg;
#define VXGE_HW_GCP_ERR_REG_CP_H2L2CP_FIFO_ERR vxge_mBIT(0)
#define VXGE_HW_GCP_ERR_REG_CP_STC2CP_FIFO_ERR vxge_mBIT(1)
#define VXGE_HW_GCP_ERR_REG_CP_STE2CP_FIFO_ERR vxge_mBIT(2)
#define VXGE_HW_GCP_ERR_REG_CP_TTE2CP_FIFO_ERR vxge_mBIT(3)
/*0x04b48*/ u64 gcp_err_mask;
/*0x04b50*/ u64 gcp_err_alarm;
 u8 unused04f00[0x04f00-0x04b58];

/*0x04f00*/ u64 pcmg2_int_status;
#define VXGE_HW_PCMG2_INT_STATUS_PXTMC_ERR_PXTMC_INT vxge_mBIT(7)
#define VXGE_HW_PCMG2_INT_STATUS_CP_EXC_CP_XT_EXC_INT vxge_mBIT(15)
#define VXGE_HW_PCMG2_INT_STATUS_CP_ERR_CP_ERR_INT vxge_mBIT(23)
/*0x04f08*/ u64 pcmg2_int_mask;
/*0x04f10*/ u64 pxtmc_err_reg;
#define VXGE_HW_PXTMC_ERR_REG_XTMC_XT_PIF_SRAM_DB_ERR(val) vxge_vBIT(val, 0, 2)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_REQ_FIFO_ERR vxge_mBIT(2)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_PRSP_FIFO_ERR vxge_mBIT(3)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_WRSP_FIFO_ERR vxge_mBIT(4)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_REQ_FIFO_ERR vxge_mBIT(5)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_PRSP_FIFO_ERR vxge_mBIT(6)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_WRSP_FIFO_ERR vxge_mBIT(7)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_REQ_FIFO_ERR vxge_mBIT(8)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_PRSP_FIFO_ERR vxge_mBIT(9)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_WRSP_FIFO_ERR vxge_mBIT(10)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_REQ_FIFO_ERR vxge_mBIT(11)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_REQ_DATA_FIFO_ERR vxge_mBIT(12)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_WR_RSP_FIFO_ERR vxge_mBIT(13)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_RD_RSP_FIFO_ERR vxge_mBIT(14)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_REQ_SHADOW_ERR vxge_mBIT(15)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_RSP_SHADOW_ERR vxge_mBIT(16)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_REQ_SHADOW_ERR vxge_mBIT(17)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_RSP_SHADOW_ERR vxge_mBIT(18)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_REQ_SHADOW_ERR vxge_mBIT(19)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_RSP_SHADOW_ERR vxge_mBIT(20)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_XIL_SHADOW_ERR vxge_mBIT(21)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_ARB_SHADOW_ERR vxge_mBIT(22)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_RAM_SHADOW_ERR vxge_mBIT(23)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMW_SHADOW_ERR vxge_mBIT(24)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMR_SHADOW_ERR vxge_mBIT(25)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_REQ_FSM_ERR vxge_mBIT(26)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_RSP_FSM_ERR vxge_mBIT(27)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_REQ_FSM_ERR vxge_mBIT(28)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_RSP_FSM_ERR vxge_mBIT(29)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_REQ_FSM_ERR vxge_mBIT(30)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_RSP_FSM_ERR vxge_mBIT(31)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_XIL_FSM_ERR vxge_mBIT(32)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_ARB_FSM_ERR vxge_mBIT(33)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMW_FSM_ERR vxge_mBIT(34)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMR_FSM_ERR vxge_mBIT(35)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_RD_PROT_ERR vxge_mBIT(36)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_RD_PROT_ERR vxge_mBIT(37)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_RD_PROT_ERR vxge_mBIT(38)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_WR_PROT_ERR vxge_mBIT(39)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_WR_PROT_ERR vxge_mBIT(40)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_WR_PROT_ERR vxge_mBIT(41)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_INV_ADDR_ERR vxge_mBIT(42)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_INV_ADDR_ERR vxge_mBIT(43)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_INV_ADDR_ERR vxge_mBIT(44)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_RD_PROT_INFO_ERR vxge_mBIT(45)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_RD_PROT_INFO_ERR vxge_mBIT(46)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_RD_PROT_INFO_ERR vxge_mBIT(47)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_WR_PROT_INFO_ERR vxge_mBIT(48)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_WR_PROT_INFO_ERR vxge_mBIT(49)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_WR_PROT_INFO_ERR vxge_mBIT(50)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_INV_ADDR_INFO_ERR vxge_mBIT(51)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_INV_ADDR_INFO_ERR vxge_mBIT(52)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_INV_ADDR_INFO_ERR vxge_mBIT(53)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_XT_PIF_SRAM_SG_ERR(val) vxge_vBIT(val, 54, 2)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CP2BDT_DFIFO_PUSH_ERR vxge_mBIT(56)
#define VXGE_HW_PXTMC_ERR_REG_XTMC_CP2BDT_RFIFO_PUSH_ERR vxge_mBIT(57)
/*0x04f18*/ u64 pxtmc_err_mask;
/*0x04f20*/ u64 pxtmc_err_alarm;
/*0x04f28*/ u64 cp_err_reg;
#define VXGE_HW_CP_ERR_REG_CP_CP_DCACHE_SG_ERR(val) vxge_vBIT(val, 0, 8)
#define VXGE_HW_CP_ERR_REG_CP_CP_ICACHE_SG_ERR(val) vxge_vBIT(val, 8, 2)
#define VXGE_HW_CP_ERR_REG_CP_CP_DTAG_SG_ERR vxge_mBIT(10)
#define VXGE_HW_CP_ERR_REG_CP_CP_ITAG_SG_ERR vxge_mBIT(11)
#define VXGE_HW_CP_ERR_REG_CP_CP_TRACE_SG_ERR vxge_mBIT(12)
#define VXGE_HW_CP_ERR_REG_CP_DMA2CP_SG_ERR vxge_mBIT(13)
#define VXGE_HW_CP_ERR_REG_CP_MP2CP_SG_ERR vxge_mBIT(14)
#define VXGE_HW_CP_ERR_REG_CP_QCC2CP_SG_ERR vxge_mBIT(15)
#define VXGE_HW_CP_ERR_REG_CP_STC2CP_SG_ERR(val) vxge_vBIT(val, 16, 2)
#define VXGE_HW_CP_ERR_REG_CP_CP_DCACHE_DB_ERR(val) vxge_vBIT(val, 24, 8)
#define VXGE_HW_CP_ERR_REG_CP_CP_ICACHE_DB_ERR(val) vxge_vBIT(val, 32, 2)
#define VXGE_HW_CP_ERR_REG_CP_CP_DTAG_DB_ERR vxge_mBIT(34)
#define VXGE_HW_CP_ERR_REG_CP_CP_ITAG_DB_ERR vxge_mBIT(35)
#define VXGE_HW_CP_ERR_REG_CP_CP_TRACE_DB_ERR vxge_mBIT(36)
#define VXGE_HW_CP_ERR_REG_CP_DMA2CP_DB_ERR vxge_mBIT(37)
#define VXGE_HW_CP_ERR_REG_CP_MP2CP_DB_ERR vxge_mBIT(38)
#define VXGE_HW_CP_ERR_REG_CP_QCC2CP_DB_ERR vxge_mBIT(39)
#define VXGE_HW_CP_ERR_REG_CP_STC2CP_DB_ERR(val) vxge_vBIT(val, 40, 2)
#define VXGE_HW_CP_ERR_REG_CP_H2L2CP_FIFO_ERR vxge_mBIT(48)
#define VXGE_HW_CP_ERR_REG_CP_STC2CP_FIFO_ERR vxge_mBIT(49)
#define VXGE_HW_CP_ERR_REG_CP_STE2CP_FIFO_ERR vxge_mBIT(50)
#define VXGE_HW_CP_ERR_REG_CP_TTE2CP_FIFO_ERR vxge_mBIT(51)
#define VXGE_HW_CP_ERR_REG_CP_SWIF2CP_FIFO_ERR vxge_mBIT(52)
#define VXGE_HW_CP_ERR_REG_CP_CP2DMA_FIFO_ERR vxge_mBIT(53)
#define VXGE_HW_CP_ERR_REG_CP_DAM2CP_FIFO_ERR vxge_mBIT(54)
#define VXGE_HW_CP_ERR_REG_CP_MP2CP_FIFO_ERR vxge_mBIT(55)
#define VXGE_HW_CP_ERR_REG_CP_QCC2CP_FIFO_ERR vxge_mBIT(56)
#define VXGE_HW_CP_ERR_REG_CP_DMA2CP_FIFO_ERR vxge_mBIT(57)
#define VXGE_HW_CP_ERR_REG_CP_CP_WAKE_FSM_INTEGRITY_ERR vxge_mBIT(60)
#define VXGE_HW_CP_ERR_REG_CP_CP_PMON_FSM_INTEGRITY_ERR vxge_mBIT(61)
#define VXGE_HW_CP_ERR_REG_CP_DMA_RD_SHADOW_ERR vxge_mBIT(62)
#define VXGE_HW_CP_ERR_REG_CP_PIFT_CREDIT_ERR vxge_mBIT(63)
/*0x04f30*/ u64 cp_err_mask;
/*0x04f38*/ u64 cp_err_alarm;
 u8 unused04fe8[0x04f50-0x04f40];

/*0x04f50*/ u64 cp_exc_reg;
#define VXGE_HW_CP_EXC_REG_CP_CP_CAUSE_INFO_INT vxge_mBIT(47)
#define VXGE_HW_CP_EXC_REG_CP_CP_CAUSE_CRIT_INT vxge_mBIT(55)
#define VXGE_HW_CP_EXC_REG_CP_CP_SERR vxge_mBIT(63)
/*0x04f58*/ u64 cp_exc_mask;
/*0x04f60*/ u64 cp_exc_alarm;
/*0x04f68*/ u64 cp_exc_cause;
#define VXGE_HW_CP_EXC_CAUSE_CP_CP_CAUSE(val) vxge_vBIT(val, 32, 32)
 u8 unused05200[0x05200-0x04f70];

/*0x05200*/ u64 msg_int_status;
#define VXGE_HW_MSG_INT_STATUS_TIM_ERR_TIM_INT vxge_mBIT(7)
#define VXGE_HW_MSG_INT_STATUS_MSG_EXC_MSG_XT_EXC_INT vxge_mBIT(60)
#define VXGE_HW_MSG_INT_STATUS_MSG_ERR3_MSG_ERR3_INT vxge_mBIT(61)
#define VXGE_HW_MSG_INT_STATUS_MSG_ERR2_MSG_ERR2_INT vxge_mBIT(62)
#define VXGE_HW_MSG_INT_STATUS_MSG_ERR_MSG_ERR_INT vxge_mBIT(63)
/*0x05208*/ u64 msg_int_mask;
/*0x05210*/ u64 tim_err_reg;
#define VXGE_HW_TIM_ERR_REG_TIM_VBLS_SG_ERR vxge_mBIT(4)
#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PA_SG_ERR vxge_mBIT(5)
#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PB_SG_ERR vxge_mBIT(6)
#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MSG_SG_ERR vxge_mBIT(7)
#define VXGE_HW_TIM_ERR_REG_TIM_VBLS_DB_ERR vxge_mBIT(12)
#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PA_DB_ERR vxge_mBIT(13)
#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PB_DB_ERR vxge_mBIT(14)
#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MSG_DB_ERR vxge_mBIT(15)
#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MEM_CNTRL_SM_ERR vxge_mBIT(18)
#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MSG_MEM_CNTRL_SM_ERR vxge_mBIT(19)
#define VXGE_HW_TIM_ERR_REG_TIM_MPIF_PCIWR_ERR vxge_mBIT(20)
#define VXGE_HW_TIM_ERR_REG_TIM_ROCRC_BMAP_UPDT_FIFO_ERR vxge_mBIT(22)
#define VXGE_HW_TIM_ERR_REG_TIM_CREATE_BMAPMSG_FIFO_ERR vxge_mBIT(23)
#define VXGE_HW_TIM_ERR_REG_TIM_ROCRCIF_MISMATCH vxge_mBIT(46)
#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MAPPING_VP_ERR(n) vxge_mBIT(n)
/*0x05218*/ u64 tim_err_mask;
/*0x05220*/ u64 tim_err_alarm;
/*0x05228*/ u64 msg_err_reg;
#define VXGE_HW_MSG_ERR_REG_UP_UXP_WAKE_FSM_INTEGRITY_ERR vxge_mBIT(0)
#define VXGE_HW_MSG_ERR_REG_MP_MXP_WAKE_FSM_INTEGRITY_ERR vxge_mBIT(1)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_DMA_READ_CMD_FSM_INTEGRITY_ERR \
 vxge_mBIT(2)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_DMA_RESP_FSM_INTEGRITY_ERR \
 vxge_mBIT(3)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_OWN_FSM_INTEGRITY_ERR vxge_mBIT(4)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_PDA_ACC_FSM_INTEGRITY_ERR vxge_mBIT(5)
#define VXGE_HW_MSG_ERR_REG_MP_MXP_PMON_FSM_INTEGRITY_ERR vxge_mBIT(6)
#define VXGE_HW_MSG_ERR_REG_UP_UXP_PMON_FSM_INTEGRITY_ERR vxge_mBIT(7)
#define VXGE_HW_MSG_ERR_REG_UP_UXP_DTAG_SG_ERR vxge_mBIT(8)
#define VXGE_HW_MSG_ERR_REG_UP_UXP_ITAG_SG_ERR vxge_mBIT(10)
#define VXGE_HW_MSG_ERR_REG_MP_MXP_DTAG_SG_ERR vxge_mBIT(12)
#define VXGE_HW_MSG_ERR_REG_MP_MXP_ITAG_SG_ERR vxge_mBIT(14)
#define VXGE_HW_MSG_ERR_REG_UP_UXP_TRACE_SG_ERR vxge_mBIT(16)
#define VXGE_HW_MSG_ERR_REG_MP_MXP_TRACE_SG_ERR vxge_mBIT(17)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_CMG2MSG_SG_ERR vxge_mBIT(18)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_TXPE2MSG_SG_ERR vxge_mBIT(19)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RXPE2MSG_SG_ERR vxge_mBIT(20)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RPE2MSG_SG_ERR vxge_mBIT(21)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_SG_ERR vxge_mBIT(26)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_BWR_PF_SG_ERR vxge_mBIT(27)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_ECC_SG_ERR vxge_mBIT(29)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMA_RESP_ECC_SG_ERR vxge_mBIT(31)
#define VXGE_HW_MSG_ERR_REG_MSG_XFMDQRY_FSM_INTEGRITY_ERR vxge_mBIT(33)
#define VXGE_HW_MSG_ERR_REG_MSG_FRMQRY_FSM_INTEGRITY_ERR vxge_mBIT(34)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_WRITE_FSM_INTEGRITY_ERR vxge_mBIT(35)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_BWR_PF_FSM_INTEGRITY_ERR \
 vxge_mBIT(36)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_REG_RESP_FIFO_ERR vxge_mBIT(38)
#define VXGE_HW_MSG_ERR_REG_UP_UXP_DTAG_DB_ERR vxge_mBIT(39)
#define VXGE_HW_MSG_ERR_REG_UP_UXP_ITAG_DB_ERR vxge_mBIT(41)
#define VXGE_HW_MSG_ERR_REG_MP_MXP_DTAG_DB_ERR vxge_mBIT(43)
#define VXGE_HW_MSG_ERR_REG_MP_MXP_ITAG_DB_ERR vxge_mBIT(45)
#define VXGE_HW_MSG_ERR_REG_UP_UXP_TRACE_DB_ERR vxge_mBIT(47)
#define VXGE_HW_MSG_ERR_REG_MP_MXP_TRACE_DB_ERR vxge_mBIT(48)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_CMG2MSG_DB_ERR vxge_mBIT(49)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_TXPE2MSG_DB_ERR vxge_mBIT(50)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RXPE2MSG_DB_ERR vxge_mBIT(51)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RPE2MSG_DB_ERR vxge_mBIT(52)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_REG_READ_FIFO_ERR vxge_mBIT(53)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_MXP2UXP_FIFO_ERR vxge_mBIT(54)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_KDFC_SIF_FIFO_ERR vxge_mBIT(55)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_CXP2SWIF_FIFO_ERR vxge_mBIT(56)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_DB_ERR vxge_mBIT(57)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_BWR_PF_DB_ERR vxge_mBIT(58)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_BWR_SIF_FIFO_ERR vxge_mBIT(59)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_ECC_DB_ERR vxge_mBIT(60)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMA_READ_FIFO_ERR vxge_mBIT(61)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMA_RESP_ECC_DB_ERR vxge_mBIT(62)
#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UXP2MXP_FIFO_ERR vxge_mBIT(63)
/*0x05230*/ u64 msg_err_mask;
/*0x05238*/ u64 msg_err_alarm;
 u8 unused05340[0x05340-0x05240];

/*0x05340*/ u64 msg_exc_reg;
#define VXGE_HW_MSG_EXC_REG_MP_MXP_CAUSE_INFO_INT vxge_mBIT(50)
#define VXGE_HW_MSG_EXC_REG_MP_MXP_CAUSE_CRIT_INT vxge_mBIT(51)
#define VXGE_HW_MSG_EXC_REG_UP_UXP_CAUSE_INFO_INT vxge_mBIT(54)
#define VXGE_HW_MSG_EXC_REG_UP_UXP_CAUSE_CRIT_INT vxge_mBIT(55)
#define VXGE_HW_MSG_EXC_REG_MP_MXP_SERR vxge_mBIT(62)
#define VXGE_HW_MSG_EXC_REG_UP_UXP_SERR vxge_mBIT(63)
/*0x05348*/ u64 msg_exc_mask;
/*0x05350*/ u64 msg_exc_alarm;
/*0x05358*/ u64 msg_exc_cause;
#define VXGE_HW_MSG_EXC_CAUSE_MP_MXP(val) vxge_vBIT(val, 0, 32)
#define VXGE_HW_MSG_EXC_CAUSE_UP_UXP(val) vxge_vBIT(val, 32, 32)
 u8 unused05368[0x05380-0x05360];

/*0x05380*/ u64 msg_err2_reg;
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_CMG2MSG_DISPATCH_FSM_INTEGRITY_ERR \
 vxge_mBIT(0)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_DMQ_DISPATCH_FSM_INTEGRITY_ERR \
 vxge_mBIT(1)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_SWIF_DISPATCH_FSM_INTEGRITY_ERR \
 vxge_mBIT(2)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_PIC_WRITE_FSM_INTEGRITY_ERR \
 vxge_mBIT(3)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_SWIFREG_FSM_INTEGRITY_ERR vxge_mBIT(4)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_TIM_WRITE_FSM_INTEGRITY_ERR \
 vxge_mBIT(5)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_UMQ_TA_FSM_INTEGRITY_ERR vxge_mBIT(6)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_TXPE_TA_FSM_INTEGRITY_ERR vxge_mBIT(7)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_RXPE_TA_FSM_INTEGRITY_ERR vxge_mBIT(8)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_SWIF_TA_FSM_INTEGRITY_ERR vxge_mBIT(9)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_DMA_TA_FSM_INTEGRITY_ERR vxge_mBIT(10)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_CP_TA_FSM_INTEGRITY_ERR vxge_mBIT(11)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA16_FSM_INTEGRITY_ERR \
 vxge_mBIT(12)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA15_FSM_INTEGRITY_ERR \
 vxge_mBIT(13)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA14_FSM_INTEGRITY_ERR \
 vxge_mBIT(14)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA13_FSM_INTEGRITY_ERR \
 vxge_mBIT(15)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA12_FSM_INTEGRITY_ERR \
 vxge_mBIT(16)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA11_FSM_INTEGRITY_ERR \
 vxge_mBIT(17)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA10_FSM_INTEGRITY_ERR \
 vxge_mBIT(18)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA9_FSM_INTEGRITY_ERR \
 vxge_mBIT(19)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA8_FSM_INTEGRITY_ERR \
 vxge_mBIT(20)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA7_FSM_INTEGRITY_ERR \
 vxge_mBIT(21)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA6_FSM_INTEGRITY_ERR \
 vxge_mBIT(22)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA5_FSM_INTEGRITY_ERR \
 vxge_mBIT(23)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA4_FSM_INTEGRITY_ERR \
 vxge_mBIT(24)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA3_FSM_INTEGRITY_ERR \
 vxge_mBIT(25)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA2_FSM_INTEGRITY_ERR \
 vxge_mBIT(26)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA1_FSM_INTEGRITY_ERR \
 vxge_mBIT(27)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA0_FSM_INTEGRITY_ERR \
 vxge_mBIT(28)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_FBMC_OWN_FSM_INTEGRITY_ERR vxge_mBIT(29)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_TXPE2MSG_DISPATCH_FSM_INTEGRITY_ERR \
 vxge_mBIT(30)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_RXPE2MSG_DISPATCH_FSM_INTEGRITY_ERR \
 vxge_mBIT(31)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_RPE2MSG_DISPATCH_FSM_INTEGRITY_ERR \
 vxge_mBIT(32)
#define VXGE_HW_MSG_ERR2_REG_MP_MP_PIFT_IF_CREDIT_CNT_ERR vxge_mBIT(33)
#define VXGE_HW_MSG_ERR2_REG_UP_UP_PIFT_IF_CREDIT_CNT_ERR vxge_mBIT(34)
#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_UMQ2PIC_CMD_FIFO_ERR vxge_mBIT(62)
#define VXGE_HW_MSG_ERR2_REG_TIM_TIM2MSG_CMD_FIFO_ERR vxge_mBIT(63)
/*0x05388*/ u64 msg_err2_mask;
/*0x05390*/ u64 msg_err2_alarm;
/*0x05398*/ u64 msg_err3_reg;
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR0 vxge_mBIT(0)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR1 vxge_mBIT(1)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR2 vxge_mBIT(2)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR3 vxge_mBIT(3)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR4 vxge_mBIT(4)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR5 vxge_mBIT(5)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR6 vxge_mBIT(6)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR7 vxge_mBIT(7)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_SG_ERR0 vxge_mBIT(8)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_SG_ERR1 vxge_mBIT(9)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR0 vxge_mBIT(16)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR1 vxge_mBIT(17)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR2 vxge_mBIT(18)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR3 vxge_mBIT(19)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR4 vxge_mBIT(20)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR5 vxge_mBIT(21)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR6 vxge_mBIT(22)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR7 vxge_mBIT(23)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_SG_ERR0 vxge_mBIT(24)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_SG_ERR1 vxge_mBIT(25)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR0 vxge_mBIT(32)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR1 vxge_mBIT(33)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR2 vxge_mBIT(34)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR3 vxge_mBIT(35)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR4 vxge_mBIT(36)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR5 vxge_mBIT(37)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR6 vxge_mBIT(38)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR7 vxge_mBIT(39)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_DB_ERR0 vxge_mBIT(40)
#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_DB_ERR1 vxge_mBIT(41)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR0 vxge_mBIT(48)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR1 vxge_mBIT(49)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR2 vxge_mBIT(50)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR3 vxge_mBIT(51)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR4 vxge_mBIT(52)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR5 vxge_mBIT(53)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR6 vxge_mBIT(54)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR7 vxge_mBIT(55)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_DB_ERR0 vxge_mBIT(56)
#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_DB_ERR1 vxge_mBIT(57)
/*0x053a0*/ u64 msg_err3_mask;
/*0x053a8*/ u64 msg_err3_alarm;
 u8 unused05600[0x05600-0x053b0];

/*0x05600*/ u64 fau_gen_err_reg;
#define VXGE_HW_FAU_GEN_ERR_REG_FMPF_PORT0_PERMANENT_STOP vxge_mBIT(3)
#define VXGE_HW_FAU_GEN_ERR_REG_FMPF_PORT1_PERMANENT_STOP vxge_mBIT(7)
#define VXGE_HW_FAU_GEN_ERR_REG_FMPF_PORT2_PERMANENT_STOP vxge_mBIT(11)
#define VXGE_HW_FAU_GEN_ERR_REG_FALR_AUTO_LRO_NOTIFICATION vxge_mBIT(15)
/*0x05608*/ u64 fau_gen_err_mask;
/*0x05610*/ u64 fau_gen_err_alarm;
/*0x05618*/ u64 fau_ecc_err_reg;
#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_N_SG_ERR vxge_mBIT(0)
#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_N_DB_ERR vxge_mBIT(1)
#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_W_SG_ERR(val) \
 vxge_vBIT(val, 2, 2)
#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_W_DB_ERR(val) \
 vxge_vBIT(val, 4, 2)
#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_N_SG_ERR vxge_mBIT(6)
#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_N_DB_ERR vxge_mBIT(7)
#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_W_SG_ERR(val) \
 vxge_vBIT(val, 8, 2)
#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_W_DB_ERR(val) \
 vxge_vBIT(val, 10, 2)
#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_N_SG_ERR vxge_mBIT(12)
#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_N_DB_ERR vxge_mBIT(13)
#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_W_SG_ERR(val) \
 vxge_vBIT(val, 14, 2)
#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_W_DB_ERR(val) \
 vxge_vBIT(val, 16, 2)
#define VXGE_HW_FAU_ECC_ERR_REG_FAU_FAU_XFMD_INS_SG_ERR(val) \
 vxge_vBIT(val, 18, 2)
#define VXGE_HW_FAU_ECC_ERR_REG_FAU_FAU_XFMD_INS_DB_ERR(val) \
 vxge_vBIT(val, 20, 2)
#define VXGE_HW_FAU_ECC_ERR_REG_FAUJ_FAU_FSM_ERR vxge_mBIT(31)
/*0x05620*/ u64 fau_ecc_err_mask;
/*0x05628*/ u64 fau_ecc_err_alarm;
 u8 unused05658[0x05658-0x05630];
/*0x05658*/ u64 fau_pa_cfg;
#define VXGE_HW_FAU_PA_CFG_REPL_L4_COMP_CSUM vxge_mBIT(3)
#define VXGE_HW_FAU_PA_CFG_REPL_L3_INCL_CF vxge_mBIT(7)
#define VXGE_HW_FAU_PA_CFG_REPL_L3_COMP_CSUM vxge_mBIT(11)
 u8 unused05668[0x05668-0x05660];

/*0x05668*/ u64 dbg_stats_fau_rx_path;
#define VXGE_HW_DBG_STATS_FAU_RX_PATH_RX_PERMITTED_FRMS(val) \
 vxge_vBIT(val, 32, 32)
 u8 unused056c0[0x056c0-0x05670];

/*0x056c0*/ u64 fau_lag_cfg;
#define VXGE_HW_FAU_LAG_CFG_COLL_ALG(val) vxge_vBIT(val, 2, 2)
#define VXGE_HW_FAU_LAG_CFG_INCR_RX_AGGR_STATS vxge_mBIT(7)
 u8 unused05800[0x05800-0x056c8];

/*0x05800*/ u64 tpa_int_status;
#define VXGE_HW_TPA_INT_STATUS_ORP_ERR_ORP_INT vxge_mBIT(15)
#define VXGE_HW_TPA_INT_STATUS_PTM_ALARM_PTM_INT vxge_mBIT(23)
#define VXGE_HW_TPA_INT_STATUS_TPA_ERROR_TPA_INT vxge_mBIT(31)
/*0x05808*/ u64 tpa_int_mask;
/*0x05810*/ u64 orp_err_reg;
#define VXGE_HW_ORP_ERR_REG_ORP_FIFO_SG_ERR vxge_mBIT(3)
#define VXGE_HW_ORP_ERR_REG_ORP_FIFO_DB_ERR vxge_mBIT(7)
#define VXGE_HW_ORP_ERR_REG_ORP_XFMD_FIFO_UFLOW_ERR vxge_mBIT(11)
#define VXGE_HW_ORP_ERR_REG_ORP_FRM_FIFO_UFLOW_ERR vxge_mBIT(15)
#define VXGE_HW_ORP_ERR_REG_ORP_XFMD_RCV_FSM_ERR vxge_mBIT(19)
#define VXGE_HW_ORP_ERR_REG_ORP_OUTREAD_FSM_ERR vxge_mBIT(23)
#define VXGE_HW_ORP_ERR_REG_ORP_OUTQEM_FSM_ERR vxge_mBIT(27)
#define VXGE_HW_ORP_ERR_REG_ORP_XFMD_RCV_SHADOW_ERR vxge_mBIT(31)
#define VXGE_HW_ORP_ERR_REG_ORP_OUTREAD_SHADOW_ERR vxge_mBIT(35)
#define VXGE_HW_ORP_ERR_REG_ORP_OUTQEM_SHADOW_ERR vxge_mBIT(39)
#define VXGE_HW_ORP_ERR_REG_ORP_OUTFRM_SHADOW_ERR vxge_mBIT(43)
#define VXGE_HW_ORP_ERR_REG_ORP_OPTPRS_SHADOW_ERR vxge_mBIT(47)
/*0x05818*/ u64 orp_err_mask;
/*0x05820*/ u64 orp_err_alarm;
/*0x05828*/ u64 ptm_alarm_reg;
#define VXGE_HW_PTM_ALARM_REG_PTM_RDCTRL_SYNC_ERR vxge_mBIT(3)
#define VXGE_HW_PTM_ALARM_REG_PTM_RDCTRL_FIFO_ERR vxge_mBIT(7)
#define VXGE_HW_PTM_ALARM_REG_XFMD_RD_FIFO_ERR vxge_mBIT(11)
#define VXGE_HW_PTM_ALARM_REG_WDE2MSR_WR_FIFO_ERR vxge_mBIT(15)
#define VXGE_HW_PTM_ALARM_REG_PTM_FRMM_ECC_DB_ERR(val) vxge_vBIT(val, 18, 2)
#define VXGE_HW_PTM_ALARM_REG_PTM_FRMM_ECC_SG_ERR(val) vxge_vBIT(val, 22, 2)
/*0x05830*/ u64 ptm_alarm_mask;
/*0x05838*/ u64 ptm_alarm_alarm;
/*0x05840*/ u64 tpa_error_reg;
#define VXGE_HW_TPA_ERROR_REG_TPA_FSM_ERR_ALARM vxge_mBIT(3)
#define VXGE_HW_TPA_ERROR_REG_TPA_TPA_DA_LKUP_PRT0_DB_ERR vxge_mBIT(7)
#define VXGE_HW_TPA_ERROR_REG_TPA_TPA_DA_LKUP_PRT0_SG_ERR vxge_mBIT(11)
/*0x05848*/ u64 tpa_error_mask;
/*0x05850*/ u64 tpa_error_alarm;
/*0x05858*/ u64 tpa_global_cfg;
#define VXGE_HW_TPA_GLOBAL_CFG_SUPPORT_SNAP_AB_N vxge_mBIT(7)
#define VXGE_HW_TPA_GLOBAL_CFG_ECC_ENABLE_N vxge_mBIT(35)
 u8 unused05868[0x05870-0x05860];

/*0x05870*/ u64 ptm_ecc_cfg;
#define VXGE_HW_PTM_ECC_CFG_PTM_FRMM_ECC_EN_N vxge_mBIT(3)
/*0x05878*/ u64 ptm_phase_cfg;
#define VXGE_HW_PTM_PHASE_CFG_FRMM_WR_PHASE_EN vxge_mBIT(3)
#define VXGE_HW_PTM_PHASE_CFG_FRMM_RD_PHASE_EN vxge_mBIT(7)
 u8 unused05898[0x05898-0x05880];

/*0x05898*/ u64 dbg_stats_tpa_tx_path;
#define VXGE_HW_DBG_STATS_TPA_TX_PATH_TX_PERMITTED_FRMS(val) \
 vxge_vBIT(val, 32, 32)
 u8 unused05900[0x05900-0x058a0];

/*0x05900*/ u64 tmac_int_status;
#define VXGE_HW_TMAC_INT_STATUS_TXMAC_GEN_ERR_TXMAC_GEN_INT vxge_mBIT(3)
#define VXGE_HW_TMAC_INT_STATUS_TXMAC_ECC_ERR_TXMAC_ECC_INT vxge_mBIT(7)
/*0x05908*/ u64 tmac_int_mask;
/*0x05910*/ u64 txmac_gen_err_reg;
#define VXGE_HW_TXMAC_GEN_ERR_REG_TMACJ_PERMANENT_STOP vxge_mBIT(3)
#define VXGE_HW_TXMAC_GEN_ERR_REG_TMACJ_NO_VALID_VSPORT vxge_mBIT(7)
/*0x05918*/ u64 txmac_gen_err_mask;
/*0x05920*/ u64 txmac_gen_err_alarm;
/*0x05928*/ u64 txmac_ecc_err_reg;
#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2MAC_SG_ERR vxge_mBIT(3)
#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2MAC_DB_ERR vxge_mBIT(7)
#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_SB_SG_ERR vxge_mBIT(11)
#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_SB_DB_ERR vxge_mBIT(15)
#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_DA_SG_ERR vxge_mBIT(19)
#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_DA_DB_ERR vxge_mBIT(23)
#define VXGE_HW_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT0_FSM_ERR vxge_mBIT(27)
#define VXGE_HW_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT1_FSM_ERR vxge_mBIT(31)
#define VXGE_HW_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT2_FSM_ERR vxge_mBIT(35)
#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMACJ_FSM_ERR vxge_mBIT(39)
/*0x05930*/ u64 txmac_ecc_err_mask;
/*0x05938*/ u64 txmac_ecc_err_alarm;
 u8 unused05978[0x05978-0x05940];

/*0x05978*/ u64 dbg_stat_tx_any_frms;
#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_PORT0_TX_ANY_FRMS(val) vxge_vBIT(val, 0, 8)
#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_PORT1_TX_ANY_FRMS(val) vxge_vBIT(val, 8, 8)
#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_PORT2_TX_ANY_FRMS(val) \
 vxge_vBIT(val, 16, 8)
 u8 unused059a0[0x059a0-0x05980];

/*0x059a0*/ u64 txmac_link_util_port[3];
#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_TMAC_UTILIZATION(val) \
 vxge_vBIT(val, 1, 7)
#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_UTIL_CFG(val) vxge_vBIT(val, 8, 4)
#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_TMAC_FRAC_UTIL(val) \
 vxge_vBIT(val, 12, 4)
#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_PKT_WEIGHT(val) vxge_vBIT(val, 16, 4)
#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_TMAC_SCALE_FACTOR vxge_mBIT(23)
/*0x059b8*/ u64 txmac_cfg0_port[3];
#define VXGE_HW_TXMAC_CFG0_PORT_TMAC_EN vxge_mBIT(3)
#define VXGE_HW_TXMAC_CFG0_PORT_APPEND_PAD vxge_mBIT(7)
#define VXGE_HW_TXMAC_CFG0_PORT_PAD_BYTE(val) vxge_vBIT(val, 8, 8)
/*0x059d0*/ u64 txmac_cfg1_port[3];
#define VXGE_HW_TXMAC_CFG1_PORT_AVG_IPG(val) vxge_vBIT(val, 40, 8)
/*0x059e8*/ u64 txmac_status_port[3];
#define VXGE_HW_TXMAC_STATUS_PORT_TMAC_TX_FRM_SENT vxge_mBIT(3)
 u8 unused05a20[0x05a20-0x05a00];

/*0x05a20*/ u64 lag_distrib_dest;
#define VXGE_HW_LAG_DISTRIB_DEST_MAP_VPATH(n) vxge_mBIT(n)
/*0x05a28*/ u64 lag_marker_cfg;
#define VXGE_HW_LAG_MARKER_CFG_GEN_RCVR_EN vxge_mBIT(3)
#define VXGE_HW_LAG_MARKER_CFG_RESP_EN vxge_mBIT(7)
#define VXGE_HW_LAG_MARKER_CFG_RESP_TIMEOUT(val) vxge_vBIT(val, 16, 16)
#define VXGE_HW_LAG_MARKER_CFG_SLOW_PROTO_MRKR_MIN_INTERVAL(val) \
 vxge_vBIT(val, 32, 16)
#define VXGE_HW_LAG_MARKER_CFG_THROTTLE_MRKR_RESP vxge_mBIT(51)
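/*
 * lag_marker_cfg above controls the 802.3ad marker protocol: GEN_RCVR_EN
 * and RESP_EN presumably enable the marker generator/receiver and the
 * marker responder, with RESP_TIMEOUT bounding how long a transmitted
 * marker may wait for its response before the distributor gives up on
 * flushing that link.
 */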
/*0x05a30*/ u64 lag_tx_cfg;
#define VXGE_HW_LAG_TX_CFG_INCR_TX_AGGR_STATS vxge_mBIT(3)
#define VXGE_HW_LAG_TX_CFG_DISTRIB_ALG_SEL(val) vxge_vBIT(val, 6, 2)
#define VXGE_HW_LAG_TX_CFG_DISTRIB_REMAP_IF_FAIL vxge_mBIT(11)
#define VXGE_HW_LAG_TX_CFG_COLL_MAX_DELAY(val) vxge_vBIT(val, 16, 16)
/*0x05a38*/ u64 lag_tx_status;
#define VXGE_HW_LAG_TX_STATUS_TLAG_TIMER_VAL_EMPTIED_LINK(val) \
 vxge_vBIT(val, 0, 8)
#define VXGE_HW_LAG_TX_STATUS_TLAG_TIMER_VAL_SLOW_PROTO_MRKR(val) \
 vxge_vBIT(val, 8, 8)
#define VXGE_HW_LAG_TX_STATUS_TLAG_TIMER_VAL_SLOW_PROTO_MRKRRESP(val) \
 vxge_vBIT(val, 16, 8)
 u8 unused05d48[0x05d48-0x05a40];

/*0x05d48*/ u64 srpcim_to_mrpcim_vplane_rmsg[17];
#define \
VXGE_HAL_SRPCIM_TO_MRPCIM_VPLANE_RMSG_SWIF_SRPCIM_TO_MRPCIM_VPLANE_RMSG(val)\
 vxge_vBIT(val, 0, 64)
 u8 unused06420[0x06420-0x05dd0];

/*0x06420*/ u64 mrpcim_to_srpcim_vplane_wmsg[17];
#define VXGE_HW_MRPCIM_TO_SRPCIM_VPLANE_WMSG_MRPCIM_TO_SRPCIM_VPLANE_WMSG(val) \
 vxge_vBIT(val, 0, 64)
/*0x064a8*/ u64 mrpcim_to_srpcim_vplane_wmsg_trig[17];

/*0x06530*/ u64 debug_stats0;
#define VXGE_HW_DEBUG_STATS0_RSTDROP_MSG(val) vxge_vBIT(val, 0, 32)
#define VXGE_HW_DEBUG_STATS0_RSTDROP_CPL(val) vxge_vBIT(val, 32, 32)
/*0x06538*/ u64 debug_stats1;
#define VXGE_HW_DEBUG_STATS1_RSTDROP_CLIENT0(val) vxge_vBIT(val, 0, 32)
#define VXGE_HW_DEBUG_STATS1_RSTDROP_CLIENT1(val) vxge_vBIT(val, 32, 32)
/*0x06540*/ u64 debug_stats2;
#define VXGE_HW_DEBUG_STATS2_RSTDROP_CLIENT2(val) vxge_vBIT(val, 0, 32)
/*0x06548*/ u64 debug_stats3_vplane[17];
#define VXGE_HW_DEBUG_STATS3_VPLANE_DEPL_PH(val) vxge_vBIT(val, 0, 16)
#define VXGE_HW_DEBUG_STATS3_VPLANE_DEPL_NPH(val) vxge_vBIT(val, 16, 16)
#define VXGE_HW_DEBUG_STATS3_VPLANE_DEPL_CPLH(val) vxge_vBIT(val, 32, 16)
/*0x065d0*/ u64 debug_stats4_vplane[17];
#define VXGE_HW_DEBUG_STATS4_VPLANE_DEPL_PD(val) vxge_vBIT(val, 0, 16)
#define VXGE_HW_DEBUG_STATS4_VPLANE_DEPL_NPD(val) vxge_vBIT(val, 16, 16)
#define VXGE_HW_DEBUG_STATS4_VPLANE_DEPL_CPLD(val) vxge_vBIT(val, 32, 16)
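/*
 * DEPL_PH/NPH/CPLH and DEPL_PD/NPD/CPLD in the two per-vplane stats above
 * match the standard PCIe flow-control credit classes (posted, non-posted
 * and completion header/data), so these counters presumably record
 * credit-depletion events per virtual plane.
 */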

 u8 unused07000[0x07000-0x06658];

/*0x07000*/ u64 mrpcim_general_int_status;
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PIC_INT vxge_mBIT(0)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCI_INT vxge_mBIT(1)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_RTDMA_INT vxge_mBIT(2)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_WRDMA_INT vxge_mBIT(3)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3CMCT_INT vxge_mBIT(4)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_GCMG1_INT vxge_mBIT(5)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_GCMG2_INT vxge_mBIT(6)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_GCMG3_INT vxge_mBIT(7)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3CMIFL_INT vxge_mBIT(8)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3CMIFU_INT vxge_mBIT(9)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCMG1_INT vxge_mBIT(10)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCMG2_INT vxge_mBIT(11)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCMG3_INT vxge_mBIT(12)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_XMAC_INT vxge_mBIT(13)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_RXMAC_INT vxge_mBIT(14)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_TMAC_INT vxge_mBIT(15)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3FBIF_INT vxge_mBIT(16)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_FBMC_INT vxge_mBIT(17)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3FBCT_INT vxge_mBIT(18)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_TPA_INT vxge_mBIT(19)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_DRBELL_INT vxge_mBIT(20)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_ONE_INT vxge_mBIT(21)
#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_MSG_INT vxge_mBIT(22)
/*0x07008*/ u64 mrpcim_general_int_mask;
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PIC_INT vxge_mBIT(0)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCI_INT vxge_mBIT(1)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_RTDMA_INT vxge_mBIT(2)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_WRDMA_INT vxge_mBIT(3)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3CMCT_INT vxge_mBIT(4)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_GCMG1_INT vxge_mBIT(5)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_GCMG2_INT vxge_mBIT(6)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_GCMG3_INT vxge_mBIT(7)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3CMIFL_INT vxge_mBIT(8)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3CMIFU_INT vxge_mBIT(9)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCMG1_INT vxge_mBIT(10)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCMG2_INT vxge_mBIT(11)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCMG3_INT vxge_mBIT(12)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_XMAC_INT vxge_mBIT(13)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_RXMAC_INT vxge_mBIT(14)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_TMAC_INT vxge_mBIT(15)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3FBIF_INT vxge_mBIT(16)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_FBMC_INT vxge_mBIT(17)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3FBCT_INT vxge_mBIT(18)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_TPA_INT vxge_mBIT(19)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_DRBELL_INT vxge_mBIT(20)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_ONE_INT vxge_mBIT(21)
#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_MSG_INT vxge_mBIT(22)
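/*
 * mrpcim_general_int_mask repeats the status layout bit for bit; in the
 * usual convention for this device a set mask bit suppresses the matching
 * summary interrupt, which is presumably why the two register definitions
 * are kept textually parallel.
 */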
/*0x07010*/ u64 mrpcim_ppif_int_status;
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_INI_ERRORS_INI_INT vxge_mBIT(3)
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_DMA_ERRORS_DMA_INT vxge_mBIT(7)
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_TGT_ERRORS_TGT_INT vxge_mBIT(11)
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CONFIG_ERRORS_CONFIG_INT vxge_mBIT(15)
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_CRDT_INT vxge_mBIT(19)
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_PLL_ERRORS_PLL_INT vxge_mBIT(27)
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE0_CRD_INT_VPLANE0_INT\
 vxge_mBIT(31)
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE1_CRD_INT_VPLANE1_INT\
 vxge_mBIT(32)
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE2_CRD_INT_VPLANE2_INT\
 vxge_mBIT(33)
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE3_CRD_INT_VPLANE3_INT\
 vxge_mBIT(34)
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE4_CRD_INT_VPLANE4_INT\
 vxge_mBIT(35)
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE5_CRD_INT_VPLANE5_INT\
 vxge_mBIT(36)
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE6_CRD_INT_VPLANE6_INT\
 vxge_mBIT(37)
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE7_CRD_INT_VPLANE7_INT\
 vxge_mBIT(38)
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE8_CRD_INT_VPLANE8_INT\
 vxge_mBIT(39)
#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE9_CRD_INT_VPLANE9_INT\
 vxge_mBIT(40)
#define \
VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE10_CRD_INT_VPLANE10_INT \
 vxge_mBIT(41)
#define \
VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE11_CRD_INT_VPLANE11_INT \
 vxge_mBIT(42)
#define \
VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE12_CRD_INT_VPLANE12_INT \
 vxge_mBIT(43)
#define \
VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE13_CRD_INT_VPLANE13_INT \
 vxge_mBIT(44)
#define \
VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE14_CRD_INT_VPLANE14_INT \
 vxge_mBIT(45)
#define \
VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE15_CRD_INT_VPLANE15_INT \
 vxge_mBIT(46)
#define \
VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE16_CRD_INT_VPLANE16_INT \
 vxge_mBIT(47)
#define \
VXGE_HW_MRPCIM_PPIF_INT_STATUS_VPATH_TO_MRPCIM_ALARM_VPATH_TO_MRPCIM_ALARM_INT \
 vxge_mBIT(55)
/*0x07018*/ u64 mrpcim_ppif_int_mask;
 u8 unused07028[0x07028-0x07020];

/*0x07028*/ u64 ini_errors_reg;
#define VXGE_HW_INI_ERRORS_REG_SCPL_CPL_TIMEOUT_UNUSED_TAG vxge_mBIT(3)
#define VXGE_HW_INI_ERRORS_REG_SCPL_CPL_TIMEOUT vxge_mBIT(7)
#define VXGE_HW_INI_ERRORS_REG_DCPL_FSM_ERR vxge_mBIT(11)
#define VXGE_HW_INI_ERRORS_REG_DCPL_POISON vxge_mBIT(12)
#define VXGE_HW_INI_ERRORS_REG_DCPL_UNSUPPORTED vxge_mBIT(15)
#define VXGE_HW_INI_ERRORS_REG_DCPL_ABORT vxge_mBIT(19)
#define VXGE_HW_INI_ERRORS_REG_INI_TLP_ABORT vxge_mBIT(23)
#define VXGE_HW_INI_ERRORS_REG_INI_DLLP_ABORT vxge_mBIT(27)
#define VXGE_HW_INI_ERRORS_REG_INI_ECRC_ERR vxge_mBIT(31)
#define VXGE_HW_INI_ERRORS_REG_INI_BUF_DB_ERR vxge_mBIT(35)
#define VXGE_HW_INI_ERRORS_REG_INI_BUF_SG_ERR vxge_mBIT(39)
#define VXGE_HW_INI_ERRORS_REG_INI_DATA_OVERFLOW vxge_mBIT(43)
#define VXGE_HW_INI_ERRORS_REG_INI_HDR_OVERFLOW vxge_mBIT(47)
#define VXGE_HW_INI_ERRORS_REG_INI_MRD_SYS_DROP vxge_mBIT(51)
#define VXGE_HW_INI_ERRORS_REG_INI_MWR_SYS_DROP vxge_mBIT(55)
#define VXGE_HW_INI_ERRORS_REG_INI_MRD_CLIENT_DROP vxge_mBIT(59)
#define VXGE_HW_INI_ERRORS_REG_INI_MWR_CLIENT_DROP vxge_mBIT(63)
/*0x07030*/ u64 ini_errors_mask;
/*0x07038*/ u64 ini_errors_alarm;
3018/*0x07040*/ u64 dma_errors_reg;
3019#define VXGE_HW_DMA_ERRORS_REG_RDARB_FSM_ERR vxge_mBIT(3)
3020#define VXGE_HW_DMA_ERRORS_REG_WRARB_FSM_ERR vxge_mBIT(7)
3021#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_HDR_OVERFLOW vxge_mBIT(8)
3022#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_HDR_UNDERFLOW vxge_mBIT(9)
3023#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_DATA_OVERFLOW vxge_mBIT(10)
3024#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_DATA_UNDERFLOW vxge_mBIT(11)
3025#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_HDR_OVERFLOW vxge_mBIT(12)
3026#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_HDR_UNDERFLOW vxge_mBIT(13)
3027#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_DATA_OVERFLOW vxge_mBIT(14)
3028#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_DATA_UNDERFLOW vxge_mBIT(15)
3029#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_HDR_OVERFLOW vxge_mBIT(16)
3030#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_HDR_UNDERFLOW vxge_mBIT(17)
3031#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_DATA_OVERFLOW vxge_mBIT(18)
3032#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_DATA_UNDERFLOW vxge_mBIT(19)
3033#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_HDR_OVERFLOW vxge_mBIT(20)
3034#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_HDR_UNDERFLOW vxge_mBIT(21)
3035#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_DATA_OVERFLOW vxge_mBIT(22)
3036#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_DATA_UNDERFLOW vxge_mBIT(23)
3037#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_RD_HDR_OVERFLOW vxge_mBIT(24)
3038#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_RD_HDR_UNDERFLOW vxge_mBIT(25)
3039#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_RD_HDR_OVERFLOW vxge_mBIT(28)
3040#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_RD_HDR_UNDERFLOW vxge_mBIT(29)
3041#define VXGE_HW_DMA_ERRORS_REG_DBLGEN_FSM_ERR vxge_mBIT(32)
3042#define VXGE_HW_DMA_ERRORS_REG_DBLGEN_CREDIT_FSM_ERR vxge_mBIT(33)
3043#define VXGE_HW_DMA_ERRORS_REG_DBLGEN_DMA_WRR_SM_ERR vxge_mBIT(34)
3044/*0x07048*/ u64 dma_errors_mask;
3045/*0x07050*/ u64 dma_errors_alarm;
3046/*0x07058*/ u64 tgt_errors_reg;
3047#define VXGE_HW_TGT_ERRORS_REG_TGT_VENDOR_MSG vxge_mBIT(0)
3048#define VXGE_HW_TGT_ERRORS_REG_TGT_MSG_UNLOCK vxge_mBIT(1)
3049#define VXGE_HW_TGT_ERRORS_REG_TGT_ILLEGAL_TLP_BE vxge_mBIT(2)
3050#define VXGE_HW_TGT_ERRORS_REG_TGT_BOOT_WRITE vxge_mBIT(3)
3051#define VXGE_HW_TGT_ERRORS_REG_TGT_PIF_WR_CROSS_QWRANGE vxge_mBIT(4)
3052#define VXGE_HW_TGT_ERRORS_REG_TGT_PIF_READ_CROSS_QWRANGE vxge_mBIT(5)
3053#define VXGE_HW_TGT_ERRORS_REG_TGT_KDFC_READ vxge_mBIT(6)
3054#define VXGE_HW_TGT_ERRORS_REG_TGT_USDC_READ vxge_mBIT(7)
3055#define VXGE_HW_TGT_ERRORS_REG_TGT_USDC_WR_CROSS_QWRANGE vxge_mBIT(8)
3056#define VXGE_HW_TGT_ERRORS_REG_TGT_MSIX_BEYOND_RANGE vxge_mBIT(9)
3057#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_KDFC_POISON vxge_mBIT(10)
3058#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_USDC_POISON vxge_mBIT(11)
3059#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_PIF_POISON vxge_mBIT(12)
3060#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_MSIX_POISON vxge_mBIT(13)
3061#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_MRIOV_POISON vxge_mBIT(14)
3062#define VXGE_HW_TGT_ERRORS_REG_TGT_NOT_MEM_TLP vxge_mBIT(15)
3063#define VXGE_HW_TGT_ERRORS_REG_TGT_UNKNOWN_MEM_TLP vxge_mBIT(16)
3064#define VXGE_HW_TGT_ERRORS_REG_TGT_REQ_FSM_ERR vxge_mBIT(17)
3065#define VXGE_HW_TGT_ERRORS_REG_TGT_CPL_FSM_ERR vxge_mBIT(18)
3066#define VXGE_HW_TGT_ERRORS_REG_TGT_KDFC_PROT_ERR vxge_mBIT(19)
3067#define VXGE_HW_TGT_ERRORS_REG_TGT_SWIF_PROT_ERR vxge_mBIT(20)
3068#define VXGE_HW_TGT_ERRORS_REG_TGT_MRIOV_MEM_MAP_CFG_ERR vxge_mBIT(21)
3069/*0x07060*/ u64 tgt_errors_mask;
3070/*0x07068*/ u64 tgt_errors_alarm;
3071/*0x07070*/ u64 config_errors_reg;
3072#define VXGE_HW_CONFIG_ERRORS_REG_I2C_ILLEGAL_STOP_COND vxge_mBIT(3)
3073#define VXGE_HW_CONFIG_ERRORS_REG_I2C_ILLEGAL_START_COND vxge_mBIT(7)
3074#define VXGE_HW_CONFIG_ERRORS_REG_I2C_EXP_RD_CNT vxge_mBIT(11)
3075#define VXGE_HW_CONFIG_ERRORS_REG_I2C_EXTRA_CYCLE vxge_mBIT(15)
3076#define VXGE_HW_CONFIG_ERRORS_REG_I2C_MAIN_FSM_ERR vxge_mBIT(19)
3077#define VXGE_HW_CONFIG_ERRORS_REG_I2C_REQ_COLLISION vxge_mBIT(23)
3078#define VXGE_HW_CONFIG_ERRORS_REG_I2C_REG_FSM_ERR vxge_mBIT(27)
3079#define VXGE_HW_CONFIG_ERRORS_REG_CFGM_I2C_TIMEOUT vxge_mBIT(31)
3080#define VXGE_HW_CONFIG_ERRORS_REG_RIC_I2C_TIMEOUT vxge_mBIT(35)
3081#define VXGE_HW_CONFIG_ERRORS_REG_CFGM_FSM_ERR vxge_mBIT(39)
3082#define VXGE_HW_CONFIG_ERRORS_REG_RIC_FSM_ERR vxge_mBIT(43)
3083#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_ILLEGAL_ACCESS vxge_mBIT(47)
3084#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_TIMEOUT vxge_mBIT(51)
3085#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_FSM_ERR vxge_mBIT(55)
3086#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_TO_FSM_ERR vxge_mBIT(59)
3087#define VXGE_HW_CONFIG_ERRORS_REG_RIC_RIC_RD_TIMEOUT vxge_mBIT(63)
3088/*0x07078*/ u64 config_errors_mask;
3089/*0x07080*/ u64 config_errors_alarm;
3090 u8 unused07090[0x07090-0x07088];
3091
3092/*0x07090*/ u64 crdt_errors_reg;
3093#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_FSM_ERR vxge_mBIT(11)
3094#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_INTCTL_ILLEGAL_CRD_DEAL \
3095 vxge_mBIT(15)
3096#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_PDA_ILLEGAL_CRD_DEAL vxge_mBIT(19)
3097#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_PCI_MSG_ILLEGAL_CRD_DEAL \
3098 vxge_mBIT(23)
3099#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_FSM_ERR vxge_mBIT(35)
3100#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_RDA_ILLEGAL_CRD_DEAL vxge_mBIT(39)
3101#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_PDA_ILLEGAL_CRD_DEAL vxge_mBIT(43)
3102#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_DBLGEN_ILLEGAL_CRD_DEAL \
3103 vxge_mBIT(47)
3104/*0x07098*/ u64 crdt_errors_mask;
3105/*0x070a0*/ u64 crdt_errors_alarm;
3106 u8 unused070b0[0x070b0-0x070a8];
3107
3108/*0x070b0*/ u64 mrpcim_general_errors_reg;
3109#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_STATSB_FSM_ERR vxge_mBIT(3)
3110#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_XGEN_FSM_ERR vxge_mBIT(7)
3111#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_XMEM_FSM_ERR vxge_mBIT(11)
3112#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_KDFCCTL_FSM_ERR vxge_mBIT(15)
3113#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_MRIOVCTL_FSM_ERR vxge_mBIT(19)
3114#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_SPI_FLSH_ERR vxge_mBIT(23)
3115#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_SPI_IIC_ACK_ERR vxge_mBIT(27)
3116#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_SPI_IIC_CHKSUM_ERR vxge_mBIT(31)
3117#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_INI_SERR_DET vxge_mBIT(35)
3118#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_INTCTL_MSIX_FSM_ERR vxge_mBIT(39)
3119#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_INTCTL_MSI_OVERFLOW vxge_mBIT(43)
3120#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_PPIF_PCI_NOT_FLUSH_DURING_SW_RESET \
3121 vxge_mBIT(47)
3122#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_PPIF_SW_RESET_FSM_ERR vxge_mBIT(51)
3123/*0x070b8*/ u64 mrpcim_general_errors_mask;
3124/*0x070c0*/ u64 mrpcim_general_errors_alarm;
3125 u8 unused070d0[0x070d0-0x070c8];
3126
3127/*0x070d0*/ u64 pll_errors_reg;
3128#define VXGE_HW_PLL_ERRORS_REG_CORE_CMG_PLL_OOL vxge_mBIT(3)
3129#define VXGE_HW_PLL_ERRORS_REG_CORE_FB_PLL_OOL vxge_mBIT(7)
3130#define VXGE_HW_PLL_ERRORS_REG_CORE_X_PLL_OOL vxge_mBIT(11)
3131/*0x070d8*/ u64 pll_errors_mask;
3132/*0x070e0*/ u64 pll_errors_alarm;
3133/*0x070e8*/ u64 srpcim_to_mrpcim_alarm_reg;
3134#define VXGE_HW_SRPCIM_TO_MRPCIM_ALARM_REG_PPIF_SRPCIM_TO_MRPCIM_ALARM(val) \
3135 vxge_vBIT(val, 0, 17)
3136/*0x070f0*/ u64 srpcim_to_mrpcim_alarm_mask;
3137/*0x070f8*/ u64 srpcim_to_mrpcim_alarm_alarm;
3138/*0x07100*/ u64 vpath_to_mrpcim_alarm_reg;
3139#define VXGE_HW_VPATH_TO_MRPCIM_ALARM_REG_PPIF_VPATH_TO_MRPCIM_ALARM(val) \
3140 vxge_vBIT(val, 0, 17)
3141/*0x07108*/ u64 vpath_to_mrpcim_alarm_mask;
3142/*0x07110*/ u64 vpath_to_mrpcim_alarm_alarm;
3143 u8 unused07128[0x07128-0x07118];
3144
3145/*0x07128*/ u64 crdt_errors_vplane_reg[17];
3146#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_H_CONSUME_CRDT_ERR \
3147 vxge_mBIT(3)
3148#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_D_CONSUME_CRDT_ERR \
3149 vxge_mBIT(7)
3150#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_H_RETURN_CRDT_ERR \
3151 vxge_mBIT(11)
3152#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_D_RETURN_CRDT_ERR \
3153 vxge_mBIT(15)
3154#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_NP_H_CONSUME_CRDT_ERR \
3155 vxge_mBIT(19)
3156#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_NP_H_RETURN_CRDT_ERR \
3157 vxge_mBIT(23)
3158#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_TAG_CONSUME_TAG_ERR \
3159 vxge_mBIT(27)
3160#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_TAG_RETURN_TAG_ERR \
3161 vxge_mBIT(31)
3162/*0x07130*/ u64 crdt_errors_vplane_mask[17];
3163/*0x07138*/ u64 crdt_errors_vplane_alarm[17];
3164 u8 unused072f0[0x072f0-0x072c0];
3165
3166/*0x072f0*/ u64 mrpcim_rst_in_prog;
3167#define VXGE_HW_MRPCIM_RST_IN_PROG_MRPCIM_RST_IN_PROG vxge_mBIT(7)
3168/*0x072f8*/ u64 mrpcim_reg_modified;
3169#define VXGE_HW_MRPCIM_REG_MODIFIED_MRPCIM_REG_MODIFIED vxge_mBIT(7)
3170
3171 u8 unused07378[0x07378-0x07300];
3172
3173/*0x07378*/ u64 write_arb_pending;
3174#define VXGE_HW_WRITE_ARB_PENDING_WRARB_WRDMA vxge_mBIT(3)
3175#define VXGE_HW_WRITE_ARB_PENDING_WRARB_RTDMA vxge_mBIT(7)
3176#define VXGE_HW_WRITE_ARB_PENDING_WRARB_MSG vxge_mBIT(11)
3177#define VXGE_HW_WRITE_ARB_PENDING_WRARB_STATSB vxge_mBIT(15)
3178#define VXGE_HW_WRITE_ARB_PENDING_WRARB_INTCTL vxge_mBIT(19)
3179/*0x07380*/ u64 read_arb_pending;
3180#define VXGE_HW_READ_ARB_PENDING_RDARB_WRDMA vxge_mBIT(3)
3181#define VXGE_HW_READ_ARB_PENDING_RDARB_RTDMA vxge_mBIT(7)
3182#define VXGE_HW_READ_ARB_PENDING_RDARB_DBLGEN vxge_mBIT(11)
3183/*0x07388*/ u64 dmaif_dmadbl_pending;
3184#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_WRDMA_WR vxge_mBIT(0)
3185#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_WRDMA_RD vxge_mBIT(1)
3186#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_RTDMA_WR vxge_mBIT(2)
3187#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_RTDMA_RD vxge_mBIT(3)
3188#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_MSG_WR vxge_mBIT(4)
3189#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_STATS_WR vxge_mBIT(5)
3190#define VXGE_HW_DMAIF_DMADBL_PENDING_DBLGEN_IN_PROG(val) \
3191 vxge_vBIT(val, 13, 51)
3192/*0x07390*/ u64 wrcrdtarb_status0_vplane[17];
3193#define VXGE_HW_WRCRDTARB_STATUS0_VPLANE_WRCRDTARB_ABS_AVAIL_P_H(val) \
3194 vxge_vBIT(val, 0, 8)
3195/*0x07418*/ u64 wrcrdtarb_status1_vplane[17];
3196#define VXGE_HW_WRCRDTARB_STATUS1_VPLANE_WRCRDTARB_ABS_AVAIL_P_D(val) \
3197 vxge_vBIT(val, 4, 12)
3198 u8 unused07500[0x07500-0x074a0];
3199
3200/*0x07500*/ u64 mrpcim_general_cfg1;
3201#define VXGE_HW_MRPCIM_GENERAL_CFG1_CLEAR_SERR vxge_mBIT(7)
3202/*0x07508*/ u64 mrpcim_general_cfg2;
3203#define VXGE_HW_MRPCIM_GENERAL_CFG2_INS_TX_WR_TD vxge_mBIT(3)
3204#define VXGE_HW_MRPCIM_GENERAL_CFG2_INS_TX_RD_TD vxge_mBIT(7)
3205#define VXGE_HW_MRPCIM_GENERAL_CFG2_INS_TX_CPL_TD vxge_mBIT(11)
3206#define VXGE_HW_MRPCIM_GENERAL_CFG2_INI_TIMEOUT_EN_MWR vxge_mBIT(15)
3207#define VXGE_HW_MRPCIM_GENERAL_CFG2_INI_TIMEOUT_EN_MRD vxge_mBIT(19)
3208#define VXGE_HW_MRPCIM_GENERAL_CFG2_IGNORE_VPATH_RST_FOR_MSIX vxge_mBIT(23)
3209#define VXGE_HW_MRPCIM_GENERAL_CFG2_FLASH_READ_MSB vxge_mBIT(27)
3210#define VXGE_HW_MRPCIM_GENERAL_CFG2_DIS_HOST_PIPELINE_WR vxge_mBIT(31)
3211#define VXGE_HW_MRPCIM_GENERAL_CFG2_MRPCIM_STATS_ENABLE vxge_mBIT(43)
3212#define VXGE_HW_MRPCIM_GENERAL_CFG2_MRPCIM_STATS_MAP_TO_VPATH(val) \
3213 vxge_vBIT(val, 47, 5)
3214#define VXGE_HW_MRPCIM_GENERAL_CFG2_EN_BLOCK_MSIX_DUE_TO_SERR vxge_mBIT(55)
3215#define VXGE_HW_MRPCIM_GENERAL_CFG2_FORCE_SENDING_INTA vxge_mBIT(59)
3216#define VXGE_HW_MRPCIM_GENERAL_CFG2_DIS_SWIF_PROT_ON_RDS vxge_mBIT(63)
3217/*0x07510*/ u64 mrpcim_general_cfg3;
3218#define VXGE_HW_MRPCIM_GENERAL_CFG3_PROTECTION_CA_OR_UNSUPN vxge_mBIT(0)
3219#define VXGE_HW_MRPCIM_GENERAL_CFG3_ILLEGAL_RD_CA_OR_UNSUPN vxge_mBIT(3)
3220#define VXGE_HW_MRPCIM_GENERAL_CFG3_RD_BYTE_SWAPEN vxge_mBIT(7)
3221#define VXGE_HW_MRPCIM_GENERAL_CFG3_RD_BIT_FLIPEN vxge_mBIT(11)
3222#define VXGE_HW_MRPCIM_GENERAL_CFG3_WR_BYTE_SWAPEN vxge_mBIT(15)
3223#define VXGE_HW_MRPCIM_GENERAL_CFG3_WR_BIT_FLIPEN vxge_mBIT(19)
3224#define VXGE_HW_MRPCIM_GENERAL_CFG3_MR_MAX_MVFS(val) vxge_vBIT(val, 20, 16)
3225#define VXGE_HW_MRPCIM_GENERAL_CFG3_MR_MVF_TBL_SIZE(val) \
3226 vxge_vBIT(val, 36, 16)
3227#define VXGE_HW_MRPCIM_GENERAL_CFG3_PF0_SW_RESET_EN vxge_mBIT(55)
3228#define VXGE_HW_MRPCIM_GENERAL_CFG3_REG_MODIFIED_CFG(val) vxge_vBIT(val, 56, 2)
3229#define VXGE_HW_MRPCIM_GENERAL_CFG3_CPL_ECC_ENABLE_N vxge_mBIT(59)
3230#define VXGE_HW_MRPCIM_GENERAL_CFG3_BYPASS_DAISY_CHAIN vxge_mBIT(63)
3231/*0x07518*/ u64 mrpcim_stats_start_host_addr;
3232#define VXGE_HW_MRPCIM_STATS_START_HOST_ADDR_MRPCIM_STATS_START_HOST_ADDR(val)\
3233 vxge_vBIT(val, 0, 57)
3234
3235 u8 unused07950[0x07950-0x07520];
3236
3237/*0x07950*/ u64 rdcrdtarb_cfg0;
3238#define VXGE_HW_RDCRDTARB_CFG0_RDA_MAX_OUTSTANDING_RDS(val) \
3239 vxge_vBIT(val, 18, 6)
3240#define VXGE_HW_RDCRDTARB_CFG0_PDA_MAX_OUTSTANDING_RDS(val) \
3241 vxge_vBIT(val, 26, 6)
3242#define VXGE_HW_RDCRDTARB_CFG0_DBLGEN_MAX_OUTSTANDING_RDS(val) \
3243 vxge_vBIT(val, 34, 6)
3244#define VXGE_HW_RDCRDTARB_CFG0_WAIT_CNT(val) vxge_vBIT(val, 48, 4)
3245#define VXGE_HW_RDCRDTARB_CFG0_MAX_OUTSTANDING_RDS(val) vxge_vBIT(val, 54, 6)
3246#define VXGE_HW_RDCRDTARB_CFG0_EN_XON vxge_mBIT(63)
3247 u8 unused07be8[0x07be8-0x07958];
3248
3249/*0x07be8*/ u64 bf_sw_reset;
3250#define VXGE_HW_BF_SW_RESET_BF_SW_RESET(val) vxge_vBIT(val, 0, 8)
3251/*0x07bf0*/ u64 sw_reset_status;
3252#define VXGE_HW_SW_RESET_STATUS_RESET_CMPLT vxge_mBIT(7)
3253#define VXGE_HW_SW_RESET_STATUS_INIT_CMPLT vxge_mBIT(15)
3254 u8 unused07d30[0x07d30-0x07bf8];
3255
3256/*0x07d30*/ u64 mrpcim_debug_stats0;
3257#define VXGE_HW_MRPCIM_DEBUG_STATS0_INI_WR_DROP(val) vxge_vBIT(val, 0, 32)
3258#define VXGE_HW_MRPCIM_DEBUG_STATS0_INI_RD_DROP(val) vxge_vBIT(val, 32, 32)
3259/*0x07d38*/ u64 mrpcim_debug_stats1_vplane[17];
3260#define VXGE_HW_MRPCIM_DEBUG_STATS1_VPLANE_WRCRDTARB_PH_CRDT_DEPLETED(val) \
3261 vxge_vBIT(val, 32, 32)
3262/*0x07dc0*/ u64 mrpcim_debug_stats2_vplane[17];
3263#define VXGE_HW_MRPCIM_DEBUG_STATS2_VPLANE_WRCRDTARB_PD_CRDT_DEPLETED(val) \
3264 vxge_vBIT(val, 32, 32)
3265/*0x07e48*/ u64 mrpcim_debug_stats3_vplane[17];
3266#define VXGE_HW_MRPCIM_DEBUG_STATS3_VPLANE_RDCRDTARB_NPH_CRDT_DEPLETED(val) \
3267 vxge_vBIT(val, 32, 32)
3268/*0x07ed0*/ u64 mrpcim_debug_stats4;
3269#define VXGE_HW_MRPCIM_DEBUG_STATS4_INI_WR_VPIN_DROP(val) vxge_vBIT(val, 0, 32)
3270#define VXGE_HW_MRPCIM_DEBUG_STATS4_INI_RD_VPIN_DROP(val) \
3271 vxge_vBIT(val, 32, 32)
3272/*0x07ed8*/ u64 genstats_count01;
3273#define VXGE_HW_GENSTATS_COUNT01_GENSTATS_COUNT1(val) vxge_vBIT(val, 0, 32)
3274#define VXGE_HW_GENSTATS_COUNT01_GENSTATS_COUNT0(val) vxge_vBIT(val, 32, 32)
3275/*0x07ee0*/ u64 genstats_count23;
3276#define VXGE_HW_GENSTATS_COUNT23_GENSTATS_COUNT3(val) vxge_vBIT(val, 0, 32)
3277#define VXGE_HW_GENSTATS_COUNT23_GENSTATS_COUNT2(val) vxge_vBIT(val, 32, 32)
3278/*0x07ee8*/ u64 genstats_count4;
3279#define VXGE_HW_GENSTATS_COUNT4_GENSTATS_COUNT4(val) vxge_vBIT(val, 32, 32)
3280/*0x07ef0*/ u64 genstats_count5;
3281#define VXGE_HW_GENSTATS_COUNT5_GENSTATS_COUNT5(val) vxge_vBIT(val, 32, 32)
3282
3283 u8 unused07f08[0x07f08-0x07ef8];
3284
3285/*0x07f08*/ u64 genstats_cfg[6];
3286#define VXGE_HW_GENSTATS_CFG_DTYPE_SEL(val) vxge_vBIT(val, 3, 5)
3287#define VXGE_HW_GENSTATS_CFG_CLIENT_NO_SEL(val) vxge_vBIT(val, 9, 3)
3288#define VXGE_HW_GENSTATS_CFG_WR_RD_CPL_SEL(val) vxge_vBIT(val, 14, 2)
3289#define VXGE_HW_GENSTATS_CFG_VPATH_SEL(val) vxge_vBIT(val, 31, 17)
3290/*0x07f38*/ u64 genstat_64bit_cfg;
3291#define VXGE_HW_GENSTAT_64BIT_CFG_EN_FOR_GENSTATS0 vxge_mBIT(3)
3292#define VXGE_HW_GENSTAT_64BIT_CFG_EN_FOR_GENSTATS2 vxge_mBIT(7)
3293 u8 unused08000[0x08000-0x07f40];
3294/*0x08000*/ u64 gcmg3_int_status;
3295#define VXGE_HW_GCMG3_INT_STATUS_GSTC_ERR0_GSTC0_INT vxge_mBIT(0)
3296#define VXGE_HW_GCMG3_INT_STATUS_GSTC_ERR1_GSTC1_INT vxge_mBIT(1)
3297#define VXGE_HW_GCMG3_INT_STATUS_GH2L_ERR0_GH2L0_INT vxge_mBIT(2)
3298#define VXGE_HW_GCMG3_INT_STATUS_GHSQ_ERR_GH2L1_INT vxge_mBIT(3)
3299#define VXGE_HW_GCMG3_INT_STATUS_GHSQ_ERR2_GH2L2_INT vxge_mBIT(4)
3300#define VXGE_HW_GCMG3_INT_STATUS_GH2L_SMERR0_GH2L3_INT vxge_mBIT(5)
3301#define VXGE_HW_GCMG3_INT_STATUS_GHSQ_ERR3_GH2L4_INT vxge_mBIT(6)
3302/*0x08008*/ u64 gcmg3_int_mask;
3303 u8 unused09000[0x09000-0x8010];
3304
3305/*0x09000*/ u64 g3ifcmd_fb_int_status;
3306#define VXGE_HW_G3IFCMD_FB_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
3307/*0x09008*/ u64 g3ifcmd_fb_int_mask;
3308/*0x09010*/ u64 g3ifcmd_fb_err_reg;
3309#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_CK_DLL_LOCK vxge_mBIT(6)
3310#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_SM_ERR vxge_mBIT(7)
3311#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_RWDQS_DLL_LOCK(val) \
3312 vxge_vBIT(val, 24, 8)
3313#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_IOCAL_FAULT vxge_mBIT(55)
3314/*0x09018*/ u64 g3ifcmd_fb_err_mask;
3315/*0x09020*/ u64 g3ifcmd_fb_err_alarm;
3316
3317 u8 unused09400[0x09400-0x09028];
3318
3319/*0x09400*/ u64 g3ifcmd_cmu_int_status;
3320#define VXGE_HW_G3IFCMD_CMU_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
3321/*0x09408*/ u64 g3ifcmd_cmu_int_mask;
3322/*0x09410*/ u64 g3ifcmd_cmu_err_reg;
3323#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_CK_DLL_LOCK vxge_mBIT(6)
3324#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_SM_ERR vxge_mBIT(7)
3325#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_RWDQS_DLL_LOCK(val) \
3326 vxge_vBIT(val, 24, 8)
3327#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_IOCAL_FAULT vxge_mBIT(55)
3328/*0x09418*/ u64 g3ifcmd_cmu_err_mask;
3329/*0x09420*/ u64 g3ifcmd_cmu_err_alarm;
3330
3331 u8 unused09800[0x09800-0x09428];
3332
3333/*0x09800*/ u64 g3ifcmd_cml_int_status;
3334#define VXGE_HW_G3IFCMD_CML_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
3335/*0x09808*/ u64 g3ifcmd_cml_int_mask;
3336/*0x09810*/ u64 g3ifcmd_cml_err_reg;
3337#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_CK_DLL_LOCK vxge_mBIT(6)
3338#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_SM_ERR vxge_mBIT(7)
3339#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_RWDQS_DLL_LOCK(val) \
3340 vxge_vBIT(val, 24, 8)
3341#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_IOCAL_FAULT vxge_mBIT(55)
3342/*0x09818*/ u64 g3ifcmd_cml_err_mask;
3343/*0x09820*/ u64 g3ifcmd_cml_err_alarm;
3344 u8 unused09b00[0x09b00-0x09828];
3345
3346/*0x09b00*/ u64 vpath_to_vplane_map[17];
3347#define VXGE_HW_VPATH_TO_VPLANE_MAP_VPATH_TO_VPLANE_MAP(val) \
3348 vxge_vBIT(val, 3, 5)
3349 u8 unused09c30[0x09c30-0x09b88];
3350
3351/*0x09c30*/ u64 xgxs_cfg_port[2];
3352#define VXGE_HW_XGXS_CFG_PORT_SIG_DETECT_FORCE_LOS(val) vxge_vBIT(val, 16, 4)
3353#define VXGE_HW_XGXS_CFG_PORT_SIG_DETECT_FORCE_VALID(val) vxge_vBIT(val, 20, 4)
3354#define VXGE_HW_XGXS_CFG_PORT_SEL_INFO_0 vxge_mBIT(27)
3355#define VXGE_HW_XGXS_CFG_PORT_SEL_INFO_1(val) vxge_vBIT(val, 29, 3)
3356#define VXGE_HW_XGXS_CFG_PORT_TX_LANE0_SKEW(val) vxge_vBIT(val, 32, 4)
3357#define VXGE_HW_XGXS_CFG_PORT_TX_LANE1_SKEW(val) vxge_vBIT(val, 36, 4)
3358#define VXGE_HW_XGXS_CFG_PORT_TX_LANE2_SKEW(val) vxge_vBIT(val, 40, 4)
3359#define VXGE_HW_XGXS_CFG_PORT_TX_LANE3_SKEW(val) vxge_vBIT(val, 44, 4)
3360/*0x09c40*/ u64 xgxs_rxber_cfg_port[2];
3361#define VXGE_HW_XGXS_RXBER_CFG_PORT_INTERVAL_DUR(val) vxge_vBIT(val, 0, 4)
3362#define VXGE_HW_XGXS_RXBER_CFG_PORT_RXGXS_INTERVAL_CNT(val) \
3363 vxge_vBIT(val, 16, 48)
3364/*0x09c50*/ u64 xgxs_rxber_status_port[2];
3365#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_A_ERR_CNT(val) \
3366 vxge_vBIT(val, 0, 16)
3367#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_B_ERR_CNT(val) \
3368 vxge_vBIT(val, 16, 16)
3369#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_C_ERR_CNT(val) \
3370 vxge_vBIT(val, 32, 16)
3371#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_D_ERR_CNT(val) \
3372 vxge_vBIT(val, 48, 16)
3373/*0x09c60*/ u64 xgxs_status_port[2];
3374#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_TX_ACTIVITY(val) vxge_vBIT(val, 0, 4)
3375#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_RX_ACTIVITY(val) vxge_vBIT(val, 4, 4)
3376#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_CTC_FIFO_ERR BIT(11)
3377#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_BYTE_SYNC_LOST(val) \
3378 vxge_vBIT(val, 12, 4)
3379#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_CTC_ERR(val) vxge_vBIT(val, 16, 4)
3380#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_ALIGNMENT_ERR vxge_mBIT(23)
3381#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_DEC_ERR(val) vxge_vBIT(val, 24, 8)
3382#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_SKIP_INS_REQ(val) \
3383 vxge_vBIT(val, 32, 4)
3384#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_SKIP_DEL_REQ(val) \
3385 vxge_vBIT(val, 36, 4)
3386/*0x09c70*/ u64 xgxs_pma_reset_port[2];
3387#define VXGE_HW_XGXS_PMA_RESET_PORT_SERDES_RESET(val) vxge_vBIT(val, 0, 8)
3388 u8 unused09c90[0x09c90-0x09c80];
3389
3390/*0x09c90*/ u64 xgxs_static_cfg_port[2];
3391#define VXGE_HW_XGXS_STATIC_CFG_PORT_FW_CTRL_SERDES vxge_mBIT(3)
3392 u8 unused09d40[0x09d40-0x09ca0];
3393
3394/*0x09d40*/ u64 xgxs_info_port[2];
3395#define VXGE_HW_XGXS_INFO_PORT_XMACJ_INFO_0(val) vxge_vBIT(val, 0, 32)
3396#define VXGE_HW_XGXS_INFO_PORT_XMACJ_INFO_1(val) vxge_vBIT(val, 32, 32)
3397/*0x09d50*/ u64 ratemgmt_cfg_port[2];
3398#define VXGE_HW_RATEMGMT_CFG_PORT_MODE(val) vxge_vBIT(val, 2, 2)
3399#define VXGE_HW_RATEMGMT_CFG_PORT_RATE vxge_mBIT(7)
3400#define VXGE_HW_RATEMGMT_CFG_PORT_FIXED_USE_FSM vxge_mBIT(11)
3401#define VXGE_HW_RATEMGMT_CFG_PORT_ANTP_USE_FSM vxge_mBIT(15)
3402#define VXGE_HW_RATEMGMT_CFG_PORT_ANBE_USE_FSM vxge_mBIT(19)
3403/*0x09d60*/ u64 ratemgmt_status_port[2];
3404#define VXGE_HW_RATEMGMT_STATUS_PORT_RATEMGMT_COMPLETE vxge_mBIT(3)
3405#define VXGE_HW_RATEMGMT_STATUS_PORT_RATEMGMT_RATE vxge_mBIT(7)
3406#define VXGE_HW_RATEMGMT_STATUS_PORT_RATEMGMT_MAC_MATCHES_PHY vxge_mBIT(11)
3407 u8 unused09d80[0x09d80-0x09d70];
3408
3409/*0x09d80*/ u64 ratemgmt_fixed_cfg_port[2];
3410#define VXGE_HW_RATEMGMT_FIXED_CFG_PORT_RESTART vxge_mBIT(7)
3411/*0x09d90*/ u64 ratemgmt_antp_cfg_port[2];
3412#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_RESTART vxge_mBIT(7)
3413#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_USE_PREAMBLE_EXT_PHY vxge_mBIT(11)
3414#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_USE_ACT_SEL vxge_mBIT(15)
3415#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_T_RETRY_PHY_QUERY(val) \
3416 vxge_vBIT(val, 16, 4)
3417#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_T_WAIT_MDIO_RESPONSE(val) \
3418 vxge_vBIT(val, 20, 4)
3419#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_T_LDOWN_REAUTO_RESPONSE(val) \
3420 vxge_vBIT(val, 24, 4)
3421#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_ADVERTISE_10G vxge_mBIT(31)
3422#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_ADVERTISE_1G vxge_mBIT(35)
3423/*0x09da0*/ u64 ratemgmt_anbe_cfg_port[2];
3424#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_RESTART vxge_mBIT(7)
3425#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_PARALLEL_DETECT_10G_KX4_ENABLE \
3426 vxge_mBIT(11)
3427#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_PARALLEL_DETECT_1G_KX_ENABLE \
3428 vxge_mBIT(15)
3429#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_T_SYNC_10G_KX4(val) vxge_vBIT(val, 16, 4)
3430#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_T_SYNC_1G_KX(val) vxge_vBIT(val, 20, 4)
3431#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_T_DME_EXCHANGE(val) vxge_vBIT(val, 24, 4)
3432#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_ADVERTISE_10G_KX4 vxge_mBIT(31)
3433#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_ADVERTISE_1G_KX vxge_mBIT(35)
3434/*0x09db0*/ u64 anbe_cfg_port[2];
3435#define VXGE_HW_ANBE_CFG_PORT_RESET_CFG_REGS(val) vxge_vBIT(val, 0, 8)
3436#define VXGE_HW_ANBE_CFG_PORT_ALIGN_10G_KX4_OVERRIDE(val) vxge_vBIT(val, 10, 2)
3437#define VXGE_HW_ANBE_CFG_PORT_SYNC_1G_KX_OVERRIDE(val) vxge_vBIT(val, 14, 2)
3438/*0x09dc0*/ u64 anbe_mgr_ctrl_port[2];
3439#define VXGE_HW_ANBE_MGR_CTRL_PORT_WE vxge_mBIT(3)
3440#define VXGE_HW_ANBE_MGR_CTRL_PORT_STROBE vxge_mBIT(7)
3441#define VXGE_HW_ANBE_MGR_CTRL_PORT_ADDR(val) vxge_vBIT(val, 15, 9)
3442#define VXGE_HW_ANBE_MGR_CTRL_PORT_DATA(val) vxge_vBIT(val, 32, 32)
3443 u8 unused09de0[0x09de0-0x09dd0];
3444
3445/*0x09de0*/ u64 anbe_fw_mstr_port[2];
3446#define VXGE_HW_ANBE_FW_MSTR_PORT_CONNECT_BEAN_TO_SERDES vxge_mBIT(3)
3447#define VXGE_HW_ANBE_FW_MSTR_PORT_TX_ZEROES_TO_SERDES vxge_mBIT(7)
3448/*0x09df0*/ u64 anbe_hwfsm_gen_status_port[2];
3449#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_10G_KX4_USING_PD \
3450 vxge_mBIT(3)
3451#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_10G_KX4_USING_DME \
3452 vxge_mBIT(7)
3453#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_1G_KX_USING_PD \
3454 vxge_mBIT(11)
3455#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_1G_KX_USING_DME \
3456 vxge_mBIT(15)
3457#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_ANBEFSM_STATE(val) \
3458 vxge_vBIT(val, 18, 6)
3459#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_BEAN_NEXT_PAGE_RECEIVED \
3460 vxge_mBIT(27)
3461#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_BEAN_BASE_PAGE_RECEIVED \
3462 vxge_mBIT(35)
3463#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_BEAN_AUTONEG_COMPLETE \
3464 vxge_mBIT(39)
3465#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_NP_BEFORE_BP \
3466 vxge_mBIT(43)
3467#define \
3468VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_AN_COMPLETE_BEFORE_BP \
3469 vxge_mBIT(47)
3470#define \
3471VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_AN_COMPLETE_BEFORE_NP \
3472vxge_mBIT(51)
3473#define \
3474VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_MODE_WHEN_AN_COMPLETE \
3475 vxge_mBIT(55)
3476#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_COUNT_BP(val) \
3477 vxge_vBIT(val, 56, 4)
3478#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_COUNT_NP(val) \
3479 vxge_vBIT(val, 60, 4)
3480/*0x09e00*/ u64 anbe_hwfsm_bp_status_port[2];
3481#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_FEC_ENABLE \
3482 vxge_mBIT(32)
3483#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_FEC_ABILITY \
3484 vxge_mBIT(33)
3485#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_10G_KR_CAPABLE \
3486 vxge_mBIT(40)
3487#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_10G_KX4_CAPABLE \
3488 vxge_mBIT(41)
3489#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_1G_KX_CAPABLE \
3490 vxge_mBIT(42)
3491#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_TX_NONCE(val) \
3492 vxge_vBIT(val, 43, 5)
3493#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_NP vxge_mBIT(48)
3494#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ACK vxge_mBIT(49)
3495#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_REMOTE_FAULT \
3496 vxge_mBIT(50)
3497#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ASM_DIR vxge_mBIT(51)
3498#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_PAUSE vxge_mBIT(53)
3499#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ECHOED_NONCE(val) \
3500 vxge_vBIT(val, 54, 5)
3501#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_SELECTOR_FIELD(val) \
3502 vxge_vBIT(val, 59, 5)
3503/*0x09e10*/ u64 anbe_hwfsm_np_status_port[2];
3504#define VXGE_HW_ANBE_HWFSM_NP_STATUS_PORT_RATEMGMT_NP_BITS_47_TO_32(val) \
3505 vxge_vBIT(val, 16, 16)
3506#define VXGE_HW_ANBE_HWFSM_NP_STATUS_PORT_RATEMGMT_NP_BITS_31_TO_0(val) \
3507 vxge_vBIT(val, 32, 32)
3508 u8 unused09e30[0x09e30-0x09e20];
3509
3510/*0x09e30*/ u64 antp_gen_cfg_port[2];
3511/*0x09e40*/ u64 antp_hwfsm_gen_status_port[2];
3512#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_10G vxge_mBIT(3)
3513#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_1G vxge_mBIT(7)
3514#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_ANTPFSM_STATE(val) \
3515 vxge_vBIT(val, 10, 6)
3516#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_AUTONEG_COMPLETE \
3517 vxge_mBIT(23)
3518#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_NO_LP_XNP \
3519 vxge_mBIT(27)
3520#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_GOT_LP_XNP vxge_mBIT(31)
3521#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_MESSAGE_CODE \
3522 vxge_mBIT(35)
3523#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_NO_HCD \
3524 vxge_mBIT(43)
3525#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_FOUND_HCD vxge_mBIT(47)
3526#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_INVALID_RATE \
3527 vxge_mBIT(51)
3528#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_VALID_RATE vxge_mBIT(55)
3529#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_PERSISTENT_LDOWN \
3530 vxge_mBIT(59)
3531/*0x09e50*/ u64 antp_hwfsm_bp_status_port[2];
3532#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_NP vxge_mBIT(0)
3533#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ACK vxge_mBIT(1)
3534#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_RF vxge_mBIT(2)
3535#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_XNP vxge_mBIT(3)
3536#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ABILITY_FIELD(val) \
3537 vxge_vBIT(val, 4, 7)
3538#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_SELECTOR_FIELD(val) \
3539 vxge_vBIT(val, 11, 5)
3540/*0x09e60*/ u64 antp_hwfsm_xnp_status_port[2];
3541#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_NP vxge_mBIT(0)
3542#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_ACK vxge_mBIT(1)
3543#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_MP vxge_mBIT(2)
3544#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_ACK2 vxge_mBIT(3)
3545#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_TOGGLE vxge_mBIT(4)
3546#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_MESSAGE_CODE(val) \
3547 vxge_vBIT(val, 5, 11)
3548#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_UNF_CODE_FIELD1(val) \
3549 vxge_vBIT(val, 16, 16)
3550#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_UNF_CODE_FIELD2(val) \
3551 vxge_vBIT(val, 32, 16)
3552/*0x09e70*/ u64 mdio_mgr_access_port[2];
3553#define VXGE_HW_MDIO_MGR_ACCESS_PORT_STROBE_ONE BIT(3)
3554#define VXGE_HW_MDIO_MGR_ACCESS_PORT_OP_TYPE(val) vxge_vBIT(val, 5, 3)
3555#define VXGE_HW_MDIO_MGR_ACCESS_PORT_DEVAD(val) vxge_vBIT(val, 11, 5)
3556#define VXGE_HW_MDIO_MGR_ACCESS_PORT_ADDR(val) vxge_vBIT(val, 16, 16)
3557#define VXGE_HW_MDIO_MGR_ACCESS_PORT_DATA(val) vxge_vBIT(val, 32, 16)
3558#define VXGE_HW_MDIO_MGR_ACCESS_PORT_ST_PATTERN(val) vxge_vBIT(val, 49, 2)
3559#define VXGE_HW_MDIO_MGR_ACCESS_PORT_PREAMBLE vxge_mBIT(51)
3560#define VXGE_HW_MDIO_MGR_ACCESS_PORT_PRTAD(val) vxge_vBIT(val, 55, 5)
3561#define VXGE_HW_MDIO_MGR_ACCESS_PORT_STROBE_TWO vxge_mBIT(63)
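
/*
 * Usage sketch (illustrative only, not part of the register map): a
 * single MDIO management transaction is described by one 64-bit word
 * built from the field macros above.  The helper name and the bare
 * writeq() are assumptions for the example; the driver wraps register
 * writes in its own accessors, and the OP_TYPE encoding comes from the
 * hardware documentation, not from this header.
 */
#if 0
static void example_mdio_request(u64 __iomem *mdio_mgr_access_port,
                                 u32 op, u32 devad, u32 addr,
                                 u32 prtad, u32 data)
{
        u64 val = VXGE_HW_MDIO_MGR_ACCESS_PORT_STROBE_ONE |
                  VXGE_HW_MDIO_MGR_ACCESS_PORT_OP_TYPE(op) |
                  VXGE_HW_MDIO_MGR_ACCESS_PORT_DEVAD(devad) |
                  VXGE_HW_MDIO_MGR_ACCESS_PORT_ADDR(addr) |
                  VXGE_HW_MDIO_MGR_ACCESS_PORT_DATA(data) |
                  VXGE_HW_MDIO_MGR_ACCESS_PORT_PRTAD(prtad) |
                  VXGE_HW_MDIO_MGR_ACCESS_PORT_STROBE_TWO;

        writeq(val, mdio_mgr_access_port);      /* kick off the access */
}
#endif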
        u8 unused0a200[0x0a200-0x09e80];
/*0x0a200*/ u64 xmac_vsport_choices_vh[17];
#define VXGE_HW_XMAC_VSPORT_CHOICES_VH_VSPORT_VECTOR(val) vxge_vBIT(val, 0, 17)
        u8 unused0a400[0x0a400-0x0a288];

/*0x0a400*/ u64 rx_thresh_cfg_vp[17];
#define VXGE_HW_RX_THRESH_CFG_VP_PAUSE_LOW_THR(val) vxge_vBIT(val, 0, 8)
#define VXGE_HW_RX_THRESH_CFG_VP_PAUSE_HIGH_THR(val) vxge_vBIT(val, 8, 8)
#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_0(val) vxge_vBIT(val, 16, 8)
#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_1(val) vxge_vBIT(val, 24, 8)
#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_2(val) vxge_vBIT(val, 32, 8)
#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_3(val) vxge_vBIT(val, 40, 8)
        u8 unused0ac90[0x0ac90-0x0a488];
} __packed;

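/*
 * Note (editorial sketch, not from the driver): the hexadecimal offset
 * comments and the unusedXXXXX padding arrays above are what keep each
 * member of this structure at its documented hardware offset.  Assuming
 * the structure that closes above is struct vxge_hw_mrpcim_reg, a
 * build-time assertion along these lines could verify the layout, e.g.
 * for bf_sw_reset at offset 0x07be8; BUILD_BUG_ON() and offsetof() are
 * assumed to come from the usual kernel headers.
 */
#if 0
static inline void example_check_mrpcim_layout(void)
{
        BUILD_BUG_ON(offsetof(struct vxge_hw_mrpcim_reg,
                              bf_sw_reset) != 0x07be8);
}
#endif
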
/*VXGE_HW_SRPCIM_REGS_H*/
struct vxge_hw_srpcim_reg {

/*0x00000*/ u64 tim_mr2sr_resource_assignment_vh;
#define VXGE_HW_TIM_MR2SR_RESOURCE_ASSIGNMENT_VH_BMAP_ROOT(val) \
        vxge_vBIT(val, 0, 32)
        u8 unused00100[0x00100-0x00008];

/*0x00100*/ u64 srpcim_pcipif_int_status;
#define VXGE_HW_SRPCIM_PCIPIF_INT_STATUS_MRPCIM_MSG_MRPCIM_MSG_INT BIT(3)
#define VXGE_HW_SRPCIM_PCIPIF_INT_STATUS_VPATH_MSG_VPATH_MSG_INT BIT(7)
#define VXGE_HW_SRPCIM_PCIPIF_INT_STATUS_SRPCIM_SPARE_R1_SRPCIM_SPARE_R1_INT \
        BIT(11)
/*0x00108*/ u64 srpcim_pcipif_int_mask;
/*0x00110*/ u64 mrpcim_msg_reg;
#define VXGE_HW_MRPCIM_MSG_REG_SWIF_MRPCIM_TO_SRPCIM_RMSG_INT BIT(3)
/*0x00118*/ u64 mrpcim_msg_mask;
/*0x00120*/ u64 mrpcim_msg_alarm;
/*0x00128*/ u64 vpath_msg_reg;
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH0_TO_SRPCIM_RMSG_INT BIT(0)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH1_TO_SRPCIM_RMSG_INT BIT(1)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH2_TO_SRPCIM_RMSG_INT BIT(2)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH3_TO_SRPCIM_RMSG_INT BIT(3)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH4_TO_SRPCIM_RMSG_INT BIT(4)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH5_TO_SRPCIM_RMSG_INT BIT(5)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH6_TO_SRPCIM_RMSG_INT BIT(6)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH7_TO_SRPCIM_RMSG_INT BIT(7)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH8_TO_SRPCIM_RMSG_INT BIT(8)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH9_TO_SRPCIM_RMSG_INT BIT(9)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH10_TO_SRPCIM_RMSG_INT BIT(10)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH11_TO_SRPCIM_RMSG_INT BIT(11)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH12_TO_SRPCIM_RMSG_INT BIT(12)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH13_TO_SRPCIM_RMSG_INT BIT(13)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH14_TO_SRPCIM_RMSG_INT BIT(14)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH15_TO_SRPCIM_RMSG_INT BIT(15)
#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH16_TO_SRPCIM_RMSG_INT BIT(16)
/*0x00130*/ u64 vpath_msg_mask;
/*0x00138*/ u64 vpath_msg_alarm;
        u8 unused00160[0x00160-0x00140];

/*0x00160*/ u64 srpcim_to_mrpcim_wmsg;
#define VXGE_HW_SRPCIM_TO_MRPCIM_WMSG_SRPCIM_TO_MRPCIM_WMSG(val) \
        vxge_vBIT(val, 0, 64)
/*0x00168*/ u64 srpcim_to_mrpcim_wmsg_trig;
#define VXGE_HW_SRPCIM_TO_MRPCIM_WMSG_TRIG_SRPCIM_TO_MRPCIM_WMSG_TRIG BIT(0)
/*0x00170*/ u64 mrpcim_to_srpcim_rmsg;
#define VXGE_HW_MRPCIM_TO_SRPCIM_RMSG_SWIF_MRPCIM_TO_SRPCIM_RMSG(val) \
        vxge_vBIT(val, 0, 64)
/*0x00178*/ u64 vpath_to_srpcim_rmsg_sel;
#define VXGE_HW_VPATH_TO_SRPCIM_RMSG_SEL_VPATH_TO_SRPCIM_RMSG_SEL(val) \
        vxge_vBIT(val, 0, 5)
/*0x00180*/ u64 vpath_to_srpcim_rmsg;
#define VXGE_HW_VPATH_TO_SRPCIM_RMSG_SWIF_VPATH_TO_SRPCIM_RMSG(val) \
        vxge_vBIT(val, 0, 64)
        u8 unused00200[0x00200-0x00188];

/*0x00200*/ u64 srpcim_general_int_status;
#define VXGE_HW_SRPCIM_GENERAL_INT_STATUS_PIC_INT BIT(0)
#define VXGE_HW_SRPCIM_GENERAL_INT_STATUS_PCI_INT BIT(3)
#define VXGE_HW_SRPCIM_GENERAL_INT_STATUS_XMAC_INT BIT(7)
        u8 unused00210[0x00210-0x00208];

/*0x00210*/ u64 srpcim_general_int_mask;
#define VXGE_HW_SRPCIM_GENERAL_INT_MASK_PIC_INT BIT(0)
#define VXGE_HW_SRPCIM_GENERAL_INT_MASK_PCI_INT BIT(3)
#define VXGE_HW_SRPCIM_GENERAL_INT_MASK_XMAC_INT BIT(7)
        u8 unused00220[0x00220-0x00218];

/*0x00220*/ u64 srpcim_ppif_int_status;

/*0x00228*/ u64 srpcim_ppif_int_mask;
/*0x00230*/ u64 srpcim_gen_errors_reg;
#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_PCICONFIG_PF_STATUS_ERR BIT(3)
#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_PCICONFIG_PF_UNCOR_ERR BIT(7)
#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_PCICONFIG_PF_COR_ERR BIT(11)
#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_INTCTRL_SCHED_INT BIT(15)
#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_INI_SERR_DET BIT(19)
#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_TGT_PF_ILLEGAL_ACCESS BIT(23)
/*0x00238*/ u64 srpcim_gen_errors_mask;
/*0x00240*/ u64 srpcim_gen_errors_alarm;
/*0x00248*/ u64 mrpcim_to_srpcim_alarm_reg;
#define VXGE_HW_MRPCIM_TO_SRPCIM_ALARM_REG_PPIF_MRPCIM_TO_SRPCIM_ALARM BIT(3)
/*0x00250*/ u64 mrpcim_to_srpcim_alarm_mask;
/*0x00258*/ u64 mrpcim_to_srpcim_alarm_alarm;
/*0x00260*/ u64 vpath_to_srpcim_alarm_reg;

/*0x00268*/ u64 vpath_to_srpcim_alarm_mask;
/*0x00270*/ u64 vpath_to_srpcim_alarm_alarm;
        u8 unused00280[0x00280-0x00278];

/*0x00280*/ u64 pf_sw_reset;
#define VXGE_HW_PF_SW_RESET_PF_SW_RESET(val) vxge_vBIT(val, 0, 8)
/*0x00288*/ u64 srpcim_general_cfg1;
#define VXGE_HW_SRPCIM_GENERAL_CFG1_BOOT_BYTE_SWAPEN BIT(19)
#define VXGE_HW_SRPCIM_GENERAL_CFG1_BOOT_BIT_FLIPEN BIT(23)
#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_ADDR_SWAPEN BIT(27)
#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_ADDR_FLIPEN BIT(31)
#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_DATA_SWAPEN BIT(35)
#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_DATA_FLIPEN BIT(39)
/*0x00290*/ u64 srpcim_interrupt_cfg1;
#define VXGE_HW_SRPCIM_INTERRUPT_CFG1_ALARM_MAP_TO_MSG(val) vxge_vBIT(val, 1, 7)
#define VXGE_HW_SRPCIM_INTERRUPT_CFG1_TRAFFIC_CLASS(val) vxge_vBIT(val, 9, 3)
        u8 unused002a8[0x002a8-0x00298];

/*0x002a8*/ u64 srpcim_clear_msix_mask;
#define VXGE_HW_SRPCIM_CLEAR_MSIX_MASK_SRPCIM_CLEAR_MSIX_MASK BIT(0)
/*0x002b0*/ u64 srpcim_set_msix_mask;
#define VXGE_HW_SRPCIM_SET_MSIX_MASK_SRPCIM_SET_MSIX_MASK BIT(0)
/*0x002b8*/ u64 srpcim_clr_msix_one_shot;
#define VXGE_HW_SRPCIM_CLR_MSIX_ONE_SHOT_SRPCIM_CLR_MSIX_ONE_SHOT BIT(0)
/*0x002c0*/ u64 srpcim_rst_in_prog;
#define VXGE_HW_SRPCIM_RST_IN_PROG_SRPCIM_RST_IN_PROG BIT(7)
/*0x002c8*/ u64 srpcim_reg_modified;
#define VXGE_HW_SRPCIM_REG_MODIFIED_SRPCIM_REG_MODIFIED BIT(7)
/*0x002d0*/ u64 tgt_pf_illegal_access;
#define VXGE_HW_TGT_PF_ILLEGAL_ACCESS_SWIF_REGION(val) vxge_vBIT(val, 1, 7)
/*0x002d8*/ u64 srpcim_msix_status;
#define VXGE_HW_SRPCIM_MSIX_STATUS_INTCTL_SRPCIM_MSIX_MASK BIT(3)
#define VXGE_HW_SRPCIM_MSIX_STATUS_INTCTL_SRPCIM_MSIX_PENDING_VECTOR BIT(7)
        u8 unused00880[0x00880-0x002e0];

/*0x00880*/ u64 xgmac_sr_int_status;
#define VXGE_HW_XGMAC_SR_INT_STATUS_ASIC_NTWK_SR_ERR_ASIC_NTWK_SR_INT BIT(3)
/*0x00888*/ u64 xgmac_sr_int_mask;
/*0x00890*/ u64 asic_ntwk_sr_err_reg;
#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_FAULT BIT(3)
#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_OK BIT(7)
#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_FAULT_OCCURRED \
        BIT(11)
#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_OK_OCCURRED BIT(15)
/*0x00898*/ u64 asic_ntwk_sr_err_mask;
/*0x008a0*/ u64 asic_ntwk_sr_err_alarm;
        u8 unused008c0[0x008c0-0x008a8];

/*0x008c0*/ u64 xmac_vsport_choices_sr_clone;
#define VXGE_HW_XMAC_VSPORT_CHOICES_SR_CLONE_VSPORT_VECTOR(val) \
        vxge_vBIT(val, 0, 17)
        u8 unused00900[0x00900-0x008c8];

/*0x00900*/ u64 mr_rqa_top_prty_for_vh;
#define VXGE_HW_MR_RQA_TOP_PRTY_FOR_VH_RQA_TOP_PRTY_FOR_VH(val) \
        vxge_vBIT(val, 59, 5)
/*0x00908*/ u64 umq_vh_data_list_empty;
#define VXGE_HW_UMQ_VH_DATA_LIST_EMPTY_ROCRC_UMQ_VH_DATA_LIST_EMPTY \
        BIT(0)
/*0x00910*/ u64 wde_cfg;
#define VXGE_HW_WDE_CFG_NS0_FORCE_MWB_START BIT(0)
#define VXGE_HW_WDE_CFG_NS0_FORCE_MWB_END BIT(1)
#define VXGE_HW_WDE_CFG_NS0_FORCE_QB_START BIT(2)
#define VXGE_HW_WDE_CFG_NS0_FORCE_QB_END BIT(3)
#define VXGE_HW_WDE_CFG_NS0_FORCE_MPSB_START BIT(4)
#define VXGE_HW_WDE_CFG_NS0_FORCE_MPSB_END BIT(5)
#define VXGE_HW_WDE_CFG_NS0_MWB_OPT_EN BIT(6)
#define VXGE_HW_WDE_CFG_NS0_QB_OPT_EN BIT(7)
#define VXGE_HW_WDE_CFG_NS0_MPSB_OPT_EN BIT(8)
#define VXGE_HW_WDE_CFG_NS1_FORCE_MWB_START BIT(9)
#define VXGE_HW_WDE_CFG_NS1_FORCE_MWB_END BIT(10)
#define VXGE_HW_WDE_CFG_NS1_FORCE_QB_START BIT(11)
#define VXGE_HW_WDE_CFG_NS1_FORCE_QB_END BIT(12)
#define VXGE_HW_WDE_CFG_NS1_FORCE_MPSB_START BIT(13)
#define VXGE_HW_WDE_CFG_NS1_FORCE_MPSB_END BIT(14)
#define VXGE_HW_WDE_CFG_NS1_MWB_OPT_EN BIT(15)
#define VXGE_HW_WDE_CFG_NS1_QB_OPT_EN BIT(16)
#define VXGE_HW_WDE_CFG_NS1_MPSB_OPT_EN BIT(17)
#define VXGE_HW_WDE_CFG_DISABLE_QPAD_FOR_UNALIGNED_ADDR BIT(19)
#define VXGE_HW_WDE_CFG_ALIGNMENT_PREFERENCE(val) vxge_vBIT(val, 30, 2)
#define VXGE_HW_WDE_CFG_MEM_WORD_SIZE(val) vxge_vBIT(val, 46, 2)

} __packed;

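/*
 * Usage sketch (assumed helper, not the driver's actual ISR): the
 * srpcim_general_int_status word above is decoded with the BIT() masks
 * that accompany it.  readq() stands in for the driver's own accessor.
 */
#if 0
static void example_srpcim_irq(struct vxge_hw_srpcim_reg __iomem *reg)
{
        u64 status = readq(&reg->srpcim_general_int_status);

        if (status & VXGE_HW_SRPCIM_GENERAL_INT_STATUS_PIC_INT) {
                /* service PIC sources */
        }
        if (status & VXGE_HW_SRPCIM_GENERAL_INT_STATUS_PCI_INT) {
                /* service PCI sources */
        }
        if (status & VXGE_HW_SRPCIM_GENERAL_INT_STATUS_XMAC_INT) {
                /* service XMAC sources */
        }
}
#endif
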
/*VXGE_HW_VPMGMT_REGS_H*/
struct vxge_hw_vpmgmt_reg {

        u8 unused00040[0x00040-0x00000];

/*0x00040*/ u64 vpath_to_func_map_cfg1;
#define VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_VPATH_TO_FUNC_MAP_CFG1(val) \
        vxge_vBIT(val, 3, 5)
/*0x00048*/ u64 vpath_is_first;
#define VXGE_HW_VPATH_IS_FIRST_VPATH_IS_FIRST vxge_mBIT(3)
/*0x00050*/ u64 srpcim_to_vpath_wmsg;
#define VXGE_HW_SRPCIM_TO_VPATH_WMSG_SRPCIM_TO_VPATH_WMSG(val) \
        vxge_vBIT(val, 0, 64)
/*0x00058*/ u64 srpcim_to_vpath_wmsg_trig;
#define VXGE_HW_SRPCIM_TO_VPATH_WMSG_TRIG_SRPCIM_TO_VPATH_WMSG_TRIG \
        vxge_mBIT(0)
        u8 unused00100[0x00100-0x00060];

/*0x00100*/ u64 tim_vpath_assignment;
#define VXGE_HW_TIM_VPATH_ASSIGNMENT_BMAP_ROOT(val) vxge_vBIT(val, 0, 32)
        u8 unused00140[0x00140-0x00108];

/*0x00140*/ u64 rqa_top_prty_for_vp;
#define VXGE_HW_RQA_TOP_PRTY_FOR_VP_RQA_TOP_PRTY_FOR_VP(val) \
        vxge_vBIT(val, 59, 5)
        u8 unused001c0[0x001c0-0x00148];

/*0x001c0*/ u64 rxmac_rx_pa_cfg0_vpmgmt_clone;
#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_IGNORE_FRAME_ERR vxge_mBIT(3)
#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SUPPORT_SNAP_AB_N vxge_mBIT(7)
#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SEARCH_FOR_HAO vxge_mBIT(18)
#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SUPPORT_MOBILE_IPV6_HDRS \
        vxge_mBIT(19)
#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_IPV6_STOP_SEARCHING \
        vxge_mBIT(23)
#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_NO_PS_IF_UNKNOWN vxge_mBIT(27)
#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SEARCH_FOR_ETYPE vxge_mBIT(35)
#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_ANY_FRM_IF_L3_CSUM_ERR \
        vxge_mBIT(39)
#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_OFFLD_FRM_IF_L3_CSUM_ERR \
        vxge_mBIT(43)
#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_ANY_FRM_IF_L4_CSUM_ERR \
        vxge_mBIT(47)
#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_OFFLD_FRM_IF_L4_CSUM_ERR \
        vxge_mBIT(51)
#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_ANY_FRM_IF_RPA_ERR \
        vxge_mBIT(55)
#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_OFFLD_FRM_IF_RPA_ERR \
        vxge_mBIT(59)
#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_JUMBO_SNAP_EN vxge_mBIT(63)
/*0x001c8*/ u64 rts_mgr_cfg0_vpmgmt_clone;
#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_RTS_DP_SP_PRIORITY vxge_mBIT(3)
#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_FLEX_L4PRTCL_VALUE(val) \
        vxge_vBIT(val, 24, 8)
#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_ICMP_TRASH vxge_mBIT(35)
#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_TCPSYN_TRASH vxge_mBIT(39)
#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_ZL4PYLD_TRASH vxge_mBIT(43)
#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_L4PRTCL_TCP_TRASH vxge_mBIT(47)
#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_L4PRTCL_UDP_TRASH vxge_mBIT(51)
#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_L4PRTCL_FLEX_TRASH vxge_mBIT(55)
#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_IPFRAG_TRASH vxge_mBIT(59)
/*0x001d0*/ u64 rts_mgr_criteria_priority_vpmgmt_clone;
#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_ETYPE(val) \
        vxge_vBIT(val, 5, 3)
#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_ICMP_TCPSYN(val) \
        vxge_vBIT(val, 9, 3)
#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_L4PN(val) \
        vxge_vBIT(val, 13, 3)
#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_RANGE_L4PN(val) \
        vxge_vBIT(val, 17, 3)
#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_RTH_IT(val) \
        vxge_vBIT(val, 21, 3)
#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_DS(val) \
        vxge_vBIT(val, 25, 3)
#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_QOS(val) \
        vxge_vBIT(val, 29, 3)
#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_ZL4PYLD(val) \
        vxge_vBIT(val, 33, 3)
#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_L4PRTCL(val) \
        vxge_vBIT(val, 37, 3)
/*0x001d8*/ u64 rxmac_cfg0_port_vpmgmt_clone[3];
#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_RMAC_EN vxge_mBIT(3)
#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS vxge_mBIT(7)
#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_DISCARD_PFRM vxge_mBIT(11)
#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_FCS_ERR vxge_mBIT(15)
#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_LONG_ERR vxge_mBIT(19)
#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_USIZED_ERR vxge_mBIT(23)
#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_LEN_MISMATCH \
        vxge_mBIT(27)
#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_MAX_PYLD_LEN(val) \
        vxge_vBIT(val, 50, 14)
/*0x001f0*/ u64 rxmac_pause_cfg_port_vpmgmt_clone[3];
#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_GEN_EN vxge_mBIT(3)
#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_RCV_EN vxge_mBIT(7)
#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_ACCEL_SEND(val) \
        vxge_vBIT(val, 9, 3)
#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_DUAL_THR vxge_mBIT(15)
#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_HIGH_PTIME(val) \
        vxge_vBIT(val, 20, 16)
#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_IGNORE_PF_FCS_ERR \
        vxge_mBIT(39)
#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_IGNORE_PF_LEN_ERR \
        vxge_mBIT(43)
#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_LIMITER_EN vxge_mBIT(47)
#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_MAX_LIMIT(val) \
        vxge_vBIT(val, 48, 8)
#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_PERMIT_RATEMGMT_CTRL \
        vxge_mBIT(59)
        u8 unused00240[0x00240-0x00208];

/*0x00240*/ u64 xmac_vsport_choices_vp;
#define VXGE_HW_XMAC_VSPORT_CHOICES_VP_VSPORT_VECTOR(val) vxge_vBIT(val, 0, 17)
        u8 unused00260[0x00260-0x00248];

/*0x00260*/ u64 xgmac_gen_status_vpmgmt_clone;
#define VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK vxge_mBIT(3)
#define VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_DATA_RATE \
        vxge_mBIT(11)
/*0x00268*/ u64 xgmac_status_port_vpmgmt_clone[2];
#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_RMAC_REMOTE_FAULT \
        vxge_mBIT(3)
#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_RMAC_LOCAL_FAULT vxge_mBIT(7)
#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_XMACJ_MAC_PHY_LAYER_AVAIL \
        vxge_mBIT(11)
#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_XMACJ_PORT_OK vxge_mBIT(15)
/*0x00278*/ u64 xmac_gen_cfg_vpmgmt_clone;
#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_RATEMGMT_MAC_RATE_SEL(val) \
        vxge_vBIT(val, 2, 2)
#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_TX_HEAD_DROP_WHEN_FAULT \
        vxge_mBIT(7)
#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_FAULT_BEHAVIOUR vxge_mBIT(27)
#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_PERIOD_NTWK_UP(val) \
        vxge_vBIT(val, 28, 4)
#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_PERIOD_NTWK_DOWN(val) \
        vxge_vBIT(val, 32, 4)
/*0x00280*/ u64 xmac_timestamp_vpmgmt_clone;
#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_EN vxge_mBIT(3)
#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_USE_LINK_ID(val) \
        vxge_vBIT(val, 6, 2)
#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_INTERVAL(val) vxge_vBIT(val, 12, 4)
#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_TIMER_RESTART vxge_mBIT(19)
#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_XMACJ_ROLLOVER_CNT(val) \
        vxge_vBIT(val, 32, 16)
/*0x00288*/ u64 xmac_stats_gen_cfg_vpmgmt_clone;
#define VXGE_HW_XMAC_STATS_GEN_CFG_VPMGMT_CLONE_PRTAGGR_CUM_TIMER(val) \
        vxge_vBIT(val, 4, 4)
#define VXGE_HW_XMAC_STATS_GEN_CFG_VPMGMT_CLONE_VPATH_CUM_TIMER(val) \
        vxge_vBIT(val, 8, 4)
#define VXGE_HW_XMAC_STATS_GEN_CFG_VPMGMT_CLONE_VLAN_HANDLING vxge_mBIT(15)
/*0x00290*/ u64 xmac_cfg_port_vpmgmt_clone[3];
#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_LOOPBACK vxge_mBIT(3)
#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_REVERSE_LOOPBACK \
        vxge_mBIT(7)
#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_TX_BEHAV vxge_mBIT(11)
#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_RX_BEHAV vxge_mBIT(15)
        u8 unused002c0[0x002c0-0x002a8];

/*0x002c0*/ u64 txmac_gen_cfg0_vpmgmt_clone;
#define VXGE_HW_TXMAC_GEN_CFG0_VPMGMT_CLONE_CHOSEN_TX_PORT vxge_mBIT(7)
/*0x002c8*/ u64 txmac_cfg0_port_vpmgmt_clone[3];
#define VXGE_HW_TXMAC_CFG0_PORT_VPMGMT_CLONE_TMAC_EN vxge_mBIT(3)
#define VXGE_HW_TXMAC_CFG0_PORT_VPMGMT_CLONE_APPEND_PAD vxge_mBIT(7)
#define VXGE_HW_TXMAC_CFG0_PORT_VPMGMT_CLONE_PAD_BYTE(val) vxge_vBIT(val, 8, 8)
        u8 unused00300[0x00300-0x002e0];

/*0x00300*/ u64 wol_mp_crc;
#define VXGE_HW_WOL_MP_CRC_CRC(val) vxge_vBIT(val, 0, 32)
#define VXGE_HW_WOL_MP_CRC_RC_EN vxge_mBIT(63)
/*0x00308*/ u64 wol_mp_mask_a;
#define VXGE_HW_WOL_MP_MASK_A_MASK(val) vxge_vBIT(val, 0, 64)
/*0x00310*/ u64 wol_mp_mask_b;
#define VXGE_HW_WOL_MP_MASK_B_MASK(val) vxge_vBIT(val, 0, 64)
        u8 unused00360[0x00360-0x00318];

/*0x00360*/ u64 fau_pa_cfg_vpmgmt_clone;
#define VXGE_HW_FAU_PA_CFG_VPMGMT_CLONE_REPL_L4_COMP_CSUM vxge_mBIT(3)
#define VXGE_HW_FAU_PA_CFG_VPMGMT_CLONE_REPL_L3_INCL_CF vxge_mBIT(7)
#define VXGE_HW_FAU_PA_CFG_VPMGMT_CLONE_REPL_L3_COMP_CSUM vxge_mBIT(11)
/*0x00368*/ u64 rx_datapath_util_vp_clone;
#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_FAU_RX_UTILIZATION(val) \
        vxge_vBIT(val, 7, 9)
#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_RX_UTIL_CFG(val) \
        vxge_vBIT(val, 16, 4)
#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_FAU_RX_FRAC_UTIL(val) \
        vxge_vBIT(val, 20, 4)
#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_RX_PKT_WEIGHT(val) \
        vxge_vBIT(val, 24, 4)
        u8 unused00380[0x00380-0x00370];

/*0x00380*/ u64 tx_datapath_util_vp_clone;
#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TPA_TX_UTILIZATION(val) \
        vxge_vBIT(val, 7, 9)
#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TX_UTIL_CFG(val) \
        vxge_vBIT(val, 16, 4)
#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TPA_TX_FRAC_UTIL(val) \
        vxge_vBIT(val, 20, 4)
#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TX_PKT_WEIGHT(val) \
        vxge_vBIT(val, 24, 4)

} __packed;
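
/*
 * Usage sketch (assumption): the *_vpmgmt_clone registers above appear
 * to be read-only mirrors of privileged configuration, letting a
 * virtual-path function inspect settings it cannot program itself.
 * readq() is a stand-in for the driver's own accessor.
 */
#if 0
static bool example_rmac_enabled(struct vxge_hw_vpmgmt_reg __iomem *reg,
                                 u32 port)
{
        return !!(readq(&reg->rxmac_cfg0_port_vpmgmt_clone[port]) &
                  VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_RMAC_EN);
}
#endif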

struct vxge_hw_vpath_reg {

        u8 unused00300[0x00300];

/*0x00300*/ u64 usdc_vpath;
#define VXGE_HW_USDC_VPATH_SGRP_ASSIGN(val) vxge_vBIT(val, 0, 32)
        u8 unused00a00[0x00a00-0x00308];

/*0x00a00*/ u64 wrdma_alarm_status;
#define VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT vxge_mBIT(1)
/*0x00a08*/ u64 wrdma_alarm_mask;
        u8 unused00a30[0x00a30-0x00a10];

/*0x00a30*/ u64 prc_alarm_reg;
#define VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP vxge_mBIT(0)
#define VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR vxge_mBIT(1)
#define VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT vxge_mBIT(2)
#define VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR vxge_mBIT(3)
/*0x00a38*/ u64 prc_alarm_mask;
/*0x00a40*/ u64 prc_alarm_alarm;
/*0x00a48*/ u64 prc_cfg1;
#define VXGE_HW_PRC_CFG1_RX_TIMER_VAL(val) vxge_vBIT(val, 3, 29)
#define VXGE_HW_PRC_CFG1_TIM_RING_BUMP_INT_ENABLE vxge_mBIT(34)
#define VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE vxge_mBIT(35)
#define VXGE_HW_PRC_CFG1_GREEDY_RETURN vxge_mBIT(36)
#define VXGE_HW_PRC_CFG1_QUICK_SHOT vxge_mBIT(37)
#define VXGE_HW_PRC_CFG1_RX_TIMER_CI vxge_mBIT(39)
#define VXGE_HW_PRC_CFG1_RESET_TIMER_ON_RXD_RET(val) vxge_vBIT(val, 40, 2)
        u8 unused00a60[0x00a60-0x00a50];

/*0x00a60*/ u64 prc_cfg4;
#define VXGE_HW_PRC_CFG4_IN_SVC vxge_mBIT(7)
#define VXGE_HW_PRC_CFG4_RING_MODE(val) vxge_vBIT(val, 14, 2)
#define VXGE_HW_PRC_CFG4_RXD_NO_SNOOP vxge_mBIT(22)
#define VXGE_HW_PRC_CFG4_FRM_NO_SNOOP vxge_mBIT(23)
#define VXGE_HW_PRC_CFG4_RTH_DISABLE vxge_mBIT(31)
#define VXGE_HW_PRC_CFG4_IGNORE_OWNERSHIP vxge_mBIT(32)
#define VXGE_HW_PRC_CFG4_SIGNAL_BENIGN_OVFLW vxge_mBIT(36)
#define VXGE_HW_PRC_CFG4_BIMODAL_INTERRUPT vxge_mBIT(37)
#define VXGE_HW_PRC_CFG4_BACKOFF_INTERVAL(val) vxge_vBIT(val, 40, 24)
/*0x00a68*/ u64 prc_cfg5;
#define VXGE_HW_PRC_CFG5_RXD0_ADD(val) vxge_vBIT(val, 0, 61)
/*0x00a70*/ u64 prc_cfg6;
#define VXGE_HW_PRC_CFG6_FRM_PAD_EN vxge_mBIT(0)
#define VXGE_HW_PRC_CFG6_QSIZE_ALIGNED_RXD vxge_mBIT(2)
#define VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN vxge_mBIT(5)
#define VXGE_HW_PRC_CFG6_L3_CPC_TRSFR_CODE_EN vxge_mBIT(8)
#define VXGE_HW_PRC_CFG6_L4_CPC_TRSFR_CODE_EN vxge_mBIT(9)
#define VXGE_HW_PRC_CFG6_RXD_CRXDT(val) vxge_vBIT(val, 23, 9)
#define VXGE_HW_PRC_CFG6_RXD_SPAT(val) vxge_vBIT(val, 36, 9)
/*0x00a78*/ u64 prc_cfg7;
#define VXGE_HW_PRC_CFG7_SCATTER_MODE(val) vxge_vBIT(val, 6, 2)
#define VXGE_HW_PRC_CFG7_SMART_SCAT_EN vxge_mBIT(11)
#define VXGE_HW_PRC_CFG7_RXD_NS_CHG_EN vxge_mBIT(12)
#define VXGE_HW_PRC_CFG7_NO_HDR_SEPARATION vxge_mBIT(14)
#define VXGE_HW_PRC_CFG7_RXD_BUFF_SIZE_MASK(val) vxge_vBIT(val, 20, 4)
#define VXGE_HW_PRC_CFG7_BUFF_SIZE0_MASK(val) vxge_vBIT(val, 27, 5)
/*0x00a80*/ u64 tim_dest_addr;
#define VXGE_HW_TIM_DEST_ADDR_TIM_DEST_ADDR(val) vxge_vBIT(val, 0, 64)
/*0x00a88*/ u64 prc_rxd_doorbell;
#define VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val) vxge_vBIT(val, 48, 16)
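/*
 * Usage sketch: after new RxDs are linked into a ring, the producer
 * notifies the PRC by writing the count of newly added quad-words to
 * prc_rxd_doorbell via the NEW_QW_CNT field macro.  The helper name and
 * the bare writeq() are illustrative assumptions.
 */
#if 0
static void example_post_rxd_doorbell(struct vxge_hw_vpath_reg __iomem *vp,
                                      u32 new_qw_cnt)
{
        writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_qw_cnt),
               &vp->prc_rxd_doorbell);
}
#endif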
/*0x00a90*/ u64 rqa_prty_for_vp;
#define VXGE_HW_RQA_PRTY_FOR_VP_RQA_PRTY_FOR_VP(val) vxge_vBIT(val, 59, 5)
/*0x00a98*/ u64 rxdmem_size;
#define VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(val) vxge_vBIT(val, 51, 13)
/*0x00aa0*/ u64 frm_in_progress_cnt;
#define VXGE_HW_FRM_IN_PROGRESS_CNT_PRC_FRM_IN_PROGRESS_CNT(val) \
        vxge_vBIT(val, 59, 5)
/*0x00aa8*/ u64 rx_multi_cast_stats;
#define VXGE_HW_RX_MULTI_CAST_STATS_FRAME_DISCARD(val) vxge_vBIT(val, 48, 16)
/*0x00ab0*/ u64 rx_frm_transferred;
#define VXGE_HW_RX_FRM_TRANSFERRED_RX_FRM_TRANSFERRED(val) \
        vxge_vBIT(val, 32, 32)
/*0x00ab8*/ u64 rxd_returned;
#define VXGE_HW_RXD_RETURNED_RXD_RETURNED(val) vxge_vBIT(val, 48, 16)
        u8 unused00c00[0x00c00-0x00ac0];

/*0x00c00*/ u64 kdfc_fifo_trpl_partition;
#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(val) vxge_vBIT(val, 17, 15)
#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_1(val) vxge_vBIT(val, 33, 15)
#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_2(val) vxge_vBIT(val, 49, 15)
/*0x00c08*/ u64 kdfc_fifo_trpl_ctrl;
#define VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE vxge_mBIT(7)
/*0x00c10*/ u64 kdfc_trpl_fifo_0_ctrl;
#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(val) vxge_vBIT(val, 14, 2)
#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_FLIP_EN vxge_mBIT(22)
#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN vxge_mBIT(23)
#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_INT_CTRL(val) vxge_vBIT(val, 26, 2)
#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_CTRL_STRUC vxge_mBIT(28)
#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_ADD_PAD vxge_mBIT(29)
#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_NO_SNOOP vxge_mBIT(30)
#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_RLX_ORD vxge_mBIT(31)
#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(val) vxge_vBIT(val, 32, 8)
#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_INT_NO(val) vxge_vBIT(val, 41, 7)
#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_BIT_MAP(val) vxge_vBIT(val, 48, 16)
/*0x00c18*/ u64 kdfc_trpl_fifo_1_ctrl;
#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_MODE(val) vxge_vBIT(val, 14, 2)
#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_FLIP_EN vxge_mBIT(22)
#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_SWAP_EN vxge_mBIT(23)
#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_INT_CTRL(val) vxge_vBIT(val, 26, 2)
#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_CTRL_STRUC vxge_mBIT(28)
#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_ADD_PAD vxge_mBIT(29)
#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_NO_SNOOP vxge_mBIT(30)
#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_RLX_ORD vxge_mBIT(31)
#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_SELECT(val) vxge_vBIT(val, 32, 8)
#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_INT_NO(val) vxge_vBIT(val, 41, 7)
#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_BIT_MAP(val) vxge_vBIT(val, 48, 16)
/*0x00c20*/ u64 kdfc_trpl_fifo_2_ctrl;
#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_FLIP_EN vxge_mBIT(22)
#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_SWAP_EN vxge_mBIT(23)
#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_INT_CTRL(val) vxge_vBIT(val, 26, 2)
#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_CTRL_STRUC vxge_mBIT(28)
#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_ADD_PAD vxge_mBIT(29)
#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_NO_SNOOP vxge_mBIT(30)
#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_RLX_ORD vxge_mBIT(31)
#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_SELECT(val) vxge_vBIT(val, 32, 8)
#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_INT_NO(val) vxge_vBIT(val, 41, 7)
#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_BIT_MAP(val) vxge_vBIT(val, 48, 16)
/*0x00c28*/ u64 kdfc_trpl_fifo_0_wb_address;
#define VXGE_HW_KDFC_TRPL_FIFO_0_WB_ADDRESS_ADD(val) vxge_vBIT(val, 0, 64)
/*0x00c30*/ u64 kdfc_trpl_fifo_1_wb_address;
#define VXGE_HW_KDFC_TRPL_FIFO_1_WB_ADDRESS_ADD(val) vxge_vBIT(val, 0, 64)
/*0x00c38*/ u64 kdfc_trpl_fifo_2_wb_address;
#define VXGE_HW_KDFC_TRPL_FIFO_2_WB_ADDRESS_ADD(val) vxge_vBIT(val, 0, 64)
/*0x00c40*/ u64 kdfc_trpl_fifo_offset;
#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_RCTR0(val) vxge_vBIT(val, 1, 15)
#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_RCTR1(val) vxge_vBIT(val, 17, 15)
#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_RCTR2(val) vxge_vBIT(val, 33, 15)
/*0x00c48*/ u64 kdfc_drbl_triplet_total;
#define VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_KDFC_MAX_SIZE(val) \
        vxge_vBIT(val, 17, 15)
        u8 unused00c60[0x00c60-0x00c50];

/*0x00c60*/ u64 usdc_drbl_ctrl;
#define VXGE_HW_USDC_DRBL_CTRL_FLIP_EN vxge_mBIT(22)
#define VXGE_HW_USDC_DRBL_CTRL_SWAP_EN vxge_mBIT(23)
/*0x00c68*/ u64 usdc_vp_ready;
#define VXGE_HW_USDC_VP_READY_USDC_HTN_READY vxge_mBIT(7)
#define VXGE_HW_USDC_VP_READY_USDC_SRQ_READY vxge_mBIT(15)
#define VXGE_HW_USDC_VP_READY_USDC_CQRQ_READY vxge_mBIT(23)
/*0x00c70*/ u64 kdfc_status;
#define VXGE_HW_KDFC_STATUS_KDFC_WRR_0_READY vxge_mBIT(0)
#define VXGE_HW_KDFC_STATUS_KDFC_WRR_1_READY vxge_mBIT(1)
#define VXGE_HW_KDFC_STATUS_KDFC_WRR_2_READY vxge_mBIT(2)
        u8 unused00c80[0x00c80-0x00c78];

/*0x00c80*/ u64 xmac_rpa_vcfg;
#define VXGE_HW_XMAC_RPA_VCFG_IPV4_TCP_INCL_PH vxge_mBIT(3)
#define VXGE_HW_XMAC_RPA_VCFG_IPV6_TCP_INCL_PH vxge_mBIT(7)
#define VXGE_HW_XMAC_RPA_VCFG_IPV4_UDP_INCL_PH vxge_mBIT(11)
#define VXGE_HW_XMAC_RPA_VCFG_IPV6_UDP_INCL_PH vxge_mBIT(15)
#define VXGE_HW_XMAC_RPA_VCFG_L4_INCL_CF vxge_mBIT(19)
#define VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG vxge_mBIT(23)
/*0x00c88*/ u64 rxmac_vcfg0;
#define VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(val) vxge_vBIT(val, 2, 14)
#define VXGE_HW_RXMAC_VCFG0_RTS_USE_MIN_LEN vxge_mBIT(19)
#define VXGE_HW_RXMAC_VCFG0_RTS_MIN_FRM_LEN(val) vxge_vBIT(val, 26, 14)
#define VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN vxge_mBIT(43)
#define VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN vxge_mBIT(47)
#define VXGE_HW_RXMAC_VCFG0_BCAST_EN vxge_mBIT(51)
#define VXGE_HW_RXMAC_VCFG0_ALL_VID_EN vxge_mBIT(55)
/*0x00c90*/ u64 rxmac_vcfg1;
#define VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(val) vxge_vBIT(val, 42, 2)
#define VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE vxge_mBIT(47)
#define VXGE_HW_RXMAC_VCFG1_CONTRIB_L2_FLOW vxge_mBIT(51)
/*0x00c98*/ u64 rts_access_steer_ctrl;
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(val) vxge_vBIT(val, 1, 7)
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(val) vxge_vBIT(val, 8, 4)
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE vxge_mBIT(15)
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_BEHAV_TBL_SEL vxge_mBIT(23)
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL vxge_mBIT(27)
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS vxge_mBIT(0)
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(val) vxge_vBIT(val, 40, 8)
/*0x00ca0*/ u64 rts_access_steer_data0;
#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DATA(val) vxge_vBIT(val, 0, 64)
/*0x00ca8*/ u64 rts_access_steer_data1;
4124#define VXGE_HW_RTS_ACCESS_STEER_DATA1_DATA(val) vxge_vBIT(val, 0, 64)
4125 u8 unused00d00[0x00d00-0x00cb0];
4126
4127/*0x00d00*/ u64 xmac_vsport_choice;
4128#define VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(val) vxge_vBIT(val, 3, 5)
4129/*0x00d08*/ u64 xmac_stats_cfg;
4130/*0x00d10*/ u64 xmac_stats_access_cmd;
4131#define VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(val) vxge_vBIT(val, 6, 2)
4132#define VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE vxge_mBIT(15)
4133#define VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(val) vxge_vBIT(val, 32, 8)
4134/*0x00d18*/ u64 xmac_stats_access_data;
4135#define VXGE_HW_XMAC_STATS_ACCESS_DATA_XSMGR_DATA(val) vxge_vBIT(val, 0, 64)
4136/*0x00d20*/ u64 asic_ntwk_vp_ctrl;
4137#define VXGE_HW_ASIC_NTWK_VP_CTRL_REQ_TEST_NTWK vxge_mBIT(3)
4138#define VXGE_HW_ASIC_NTWK_VP_CTRL_XMACJ_SHOW_PORT_INFO vxge_mBIT(55)
4139#define VXGE_HW_ASIC_NTWK_VP_CTRL_XMACJ_PORT_NUM vxge_mBIT(63)
4140 u8 unused00d30[0x00d30-0x00d28];
4141
4142/*0x00d30*/ u64 xgmac_vp_int_status;
4143#define VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT \
4144 vxge_mBIT(3)
4145/*0x00d38*/ u64 xgmac_vp_int_mask;
4146/*0x00d40*/ u64 asic_ntwk_vp_err_reg;
4147#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT vxge_mBIT(3)
4148#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK vxge_mBIT(7)
4149#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR \
4150 vxge_mBIT(11)
4151#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR \
4152 vxge_mBIT(15)
4153#define VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT \
4154 vxge_mBIT(19)
4155#define VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK vxge_mBIT(23)
4156/*0x00d48*/ u64 asic_ntwk_vp_err_mask;
4157/*0x00d50*/ u64 asic_ntwk_vp_err_alarm;
4158 u8 unused00d80[0x00d80-0x00d58];
4159
4160/*0x00d80*/ u64 rtdma_bw_ctrl;
4161#define VXGE_HW_RTDMA_BW_CTRL_BW_CTRL_EN vxge_mBIT(39)
4162#define VXGE_HW_RTDMA_BW_CTRL_DESIRED_BW(val) vxge_vBIT(val, 46, 18)
4163/*0x00d88*/ u64 rtdma_rd_optimization_ctrl;
4164#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_GEN_INT_AFTER_ABORT vxge_mBIT(3)
4165#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_PAD_MODE(val) vxge_vBIT(val, 6, 2)
4166#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_PAD_PATTERN(val) vxge_vBIT(val, 8, 8)
4167#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE vxge_mBIT(19)
4168#define VXGE_HW_PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */
4169#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val) \
4170 vxge_vBIT(val, 21, 3)
4171#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_PYLD_WMARK_EN vxge_mBIT(28)
4172#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_PYLD_WMARK(val) \
4173 vxge_vBIT(val, 29, 3)
4174#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN vxge_mBIT(35)
4175#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(val) \
4176 vxge_vBIT(val, 37, 3)
4177#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_WAIT_FOR_SPACE vxge_mBIT(43)
4178#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_FILL_THRESH(val) \
4179 vxge_vBIT(val, 51, 5)
4180#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_ADDR_BDRY_EN vxge_mBIT(59)
4181#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_ADDR_BDRY(val) \
4182 vxge_vBIT(val, 61, 3)
4183/*0x00d90*/ u64 pda_pcc_job_monitor;
4184#define VXGE_HW_PDA_PCC_JOB_MONITOR_PDA_PCC_JOB_STATUS vxge_mBIT(7)
4185/*0x00d98*/ u64 tx_protocol_assist_cfg;
4186#define VXGE_HW_TX_PROTOCOL_ASSIST_CFG_LSOV2_EN vxge_mBIT(6)
4187#define VXGE_HW_TX_PROTOCOL_ASSIST_CFG_IPV6_KEEP_SEARCHING vxge_mBIT(7)
4188 u8 unused01000[0x01000-0x00da0];
4189
4190/*0x01000*/ u64 tim_cfg1_int_num[4];
4191#define VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(val) vxge_vBIT(val, 6, 26)
4192#define VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN vxge_mBIT(35)
4193#define VXGE_HW_TIM_CFG1_INT_NUM_TXFRM_CNT_EN vxge_mBIT(36)
4194#define VXGE_HW_TIM_CFG1_INT_NUM_TXD_CNT_EN vxge_mBIT(37)
4195#define VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC vxge_mBIT(38)
4196#define VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI vxge_mBIT(39)
4197#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(val) vxge_vBIT(val, 41, 7)
4198#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(val) vxge_vBIT(val, 49, 7)
4199#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(val) vxge_vBIT(val, 57, 7)
4200/*0x01020*/ u64 tim_cfg2_int_num[4];
4201#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(val) vxge_vBIT(val, 0, 16)
4202#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(val) vxge_vBIT(val, 16, 16)
4203#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(val) vxge_vBIT(val, 32, 16)
4204#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(val) vxge_vBIT(val, 48, 16)
4205/*0x01040*/ u64 tim_cfg3_int_num[4];
4206#define VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI vxge_mBIT(0)
4207#define VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(val) vxge_vBIT(val, 1, 4)
4208#define VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(val) vxge_vBIT(val, 6, 26)
4209#define VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(val) vxge_vBIT(val, 32, 6)
4210#define VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(val) vxge_vBIT(val, 38, 26)
4211/*0x01060*/ u64 tim_wrkld_clc;
4212#define VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(val) vxge_vBIT(val, 0, 32)
4213#define VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(val) vxge_vBIT(val, 35, 5)
4214#define VXGE_HW_TIM_WRKLD_CLC_CNT_FRM_BYTE vxge_mBIT(40)
4215#define VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(val) vxge_vBIT(val, 41, 2)
4216#define VXGE_HW_TIM_WRKLD_CLC_CNT_LNK_EN vxge_mBIT(43)
4217#define VXGE_HW_TIM_WRKLD_CLC_HOST_UTIL(val) vxge_vBIT(val, 57, 7)
4218/*0x01068*/ u64 tim_bitmap;
4219#define VXGE_HW_TIM_BITMAP_MASK(val) vxge_vBIT(val, 0, 32)
4220#define VXGE_HW_TIM_BITMAP_LLROOT_RXD_EN vxge_mBIT(32)
4221#define VXGE_HW_TIM_BITMAP_LLROOT_TXD_EN vxge_mBIT(33)
4222/*0x01070*/ u64 tim_ring_assn;
4223#define VXGE_HW_TIM_RING_ASSN_INT_NUM(val) vxge_vBIT(val, 6, 2)
4224/*0x01078*/ u64 tim_remap;
4225#define VXGE_HW_TIM_REMAP_TX_EN vxge_mBIT(5)
4226#define VXGE_HW_TIM_REMAP_RX_EN vxge_mBIT(6)
4227#define VXGE_HW_TIM_REMAP_OFFLOAD_EN vxge_mBIT(7)
4228#define VXGE_HW_TIM_REMAP_TO_VPATH_NUM(val) vxge_vBIT(val, 11, 5)
4229/*0x01080*/ u64 tim_vpath_map;
4230#define VXGE_HW_TIM_VPATH_MAP_BMAP_ROOT(val) vxge_vBIT(val, 0, 32)
4231/*0x01088*/ u64 tim_pci_cfg;
4232#define VXGE_HW_TIM_PCI_CFG_ADD_PAD vxge_mBIT(7)
4233#define VXGE_HW_TIM_PCI_CFG_NO_SNOOP vxge_mBIT(15)
4234#define VXGE_HW_TIM_PCI_CFG_RELAXED vxge_mBIT(23)
4235#define VXGE_HW_TIM_PCI_CFG_CTL_STR vxge_mBIT(31)
4236 u8 unused01100[0x01100-0x01090];
4237
4238/*0x01100*/ u64 sgrp_assign;
4239#define VXGE_HW_SGRP_ASSIGN_SGRP_ASSIGN(val) vxge_vBIT(val, 0, 64)
4240/*0x01108*/ u64 sgrp_aoa_and_result;
4241#define VXGE_HW_SGRP_AOA_AND_RESULT_PET_SGRP_AOA_AND_RESULT(val) \
4242 vxge_vBIT(val, 0, 64)
4243/*0x01110*/ u64 rpe_pci_cfg;
4244#define VXGE_HW_RPE_PCI_CFG_PAD_LRO_DATA_ENABLE vxge_mBIT(7)
4245#define VXGE_HW_RPE_PCI_CFG_PAD_LRO_HDR_ENABLE vxge_mBIT(8)
4246#define VXGE_HW_RPE_PCI_CFG_PAD_LRO_CQE_ENABLE vxge_mBIT(9)
4247#define VXGE_HW_RPE_PCI_CFG_PAD_NONLL_CQE_ENABLE vxge_mBIT(10)
4248#define VXGE_HW_RPE_PCI_CFG_PAD_BASE_LL_CQE_ENABLE vxge_mBIT(11)
4249#define VXGE_HW_RPE_PCI_CFG_PAD_LL_CQE_IDATA_ENABLE vxge_mBIT(12)
4250#define VXGE_HW_RPE_PCI_CFG_PAD_CQRQ_IR_ENABLE vxge_mBIT(13)
4251#define VXGE_HW_RPE_PCI_CFG_PAD_CQSQ_IR_ENABLE vxge_mBIT(14)
4252#define VXGE_HW_RPE_PCI_CFG_PAD_CQRR_IR_ENABLE vxge_mBIT(15)
4253#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_DATA vxge_mBIT(18)
4254#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_NONLL_CQE vxge_mBIT(19)
4255#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_LL_CQE vxge_mBIT(20)
4256#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_CQRQ_IR vxge_mBIT(21)
4257#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_CQSQ_IR vxge_mBIT(22)
4258#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_CQRR_IR vxge_mBIT(23)
4259#define VXGE_HW_RPE_PCI_CFG_RELAXED_DATA vxge_mBIT(26)
4260#define VXGE_HW_RPE_PCI_CFG_RELAXED_NONLL_CQE vxge_mBIT(27)
4261#define VXGE_HW_RPE_PCI_CFG_RELAXED_LL_CQE vxge_mBIT(28)
4262#define VXGE_HW_RPE_PCI_CFG_RELAXED_CQRQ_IR vxge_mBIT(29)
4263#define VXGE_HW_RPE_PCI_CFG_RELAXED_CQSQ_IR vxge_mBIT(30)
4264#define VXGE_HW_RPE_PCI_CFG_RELAXED_CQRR_IR vxge_mBIT(31)
4265/*0x01118*/ u64 rpe_lro_cfg;
4266#define VXGE_HW_RPE_LRO_CFG_SUPPRESS_LRO_ETH_TRLR vxge_mBIT(7)
4267#define VXGE_HW_RPE_LRO_CFG_ALLOW_LRO_SNAP_SNAPJUMBO_MRG vxge_mBIT(11)
4268#define VXGE_HW_RPE_LRO_CFG_ALLOW_LRO_LLC_LLCJUMBO_MRG vxge_mBIT(15)
4269#define VXGE_HW_RPE_LRO_CFG_INCL_ACK_CNT_IN_CQE vxge_mBIT(23)
4270/*0x01120*/ u64 pe_mr2vp_ack_blk_limit;
4271#define VXGE_HW_PE_MR2VP_ACK_BLK_LIMIT_BLK_LIMIT(val) vxge_vBIT(val, 32, 32)
4272/*0x01128*/ u64 pe_mr2vp_rirr_lirr_blk_limit;
4273#define VXGE_HW_PE_MR2VP_RIRR_LIRR_BLK_LIMIT_RIRR_BLK_LIMIT(val) \
4274 vxge_vBIT(val, 0, 32)
4275#define VXGE_HW_PE_MR2VP_RIRR_LIRR_BLK_LIMIT_LIRR_BLK_LIMIT(val) \
4276 vxge_vBIT(val, 32, 32)
4277/*0x01130*/ u64 txpe_pci_nce_cfg;
4278#define VXGE_HW_TXPE_PCI_NCE_CFG_NCE_THRESH(val) vxge_vBIT(val, 0, 32)
4279#define VXGE_HW_TXPE_PCI_NCE_CFG_PAD_TOWI_ENABLE vxge_mBIT(55)
4280#define VXGE_HW_TXPE_PCI_NCE_CFG_NOSNOOP_TOWI vxge_mBIT(63)
4281 u8 unused01180[0x01180-0x01138];
4282
4283/*0x01180*/ u64 msg_qpad_en_cfg;
4284#define VXGE_HW_MSG_QPAD_EN_CFG_UMQ_BWR_READ vxge_mBIT(3)
4285#define VXGE_HW_MSG_QPAD_EN_CFG_DMQ_BWR_READ vxge_mBIT(7)
4286#define VXGE_HW_MSG_QPAD_EN_CFG_MXP_GENDMA_READ vxge_mBIT(11)
4287#define VXGE_HW_MSG_QPAD_EN_CFG_UXP_GENDMA_READ vxge_mBIT(15)
4288#define VXGE_HW_MSG_QPAD_EN_CFG_UMQ_MSG_WRITE vxge_mBIT(19)
4289#define VXGE_HW_MSG_QPAD_EN_CFG_UMQDMQ_IR_WRITE vxge_mBIT(23)
4290#define VXGE_HW_MSG_QPAD_EN_CFG_MXP_GENDMA_WRITE vxge_mBIT(27)
4291#define VXGE_HW_MSG_QPAD_EN_CFG_UXP_GENDMA_WRITE vxge_mBIT(31)
4292/*0x01188*/ u64 msg_pci_cfg;
4293#define VXGE_HW_MSG_PCI_CFG_GENDMA_NO_SNOOP vxge_mBIT(3)
4294#define VXGE_HW_MSG_PCI_CFG_UMQDMQ_IR_NO_SNOOP vxge_mBIT(7)
4295#define VXGE_HW_MSG_PCI_CFG_UMQ_NO_SNOOP vxge_mBIT(11)
4296#define VXGE_HW_MSG_PCI_CFG_DMQ_NO_SNOOP vxge_mBIT(15)
4297/*0x01190*/ u64 umqdmq_ir_init;
4298#define VXGE_HW_UMQDMQ_IR_INIT_HOST_WRITE_ADD(val) vxge_vBIT(val, 0, 64)
4299/*0x01198*/ u64 dmq_ir_int;
4300#define VXGE_HW_DMQ_IR_INT_IMMED_ENABLE vxge_mBIT(6)
4301#define VXGE_HW_DMQ_IR_INT_EVENT_ENABLE vxge_mBIT(7)
4302#define VXGE_HW_DMQ_IR_INT_NUMBER(val) vxge_vBIT(val, 9, 7)
4303#define VXGE_HW_DMQ_IR_INT_BITMAP(val) vxge_vBIT(val, 16, 16)
4304/*0x011a0*/ u64 dmq_bwr_init_add;
4305#define VXGE_HW_DMQ_BWR_INIT_ADD_HOST(val) vxge_vBIT(val, 0, 64)
4306/*0x011a8*/ u64 dmq_bwr_init_byte;
4307#define VXGE_HW_DMQ_BWR_INIT_BYTE_COUNT(val) vxge_vBIT(val, 0, 32)
4308/*0x011b0*/ u64 dmq_ir;
4309#define VXGE_HW_DMQ_IR_POLICY(val) vxge_vBIT(val, 0, 8)
4310/*0x011b8*/ u64 umq_int;
4311#define VXGE_HW_UMQ_INT_IMMED_ENABLE vxge_mBIT(6)
4312#define VXGE_HW_UMQ_INT_EVENT_ENABLE vxge_mBIT(7)
4313#define VXGE_HW_UMQ_INT_NUMBER(val) vxge_vBIT(val, 9, 7)
4314#define VXGE_HW_UMQ_INT_BITMAP(val) vxge_vBIT(val, 16, 16)
4315/*0x011c0*/ u64 umq_mr2vp_bwr_pfch_init;
4316#define VXGE_HW_UMQ_MR2VP_BWR_PFCH_INIT_NUMBER(val) vxge_vBIT(val, 0, 8)
4317/*0x011c8*/ u64 umq_bwr_pfch_ctrl;
4318#define VXGE_HW_UMQ_BWR_PFCH_CTRL_POLL_EN vxge_mBIT(3)
4319/*0x011d0*/ u64 umq_mr2vp_bwr_eol;
4320#define VXGE_HW_UMQ_MR2VP_BWR_EOL_POLL_LATENCY(val) vxge_vBIT(val, 32, 32)
4321/*0x011d8*/ u64 umq_bwr_init_add;
4322#define VXGE_HW_UMQ_BWR_INIT_ADD_HOST(val) vxge_vBIT(val, 0, 64)
4323/*0x011e0*/ u64 umq_bwr_init_byte;
4324#define VXGE_HW_UMQ_BWR_INIT_BYTE_COUNT(val) vxge_vBIT(val, 0, 32)
4325/*0x011e8*/ u64 gendma_int;
4326#define VXGE_HW_GENDMA_INT_IMMED_ENABLE vxge_mBIT(6)
4327#define VXGE_HW_GENDMA_INT_EVENT_ENABLE vxge_mBIT(7)
4328#define VXGE_HW_GENDMA_INT_NUMBER(val) vxge_vBIT(val, 9, 7)
4329#define VXGE_HW_GENDMA_INT_BITMAP(val) vxge_vBIT(val, 16, 16)
4330/*0x011f0*/ u64 umqdmq_ir_init_notify;
4331#define VXGE_HW_UMQDMQ_IR_INIT_NOTIFY_PULSE vxge_mBIT(3)
4332/*0x011f8*/ u64 dmq_init_notify;
4333#define VXGE_HW_DMQ_INIT_NOTIFY_PULSE vxge_mBIT(3)
4334/*0x01200*/ u64 umq_init_notify;
4335#define VXGE_HW_UMQ_INIT_NOTIFY_PULSE vxge_mBIT(3)
4336 u8 unused01380[0x01380-0x01208];
4337
4338/*0x01380*/ u64 tpa_cfg;
4339#define VXGE_HW_TPA_CFG_IGNORE_FRAME_ERR vxge_mBIT(3)
4340#define VXGE_HW_TPA_CFG_IPV6_STOP_SEARCHING vxge_mBIT(7)
4341#define VXGE_HW_TPA_CFG_L4_PSHDR_PRESENT vxge_mBIT(11)
4342#define VXGE_HW_TPA_CFG_SUPPORT_MOBILE_IPV6_HDRS vxge_mBIT(15)
4343 u8 unused01400[0x01400-0x01388];
4344
4345/*0x01400*/ u64 tx_vp_reset_discarded_frms;
4346#define VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_TX_VP_RESET_DISCARDED_FRMS(val) \
4347 vxge_vBIT(val, 48, 16)
4348 u8 unused01480[0x01480-0x01408];
4349
4350/*0x01480*/ u64 fau_rpa_vcfg;
4351#define VXGE_HW_FAU_RPA_VCFG_L4_COMP_CSUM vxge_mBIT(7)
4352#define VXGE_HW_FAU_RPA_VCFG_L3_INCL_CF vxge_mBIT(11)
4353#define VXGE_HW_FAU_RPA_VCFG_L3_COMP_CSUM vxge_mBIT(15)
4354 u8 unused014d0[0x014d0-0x01488];
4355
4356/*0x014d0*/ u64 dbg_stats_rx_mpa;
4357#define VXGE_HW_DBG_STATS_RX_MPA_CRC_FAIL_FRMS(val) vxge_vBIT(val, 0, 16)
4358#define VXGE_HW_DBG_STATS_RX_MPA_MRK_FAIL_FRMS(val) vxge_vBIT(val, 16, 16)
4359#define VXGE_HW_DBG_STATS_RX_MPA_LEN_FAIL_FRMS(val) vxge_vBIT(val, 32, 16)
4360/*0x014d8*/ u64 dbg_stats_rx_fau;
4361#define VXGE_HW_DBG_STATS_RX_FAU_RX_WOL_FRMS(val) vxge_vBIT(val, 0, 16)
4362#define VXGE_HW_DBG_STATS_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val) \
4363 vxge_vBIT(val, 16, 16)
4364#define VXGE_HW_DBG_STATS_RX_FAU_RX_PERMITTED_FRMS(val) \
4365 vxge_vBIT(val, 32, 32)
4366 u8 unused014f0[0x014f0-0x014e0];
4367
4368/*0x014f0*/ u64 fbmc_vp_rdy;
4369#define VXGE_HW_FBMC_VP_RDY_QUEUE_SPAV_FM vxge_mBIT(0)
4370 u8 unused01e00[0x01e00-0x014f8];
4371
4372/*0x01e00*/ u64 vpath_pcipif_int_status;
4373#define \
4374VXGE_HW_VPATH_PCIPIF_INT_STATUS_SRPCIM_MSG_TO_VPATH_SRPCIM_MSG_TO_VPATH_INT \
4375 vxge_mBIT(3)
4376#define VXGE_HW_VPATH_PCIPIF_INT_STATUS_VPATH_SPARE_R1_VPATH_SPARE_R1_INT \
4377 vxge_mBIT(7)
4378/*0x01e08*/ u64 vpath_pcipif_int_mask;
4379 u8 unused01e20[0x01e20-0x01e10];
4380
4381/*0x01e20*/ u64 srpcim_msg_to_vpath_reg;
4382#define VXGE_HW_SRPCIM_MSG_TO_VPATH_REG_SWIF_SRPCIM_TO_VPATH_RMSG_INT \
4383 vxge_mBIT(3)
4384/*0x01e28*/ u64 srpcim_msg_to_vpath_mask;
4385/*0x01e30*/ u64 srpcim_msg_to_vpath_alarm;
4386 u8 unused01ea0[0x01ea0-0x01e38];
4387
4388/*0x01ea0*/ u64 vpath_to_srpcim_wmsg;
4389#define VXGE_HW_VPATH_TO_SRPCIM_WMSG_VPATH_TO_SRPCIM_WMSG(val) \
4390 vxge_vBIT(val, 0, 64)
4391/*0x01ea8*/ u64 vpath_to_srpcim_wmsg_trig;
4392#define VXGE_HW_VPATH_TO_SRPCIM_WMSG_TRIG_VPATH_TO_SRPCIM_WMSG_TRIG \
4393 vxge_mBIT(0)
4394 u8 unused02000[0x02000-0x01eb0];
4395
4396/*0x02000*/ u64 vpath_general_int_status;
4397#define VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT vxge_mBIT(3)
4398#define VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT vxge_mBIT(7)
4399#define VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT vxge_mBIT(15)
4400#define VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT vxge_mBIT(19)
4401/*0x02008*/ u64 vpath_general_int_mask;
4402#define VXGE_HW_VPATH_GENERAL_INT_MASK_PIC_INT vxge_mBIT(3)
4403#define VXGE_HW_VPATH_GENERAL_INT_MASK_PCI_INT vxge_mBIT(7)
4404#define VXGE_HW_VPATH_GENERAL_INT_MASK_WRDMA_INT vxge_mBIT(15)
4405#define VXGE_HW_VPATH_GENERAL_INT_MASK_XMAC_INT vxge_mBIT(19)
4406/*0x02010*/ u64 vpath_ppif_int_status;
4407#define VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT \
4408 vxge_mBIT(3)
4409#define VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT \
4410 vxge_mBIT(7)
4411#define VXGE_HW_VPATH_PPIF_INT_STATUS_PCI_CONFIG_ERRORS_PCI_CONFIG_INT \
4412 vxge_mBIT(11)
4413#define \
4414VXGE_HW_VPATH_PPIF_INT_STATUS_MRPCIM_TO_VPATH_ALARM_MRPCIM_TO_VPATH_ALARM_INT \
4415 vxge_mBIT(15)
4416#define \
4417VXGE_HW_VPATH_PPIF_INT_STATUS_SRPCIM_TO_VPATH_ALARM_SRPCIM_TO_VPATH_ALARM_INT \
4418 vxge_mBIT(19)
4419/*0x02018*/ u64 vpath_ppif_int_mask;
4420/*0x02020*/ u64 kdfcctl_errors_reg;
4421#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR vxge_mBIT(3)
4422#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR vxge_mBIT(7)
4423#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR vxge_mBIT(11)
4424#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON vxge_mBIT(15)
4425#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON vxge_mBIT(19)
4426#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON vxge_mBIT(23)
4427#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR vxge_mBIT(31)
4428#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR vxge_mBIT(35)
4429#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR vxge_mBIT(39)
4430/*0x02028*/ u64 kdfcctl_errors_mask;
4431/*0x02030*/ u64 kdfcctl_errors_alarm;
4432 u8 unused02040[0x02040-0x02038];
4433
4434/*0x02040*/ u64 general_errors_reg;
4435#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW vxge_mBIT(3)
4436#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW vxge_mBIT(7)
4437#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW vxge_mBIT(11)
4438#define VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR vxge_mBIT(15)
4439#define VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ vxge_mBIT(19)
4440#define VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS vxge_mBIT(27)
4441#define VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET vxge_mBIT(31)
4442/*0x02048*/ u64 general_errors_mask;
4443/*0x02050*/ u64 general_errors_alarm;
4444/*0x02058*/ u64 pci_config_errors_reg;
4445#define VXGE_HW_PCI_CONFIG_ERRORS_REG_PCICONFIG_STATUS_ERR vxge_mBIT(3)
4446#define VXGE_HW_PCI_CONFIG_ERRORS_REG_PCICONFIG_UNCOR_ERR vxge_mBIT(7)
4447#define VXGE_HW_PCI_CONFIG_ERRORS_REG_PCICONFIG_COR_ERR vxge_mBIT(11)
4448/*0x02060*/ u64 pci_config_errors_mask;
4449/*0x02068*/ u64 pci_config_errors_alarm;
4450/*0x02070*/ u64 mrpcim_to_vpath_alarm_reg;
4451#define VXGE_HW_MRPCIM_TO_VPATH_ALARM_REG_PPIF_MRPCIM_TO_VPATH_ALARM \
4452 vxge_mBIT(3)
4453/*0x02078*/ u64 mrpcim_to_vpath_alarm_mask;
4454/*0x02080*/ u64 mrpcim_to_vpath_alarm_alarm;
4455/*0x02088*/ u64 srpcim_to_vpath_alarm_reg;
4456#define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_PPIF_SRPCIM_TO_VPATH_ALARM(val) \
4457 vxge_vBIT(val, 0, 17)
4458/*0x02090*/ u64 srpcim_to_vpath_alarm_mask;
4459/*0x02098*/ u64 srpcim_to_vpath_alarm_alarm;
4460 u8 unused02108[0x02108-0x020a0];
4461
4462/*0x02108*/ u64 kdfcctl_status;
4463#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO0_PRES(val) vxge_vBIT(val, 0, 8)
4464#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO1_PRES(val) vxge_vBIT(val, 8, 8)
4465#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO2_PRES(val) vxge_vBIT(val, 16, 8)
4466#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO0_OVRWR(val) vxge_vBIT(val, 24, 8)
4467#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO1_OVRWR(val) vxge_vBIT(val, 32, 8)
4468#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO2_OVRWR(val) vxge_vBIT(val, 40, 8)
4469/*0x02110*/ u64 rsthdlr_status;
4470#define VXGE_HW_RSTHDLR_STATUS_RSTHDLR_CURRENT_RESET vxge_mBIT(3)
4471#define VXGE_HW_RSTHDLR_STATUS_RSTHDLR_CURRENT_VPIN(val) vxge_vBIT(val, 6, 2)
4472/*0x02118*/ u64 fifo0_status;
4473#define VXGE_HW_FIFO0_STATUS_DBLGEN_FIFO0_RDIDX(val) vxge_vBIT(val, 0, 12)
4474/*0x02120*/ u64 fifo1_status;
4475#define VXGE_HW_FIFO1_STATUS_DBLGEN_FIFO1_RDIDX(val) vxge_vBIT(val, 0, 12)
4476/*0x02128*/ u64 fifo2_status;
4477#define VXGE_HW_FIFO2_STATUS_DBLGEN_FIFO2_RDIDX(val) vxge_vBIT(val, 0, 12)
4478 u8 unused02158[0x02158-0x02130];
4479
4480/*0x02158*/ u64 tgt_illegal_access;
4481#define VXGE_HW_TGT_ILLEGAL_ACCESS_SWIF_REGION(val) vxge_vBIT(val, 1, 7)
4482 u8 unused02200[0x02200-0x02160];
4483
4484/*0x02200*/ u64 vpath_general_cfg1;
4485#define VXGE_HW_VPATH_GENERAL_CFG1_TC_VALUE(val) vxge_vBIT(val, 1, 3)
4486#define VXGE_HW_VPATH_GENERAL_CFG1_DATA_BYTE_SWAPEN vxge_mBIT(7)
4487#define VXGE_HW_VPATH_GENERAL_CFG1_DATA_FLIPEN vxge_mBIT(11)
4488#define VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN vxge_mBIT(15)
4489#define VXGE_HW_VPATH_GENERAL_CFG1_CTL_FLIPEN vxge_mBIT(23)
4490#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_ADDR_SWAPEN vxge_mBIT(51)
4491#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_ADDR_FLIPEN vxge_mBIT(55)
4492#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_DATA_SWAPEN vxge_mBIT(59)
4493#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_DATA_FLIPEN vxge_mBIT(63)
4494/*0x02208*/ u64 vpath_general_cfg2;
4495#define VXGE_HW_VPATH_GENERAL_CFG2_SIZE_QUANTUM(val) vxge_vBIT(val, 1, 3)
4496/*0x02210*/ u64 vpath_general_cfg3;
4497#define VXGE_HW_VPATH_GENERAL_CFG3_IGNORE_VPATH_RST_FOR_INTA vxge_mBIT(3)
4498 u8 unused02220[0x02220-0x02218];
4499
4500/*0x02220*/ u64 kdfcctl_cfg0;
4501#define VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 vxge_mBIT(1)
4502#define VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 vxge_mBIT(2)
4503#define VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2 vxge_mBIT(3)
4504#define VXGE_HW_KDFCCTL_CFG0_BIT_FLIPEN_FIFO0 vxge_mBIT(5)
4505#define VXGE_HW_KDFCCTL_CFG0_BIT_FLIPEN_FIFO1 vxge_mBIT(6)
4506#define VXGE_HW_KDFCCTL_CFG0_BIT_FLIPEN_FIFO2 vxge_mBIT(7)
4507#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE0_FIFO0 vxge_mBIT(9)
4508#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE0_FIFO1 vxge_mBIT(10)
4509#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE0_FIFO2 vxge_mBIT(11)
4510#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE1_FIFO0 vxge_mBIT(13)
4511#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE1_FIFO1 vxge_mBIT(14)
4512#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE1_FIFO2 vxge_mBIT(15)
4513#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE2_FIFO0 vxge_mBIT(17)
4514#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE2_FIFO1 vxge_mBIT(18)
4515#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE2_FIFO2 vxge_mBIT(19)
4516#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE3_FIFO0 vxge_mBIT(21)
4517#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE3_FIFO1 vxge_mBIT(22)
4518#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE3_FIFO2 vxge_mBIT(23)
4519#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE4_FIFO0 vxge_mBIT(25)
4520#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE4_FIFO1 vxge_mBIT(26)
4521#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE4_FIFO2 vxge_mBIT(27)
4522#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE5_FIFO0 vxge_mBIT(29)
4523#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE5_FIFO1 vxge_mBIT(30)
4524#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE5_FIFO2 vxge_mBIT(31)
4525#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE6_FIFO0 vxge_mBIT(33)
4526#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE6_FIFO1 vxge_mBIT(34)
4527#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE6_FIFO2 vxge_mBIT(35)
4528#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE7_FIFO0 vxge_mBIT(37)
4529#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE7_FIFO1 vxge_mBIT(38)
4530#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE7_FIFO2 vxge_mBIT(39)
4531
4532 u8 unused02268[0x02268-0x02228];
4533
4534/*0x02268*/ u64 stats_cfg;
4535#define VXGE_HW_STATS_CFG_START_HOST_ADDR(val) vxge_vBIT(val, 0, 57)
4536/*0x02270*/ u64 interrupt_cfg0;
4537#define VXGE_HW_INTERRUPT_CFG0_MSIX_FOR_RXTI(val) vxge_vBIT(val, 1, 7)
4538#define VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(val) vxge_vBIT(val, 9, 7)
4539#define VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(val) vxge_vBIT(val, 17, 7)
4540#define VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(val) vxge_vBIT(val, 25, 7)
4541#define VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(val) vxge_vBIT(val, 33, 7)
4542 u8 unused02280[0x02280-0x02278];
4543
4544/*0x02280*/ u64 interrupt_cfg2;
4545#define VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(val) vxge_vBIT(val, 1, 7)
4546/*0x02288*/ u64 one_shot_vect0_en;
4547#define VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN vxge_mBIT(3)
4548/*0x02290*/ u64 one_shot_vect1_en;
4549#define VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN vxge_mBIT(3)
4550/*0x02298*/ u64 one_shot_vect2_en;
4551#define VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN vxge_mBIT(3)
4552/*0x022a0*/ u64 one_shot_vect3_en;
4553#define VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN vxge_mBIT(3)
4554 u8 unused022b0[0x022b0-0x022a8];
4555
4556/*0x022b0*/ u64 pci_config_access_cfg1;
4557#define VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(val) vxge_vBIT(val, 0, 12)
4558#define VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0 vxge_mBIT(15)
4559/*0x022b8*/ u64 pci_config_access_cfg2;
4560#define VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ vxge_mBIT(0)
4561/*0x022c0*/ u64 pci_config_access_status;
4562#define VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR vxge_mBIT(0)
4563#define VXGE_HW_PCI_CONFIG_ACCESS_STATUS_DATA(val) vxge_vBIT(val, 32, 32)
4564 u8 unused02300[0x02300-0x022c8];
4565
4566/*0x02300*/ u64 vpath_debug_stats0;
4567#define VXGE_HW_VPATH_DEBUG_STATS0_INI_NUM_MWR_SENT(val) vxge_vBIT(val, 0, 32)
4568/*0x02308*/ u64 vpath_debug_stats1;
4569#define VXGE_HW_VPATH_DEBUG_STATS1_INI_NUM_MRD_SENT(val) vxge_vBIT(val, 0, 32)
4570/*0x02310*/ u64 vpath_debug_stats2;
4571#define VXGE_HW_VPATH_DEBUG_STATS2_INI_NUM_CPL_RCVD(val) vxge_vBIT(val, 0, 32)
4572/*0x02318*/ u64 vpath_debug_stats3;
4573#define VXGE_HW_VPATH_DEBUG_STATS3_INI_NUM_MWR_BYTE_SENT(val) \
4574 vxge_vBIT(val, 0, 64)
4575/*0x02320*/ u64 vpath_debug_stats4;
4576#define VXGE_HW_VPATH_DEBUG_STATS4_INI_NUM_CPL_BYTE_RCVD(val) \
4577 vxge_vBIT(val, 0, 64)
4578/*0x02328*/ u64 vpath_debug_stats5;
4579#define VXGE_HW_VPATH_DEBUG_STATS5_WRCRDTARB_XOFF(val) vxge_vBIT(val, 32, 32)
4580/*0x02330*/ u64 vpath_debug_stats6;
4581#define VXGE_HW_VPATH_DEBUG_STATS6_RDCRDTARB_XOFF(val) vxge_vBIT(val, 32, 32)
4582/*0x02338*/ u64 vpath_genstats_count01;
4583#define VXGE_HW_VPATH_GENSTATS_COUNT01_PPIF_VPATH_GENSTATS_COUNT1(val) \
4584 vxge_vBIT(val, 0, 32)
4585#define VXGE_HW_VPATH_GENSTATS_COUNT01_PPIF_VPATH_GENSTATS_COUNT0(val) \
4586 vxge_vBIT(val, 32, 32)
4587/*0x02340*/ u64 vpath_genstats_count23;
4588#define VXGE_HW_VPATH_GENSTATS_COUNT23_PPIF_VPATH_GENSTATS_COUNT3(val) \
4589 vxge_vBIT(val, 0, 32)
4590#define VXGE_HW_VPATH_GENSTATS_COUNT23_PPIF_VPATH_GENSTATS_COUNT2(val) \
4591 vxge_vBIT(val, 32, 32)
4592/*0x02348*/ u64 vpath_genstats_count4;
4593#define VXGE_HW_VPATH_GENSTATS_COUNT4_PPIF_VPATH_GENSTATS_COUNT4(val) \
4594 vxge_vBIT(val, 32, 32)
4595/*0x02350*/ u64 vpath_genstats_count5;
4596#define VXGE_HW_VPATH_GENSTATS_COUNT5_PPIF_VPATH_GENSTATS_COUNT5(val) \
4597 vxge_vBIT(val, 32, 32)
4598 u8 unused02648[0x02648-0x02358];
4599} __packed;
4600
4601#define VXGE_HW_EEPROM_SIZE (0x01 << 11)
4602
4603/* Capability lists */
4604#define VXGE_HW_PCI_EXP_LNKCAP_LNK_SPEED 0xf /* Supported Link speeds */
4605#define VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH 0x3f0 /* Supported Link widths. */
4606#define VXGE_HW_PCI_EXP_LNKCAP_LW_RES 0x0 /* Reserved. */
4607
4608#endif
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
new file mode 100644
index 000000000000..7be0ae10d69b
--- /dev/null
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -0,0 +1,2528 @@
1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-traffic.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2009 Neterion Inc.
13 ******************************************************************************/
14#include <linux/etherdevice.h>
15
16#include "vxge-traffic.h"
17#include "vxge-config.h"
18#include "vxge-main.h"
19
20/*
21 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
22 * @vp: Virtual Path handle.
23 *
24 * Enable vpath interrupts. The function is to be executed last in the
25 * vpath initialization sequence.
26 *
27 * See also: vxge_hw_vpath_intr_disable()
28 */
29enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
30{
31 u64 val64;
32
33 struct __vxge_hw_virtualpath *vpath;
34 struct vxge_hw_vpath_reg __iomem *vp_reg;
35 enum vxge_hw_status status = VXGE_HW_OK;
36 if (vp == NULL) {
37 status = VXGE_HW_ERR_INVALID_HANDLE;
38 goto exit;
39 }
40
41 vpath = vp->vpath;
42
43 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
44 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
45 goto exit;
46 }
47
48 vp_reg = vpath->vp_reg;
49
50 writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
51
52 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
53 &vp_reg->general_errors_reg);
54
55 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
56 &vp_reg->pci_config_errors_reg);
57
58 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
59 &vp_reg->mrpcim_to_vpath_alarm_reg);
60
61 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
62 &vp_reg->srpcim_to_vpath_alarm_reg);
63
64 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
65 &vp_reg->vpath_ppif_int_status);
66
67 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
68 &vp_reg->srpcim_msg_to_vpath_reg);
69
70 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
71 &vp_reg->vpath_pcipif_int_status);
72
73 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
74 &vp_reg->prc_alarm_reg);
75
76 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
77 &vp_reg->wrdma_alarm_status);
78
79 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
80 &vp_reg->asic_ntwk_vp_err_reg);
81
82 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
83 &vp_reg->xgmac_vp_int_status);
84
85 val64 = readq(&vp_reg->vpath_general_int_status);
86
87 /* Mask unwanted interrupts */
88
89 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
90 &vp_reg->vpath_pcipif_int_mask);
91
92 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
93 &vp_reg->srpcim_msg_to_vpath_mask);
94
95 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
96 &vp_reg->srpcim_to_vpath_alarm_mask);
97
98 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
99 &vp_reg->mrpcim_to_vpath_alarm_mask);
100
101 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
102 &vp_reg->pci_config_errors_mask);
103
104 /* Unmask the individual interrupts */
105
106	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
107 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
108 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
109 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
110 &vp_reg->general_errors_mask);
111
112 __vxge_hw_pio_mem_write32_upper(
113 (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
114 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
115 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
116 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
117 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
118	VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
119 &vp_reg->kdfcctl_errors_mask);
120
121 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
122
123 __vxge_hw_pio_mem_write32_upper(
124 (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
125 &vp_reg->prc_alarm_mask);
126
127 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
128 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
129
130 if (vpath->hldev->first_vp_id != vpath->vp_id)
131 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
132 &vp_reg->asic_ntwk_vp_err_mask);
133 else
134 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
135 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
136 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
137 &vp_reg->asic_ntwk_vp_err_mask);
138
139 __vxge_hw_pio_mem_write32_upper(0,
140 &vp_reg->vpath_general_int_mask);
141exit:
142 return status;
143
144}
145
146/*
147 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
148 * @vp: Virtual Path handle.
149 *
150 * Disable vpath interrupts. The function is to be executed first in the
151 * vpath teardown sequence.
152 *
153 * See also: vxge_hw_vpath_intr_enable()
154 */
155enum vxge_hw_status vxge_hw_vpath_intr_disable(
156 struct __vxge_hw_vpath_handle *vp)
157{
158 u64 val64;
159
160 struct __vxge_hw_virtualpath *vpath;
161 enum vxge_hw_status status = VXGE_HW_OK;
162 struct vxge_hw_vpath_reg __iomem *vp_reg;
163 if (vp == NULL) {
164 status = VXGE_HW_ERR_INVALID_HANDLE;
165 goto exit;
166 }
167
168 vpath = vp->vpath;
169
170 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
171 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
172 goto exit;
173 }
174 vp_reg = vpath->vp_reg;
175
176 __vxge_hw_pio_mem_write32_upper(
177 (u32)VXGE_HW_INTR_MASK_ALL,
178 &vp_reg->vpath_general_int_mask);
179
180 val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
181
182 writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
183
184 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
185 &vp_reg->general_errors_mask);
186
187 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
188 &vp_reg->pci_config_errors_mask);
189
190 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
191 &vp_reg->mrpcim_to_vpath_alarm_mask);
192
193 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
194 &vp_reg->srpcim_to_vpath_alarm_mask);
195
196 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
197 &vp_reg->vpath_ppif_int_mask);
198
199 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
200 &vp_reg->srpcim_msg_to_vpath_mask);
201
202 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
203 &vp_reg->vpath_pcipif_int_mask);
204
205 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
206 &vp_reg->wrdma_alarm_mask);
207
208 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
209 &vp_reg->prc_alarm_mask);
210
211 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
212 &vp_reg->xgmac_vp_int_mask);
213
214 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
215 &vp_reg->asic_ntwk_vp_err_mask);
216
217exit:
218 return status;
219}
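
/*
 * Editor's sketch (not part of the driver): how the two routines above
 * are typically paired. vxge_hw_vpath_open() is an assumption here --
 * it lives in vxge-config.c and is not shown in this file.
 */
#if 0	/* usage sketch only */
static enum vxge_hw_status example_vpath_bringup(
		struct __vxge_hw_device *hldev,
		struct vxge_hw_vpath_attr *attr,
		struct __vxge_hw_vpath_handle **vp)
{
	enum vxge_hw_status status;

	status = vxge_hw_vpath_open(hldev, attr, vp);	/* assumed API */
	if (status != VXGE_HW_OK)
		return status;

	/* Interrupts are enabled last, after the vpath is fully set up,
	 * and disabled again before the vpath is torn down. */
	return vxge_hw_vpath_intr_enable(*vp);
}
#endif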
220
221/**
222 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
223 * @channel: Channel (Rx or Tx) handle
224 * @msix_id: MSIX ID
225 *
226 * The function masks the msix interrupt for the given msix_id
227 *
228 * Returns: none
229 */
230void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
231{
232
233 __vxge_hw_pio_mem_write32_upper(
234 (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
235 0, 32),
236 &channel->common_reg->set_msix_mask_vect[msix_id%4]);
237
238 return;
239}
240
241/**
242 * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
243 * @channel: Channel (Rx or Tx) handle
244 * @msix_id: MSIX ID
245 *
246 * The function unmasks the msix interrupt for the given msix_id
247 *
248 * Returns: none
249 */
250void
251vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
252{
253
254 __vxge_hw_pio_mem_write32_upper(
255 (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
256 0, 32),
257 &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
258
259 return;
260}
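
/*
 * Editor's note (illustration, not driver code): set_msix_mask_vect[]
 * and clear_msix_mask_vect[] are arrays of four registers, so in the
 * two routines above msix_id % 4 picks the register while
 * first_vp_id + msix_id / 4 picks the bit within it. A hypothetical
 * helper making the decomposition explicit:
 */
#if 0	/* illustration only */
static void example_msix_decompose(u32 first_vp_id, int msix_id,
		int *reg_index, int *bit_index)
{
	*reg_index = msix_id % 4;		/* which of the 4 registers */
	*bit_index = first_vp_id + msix_id / 4;	/* which bit within it */
}
#endif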
261
262/**
263 * vxge_hw_device_set_intr_type - Updates the configuration
264 * with new interrupt type.
265 * @hldev: HW device handle.
266 * @intr_mode: New interrupt type
267 */
268u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
269{
270
271 if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
272 (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
273 (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
274 (intr_mode != VXGE_HW_INTR_MODE_DEF))
275 intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
276
277 hldev->config.intr_mode = intr_mode;
278 return intr_mode;
279}
280
281/**
282 * vxge_hw_device_intr_enable - Enable interrupts.
283 * @hldev: HW device handle.
286 *
287 * Enable Titan interrupts. The function is to be executed last in the
288 * Titan initialization sequence.
289 *
290 * See also: vxge_hw_device_intr_disable()
291 */
292void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
293{
294 u32 i;
295 u64 val64;
296 u32 val32;
297
298 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
299
300 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
301 continue;
302
303 vxge_hw_vpath_intr_enable(
304 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
305 }
306
307 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
308 val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
309 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
310
311 if (val64 != 0) {
312 writeq(val64, &hldev->common_reg->tim_int_status0);
313
314 writeq(~val64, &hldev->common_reg->tim_int_mask0);
315 }
316
317 val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
318 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
319
320 if (val32 != 0) {
321 __vxge_hw_pio_mem_write32_upper(val32,
322 &hldev->common_reg->tim_int_status1);
323
324 __vxge_hw_pio_mem_write32_upper(~val32,
325 &hldev->common_reg->tim_int_mask1);
326 }
327 }
328
329 val64 = readq(&hldev->common_reg->titan_general_int_status);
330
331 vxge_hw_device_unmask_all(hldev);
332
333 return;
334}
335
336/**
337 * vxge_hw_device_intr_disable - Disable Titan interrupts.
338 * @hldev: HW device handle.
341 *
342 * Disable Titan interrupts.
343 *
344 * See also: vxge_hw_device_intr_enable()
345 */
346void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
347{
348 u32 i;
349
350 vxge_hw_device_mask_all(hldev);
351
352 /* mask all the tim interrupts */
353 writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
354 __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
355 &hldev->common_reg->tim_int_mask1);
356
357 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
358
359 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
360 continue;
361
362 vxge_hw_vpath_intr_disable(
363 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
364 }
365
366 return;
367}
368
369/**
370 * vxge_hw_device_mask_all - Mask all device interrupts.
371 * @hldev: HW device handle.
372 *
373 * Mask all device interrupts.
374 *
375 * See also: vxge_hw_device_unmask_all()
376 */
377void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
378{
379 u64 val64;
380
381 val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
382 VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
383
384 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
385 &hldev->common_reg->titan_mask_all_int);
386
387 return;
388}
389
390/**
391 * vxge_hw_device_unmask_all - Unmask all device interrupts.
392 * @hldev: HW device handle.
393 *
394 * Unmask all device interrupts.
395 *
396 * See also: vxge_hw_device_mask_all()
397 */
398void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
399{
400 u64 val64 = 0;
401
402 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
403 val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
404
405 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
406 &hldev->common_reg->titan_mask_all_int);
407
408 return;
409}
410
411/**
412 * vxge_hw_device_flush_io - Flush io writes.
413 * @hldev: HW device handle.
414 *
415 * The function performs a read operation to flush io writes.
416 *
417 * Returns: void
418 */
419void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
420{
421 u32 val32;
422
423 val32 = readl(&hldev->common_reg->titan_general_int_status);
424}
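
/*
 * Editor's note: the readl() above relies on PCI ordering -- a read
 * from the device cannot complete until every earlier posted write has
 * reached it. A hedged usage sketch (hypothetical caller context):
 */
#if 0	/* usage sketch only */
	writeq(val64, &vp_reg->prc_rxd_doorbell);	/* posted write */
	vxge_hw_device_flush_io(hldev);	/* force it out to the adapter */
#endif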
425
426/**
427 * vxge_hw_device_begin_irq - Begin IRQ processing.
428 * @hldev: HW device handle.
429 * @skip_alarms: If non-zero, do not clear the alarms
430 * @reason: "Reason" for the interrupt, the value of Titan's
431 * general_int_status register.
432 *
433 * The function performs two actions. It first checks whether the (shared)
434 * interrupt was raised by the device, and then masks the device interrupts.
435 *
436 * Note:
437 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
438 * bridge. Therefore, two back-to-back interrupts are potentially possible.
439 *
440 * Returns: 0 (through @reason) if the interrupt is not "ours"; note that in
441 * this case the device remains enabled.
442 * Otherwise, vxge_hw_device_begin_irq() returns the 64-bit general adapter
443 * status through @reason.
444 */
445enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
446 u32 skip_alarms, u64 *reason)
447{
448 u32 i;
449 u64 val64;
450 u64 adapter_status;
451 u64 vpath_mask;
452 enum vxge_hw_status ret = VXGE_HW_OK;
453
454 val64 = readq(&hldev->common_reg->titan_general_int_status);
455
456 if (unlikely(!val64)) {
457 /* not Titan interrupt */
458 *reason = 0;
459 ret = VXGE_HW_ERR_WRONG_IRQ;
460 goto exit;
461 }
462
463 if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
464
465 adapter_status = readq(&hldev->common_reg->adapter_status);
466
467 if (adapter_status == VXGE_HW_ALL_FOXES) {
468
469 __vxge_hw_device_handle_error(hldev,
470 NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
471 *reason = 0;
472 ret = VXGE_HW_ERR_SLOT_FREEZE;
473 goto exit;
474 }
475 }
476
477 hldev->stats.sw_dev_info_stats.total_intr_cnt++;
478
479 *reason = val64;
480
481 vpath_mask = hldev->vpaths_deployed >>
482 (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
483
484 if (val64 &
485 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
486 hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
487
488 return VXGE_HW_OK;
489 }
490
491 hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
492
493 if (unlikely(val64 &
494 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
495
496 enum vxge_hw_status error_level = VXGE_HW_OK;
497
498 hldev->stats.sw_dev_err_stats.vpath_alarms++;
499
500 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
501
502 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
503 continue;
504
505 ret = __vxge_hw_vpath_alarm_process(
506 &hldev->virtual_paths[i], skip_alarms);
507
508 error_level = VXGE_HW_SET_LEVEL(ret, error_level);
509
510 if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
511 (ret == VXGE_HW_ERR_SLOT_FREEZE)))
512 break;
513 }
514
515 ret = error_level;
516 }
517exit:
518 return ret;
519}
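
/*
 * Editor's sketch (not part of the driver): a typical INTA handler
 * built from the primitives in this file. The handler name and the
 * ring-processing placeholder are assumptions.
 */
#if 0	/* usage sketch only */
static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct __vxge_hw_device *hldev = dev_id;
	enum vxge_hw_status status;
	u64 reason;

	status = vxge_hw_device_begin_irq(hldev, 0, &reason);
	if (status == VXGE_HW_ERR_WRONG_IRQ)
		return IRQ_NONE;	/* shared line, not our device */

	vxge_hw_device_mask_all(hldev);
	/* ... process the rings/fifos indicated by 'reason' ... */
	vxge_hw_device_clear_tx_rx(hldev);
	vxge_hw_device_unmask_all(hldev);
	vxge_hw_device_flush_io(hldev);

	return IRQ_HANDLED;
}
#endif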
520
521/*
522 * __vxge_hw_device_handle_link_up_ind
523 * @hldev: HW device handle.
524 *
525 * Link up indication handler. The function is invoked by HW when
526 * Titan indicates that the link is up for a programmable amount of time.
527 */
528enum vxge_hw_status
529__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
530{
531 /*
532 * If the link state is already up, return.
533 */
534 if (hldev->link_state == VXGE_HW_LINK_UP)
535 goto exit;
536
537 hldev->link_state = VXGE_HW_LINK_UP;
538
539 /* notify driver */
540 if (hldev->uld_callbacks.link_up)
541 hldev->uld_callbacks.link_up(hldev);
542exit:
543 return VXGE_HW_OK;
544}
545
546/*
547 * __vxge_hw_device_handle_link_down_ind
548 * @hldev: HW device handle.
549 *
550 * Link down indication handler. The function is invoked by HW when
551 * Titan indicates that the link is down.
552 */
553enum vxge_hw_status
554__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
555{
556 /*
557 * If the link state is already down, return.
558 */
559 if (hldev->link_state == VXGE_HW_LINK_DOWN)
560 goto exit;
561
562 hldev->link_state = VXGE_HW_LINK_DOWN;
563
564 /* notify driver */
565 if (hldev->uld_callbacks.link_down)
566 hldev->uld_callbacks.link_down(hldev);
567exit:
568 return VXGE_HW_OK;
569}
570
571/**
572 * __vxge_hw_device_handle_error - Handle error
573 * @hldev: HW device
574 * @vp_id: Vpath Id
575 * @type: Error type. Please see enum vxge_hw_event{}
576 *
577 * Handle error.
578 */
579enum vxge_hw_status
580__vxge_hw_device_handle_error(
581 struct __vxge_hw_device *hldev,
582 u32 vp_id,
583 enum vxge_hw_event type)
584{
585 switch (type) {
586 case VXGE_HW_EVENT_UNKNOWN:
587 break;
588 case VXGE_HW_EVENT_RESET_START:
589 case VXGE_HW_EVENT_RESET_COMPLETE:
590 case VXGE_HW_EVENT_LINK_DOWN:
591 case VXGE_HW_EVENT_LINK_UP:
592 goto out;
593 case VXGE_HW_EVENT_ALARM_CLEARED:
594 goto out;
595 case VXGE_HW_EVENT_ECCERR:
596 case VXGE_HW_EVENT_MRPCIM_ECCERR:
597 goto out;
598 case VXGE_HW_EVENT_FIFO_ERR:
599 case VXGE_HW_EVENT_VPATH_ERR:
600 case VXGE_HW_EVENT_CRITICAL_ERR:
601 case VXGE_HW_EVENT_SERR:
602 break;
603 case VXGE_HW_EVENT_SRPCIM_SERR:
604 case VXGE_HW_EVENT_MRPCIM_SERR:
605 goto out;
606 case VXGE_HW_EVENT_SLOT_FREEZE:
607 break;
608 default:
609 vxge_assert(0);
610 goto out;
611 }
612
613 /* notify driver */
614 if (hldev->uld_callbacks.crit_err)
615 hldev->uld_callbacks.crit_err(
616 (struct __vxge_hw_device *)hldev,
617 type, vp_id);
618out:
619
620 return VXGE_HW_OK;
621}
622
623/**
624 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
625 * condition that has caused the Tx and Rx interrupt.
626 * @hldev: HW device.
627 *
628 * Acknowledge (that is, clear) the condition that has caused
629 * the Tx and Rx interrupt.
630 * See also: vxge_hw_device_begin_irq(),
631 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
632 */
633void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
634{
635
636 if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
637 (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
638 writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
639 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
640 &hldev->common_reg->tim_int_status0);
641 }
642
643 if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
644 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
645 __vxge_hw_pio_mem_write32_upper(
646 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
647 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
648 &hldev->common_reg->tim_int_status1);
649 }
650
651 return;
652}
653
654/*
655 * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
656 * @channel: Channel
657 * @dtrh: Buffer to return the DTR pointer
658 *
659 * Allocates a dtr from the reserve array. If the reserve array is empty,
660 * it swaps the reserve and free arrays.
661 *
662 */
663enum vxge_hw_status
664vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
665{
666 void **tmp_arr;
667
668 if (channel->reserve_ptr - channel->reserve_top > 0) {
669_alloc_after_swap:
670 *dtrh = channel->reserve_arr[--channel->reserve_ptr];
671
672 return VXGE_HW_OK;
673 }
674
675 /* switch between empty and full arrays */
676
677	/* The idea behind this design is that by keeping the free and reserve
678	 * arrays separate, we separate the irq and non-irq paths, i.e. no
679	 * additional locking is needed when we free a resource. */
680
681 if (channel->length - channel->free_ptr > 0) {
682
683 tmp_arr = channel->reserve_arr;
684 channel->reserve_arr = channel->free_arr;
685 channel->free_arr = tmp_arr;
686 channel->reserve_ptr = channel->length;
687 channel->reserve_top = channel->free_ptr;
688 channel->free_ptr = channel->length;
689
690 channel->stats->reserve_free_swaps_cnt++;
691
692 goto _alloc_after_swap;
693 }
694
695 channel->stats->full_cnt++;
696
697 *dtrh = NULL;
698 return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
699}
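
/*
 * Editor's note: a worked trace of the swap above, for a channel with
 * length == 4 (hypothetical numbers). After four allocations the
 * reserve array is exhausted (reserve_ptr == reserve_top); once the
 * irq side has freed descriptors, free_ptr has walked below length.
 * The swap then turns the former free array into the new reserve array
 * in O(1), which is why the alloc and free paths need no shared lock.
 */
#if 0	/* lifecycle sketch only */
	void *dtrh;

	if (vxge_hw_channel_dtr_alloc(channel, &dtrh) == VXGE_HW_OK) {
		vxge_hw_channel_dtr_post(channel, dtrh);	/* non-irq side */
		/* ... the device completes the descriptor ... */
		vxge_hw_channel_dtr_try_complete(channel, &dtrh);
		vxge_hw_channel_dtr_complete(channel);
		vxge_hw_channel_dtr_free(channel, dtrh);	/* irq side */
	}
#endif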
700
701/*
702 * vxge_hw_channel_dtr_post - Post a dtr to the channel
703 * @channelh: Channel
704 * @dtrh: DTR pointer
705 *
706 * Posts a dtr to work array.
707 *
708 */
709void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
710{
711 vxge_assert(channel->work_arr[channel->post_index] == NULL);
712
713 channel->work_arr[channel->post_index++] = dtrh;
714
715 /* wrap-around */
716 if (channel->post_index == channel->length)
717 channel->post_index = 0;
718}
719
720/*
721 * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
722 * @channel: Channel
723 * @dtr: Buffer to return the next completed DTR pointer
724 *
725 * Returns the next completed dtr without removing it from the work array.
726 *
727 */
728void
729vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
730{
731 vxge_assert(channel->compl_index < channel->length);
732
733 *dtrh = channel->work_arr[channel->compl_index];
734}
735
736/*
737 * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
738 * @channel: Channel handle
739 *
740 * Removes the next completed dtr from work array
741 *
742 */
743void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
744{
745 channel->work_arr[channel->compl_index] = NULL;
746
747 /* wrap-around */
748 if (++channel->compl_index == channel->length)
749 channel->compl_index = 0;
750
751 channel->stats->total_compl_cnt++;
752}
753
754/*
755 * vxge_hw_channel_dtr_free - Frees a dtr
756 * @channel: Channel handle
757 * @dtr: DTR pointer
758 *
759 * Returns the dtr to the free array.
760 *
761 */
762void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
763{
764 channel->free_arr[--channel->free_ptr] = dtrh;
765}
766
767/*
768 * vxge_hw_channel_dtr_count
769 * @channel: Channel handle. Obtained via vxge_hw_channel_open().
770 *
771 * Retrieve the number of DTRs available. This function cannot be called
772 * from the data path; ring_initial_replenish() is the only user.
773 */
774int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
775{
776 return (channel->reserve_ptr - channel->reserve_top) +
777 (channel->length - channel->free_ptr);
778}
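
/*
 * Editor's note: the two terms above are (a) DTRs still sitting in the
 * reserve array and (b) DTRs already returned to the free array but
 * not yet swapped back; together that is every descriptor not
 * currently owned by the device.
 */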
779
780/**
781 * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
782 * @ring: Handle to the ring object used for receive
783 * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
784 * with a valid handle.
785 *
786 * Reserve an Rx descriptor for subsequent filling-in by the driver
787 * and posting on the corresponding channel (@channelh)
788 * via vxge_hw_ring_rxd_post().
789 *
790 * Returns: VXGE_HW_OK - success.
791 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
792 *
793 */
794enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
795 void **rxdh)
796{
797 enum vxge_hw_status status;
798 struct __vxge_hw_channel *channel;
799
800 channel = &ring->channel;
801
802 status = vxge_hw_channel_dtr_alloc(channel, rxdh);
803
804 if (status == VXGE_HW_OK) {
805 struct vxge_hw_ring_rxd_1 *rxdp =
806 (struct vxge_hw_ring_rxd_1 *)*rxdh;
807
808 rxdp->control_0 = rxdp->control_1 = 0;
809 }
810
811 return status;
812}
813
814/**
815 * vxge_hw_ring_rxd_free - Free descriptor.
816 * @ring: Handle to the ring object used for receive
817 * @rxdh: Descriptor handle.
818 *
819 * Free the reserved descriptor. This operation is "symmetrical" to
820 * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
821 * lifecycle.
822 *
823 * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor can again
824 * be:
825 *
826 * - reserved (vxge_hw_ring_rxd_reserve);
827 *
828 * - posted (vxge_hw_ring_rxd_post);
829 *
830 * - completed (vxge_hw_ring_rxd_next_completed);
831 *
832 * - and recycled again (vxge_hw_ring_rxd_free).
833 *
834 * For alternative state transitions and more details please refer to
835 * the design doc.
836 *
837 */
838void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
839{
840 struct __vxge_hw_channel *channel;
841
842 channel = &ring->channel;
843
844 vxge_hw_channel_dtr_free(channel, rxdh);
845
846}
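
/*
 * Editor's sketch (not part of the driver): one pass through the
 * lifecycle listed above. The buffer-attach step is an assumption --
 * the 1-buffer set helpers live in vxge-traffic.h, not in this file.
 */
#if 0	/* usage sketch only */
	void *rxdh;

	if (vxge_hw_ring_rxd_reserve(ring, &rxdh) == VXGE_HW_OK) {
		/* ... attach a DMA-mapped buffer to rxdh here ... */
		vxge_hw_ring_rxd_post(ring, rxdh);	/* hand it to the NIC */
	}
#endif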
847
848/**
849 * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
850 * @ring: Handle to the ring object used for receive
851 * @rxdh: Descriptor handle.
852 *
853 * This routine prepares an rxd and posts it.
854 */
855void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
856{
857 struct __vxge_hw_channel *channel;
858
859 channel = &ring->channel;
860
861 vxge_hw_channel_dtr_post(channel, rxdh);
862}
863
864/**
865 * vxge_hw_ring_rxd_post_post - Process rxd after post.
866 * @ring: Handle to the ring object used for receive
867 * @rxdh: Descriptor handle.
868 *
869 * Processes rxd after post
870 */
871void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
872{
873 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
874 struct __vxge_hw_channel *channel;
875
876 channel = &ring->channel;
877
878 rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
879
880 if (ring->stats->common_stats.usage_cnt > 0)
881 ring->stats->common_stats.usage_cnt--;
882}
883
884/**
885 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
886 * @ring: Handle to the ring object used for receive
887 * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
888 *
889 * Post descriptor on the ring.
890 * Prior to posting, the descriptor should be filled in accordance with
891 * the Host/Titan interface specification for a given service (LL, etc.).
892 *
893 */
894void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
895{
896 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
897 struct __vxge_hw_channel *channel;
898
899 channel = &ring->channel;
900
901 wmb();
902 rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
903
904 vxge_hw_channel_dtr_post(channel, rxdh);
905
906 if (ring->stats->common_stats.usage_cnt > 0)
907 ring->stats->common_stats.usage_cnt--;
908}
909
910/**
911 * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
912 * @ring: Handle to the ring object used for receive
913 * @rxdh: Descriptor handle.
914 *
915 * Processes rxd after post with memory barrier.
916 */
917void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
918{
919 struct __vxge_hw_channel *channel;
920
921 channel = &ring->channel;
922
923 wmb();
924 vxge_hw_ring_rxd_post_post(ring, rxdh);
925}
926
927/**
928 * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
929 * @ring: Handle to the ring object used for receive
930 * @rxdh: Descriptor handle. Returned by HW.
931 * @t_code: Transfer code, as per Titan User Guide,
932 * Receive Descriptor Format. Returned by HW.
933 *
934 * Retrieve the _next_ completed descriptor.
935 * HW uses the ring callback (*vxge_hw_ring_callback_f) to notify the
936 * driver of new completed descriptors. After that
937 * the driver can use vxge_hw_ring_rxd_next_completed to retrieve remaining
938 * completions (the very first completion is passed by HW via
939 * vxge_hw_ring_callback_f).
940 *
941 * Implementation-wise, the driver is free to call
942 * vxge_hw_ring_rxd_next_completed either immediately from inside the
943 * ring callback, or in a deferred fashion and separate (from HW)
944 * context.
945 *
 946 * A non-zero @t_code means failure to fill in the receive buffer(s)
 947 * of the descriptor.
 948 * For instance, a parity error detected during the data transfer.
 949 * In this case Titan will complete the descriptor and indicate
 950 * to the host that the received data is not to be used.
951 * For details please refer to Titan User Guide.
952 *
953 * Returns: VXGE_HW_OK - success.
954 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
955 * are currently available for processing.
956 *
957 * See also: vxge_hw_ring_callback_f{},
 958 * vxge_hw_fifo_txdl_next_completed(), enum vxge_hw_status{}.
959 */
960enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
961 struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
962{
963 struct __vxge_hw_channel *channel;
964 struct vxge_hw_ring_rxd_1 *rxdp;
965 enum vxge_hw_status status = VXGE_HW_OK;
966
967 channel = &ring->channel;
968
969 vxge_hw_channel_dtr_try_complete(channel, rxdh);
970
971 rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh;
972 if (rxdp == NULL) {
973 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
974 goto exit;
975 }
976
 977	/* check whether the adapter has released ownership, i.e. completed it */
978 if (!(rxdp->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)) {
979
980 vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
981 0);
982
983 ++ring->cmpl_cnt;
984 vxge_hw_channel_dtr_complete(channel);
985
986 *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(rxdp->control_0);
987
988 vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
989
990 ring->stats->common_stats.usage_cnt++;
991 if (ring->stats->common_stats.usage_max <
992 ring->stats->common_stats.usage_cnt)
993 ring->stats->common_stats.usage_max =
994 ring->stats->common_stats.usage_cnt;
995
996 status = VXGE_HW_OK;
997 goto exit;
998 }
999
1000	/* reset it, since we don't want to return
1001	 * garbage to the driver */
1002 *rxdh = NULL;
1003 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1004exit:
1005 return status;
1006}
1007
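/*
 * Illustrative completion drain built on the routine above; the real
 * driver runs this from its ring callback. A zero t_code is success
 * (see vxge_hw_ring_handle_tcode() below), and each descriptor is
 * recycled via vxge_hw_ring_rxd_free().
 */
static void example_ring_drain(struct __vxge_hw_ring *ring)
{
        void *rxdh;
        u8 t_code;

        while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
               VXGE_HW_OK) {
                if (t_code != 0)
                        vxge_hw_ring_handle_tcode(ring, rxdh, t_code);

                /* ... pass the received buffer up the stack ... */

                vxge_hw_ring_rxd_free(ring, rxdh);
        }
}
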
1008/**
1009 * vxge_hw_ring_handle_tcode - Handle transfer code.
1010 * @ring: Handle to the ring object used for receive
1011 * @rxdh: Descriptor handle.
1012 * @t_code: One of the enumerated (and documented in the Titan user guide)
1013 * "transfer codes".
1014 *
1015 * Handle descriptor's transfer code. The latter comes with each completed
1016 * descriptor.
1017 *
1018 * Returns: one of the enum vxge_hw_status{} enumerated types.
1019 * VXGE_HW_OK - for success.
1020 * VXGE_HW_ERR_INVALID_TCODE - when the transfer code is out of range.
1021 */
1022enum vxge_hw_status vxge_hw_ring_handle_tcode(
1023 struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
1024{
1025 struct __vxge_hw_channel *channel;
1026 enum vxge_hw_status status = VXGE_HW_OK;
1027
1028 channel = &ring->channel;
1029
1030	/* t_code 0 is a good frame; t_code 5 flags an unparseable
1031	 * packet (e.g. an unknown IPv6 extension header) that is still
1032	 * delivered to the host, so both are accepted here. Any other
1033	 * in-range t_code is counted as an error below. */
1034
1035 if (t_code == 0 || t_code == 5) {
1036 status = VXGE_HW_OK;
1037 goto exit;
1038 }
1039
1040 if (t_code > 0xF) {
1041 status = VXGE_HW_ERR_INVALID_TCODE;
1042 goto exit;
1043 }
1044
1045 ring->stats->rxd_t_code_err_cnt[t_code]++;
1046exit:
1047 return status;
1048}
1049
1050/**
1051 * __vxge_hw_non_offload_db_post - Post non offload doorbell
1052 *
1053 * @fifo: Fifo handle
1054 * @txdl_ptr: The starting location of the TxDL in host memory
1055 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
1056 * @no_snoop: No snoop flags
1057 *
1058 * This function posts a non-offload doorbell to the doorbell FIFO
1059 *
1060 */
1061static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
1062 u64 txdl_ptr, u32 num_txds, u32 no_snoop)
1063{
1064 struct __vxge_hw_channel *channel;
1065
1066 channel = &fifo->channel;
1067
1068 writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
1069 VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
1070 VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
1071 &fifo->nofl_db->control_0);
1072
1073 wmb();
1074
1075 writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
1076 wmb();
1077
1078}
1079
1080/**
1081 * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
1082 * the fifo
1083 * @fifoh: Handle to the fifo object used for non offload send
1084 */
1085u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
1086{
1087 return vxge_hw_channel_dtr_count(&fifoh->channel);
1088}
1089
1090/**
1091 * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
1092 * @fifoh: Handle to the fifo object used for non offload send
1093 * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
1094 * with a valid handle.
1095 * @txdl_priv: Buffer to return the pointer to per txdl space
1096 *
1097 * Reserve a single TxDL (that is, a fifo descriptor)
1098 * for subsequent filling-in by the driver
1099 * and posting on the corresponding channel (@channelh)
1100 * via vxge_hw_fifo_txdl_post().
1101 *
1102 * Note: it is the responsibility of the driver to reserve multiple descriptors
1103 * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
1104 * carries up to configured number (fifo.max_frags) of contiguous buffers.
1105 *
1106 * Returns: VXGE_HW_OK - success;
1107 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
1108 *
1109 */
1110enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
1111 struct __vxge_hw_fifo *fifo,
1112 void **txdlh, void **txdl_priv)
1113{
1114 struct __vxge_hw_channel *channel;
1115 enum vxge_hw_status status;
1116 int i;
1117
1118 channel = &fifo->channel;
1119
1120 status = vxge_hw_channel_dtr_alloc(channel, txdlh);
1121
1122 if (status == VXGE_HW_OK) {
1123 struct vxge_hw_fifo_txd *txdp =
1124 (struct vxge_hw_fifo_txd *)*txdlh;
1125 struct __vxge_hw_fifo_txdl_priv *priv;
1126
1127 priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
1128
1129 /* reset the TxDL's private */
1130 priv->align_dma_offset = 0;
1131 priv->align_vaddr_start = priv->align_vaddr;
1132 priv->align_used_frags = 0;
1133 priv->frags = 0;
1134 priv->alloc_frags = fifo->config->max_frags;
1135 priv->next_txdl_priv = NULL;
1136
1137 *txdl_priv = (void *)(size_t)txdp->host_control;
1138
1139 for (i = 0; i < fifo->config->max_frags; i++) {
1140 txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
1141 txdp->control_0 = txdp->control_1 = 0;
1142 }
1143 }
1144
1145 return status;
1146}
1147
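/*
 * Illustrative sketch of the expected backpressure handling around
 * reserve: VXGE_HW_INF_OUT_OF_DESCRIPTORS is the caller's cue to stop
 * queueing until completions return descriptors to the free list. The
 * netif_tx_stop_queue() mention is a hypothetical caller detail, not
 * something this file does.
 */
static enum vxge_hw_status example_txdl_get(struct __vxge_hw_fifo *fifo,
                                            void **txdlh)
{
        void *txdl_priv;
        enum vxge_hw_status status;

        status = vxge_hw_fifo_txdl_reserve(fifo, txdlh, &txdl_priv);
        if (status == VXGE_HW_INF_OUT_OF_DESCRIPTORS) {
                /* e.g. netif_tx_stop_queue(txq); retry after reclaim */
        }
        return status;
}
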
1148/**
1149 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
1150 * descriptor.
1151 * @fifo: Handle to the fifo object used for non offload send
1152 * @txdlh: Descriptor handle.
1153 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
1154 * (of buffers).
1155 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
1156 * @size: Size of the data buffer (in bytes).
1157 *
1158 * This API is part of the preparation of the transmit descriptor for posting
1159 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
1160 * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
1161 * All three APIs fill in the fields of the fifo descriptor,
1162 * in accordance with the Titan specification.
1163 *
1164 */
1165void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
1166 void *txdlh, u32 frag_idx,
1167 dma_addr_t dma_pointer, u32 size)
1168{
1169 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1170 struct vxge_hw_fifo_txd *txdp, *txdp_last;
1171 struct __vxge_hw_channel *channel;
1172
1173 channel = &fifo->channel;
1174
1175 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1176 txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;
1177
1178 if (frag_idx != 0)
1179 txdp->control_0 = txdp->control_1 = 0;
1180 else {
1181 txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1182 VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
1183 txdp->control_1 |= fifo->interrupt_type;
1184 txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
1185 fifo->tx_intr_num);
1186 if (txdl_priv->frags) {
1187 txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
1188 (txdl_priv->frags - 1);
1189 txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1190 VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1191 }
1192 }
1193
1194 vxge_assert(frag_idx < txdl_priv->alloc_frags);
1195
1196 txdp->buffer_pointer = (u64)dma_pointer;
1197 txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
1198 fifo->stats->total_buffers++;
1199 txdl_priv->frags++;
1200}
1201
1202/**
1203 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
1204 * @fifo: Handle to the fifo object used for non offload send
1205 * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
1206 * The number of buffers in the TxDL is taken from its private area,
1207 * which vxge_hw_fifo_txdl_buffer_set() updates per fragment.
1208 *
1209 * Post descriptor on the 'fifo' type channel for transmission.
1210 * Prior to posting, the descriptor should be filled in accordance with
1211 * Host/Titan interface specification for a given service (LL, etc.).
1212 *
1213 */
1214void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
1215{
1216 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1217 struct vxge_hw_fifo_txd *txdp_last;
1218 struct vxge_hw_fifo_txd *txdp_first;
1219 struct __vxge_hw_channel *channel;
1220
1221 channel = &fifo->channel;
1222
1223 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1224 txdp_first = (struct vxge_hw_fifo_txd *)txdlh;
1225
1226 txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
1227 txdp_last->control_0 |=
1228 VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1229 txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
1230
1231 vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
1232
1233 __vxge_hw_non_offload_db_post(fifo,
1234 (u64)(size_t)txdl_priv->dma_addr,
1235 txdl_priv->frags - 1,
1236 fifo->no_snoop_bits);
1237
1238 fifo->stats->total_posts++;
1239 fifo->stats->common_stats.usage_cnt++;
1240 if (fifo->stats->common_stats.usage_max <
1241 fifo->stats->common_stats.usage_cnt)
1242 fifo->stats->common_stats.usage_max =
1243 fifo->stats->common_stats.usage_cnt;
1244}
1245
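/*
 * Illustrative end-to-end transmit using the three calls above. The
 * dma[]/len[] arrays stand in for a caller that already holds
 * per-fragment DMA mappings (hypothetical here); nfrags must not
 * exceed fifo->config->max_frags.
 */
static enum vxge_hw_status example_fifo_send(struct __vxge_hw_fifo *fifo,
                                             dma_addr_t *dma, u32 *len,
                                             u32 nfrags)
{
        void *txdlh, *txdl_priv;
        enum vxge_hw_status status;
        u32 i;

        status = vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv);
        if (status != VXGE_HW_OK)
                return status;

        for (i = 0; i < nfrags; i++)
                vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, i, dma[i], len[i]);

        vxge_hw_fifo_txdl_post(fifo, txdlh);    /* rings the doorbell too */
        return VXGE_HW_OK;
}
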
1246/**
1247 * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
1248 * @fifo: Handle to the fifo object used for non offload send
1249 * @txdlh: Descriptor handle. Returned by HW.
1250 * @t_code: Transfer code, as per Titan User Guide,
1251 * Transmit Descriptor Format.
1252 * Returned by HW.
1253 *
1254 * Retrieve the _next_ completed descriptor.
1255 * HW uses the channel callback (*vxge_hw_channel_callback_f) to notify the
1256 * driver of new completed descriptors. After that
1257 * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the
1258 * remaining completions (the very first completion is passed by HW via
1259 * vxge_hw_channel_callback_f).
1260 *
1261 * Implementation-wise, the driver is free to call
1262 * vxge_hw_fifo_txdl_next_completed either immediately from inside the
1263 * channel callback, or in a deferred fashion and separate (from HW)
1264 * context.
1265 *
1266 * Non-zero @t_code means failure to process the descriptor.
1267 * The failure could happen, for instance, when the link is
1268 * down, in which case Titan completes the descriptor because it
1269 * is not able to send the data out.
1270 *
1271 * For details please refer to Titan User Guide.
1272 *
1273 * Returns: VXGE_HW_OK - success.
1274 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1275 * are currently available for processing.
1276 *
1277 */
1278enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
1279 struct __vxge_hw_fifo *fifo, void **txdlh,
1280 enum vxge_hw_fifo_tcode *t_code)
1281{
1282 struct __vxge_hw_channel *channel;
1283 struct vxge_hw_fifo_txd *txdp;
1284 enum vxge_hw_status status = VXGE_HW_OK;
1285
1286 channel = &fifo->channel;
1287
1288 vxge_hw_channel_dtr_try_complete(channel, txdlh);
1289
1290 txdp = (struct vxge_hw_fifo_txd *)*txdlh;
1291 if (txdp == NULL) {
1292 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1293 goto exit;
1294 }
1295
1296 /* check whether host owns it */
1297 if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
1298
1299 vxge_assert(txdp->host_control != 0);
1300
1301 vxge_hw_channel_dtr_complete(channel);
1302
1303 *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
1304
1305 if (fifo->stats->common_stats.usage_cnt > 0)
1306 fifo->stats->common_stats.usage_cnt--;
1307
1308 status = VXGE_HW_OK;
1309 goto exit;
1310 }
1311
1312 /* no more completions */
1313 *txdlh = NULL;
1314 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1315exit:
1316 return status;
1317}
1318
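/*
 * Illustrative reclaim loop built on the routine above; a zero t_code
 * is success (see vxge_hw_fifo_handle_tcode() below), and every
 * completed TxDL goes back to the free list via vxge_hw_fifo_txdl_free().
 */
static void example_fifo_reclaim(struct __vxge_hw_fifo *fifo)
{
        void *txdlh;
        enum vxge_hw_fifo_tcode t_code;

        while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
               VXGE_HW_OK) {
                if (t_code != 0)
                        vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);

                /* ... unmap the buffers and free the skb ... */

                vxge_hw_fifo_txdl_free(fifo, txdlh);
        }
}
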
1319/**
1320 * vxge_hw_fifo_handle_tcode - Handle transfer code.
1321 * @fifo: Handle to the fifo object used for non offload send
1322 * @txdlh: Descriptor handle.
1323 * @t_code: One of the enumerated (and documented in the Titan user guide)
1324 * "transfer codes".
1325 *
1326 * Handle descriptor's transfer code. The latter comes with each completed
1327 * descriptor.
1328 *
1329 * Returns: one of the enum vxge_hw_status{} enumerated types.
1330 * VXGE_HW_OK - for success.
1331 * VXGE_HW_ERR_INVALID_TCODE - when the transfer code is out of range.
1332 */
1333enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
1334 void *txdlh,
1335 enum vxge_hw_fifo_tcode t_code)
1336{
1337 struct __vxge_hw_channel *channel;
1338
1339 enum vxge_hw_status status = VXGE_HW_OK;
1340 channel = &fifo->channel;
1341
1342	if ((t_code & 0x7) > 0x4) {
1343 status = VXGE_HW_ERR_INVALID_TCODE;
1344 goto exit;
1345 }
1346
1347 fifo->stats->txd_t_code_err_cnt[t_code]++;
1348exit:
1349 return status;
1350}
1351
1352/**
1353 * vxge_hw_fifo_txdl_free - Free descriptor.
1354 * @fifo: Handle to the fifo object used for non offload send
1355 * @txdlh: Descriptor handle.
1356 *
1357 * Free the reserved descriptor. This operation is "symmetrical" to
1358 * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
1359 * lifecycle.
1360 *
1361 * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
1362 * be:
1363 *
1364 * - reserved (vxge_hw_fifo_txdl_reserve);
1365 *
1366 * - posted (vxge_hw_fifo_txdl_post);
1367 *
1368 * - completed (vxge_hw_fifo_txdl_next_completed);
1369 *
1370 * - and recycled again (vxge_hw_fifo_txdl_free).
1371 *
1372 * For alternative state transitions and more details please refer to
1373 * the design doc.
1374 *
1375 */
1376void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
1377{
1378	struct __vxge_hw_channel *channel;
1379
1380	channel = &fifo->channel;
1381
1382	vxge_hw_channel_dtr_free(channel, txdlh);
1390}
1391
1392/**
1393 * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
1394 * to MAC address table.
1395 * @vp: Vpath handle.
1396 * @macaddr: MAC address to be added for this vpath into the list
1397 * @macaddr_mask: MAC address mask for macaddr
1398 * @duplicate_mode: Duplicate MAC address add mode. Please see
1399 * enum vxge_hw_vpath_mac_addr_add_mode{}
1400 *
1401 * Adds the given mac address and mac address mask into the list for this
1402 * vpath.
1403 * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
1404 * vxge_hw_vpath_mac_addr_get_next
1405 *
1406 */
1407enum vxge_hw_status
1408vxge_hw_vpath_mac_addr_add(
1409 struct __vxge_hw_vpath_handle *vp,
1410 u8 (macaddr)[ETH_ALEN],
1411 u8 (macaddr_mask)[ETH_ALEN],
1412 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
1413{
1414 u32 i;
1415 u64 data1 = 0ULL;
1416 u64 data2 = 0ULL;
1417 enum vxge_hw_status status = VXGE_HW_OK;
1418
1419 if (vp == NULL) {
1420 status = VXGE_HW_ERR_INVALID_HANDLE;
1421 goto exit;
1422 }
1423
1424 for (i = 0; i < ETH_ALEN; i++) {
1425 data1 <<= 8;
1426 data1 |= (u8)macaddr[i];
1427
1428 data2 <<= 8;
1429 data2 |= (u8)macaddr_mask[i];
1430 }
1431
1432 switch (duplicate_mode) {
1433 case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
1434 i = 0;
1435 break;
1436 case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
1437 i = 1;
1438 break;
1439 case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
1440 i = 2;
1441 break;
1442 default:
1443 i = 0;
1444 break;
1445 }
1446
1447 status = __vxge_hw_vpath_rts_table_set(vp,
1448 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1449 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1450 0,
1451 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1452 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
1453 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
1454exit:
1455 return status;
1456}
1457
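/*
 * Illustrative usage. The address and all-ones mask are hypothetical;
 * the byte loop above folds them big-endian into the low 48 bits, so
 * 00:11:22:33:44:55 packs to data1 == 0x0000001122334455ULL.
 */
static enum vxge_hw_status
example_add_station_mac(struct __vxge_hw_vpath_handle *vp)
{
        u8 mac[ETH_ALEN]  = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        u8 mask[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };

        return vxge_hw_vpath_mac_addr_add(vp, mac, mask,
                        VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
}
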
1458/**
1459 * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
1460 * from MAC address table.
1461 * @vp: Vpath handle.
1462 * @macaddr: First MAC address entry for this vpath in the list
1463 * @macaddr_mask: MAC address mask for macaddr
1464 *
1465 * Returns the first mac address and mac address mask in the list for this
1466 * vpath.
1467 * see also: vxge_hw_vpath_mac_addr_get_next
1468 *
1469 */
1470enum vxge_hw_status
1471vxge_hw_vpath_mac_addr_get(
1472 struct __vxge_hw_vpath_handle *vp,
1473 u8 (macaddr)[ETH_ALEN],
1474 u8 (macaddr_mask)[ETH_ALEN])
1475{
1476 u32 i;
1477 u64 data1 = 0ULL;
1478 u64 data2 = 0ULL;
1479 enum vxge_hw_status status = VXGE_HW_OK;
1480
1481 if (vp == NULL) {
1482 status = VXGE_HW_ERR_INVALID_HANDLE;
1483 goto exit;
1484 }
1485
1486 status = __vxge_hw_vpath_rts_table_get(vp,
1487 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1488 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1489 0, &data1, &data2);
1490
1491 if (status != VXGE_HW_OK)
1492 goto exit;
1493
1494 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1495
1496 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1497
1498 for (i = ETH_ALEN; i > 0; i--) {
1499 macaddr[i-1] = (u8)(data1 & 0xFF);
1500 data1 >>= 8;
1501
1502 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1503 data2 >>= 8;
1504 }
1505exit:
1506 return status;
1507}
1508
1509/**
1510 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
1511 * vpath
1512 * from MAC address table.
1513 * @vp: Vpath handle.
1514 * @macaddr: Next MAC address entry for this vpath in the list
1515 * @macaddr_mask: MAC address mask for macaddr
1516 *
1517 * Returns the next mac address and mac address mask in the list for this
1518 * vpath.
1519 * see also: vxge_hw_vpath_mac_addr_get
1520 *
1521 */
1522enum vxge_hw_status
1523vxge_hw_vpath_mac_addr_get_next(
1524 struct __vxge_hw_vpath_handle *vp,
1525 u8 (macaddr)[ETH_ALEN],
1526 u8 (macaddr_mask)[ETH_ALEN])
1527{
1528 u32 i;
1529 u64 data1 = 0ULL;
1530 u64 data2 = 0ULL;
1531 enum vxge_hw_status status = VXGE_HW_OK;
1532
1533 if (vp == NULL) {
1534 status = VXGE_HW_ERR_INVALID_HANDLE;
1535 goto exit;
1536 }
1537
1538 status = __vxge_hw_vpath_rts_table_get(vp,
1539 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1540 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1541 0, &data1, &data2);
1542
1543 if (status != VXGE_HW_OK)
1544 goto exit;
1545
1546 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1547
1548 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1549
1550 for (i = ETH_ALEN; i > 0; i--) {
1551 macaddr[i-1] = (u8)(data1 & 0xFF);
1552 data1 >>= 8;
1553
1554 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1555 data2 >>= 8;
1556 }
1557
1558exit:
1559 return status;
1560}
1561
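/*
 * Illustrative table walk: the first entry via _get(), then successive
 * entries via _get_next() until the firmware reports no more entries
 * (any status other than VXGE_HW_OK ends the walk).
 */
static void example_mac_table_walk(struct __vxge_hw_vpath_handle *vp)
{
        u8 mac[ETH_ALEN], mask[ETH_ALEN];
        enum vxge_hw_status status;

        status = vxge_hw_vpath_mac_addr_get(vp, mac, mask);
        while (status == VXGE_HW_OK) {
                /* ... consume mac/mask ... */
                status = vxge_hw_vpath_mac_addr_get_next(vp, mac, mask);
        }
}
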
1562/**
1563 * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
1564 * from the MAC address table.
1565 * @vp: Vpath handle.
1566 * @macaddr: MAC address to be deleted for this vpath from the list
1567 * @macaddr_mask: MAC address mask for macaddr
1568 *
1569 * Deletes the given mac address and mac address mask from the list for this
1570 * vpath.
1571 * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
1572 * vxge_hw_vpath_mac_addr_get_next
1573 *
1574 */
1575enum vxge_hw_status
1576vxge_hw_vpath_mac_addr_delete(
1577 struct __vxge_hw_vpath_handle *vp,
1578 u8 (macaddr)[ETH_ALEN],
1579 u8 (macaddr_mask)[ETH_ALEN])
1580{
1581 u32 i;
1582 u64 data1 = 0ULL;
1583 u64 data2 = 0ULL;
1584 enum vxge_hw_status status = VXGE_HW_OK;
1585
1586 if (vp == NULL) {
1587 status = VXGE_HW_ERR_INVALID_HANDLE;
1588 goto exit;
1589 }
1590
1591 for (i = 0; i < ETH_ALEN; i++) {
1592 data1 <<= 8;
1593 data1 |= (u8)macaddr[i];
1594
1595 data2 <<= 8;
1596 data2 |= (u8)macaddr_mask[i];
1597 }
1598
1599 status = __vxge_hw_vpath_rts_table_set(vp,
1600 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1601 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1602 0,
1603 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1604 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
1605exit:
1606 return status;
1607}
1608
1609/**
1610 * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
1611 * to vlan id table.
1612 * @vp: Vpath handle.
1613 * @vid: vlan id to be added for this vpath into the list
1614 *
1615 * Adds the given vlan id into the list for this vpath.
1616 * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
1617 * vxge_hw_vpath_vid_get_next
1618 *
1619 */
1620enum vxge_hw_status
1621vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
1622{
1623 enum vxge_hw_status status = VXGE_HW_OK;
1624
1625 if (vp == NULL) {
1626 status = VXGE_HW_ERR_INVALID_HANDLE;
1627 goto exit;
1628 }
1629
1630 status = __vxge_hw_vpath_rts_table_set(vp,
1631 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1632 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1633 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1634exit:
1635 return status;
1636}
1637
1638/**
1639 * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
1640 * from vlan id table.
1641 * @vp: Vpath handle.
1642 * @vid: Buffer to return vlan id
1643 *
1644 * Returns the first vlan id in the list for this vpath.
1645 * see also: vxge_hw_vpath_vid_get_next
1646 *
1647 */
1648enum vxge_hw_status
1649vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
1650{
1651 u64 data;
1652 enum vxge_hw_status status = VXGE_HW_OK;
1653
1654 if (vp == NULL) {
1655 status = VXGE_HW_ERR_INVALID_HANDLE;
1656 goto exit;
1657 }
1658
1659 status = __vxge_hw_vpath_rts_table_get(vp,
1660 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1661 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1662 0, vid, &data);
1663
1664 *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
1665exit:
1666 return status;
1667}
1668
1669/**
1670 * vxge_hw_vpath_vid_get_next - Get the next vid entry for this vpath
1671 * from vlan id table.
1672 * @vp: Vpath handle.
1673 * @vid: Buffer to return vlan id
1674 *
1675 * Returns the next vlan id in the list for this vpath.
1676 * see also: vxge_hw_vpath_vid_get
1677 *
1678 */
1679enum vxge_hw_status
1680vxge_hw_vpath_vid_get_next(struct __vxge_hw_vpath_handle *vp, u64 *vid)
1681{
1682 u64 data;
1683 enum vxge_hw_status status = VXGE_HW_OK;
1684
1685 if (vp == NULL) {
1686 status = VXGE_HW_ERR_INVALID_HANDLE;
1687 goto exit;
1688 }
1689
1690 status = __vxge_hw_vpath_rts_table_get(vp,
1691 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1692 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1693 0, vid, &data);
1694
1695 *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
1696exit:
1697 return status;
1698}
1699
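/*
 * Illustrative VLAN table walk, following the same first/next pattern
 * as the MAC address walk above.
 */
static void example_vid_table_walk(struct __vxge_hw_vpath_handle *vp)
{
        u64 vid;

        if (vxge_hw_vpath_vid_get(vp, &vid) != VXGE_HW_OK)
                return;
        do {
                /* ... consume vid ... */
        } while (vxge_hw_vpath_vid_get_next(vp, &vid) == VXGE_HW_OK);
}
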
1700/**
1701 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
1702 * from the vlan id table.
1703 * @vp: Vpath handle.
1704 * @vid: vlan id to be deleted for this vpath from the list
1705 *
1706 * Deletes the given vlan id from the list for this vpath.
1707 * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
1708 * vxge_hw_vpath_vid_get_next
1709 *
1710 */
1711enum vxge_hw_status
1712vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
1713{
1714 enum vxge_hw_status status = VXGE_HW_OK;
1715
1716 if (vp == NULL) {
1717 status = VXGE_HW_ERR_INVALID_HANDLE;
1718 goto exit;
1719 }
1720
1721 status = __vxge_hw_vpath_rts_table_set(vp,
1722 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1723 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1724 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1725exit:
1726 return status;
1727}
1728
1729/**
1730 * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
1731 * @vp: Vpath handle.
1732 *
1733 * Enable promiscuous mode of Titan-e operation.
1734 *
1735 * See also: vxge_hw_vpath_promisc_disable().
1736 */
1737enum vxge_hw_status vxge_hw_vpath_promisc_enable(
1738 struct __vxge_hw_vpath_handle *vp)
1739{
1740 u64 val64;
1741 struct __vxge_hw_virtualpath *vpath;
1742 enum vxge_hw_status status = VXGE_HW_OK;
1743
1744 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1745 status = VXGE_HW_ERR_INVALID_HANDLE;
1746 goto exit;
1747 }
1748
1749 vpath = vp->vpath;
1750
1751	/* Enable promiscuous mode for function 0 only */
1752 if (!(vpath->hldev->access_rights &
1753 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
1754 return VXGE_HW_OK;
1755
1756 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1757
1758 if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
1759
1760 val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
1761 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
1762 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
1763 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
1764
1765 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1766 }
1767exit:
1768 return status;
1769}
1770
1771/**
1772 * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
1773 * @vp: Vpath handle.
1774 *
1775 * Disable promiscuous mode of Titan-e operation.
1776 *
1777 * See also: vxge_hw_vpath_promisc_enable().
1778 */
1779enum vxge_hw_status vxge_hw_vpath_promisc_disable(
1780 struct __vxge_hw_vpath_handle *vp)
1781{
1782 u64 val64;
1783 struct __vxge_hw_virtualpath *vpath;
1784 enum vxge_hw_status status = VXGE_HW_OK;
1785
1786 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1787 status = VXGE_HW_ERR_INVALID_HANDLE;
1788 goto exit;
1789 }
1790
1791 vpath = vp->vpath;
1792
1793 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1794
1795 if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
1796
1797 val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
1798 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
1799 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
1800
1801 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1802 }
1803exit:
1804 return status;
1805}
1806
1807/*
1808 * vxge_hw_vpath_bcast_enable - Enable broadcast
1809 * @vp: Vpath handle.
1810 *
1811 * Enable receiving broadcasts.
1812 */
1813enum vxge_hw_status vxge_hw_vpath_bcast_enable(
1814 struct __vxge_hw_vpath_handle *vp)
1815{
1816 u64 val64;
1817 struct __vxge_hw_virtualpath *vpath;
1818 enum vxge_hw_status status = VXGE_HW_OK;
1819
1820 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1821 status = VXGE_HW_ERR_INVALID_HANDLE;
1822 goto exit;
1823 }
1824
1825 vpath = vp->vpath;
1826
1827 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1828
1829 if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
1830 val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
1831 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1832 }
1833exit:
1834 return status;
1835}
1836
1837/**
1838 * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
1839 * @vp: Vpath handle.
1840 *
1841 * Enable Titan-e multicast addresses.
1842 * Returns: VXGE_HW_OK on success.
1843 *
1844 */
1845enum vxge_hw_status vxge_hw_vpath_mcast_enable(
1846 struct __vxge_hw_vpath_handle *vp)
1847{
1848 u64 val64;
1849 struct __vxge_hw_virtualpath *vpath;
1850 enum vxge_hw_status status = VXGE_HW_OK;
1851
1852 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1853 status = VXGE_HW_ERR_INVALID_HANDLE;
1854 goto exit;
1855 }
1856
1857 vpath = vp->vpath;
1858
1859 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1860
1861 if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
1862 val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
1863 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1864 }
1865exit:
1866 return status;
1867}
1868
1869/**
1870 * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
1871 * @vp: Vpath handle.
1872 *
1873 * Disable Titan-e multicast addresses.
1874 * Returns: VXGE_HW_OK - success.
1875 * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
1876 *
1877 */
1878enum vxge_hw_status
1879vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
1880{
1881 u64 val64;
1882 struct __vxge_hw_virtualpath *vpath;
1883 enum vxge_hw_status status = VXGE_HW_OK;
1884
1885 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1886 status = VXGE_HW_ERR_INVALID_HANDLE;
1887 goto exit;
1888 }
1889
1890 vpath = vp->vpath;
1891
1892 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1893
1894 if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
1895 val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
1896 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1897 }
1898exit:
1899 return status;
1900}
1901
1902/*
1903 * __vxge_hw_vpath_alarm_process - Process Alarms.
1904 * @vpath: Virtual Path.
1905 * @skip_alarms: Do not clear the alarms
1906 *
1907 * Process vpath alarms.
1908 *
1909 */
1910enum vxge_hw_status __vxge_hw_vpath_alarm_process(
1911 struct __vxge_hw_virtualpath *vpath,
1912 u32 skip_alarms)
1913{
1914 u64 val64;
1915 u64 alarm_status;
1916 u64 pic_status;
1917 struct __vxge_hw_device *hldev = NULL;
1918 enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
1919 u64 mask64;
1920 struct vxge_hw_vpath_stats_sw_info *sw_stats;
1921 struct vxge_hw_vpath_reg __iomem *vp_reg;
1922
1923 if (vpath == NULL) {
1924 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
1925 alarm_event);
1926 goto out;
1927 }
1928
1929 hldev = vpath->hldev;
1930 vp_reg = vpath->vp_reg;
1931 alarm_status = readq(&vp_reg->vpath_general_int_status);
1932
1933 if (alarm_status == VXGE_HW_ALL_FOXES) {
1934 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
1935 alarm_event);
1936 goto out;
1937 }
1938
1939 sw_stats = vpath->sw_stats;
1940
1941 if (alarm_status & ~(
1942 VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
1943 VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
1944 VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
1945 VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
1946 sw_stats->error_stats.unknown_alarms++;
1947
1948 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
1949 alarm_event);
1950 goto out;
1951 }
1952
1953 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
1954
1955 val64 = readq(&vp_reg->xgmac_vp_int_status);
1956
1957 if (val64 &
1958 VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
1959
1960 val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
1961
1962 if (((val64 &
1963 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
1964 (!(val64 &
1965 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
1966 ((val64 &
1967 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
1968 && (!(val64 &
1969 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
1970 ))) {
1971 sw_stats->error_stats.network_sustained_fault++;
1972
1973 writeq(
1974 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
1975 &vp_reg->asic_ntwk_vp_err_mask);
1976
1977 __vxge_hw_device_handle_link_down_ind(hldev);
1978 alarm_event = VXGE_HW_SET_LEVEL(
1979 VXGE_HW_EVENT_LINK_DOWN, alarm_event);
1980 }
1981
1982 if (((val64 &
1983 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
1984 (!(val64 &
1985 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
1986 ((val64 &
1987 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
1988 && (!(val64 &
1989 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
1990 ))) {
1991
1992 sw_stats->error_stats.network_sustained_ok++;
1993
1994 writeq(
1995 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
1996 &vp_reg->asic_ntwk_vp_err_mask);
1997
1998 __vxge_hw_device_handle_link_up_ind(hldev);
1999 alarm_event = VXGE_HW_SET_LEVEL(
2000 VXGE_HW_EVENT_LINK_UP, alarm_event);
2001 }
2002
2003 writeq(VXGE_HW_INTR_MASK_ALL,
2004 &vp_reg->asic_ntwk_vp_err_reg);
2005
2006 alarm_event = VXGE_HW_SET_LEVEL(
2007 VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
2008
2009 if (skip_alarms)
2010 return VXGE_HW_OK;
2011 }
2012 }
2013
2014 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
2015
2016 pic_status = readq(&vp_reg->vpath_ppif_int_status);
2017
2018 if (pic_status &
2019 VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
2020
2021 val64 = readq(&vp_reg->general_errors_reg);
2022 mask64 = readq(&vp_reg->general_errors_mask);
2023
2024 if ((val64 &
2025 VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
2026 ~mask64) {
2027 sw_stats->error_stats.ini_serr_det++;
2028
2029 alarm_event = VXGE_HW_SET_LEVEL(
2030 VXGE_HW_EVENT_SERR, alarm_event);
2031 }
2032
2033 if ((val64 &
2034 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
2035 ~mask64) {
2036 sw_stats->error_stats.dblgen_fifo0_overflow++;
2037
2038 alarm_event = VXGE_HW_SET_LEVEL(
2039 VXGE_HW_EVENT_FIFO_ERR, alarm_event);
2040 }
2041
2042 if ((val64 &
2043 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
2044 ~mask64)
2045 sw_stats->error_stats.statsb_pif_chain_error++;
2046
2047 if ((val64 &
2048 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
2049 ~mask64)
2050 sw_stats->error_stats.statsb_drop_timeout++;
2051
2052 if ((val64 &
2053 VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
2054 ~mask64)
2055 sw_stats->error_stats.target_illegal_access++;
2056
2057 if (!skip_alarms) {
2058 writeq(VXGE_HW_INTR_MASK_ALL,
2059 &vp_reg->general_errors_reg);
2060 alarm_event = VXGE_HW_SET_LEVEL(
2061 VXGE_HW_EVENT_ALARM_CLEARED,
2062 alarm_event);
2063 }
2064 }
2065
2066 if (pic_status &
2067 VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
2068
2069 val64 = readq(&vp_reg->kdfcctl_errors_reg);
2070 mask64 = readq(&vp_reg->kdfcctl_errors_mask);
2071
2072 if ((val64 &
2073 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
2074 ~mask64) {
2075 sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
2076
2077 alarm_event = VXGE_HW_SET_LEVEL(
2078 VXGE_HW_EVENT_FIFO_ERR,
2079 alarm_event);
2080 }
2081
2082 if ((val64 &
2083 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
2084 ~mask64) {
2085 sw_stats->error_stats.kdfcctl_fifo0_poison++;
2086
2087 alarm_event = VXGE_HW_SET_LEVEL(
2088 VXGE_HW_EVENT_FIFO_ERR,
2089 alarm_event);
2090 }
2091
2092 if ((val64 &
2093 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
2094 ~mask64) {
2095 sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
2096
2097 alarm_event = VXGE_HW_SET_LEVEL(
2098 VXGE_HW_EVENT_FIFO_ERR,
2099 alarm_event);
2100 }
2101
2102 if (!skip_alarms) {
2103 writeq(VXGE_HW_INTR_MASK_ALL,
2104 &vp_reg->kdfcctl_errors_reg);
2105 alarm_event = VXGE_HW_SET_LEVEL(
2106 VXGE_HW_EVENT_ALARM_CLEARED,
2107 alarm_event);
2108 }
2109 }
2110
2111 }
2112
2113 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
2114
2115 val64 = readq(&vp_reg->wrdma_alarm_status);
2116
2117 if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
2118
2119 val64 = readq(&vp_reg->prc_alarm_reg);
2120 mask64 = readq(&vp_reg->prc_alarm_mask);
2121
2122 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
2123 ~mask64)
2124 sw_stats->error_stats.prc_ring_bumps++;
2125
2126 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
2127 ~mask64) {
2128 sw_stats->error_stats.prc_rxdcm_sc_err++;
2129
2130 alarm_event = VXGE_HW_SET_LEVEL(
2131 VXGE_HW_EVENT_VPATH_ERR,
2132 alarm_event);
2133 }
2134
2135 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
2136 & ~mask64) {
2137 sw_stats->error_stats.prc_rxdcm_sc_abort++;
2138
2139 alarm_event = VXGE_HW_SET_LEVEL(
2140 VXGE_HW_EVENT_VPATH_ERR,
2141 alarm_event);
2142 }
2143
2144 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
2145 & ~mask64) {
2146 sw_stats->error_stats.prc_quanta_size_err++;
2147
2148 alarm_event = VXGE_HW_SET_LEVEL(
2149 VXGE_HW_EVENT_VPATH_ERR,
2150 alarm_event);
2151 }
2152
2153 if (!skip_alarms) {
2154 writeq(VXGE_HW_INTR_MASK_ALL,
2155 &vp_reg->prc_alarm_reg);
2156 alarm_event = VXGE_HW_SET_LEVEL(
2157 VXGE_HW_EVENT_ALARM_CLEARED,
2158 alarm_event);
2159 }
2160 }
2161 }
2162out:
2163	/* hldev is still NULL when vpath was NULL above; guard the access */
2164	if (hldev)
2165		hldev->stats.sw_dev_err_stats.vpath_alarms++;
2164
2165 if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
2166 (alarm_event == VXGE_HW_EVENT_UNKNOWN))
2167 return VXGE_HW_OK;
2168
2169 __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
2170
2171 if (alarm_event == VXGE_HW_EVENT_SERR)
2172 return VXGE_HW_ERR_CRITICAL;
2173
2174 return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
2175 VXGE_HW_ERR_SLOT_FREEZE :
2176 (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
2177 VXGE_HW_ERR_VPATH;
2178}
2179
2180/*
2181 * vxge_hw_vpath_alarm_process - Process Alarms.
2182 * @vpath: Virtual Path.
2183 * @skip_alarms: Do not clear the alarms
2184 *
2185 * Process vpath alarms.
2186 *
2187 */
2188enum vxge_hw_status vxge_hw_vpath_alarm_process(
2189 struct __vxge_hw_vpath_handle *vp,
2190 u32 skip_alarms)
2191{
2192 enum vxge_hw_status status = VXGE_HW_OK;
2193
2194 if (vp == NULL) {
2195 status = VXGE_HW_ERR_INVALID_HANDLE;
2196 goto exit;
2197 }
2198
2199 status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
2200exit:
2201 return status;
2202}
2203
2204/**
2205 * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
2206 * alarms
2207 * @vp: Virtual Path handle.
2208 * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
2209 * interrupts (can be repeated). If the fifo or ring is not enabled,
2210 * the MSIX vector for it should be set to 0
2211 * @alarm_msix_id: MSIX vector for alarm.
2212 *
2213 * This API associates the given MSIX vector numbers with the four TIM
2214 * interrupts and the alarm interrupt.
2215 */
2216enum vxge_hw_status
2217vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2218 int alarm_msix_id)
2219{
2220 u64 val64;
2221 struct __vxge_hw_virtualpath *vpath = vp->vpath;
2222 struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2223 u32 first_vp_id = vpath->hldev->first_vp_id;
2224
2225 val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
2226 (first_vp_id * 4) + tim_msix_id[0]) |
2227 VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
2228 (first_vp_id * 4) + tim_msix_id[1]) |
2229 VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(
2230 (first_vp_id * 4) + tim_msix_id[2]);
2231
2232 val64 |= VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(
2233 (first_vp_id * 4) + tim_msix_id[3]);
2234
2235 writeq(val64, &vp_reg->interrupt_cfg0);
2236
2237 writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
2238 (first_vp_id * 4) + alarm_msix_id),
2239 &vp_reg->interrupt_cfg2);
2240
2241	if (vpath->hldev->config.intr_mode ==
2242			VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2243		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2244				VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
2245				0, 32), &vp_reg->one_shot_vect1_en);
2246
2247		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2248				VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
2249				0, 32), &vp_reg->one_shot_vect2_en);
2250
2251		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2252				VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
2253				0, 32), &vp_reg->one_shot_vect3_en);
2254	}
2258
2259 return VXGE_HW_OK;
2260}
2261
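/*
 * Illustrative vector layout (the assignments are hypothetical): TIM
 * slot 0 (tx) -> local vector 0, slot 1 (rx) -> vector 1, the two
 * unused TIM slots -> 0 as the doc above requires, alarm -> vector 2.
 */
static enum vxge_hw_status
example_msix_wire_up(struct __vxge_hw_vpath_handle *vp)
{
        int tim_msix_id[VXGE_HW_MAX_INTR_PER_VP] = { 0, 1, 0, 0 };

        return vxge_hw_vpath_msix_set(vp, tim_msix_id, 2);
}
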
2262/**
2263 * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
2264 * @vp: Virtual Path handle.
2265 * @msix_id: MSIX ID
2266 *
2267 * The function masks the msix interrupt for the given msix_id
2268 *
2269 * Returns: none.
2270 * See also: vxge_hw_vpath_msix_unmask().
2273 */
2274void
2275vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2276{
2277 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2278 __vxge_hw_pio_mem_write32_upper(
2279 (u32) vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
2280 (msix_id / 4)), 0, 32),
2281 &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
2282
2283 return;
2284}
2285
2286/**
2287 * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
2288 * @vp: Virtual Path handle.
2289 * @msix_id: MSIX ID
2290 *
2291 * The function clears the msix interrupt for the given msix_id
2292 *
2293 * Returns: none.
2294 * See also: vxge_hw_vpath_msix_mask().
2297 */
2298void
2299vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
2300{
2301 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2302 if (hldev->config.intr_mode ==
2303 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2304 __vxge_hw_pio_mem_write32_upper(
2305 (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
2306 (msix_id/4)), 0, 32),
2307 &hldev->common_reg->
2308 clr_msix_one_shot_vec[msix_id%4]);
2309 } else {
2310 __vxge_hw_pio_mem_write32_upper(
2311 (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
2312 (msix_id/4)), 0, 32),
2313 &hldev->common_reg->
2314 clear_msix_mask_vect[msix_id%4]);
2315 }
2316
2317 return;
2318}
2319
2320/**
2321 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
2322 * @vp: Virtual Path handle.
2323 * @msix_id: MSIX ID
2324 *
2325 * The function unmasks the msix interrupt for the given msix_id
2326 *
2327 * Returns: none.
2328 * See also: vxge_hw_vpath_msix_mask().
2331 */
2332void
2333vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2334{
2335 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2336 __vxge_hw_pio_mem_write32_upper(
2337 (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
2338 (msix_id/4)), 0, 32),
2339 &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
2340
2341 return;
2342}
2343
2344/**
2345 * vxge_hw_vpath_msix_mask_all - Mask all MSIX vectors for the vpath.
2346 * @vp: Virtual Path handle.
2347 *
2348 * The function masks all msix interrupts for the given vpath
2349 *
2350 */
2351void
2352vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
2353{
2354
2355 __vxge_hw_pio_mem_write32_upper(
2356 (u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
2357 &vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
2358
2359 return;
2360}
2361
2362/**
2363 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
2364 * @vp: Virtual Path handle.
2365 *
2366 * Mask Tx and Rx vpath interrupts.
2367 *
2368 * See also: vxge_hw_vpath_inta_unmask_tx_rx()
2369 */
2370void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2371{
2372 u64 tim_int_mask0[4] = {[0 ...3] = 0};
2373 u32 tim_int_mask1[4] = {[0 ...3] = 0};
2374 u64 val64;
2375 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2376
2377 VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2378 tim_int_mask1, vp->vpath->vp_id);
2379
2380 val64 = readq(&hldev->common_reg->tim_int_mask0);
2381
2382 if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2383 (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2384 writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2385 tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
2386 &hldev->common_reg->tim_int_mask0);
2387 }
2388
2389 val64 = readl(&hldev->common_reg->tim_int_mask1);
2390
2391 if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2392 (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2393 __vxge_hw_pio_mem_write32_upper(
2394 (tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2395 tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
2396 &hldev->common_reg->tim_int_mask1);
2397 }
2398
2399 return;
2400}
2401
2402/**
2403 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
2404 * @vp: Virtual Path handle.
2405 *
2406 * Unmask Tx and Rx vpath interrupts.
2407 *
2408 * See also: vxge_hw_vpath_inta_mask_tx_rx()
2409 */
2410void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2411{
2412 u64 tim_int_mask0[4] = {[0 ...3] = 0};
2413 u32 tim_int_mask1[4] = {[0 ...3] = 0};
2414 u64 val64;
2415 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2416
2417 VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2418 tim_int_mask1, vp->vpath->vp_id);
2419
2420 val64 = readq(&hldev->common_reg->tim_int_mask0);
2421
2422 if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2423 (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2424 writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2425 tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
2426 &hldev->common_reg->tim_int_mask0);
2427 }
2428
2429	val64 = readl(&hldev->common_reg->tim_int_mask1);
2430
2431	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2430 (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2431 __vxge_hw_pio_mem_write32_upper(
2432 (~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2433 tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
2434 &hldev->common_reg->tim_int_mask1);
2435 }
2436
2437 return;
2438}
2439
2440/**
2441 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
2442 * descriptors and process the same.
2443 * @ring: Handle to the ring object used for receive
2444 *
2445 * The function polls the Rx for the completed descriptors and calls
2446 * the driver via the supplied completion callback.
2447 *
2448 * Returns: VXGE_HW_OK, if the polling is completed successfully.
2449 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2450 * descriptors available which are yet to be processed.
2451 *
2452 * See also: vxge_hw_vpath_poll_tx()
2453 */
2454enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
2455{
2456 u8 t_code;
2457 enum vxge_hw_status status = VXGE_HW_OK;
2458 void *first_rxdh;
2459 u64 val64 = 0;
2460 int new_count = 0;
2461
2462 ring->cmpl_cnt = 0;
2463
2464 status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
2465 if (status == VXGE_HW_OK)
2466 ring->callback(ring, first_rxdh,
2467 t_code, ring->channel.userdata);
2468
2469 if (ring->cmpl_cnt != 0) {
2470 ring->doorbell_cnt += ring->cmpl_cnt;
2471 if (ring->doorbell_cnt >= ring->rxds_limit) {
2472 /*
2473 * Each RxD is of 4 qwords, update the number of
2474 * qwords replenished
2475 */
2476 new_count = (ring->doorbell_cnt * 4);
2477
2478 /* For each block add 4 more qwords */
2479 ring->total_db_cnt += ring->doorbell_cnt;
2480 if (ring->total_db_cnt >= ring->rxds_per_block) {
2481 new_count += 4;
2482 /* Reset total count */
2483 ring->total_db_cnt %= ring->rxds_per_block;
2484 }
2485 writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
2486 &ring->vp_reg->prc_rxd_doorbell);
2487 val64 =
2488 readl(&ring->common_reg->titan_general_int_status);
2489 ring->doorbell_cnt = 0;
2490 }
2491 }
2492
2493 return status;
2494}
2495
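/*
 * Illustrative caller, e.g. the body of a NAPI poll handler
 * (hypothetical context). poll_rx() invokes the ring's completion
 * callback internally and replenishes the doorbell as shown above.
 */
static int example_rx_poll_done(struct __vxge_hw_ring *ring)
{
        /* non-VXGE_HW_OK means completed RxDs are still pending */
        return vxge_hw_vpath_poll_rx(ring) == VXGE_HW_OK;
}
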
2496/**
2497 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
2498 * the same.
2499 * @fifo: Handle to the fifo object used for non offload send
2500 *
2501 * The function polls the Tx for the completed descriptors and calls
2502 * the driver via the supplied completion callback.
2503 *
2504 * Returns: VXGE_HW_OK, if the polling is completed successfully.
2505 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2506 * descriptors available which are yet to be processed.
2507 *
2508 * See also: vxge_hw_vpath_poll_rx().
2509 */
2510enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
2511 void **skb_ptr)
2512{
2513 enum vxge_hw_fifo_tcode t_code;
2514 void *first_txdlh;
2515 enum vxge_hw_status status = VXGE_HW_OK;
2516 struct __vxge_hw_channel *channel;
2517
2518 channel = &fifo->channel;
2519
2520 status = vxge_hw_fifo_txdl_next_completed(fifo,
2521 &first_txdlh, &t_code);
2522 if (status == VXGE_HW_OK)
2523 if (fifo->callback(fifo, first_txdlh,
2524 t_code, channel->userdata, skb_ptr) != VXGE_HW_OK)
2525 status = VXGE_HW_COMPLETIONS_REMAIN;
2526
2527 return status;
2528}
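
/*
 * Illustrative caller: keep polling while the completion callback
 * reports more work, then release the skbs it chained onto skb_head
 * (the chaining contract belongs to the callback and is hypothetical
 * here).
 */
static void example_tx_poll(struct __vxge_hw_fifo *fifo)
{
        void *skb_head = NULL;

        while (vxge_hw_vpath_poll_tx(fifo, &skb_head) ==
               VXGE_HW_COMPLETIONS_REMAIN)
                ;
        /* ... free the completed skbs collected in skb_head ... */
}
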
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
new file mode 100644
index 000000000000..7567a1140d07
--- /dev/null
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -0,0 +1,2409 @@
1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-traffic.h: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2009 Neterion Inc.
13 ******************************************************************************/
14#ifndef VXGE_TRAFFIC_H
15#define VXGE_TRAFFIC_H
16
17#include "vxge-reg.h"
18#include "vxge-version.h"
19
20#define VXGE_HW_DTR_MAX_T_CODE 16
21#define VXGE_HW_ALL_FOXES 0xFFFFFFFFFFFFFFFFULL
22#define VXGE_HW_INTR_MASK_ALL 0xFFFFFFFFFFFFFFFFULL
23#define VXGE_HW_MAX_VIRTUAL_PATHS 17
24
25#define VXGE_HW_MAC_MAX_MAC_PORT_ID 2
26
27#define VXGE_HW_DEFAULT_32 0xffffffff
28/* frames sizes */
29#define VXGE_HW_HEADER_802_2_SIZE 3
30#define VXGE_HW_HEADER_SNAP_SIZE 5
31#define VXGE_HW_HEADER_VLAN_SIZE 4
32#define VXGE_HW_MAC_HEADER_MAX_SIZE \
33 (ETH_HLEN + \
34 VXGE_HW_HEADER_802_2_SIZE + \
35 VXGE_HW_HEADER_VLAN_SIZE + \
36 VXGE_HW_HEADER_SNAP_SIZE)
37
38#define VXGE_HW_TCPIP_HEADER_MAX_SIZE (64 + 64)
39
40/* 32bit alignments */
41#define VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN 2
42#define VXGE_HW_HEADER_802_2_SNAP_ALIGN 2
43#define VXGE_HW_HEADER_802_2_ALIGN 3
44#define VXGE_HW_HEADER_SNAP_ALIGN 1
45
46#define VXGE_HW_L3_CKSUM_OK 0xFFFF
47#define VXGE_HW_L4_CKSUM_OK 0xFFFF
48
49/* Forward declarations */
50struct __vxge_hw_device;
51struct __vxge_hw_vpath_handle;
52struct vxge_hw_vp_config;
53struct __vxge_hw_virtualpath;
54struct __vxge_hw_channel;
55struct __vxge_hw_fifo;
56struct __vxge_hw_ring;
57struct vxge_hw_ring_attr;
58struct vxge_hw_mempool;
59
60#ifndef TRUE
61#define TRUE 1
62#endif
63
64#ifndef FALSE
65#define FALSE 0
66#endif
67
68/*VXGE_HW_STATUS_H*/
69
70#define VXGE_HW_EVENT_BASE 0
71#define VXGE_LL_EVENT_BASE 100
72
73/**
74 * enum vxge_hw_event- Enumerates slow-path HW events.
75 * @VXGE_HW_EVENT_UNKNOWN: Unknown (and invalid) event.
76 * @VXGE_HW_EVENT_SERR: Serious vpath hardware error event.
77 * @VXGE_HW_EVENT_ECCERR: vpath ECC error event.
78 * @VXGE_HW_EVENT_VPATH_ERR: Error local to the respective vpath
79 * @VXGE_HW_EVENT_FIFO_ERR: FIFO Doorbell fifo error.
80 * @VXGE_HW_EVENT_SRPCIM_SERR: srpcim hardware error event.
81 * @VXGE_HW_EVENT_MRPCIM_SERR: mrpcim hardware error event.
82 * @VXGE_HW_EVENT_MRPCIM_ECCERR: mrpcim ecc error event.
83 * @VXGE_HW_EVENT_RESET_START: Privileged entity is starting device reset
84 * @VXGE_HW_EVENT_RESET_COMPLETE: Device reset has been completed
85 * @VXGE_HW_EVENT_SLOT_FREEZE: Slot-freeze event. Driver tries to distinguish
 86 * slot-freeze from the rest of the critical events (e.g. ECC) when it is
87 * impossible to PIO read "through" the bus, i.e. when getting all-foxes.
88 *
 89 * enum vxge_hw_event enumerates slow-path HW events.
90 *
91 * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{},
92 * vxge_uld_link_down_f{}.
93 */
94enum vxge_hw_event {
95 VXGE_HW_EVENT_UNKNOWN = 0,
96 /* HW events */
97 VXGE_HW_EVENT_RESET_START = VXGE_HW_EVENT_BASE + 1,
98 VXGE_HW_EVENT_RESET_COMPLETE = VXGE_HW_EVENT_BASE + 2,
99 VXGE_HW_EVENT_LINK_DOWN = VXGE_HW_EVENT_BASE + 3,
100 VXGE_HW_EVENT_LINK_UP = VXGE_HW_EVENT_BASE + 4,
101 VXGE_HW_EVENT_ALARM_CLEARED = VXGE_HW_EVENT_BASE + 5,
102 VXGE_HW_EVENT_ECCERR = VXGE_HW_EVENT_BASE + 6,
103 VXGE_HW_EVENT_MRPCIM_ECCERR = VXGE_HW_EVENT_BASE + 7,
104 VXGE_HW_EVENT_FIFO_ERR = VXGE_HW_EVENT_BASE + 8,
105 VXGE_HW_EVENT_VPATH_ERR = VXGE_HW_EVENT_BASE + 9,
106 VXGE_HW_EVENT_CRITICAL_ERR = VXGE_HW_EVENT_BASE + 10,
107 VXGE_HW_EVENT_SERR = VXGE_HW_EVENT_BASE + 11,
108 VXGE_HW_EVENT_SRPCIM_SERR = VXGE_HW_EVENT_BASE + 12,
109 VXGE_HW_EVENT_MRPCIM_SERR = VXGE_HW_EVENT_BASE + 13,
110 VXGE_HW_EVENT_SLOT_FREEZE = VXGE_HW_EVENT_BASE + 14,
111};
112
113#define VXGE_HW_SET_LEVEL(a, b) (((a) > (b)) ? (a) : (b))
114
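/*
 * Note (illustrative): the event codes above are ordered so that the
 * more critical event carries the larger value, which is what lets
 * VXGE_HW_SET_LEVEL() keep the more severe of two events; e.g.
 * VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_FIFO_ERR, VXGE_HW_EVENT_LINK_DOWN)
 * evaluates to VXGE_HW_EVENT_FIFO_ERR (base + 8 vs. base + 3).
 */
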
115/*
116 * struct vxge_hw_mempool_dma - Represents DMA objects passed to the
117 caller.
118 */
119struct vxge_hw_mempool_dma {
120 dma_addr_t addr;
121 struct pci_dev *handle;
122 struct pci_dev *acc_handle;
123};
124
125/*
126 * vxge_hw_mempool_item_f - Mempool item alloc/free callback
127 * @mempoolh: Memory pool handle.
128 * @memblock: Address of memory block
129 * @memblock_index: Index of memory block
130 * @item: Item that gets allocated or freed.
131 * @index: Item's index in the memory pool.
132 * @is_last: True, if this item is the last one in the pool; false - otherwise.
133 * @userdata: Per-pool user context.
134 *
135 * Memory pool allocation/deallocation callback.
136 */
137
138/*
139 * struct vxge_hw_mempool - Memory pool.
140 */
141struct vxge_hw_mempool {
142
143 void (*item_func_alloc)(
144 struct vxge_hw_mempool *mempoolh,
145 u32 memblock_index,
146 struct vxge_hw_mempool_dma *dma_object,
147 u32 index,
148 u32 is_last);
149
150 void *userdata;
151 void **memblocks_arr;
152 void **memblocks_priv_arr;
153 struct vxge_hw_mempool_dma *memblocks_dma_arr;
154 struct __vxge_hw_device *devh;
155 u32 memblock_size;
156 u32 memblocks_max;
157 u32 memblocks_allocated;
158 u32 item_size;
159 u32 items_max;
160 u32 items_initial;
161 u32 items_current;
162 u32 items_per_memblock;
163 void **items_arr;
164 u32 items_priv_size;
165};
166
167#define VXGE_HW_MAX_INTR_PER_VP 4
168#define VXGE_HW_VPATH_INTR_TX 0
169#define VXGE_HW_VPATH_INTR_RX 1
170#define VXGE_HW_VPATH_INTR_EINTA 2
171#define VXGE_HW_VPATH_INTR_BMAP 3
172
173#define VXGE_HW_BLOCK_SIZE 4096
174
175/**
176 * struct vxge_hw_tim_intr_config - Titan Tim interrupt configuration.
177 * @intr_enable: Set to 1, if interrupt is enabled.
178 * @btimer_val: Boundary Timer Initialization value in units of 272 ns.
179 * @timer_ac_en: Timer Automatic Cancel. 1 : Automatic Canceling Enable: when
180 * asserted, other interrupt-generating entities will cancel the
181 * scheduled timer interrupt.
182 * @timer_ci_en: Timer Continuous Interrupt. 1 : Continuous Interrupting Enable:
183 * When asserted, an interrupt will be generated every time the
184 * boundary timer expires, even if no traffic has been transmitted
185 * on this interrupt.
186 * @timer_ri_en: Timer Consecutive (Re-) Interrupt 1 : Consecutive
187 * (Re-) Interrupt Enable: When asserted, an interrupt will be
188 * generated the next time the timer expires, even if no traffic has
189 * been transmitted on this interrupt. (This will only happen once
190 * each time that this value is written to the TIM.) This bit is
191 * cleared by H/W at the end of the current-timer-interval when
192 * the interrupt is triggered.
193 * @rtimer_val: Restriction Timer Initialization value in units of 272 ns.
194 * @util_sel: Utilization Selector. Selects which of the workload approximations
195 * to use (e.g. legacy Tx utilization, Tx/Rx utilization, host
196 * specified utilization etc.), selects one of
197 * the 17 host configured values.
198 * 0-Virtual Path 0
199 * 1-Virtual Path 1
200 * ...
201 *             16-Virtual Path 16
202 * 17-Legacy Tx network utilization, provided by TPA
203 * 18-Legacy Rx network utilization, provided by FAU
204 * 19-Average of legacy Rx and Tx utilization calculated from link
205 * utilization values.
206 * 20-31-Invalid configurations
207 * 32-Host utilization for Virtual Path 0
208 * 33-Host utilization for Virtual Path 1
209 * ...
210 *             48-Host utilization for Virtual Path 16
211 * 49-Legacy Tx network utilization, provided by TPA
212 * 50-Legacy Rx network utilization, provided by FAU
213 * 51-Average of legacy Rx and Tx utilization calculated from
214 * link utilization values.
215 * 52-63-Invalid configurations
216 * @ltimer_val: Latency Timer Initialization Value in units of 272 ns.
217 * @txd_cnt_en: TxD Return Event Count Enable. This configuration bit when set
218 * to 1 enables counting of TxD0 returns (signalled by PCC's),
219 * towards utilization event count values.
220 * @urange_a: Defines the upper limit (in percent) for this utilization range
221 * to be active. This range is considered active
222 *             if 0 <= UTIL <= URNG_A
223 * and the UEC_A field (below) is non-zero.
224 * @uec_a: Utilization Event Count A. If this range is active, the adapter will
225 * wait until UEC_A events have occurred on the interrupt before
226 * generating an interrupt.
227 * @urange_b: Link utilization range B.
228 * @uec_b: Utilization Event Count B.
229 * @urange_c: Link utilization range C.
230 * @uec_c: Utilization Event Count C.
231 * @urange_d: Link utilization range D.
232 * @uec_d: Utilization Event Count D.
 * Traffic Interrupt Controller Module interrupt configuration.
 */
struct vxge_hw_tim_intr_config {

	u32 intr_enable;
#define VXGE_HW_TIM_INTR_ENABLE 1
#define VXGE_HW_TIM_INTR_DISABLE 0
#define VXGE_HW_TIM_INTR_DEFAULT 0

	u32 btimer_val;
#define VXGE_HW_MIN_TIM_BTIMER_VAL 0
#define VXGE_HW_MAX_TIM_BTIMER_VAL 67108864
#define VXGE_HW_USE_FLASH_DEFAULT 0xffffffff

	u32 timer_ac_en;
#define VXGE_HW_TIM_TIMER_AC_ENABLE 1
#define VXGE_HW_TIM_TIMER_AC_DISABLE 0

	u32 timer_ci_en;
#define VXGE_HW_TIM_TIMER_CI_ENABLE 1
#define VXGE_HW_TIM_TIMER_CI_DISABLE 0

	u32 timer_ri_en;
#define VXGE_HW_TIM_TIMER_RI_ENABLE 1
#define VXGE_HW_TIM_TIMER_RI_DISABLE 0

	u32 rtimer_val;
#define VXGE_HW_MIN_TIM_RTIMER_VAL 0
#define VXGE_HW_MAX_TIM_RTIMER_VAL 67108864

	u32 util_sel;
#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL 17
#define VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL 18
#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_RX_AVE_NET_UTIL 19
#define VXGE_HW_TIM_UTIL_SEL_PER_VPATH 63

	u32 ltimer_val;
#define VXGE_HW_MIN_TIM_LTIMER_VAL 0
#define VXGE_HW_MAX_TIM_LTIMER_VAL 67108864

	/* Line utilization interrupts */
	u32 urange_a;
#define VXGE_HW_MIN_TIM_URANGE_A 0
#define VXGE_HW_MAX_TIM_URANGE_A 100

	u32 uec_a;
#define VXGE_HW_MIN_TIM_UEC_A 0
#define VXGE_HW_MAX_TIM_UEC_A 65535

	u32 urange_b;
#define VXGE_HW_MIN_TIM_URANGE_B 0
#define VXGE_HW_MAX_TIM_URANGE_B 100

	u32 uec_b;
#define VXGE_HW_MIN_TIM_UEC_B 0
#define VXGE_HW_MAX_TIM_UEC_B 65535

	u32 urange_c;
#define VXGE_HW_MIN_TIM_URANGE_C 0
#define VXGE_HW_MAX_TIM_URANGE_C 100

	u32 uec_c;
#define VXGE_HW_MIN_TIM_UEC_C 0
#define VXGE_HW_MAX_TIM_UEC_C 65535

	u32 uec_d;
#define VXGE_HW_MIN_TIM_UEC_D 0
#define VXGE_HW_MAX_TIM_UEC_D 65535
};
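
/*
 * Illustrative sketch (an editorial addition, not part of the original
 * interface): how the bounds and enable macros above are meant to be
 * combined into a TIM configuration. Every numeric value here is an
 * assumption chosen for the example, not a tuned or validated setting.
 * The idea: an interrupt fires once UEC_x events accumulate while the
 * selected utilization approximation sits in range x.
 */
static const struct vxge_hw_tim_intr_config example_tim_intr_config = {
	.intr_enable = VXGE_HW_TIM_INTR_ENABLE,
	.btimer_val = 250, /* boundary timer, units of 272 ns (example) */
	.timer_ac_en = VXGE_HW_TIM_TIMER_AC_ENABLE,
	.timer_ci_en = VXGE_HW_TIM_TIMER_CI_DISABLE,
	.timer_ri_en = VXGE_HW_TIM_TIMER_RI_DISABLE,
	.rtimer_val = 0,
	.util_sel = VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_RX_AVE_NET_UTIL,
	.ltimer_val = 100, /* latency timer, units of 272 ns (example) */
	.urange_a = 5,	/* range A covers 0 <= UTIL <= 5% ... */
	.uec_a = 1,	/* ... and interrupts after 1 event there */
	.urange_b = 10,
	.uec_b = 2,
	.urange_c = 50,
	.uec_c = 4,
	.uec_d = 8,	/* range D: utilization above URANGE_C */
};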

#define VXGE_HW_STATS_OP_READ 0
#define VXGE_HW_STATS_OP_CLEAR_STAT 1
#define VXGE_HW_STATS_OP_CLEAR_ALL_VPATH_STATS 2
#define VXGE_HW_STATS_OP_CLEAR_ALL_STATS_OF_LOC 2
#define VXGE_HW_STATS_OP_CLEAR_ALL_STATS 3

#define VXGE_HW_STATS_LOC_AGGR 17
#define VXGE_HW_STATS_AGGRn_OFFSET 0x00720

#define VXGE_HW_STATS_VPATH_TX_OFFSET 0x0
#define VXGE_HW_STATS_VPATH_RX_OFFSET 0x00090

#define VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET (0x001d0 >> 3)
#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(bits) \
	vxge_bVALn(bits, 0, 32)

#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(bits) \
	vxge_bVALn(bits, 32, 32)

#define VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET (0x001d8 >> 3)
#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(bits) \
	vxge_bVALn(bits, 0, 32)

#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(bits) \
	vxge_bVALn(bits, 32, 32)
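
/*
 * Sketch (an editorial addition, using only the macros defined just above):
 * each PROG_EVENT location packs two 32-bit counters into one 64-bit
 * statistics word, and the GET_..._VNUMn macros pull out the two halves
 * via vxge_bVALn() (defined in vxge-reg.h). A hypothetical helper:
 */
static inline void
example_unpack_prog_event_vnum01(u64 bits, u32 *vnum0, u32 *vnum1)
{
	*vnum0 = VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(bits);
	*vnum1 = VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(bits);
}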

/**
 * struct vxge_hw_xmac_aggr_stats - Per-Aggregator XMAC Statistics
 *
 * @tx_frms: Count of data frames transmitted on this Aggregator on all
 * its Aggregation ports. Does not include LACPDUs or Marker PDUs.
 * However, does include frames discarded by the Distribution
 * function.
 * @tx_data_octets: Count of data and padding octets of frames transmitted
 * on this Aggregator on all its Aggregation ports. Does not include
 * octets of LACPDUs or Marker PDUs. However, does include octets of
 * frames discarded by the Distribution function.
 * @tx_mcast_frms: Count of data frames transmitted (to a group destination
 * address other than the broadcast address) on this Aggregator on
 * all its Aggregation ports. Does not include LACPDUs or Marker
 * PDUs. However, does include frames discarded by the Distribution
 * function.
 * @tx_bcast_frms: Count of broadcast data frames transmitted on this Aggregator
 * on all its Aggregation ports. Does not include LACPDUs or Marker
 * PDUs. However, does include frames discarded by the Distribution
 * function.
 * @tx_discarded_frms: Count of data frames to be transmitted on this Aggregator
 * that are discarded by the Distribution function. This occurs when
 * conversations are allocated to different ports and have to be
 * flushed on the old ports.
 * @tx_errored_frms: Count of data frames transmitted on this Aggregator that
 * experience transmission errors on its Aggregation ports.
 * @rx_frms: Count of data frames received on this Aggregator on all its
 * Aggregation ports. Does not include LACPDUs or Marker PDUs.
 * Also, does not include frames discarded by the Collection
 * function.
 * @rx_data_octets: Count of data and padding octets of frames received on this
 * Aggregator on all its Aggregation ports. Does not include octets
 * of LACPDUs or Marker PDUs. Also, does not include octets of
 * frames discarded by the Collection function.
 * @rx_mcast_frms: Count of data frames received (from a group destination
 * address other than the broadcast address) on this Aggregator on
 * all its Aggregation ports. Does not include LACPDUs or Marker
 * PDUs. Also, does not include frames discarded by the Collection
 * function.
 * @rx_bcast_frms: Count of broadcast data frames received on this Aggregator on
 * all its Aggregation ports. Does not include LACPDUs or Marker
 * PDUs. Also, does not include frames discarded by the Collection
 * function.
 * @rx_discarded_frms: Count of data frames received on this Aggregator that are
 * discarded by the Collection function because the Collection
 * function was disabled on the port on which the frames were received.
 * @rx_errored_frms: Count of data frames received on this Aggregator that are
 * discarded by its Aggregation ports, or are discarded by the
 * Collection function of the Aggregator, or that are discarded by
 * the Aggregator due to detection of an illegal Slow Protocols PDU.
 * @rx_unknown_slow_proto_frms: Count of data frames received on this Aggregator
 * that are discarded by its Aggregation ports due to detection of
 * an unknown Slow Protocols PDU.
 *
 * Per-Aggregator XMAC statistics.
 */
struct vxge_hw_xmac_aggr_stats {
/*0x000*/ u64 tx_frms;
/*0x008*/ u64 tx_data_octets;
/*0x010*/ u64 tx_mcast_frms;
/*0x018*/ u64 tx_bcast_frms;
/*0x020*/ u64 tx_discarded_frms;
/*0x028*/ u64 tx_errored_frms;
/*0x030*/ u64 rx_frms;
/*0x038*/ u64 rx_data_octets;
/*0x040*/ u64 rx_mcast_frms;
/*0x048*/ u64 rx_bcast_frms;
/*0x050*/ u64 rx_discarded_frms;
/*0x058*/ u64 rx_errored_frms;
/*0x060*/ u64 rx_unknown_slow_proto_frms;
} __packed;

/**
 * struct vxge_hw_xmac_port_stats - XMAC Port Statistics
 *
 * @tx_ttl_frms: Count of successfully transmitted MAC frames
 * @tx_ttl_octets: Count of total octets of transmitted frames, not including
 * framing characters (i.e. less framing bits). To determine the
 * total octets of transmitted frames, including framing characters,
 * multiply PORTn_TX_TTL_FRMS by 8 and add it to this stat (unless
 * otherwise configured, this stat only counts frames that have
 * 8 bytes of preamble for each frame). This stat can be configured
 * (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count everything
 * including the preamble octets.
 * @tx_data_octets: Count of data and padding octets of successfully transmitted
 * frames.
 * @tx_mcast_frms: Count of successfully transmitted frames to a group address
 * other than the broadcast address.
 * @tx_bcast_frms: Count of successfully transmitted frames to the broadcast
 * group address.
 * @tx_ucast_frms: Count of transmitted frames containing a unicast address.
 * Includes discarded frames that are not sent to the network.
 * @tx_tagged_frms: Count of transmitted frames containing a VLAN tag.
 * @tx_vld_ip: Count of transmitted IP datagrams that are passed to the network.
 * @tx_vld_ip_octets: Count of total octets of transmitted IP datagrams that
 * are passed to the network.
 * @tx_icmp: Count of transmitted ICMP messages. Includes messages not sent
 * due to problems within ICMP.
 * @tx_tcp: Count of transmitted TCP segments. Does not include segments
 * containing retransmitted octets.
 * @tx_rst_tcp: Count of transmitted TCP segments containing the RST flag.
 * @tx_udp: Count of transmitted UDP datagrams.
 * @tx_parse_error: Increments when the TPA is unable to parse a packet. This
 * generally occurs when a packet is corrupt somehow, including
 * packets that have IP version mismatches, invalid Layer 2 control
 * fields, etc. L3/L4 checksums are not offloaded, but the packet
 * is still transmitted.
 * @tx_unknown_protocol: Increments when the TPA encounters an unknown
 * protocol, such as a new IPv6 extension header, or an unsupported
 * Routing Type. The packet still has a checksum calculated but it
 * may be incorrect.
 * @tx_pause_ctrl_frms: Count of MAC PAUSE control frames that are transmitted.
 * Since the only control frames supported by this device are
 * PAUSE frames, this register is a count of all transmitted MAC
 * control frames.
 * @tx_marker_pdu_frms: Count of Marker PDUs transmitted on this
 * Aggregation port.
 * @tx_lacpdu_frms: Count of LACPDUs transmitted on this Aggregation port.
 * @tx_drop_ip: Count of transmitted IP datagrams that could not be passed to
 * the network. Increments because of:
 * 1) An internal processing error
 * (such as an uncorrectable ECC error). 2) A frame parsing error
 * during IP checksum calculation.
 * @tx_marker_resp_pdu_frms: Count of Marker Response PDUs transmitted on this
 * Aggregation port.
 * @tx_xgmii_char2_match: Maintains a count of the number of transmitted XGMII
 * characters that match a pattern that is programmable through
 * register XMAC_STATS_TX_XGMII_CHAR_PORTn. By default, the pattern
 * is set to /T/ (i.e. the terminate character), thus the statistic
 * tracks the number of transmitted Terminate characters.
 * @tx_xgmii_char1_match: Maintains a count of the number of transmitted XGMII
 * characters that match a pattern that is programmable through
 * register XMAC_STATS_TX_XGMII_CHAR_PORTn. By default, the pattern
 * is set to /S/ (i.e. the start character), thus the statistic
 * tracks the number of transmitted Start characters.
 * @tx_xgmii_column2_match: Maintains a count of the number of transmitted XGMII
 * columns that match a pattern that is programmable through register
 * XMAC_STATS_TX_XGMII_COLUMN2_PORTn. By default, the pattern is set
 * to 4 x /E/ (i.e. a column containing all error characters), thus
 * the statistic tracks the number of Error columns transmitted at
 * any time. If XMAC_STATS_TX_XGMII_BEHAV_COLUMN2_PORTn.NEAR_COL1 is
 * set to 1, then this stat increments when COLUMN2 is found within
 * 'n' clocks after COLUMN1. Here, 'n' is defined by
 * XMAC_STATS_TX_XGMII_BEHAV_COLUMN2_PORTn.NUM_COL (if 'n' is set
 * to 0, then it means to search anywhere for COLUMN2).
 * @tx_xgmii_column1_match: Maintains a count of the number of transmitted XGMII
 * columns that match a pattern that is programmable through register
 * XMAC_STATS_TX_XGMII_COLUMN1_PORTn. By default, the pattern is set
 * to 4 x /I/ (i.e. a column containing all idle characters),
 * thus the statistic tracks the number of transmitted Idle columns.
 * @tx_any_err_frms: Count of transmitted frames containing any error that
 * prevents them from being passed to the network. Increments if
 * there is an ECC error while reading the frame out of the transmit
 * buffer. Also increments if the transmit protocol assist (TPA)
 * block determines that the frame should not be sent.
 * @tx_drop_frms: Count of frames that could not be sent for no other reason
 * than internal MAC processing. Increments once whenever the
 * transmit buffer is flushed (due to an ECC error on a memory
 * descriptor).
 * @rx_ttl_frms: Count of total received MAC frames, including frames received
 * with frame-too-long, FCS, or length errors. This stat can be
 * configured (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count
 * everything, even "frames" as small as one byte of preamble.
 * @rx_vld_frms: Count of successfully received MAC frames. Does not include
 * frames received with frame-too-long, FCS, or length errors.
 * @rx_offload_frms: Count of offloaded received frames that are passed to
 * the host.
 * @rx_ttl_octets: Count of total octets of received frames, not including
 * framing characters (i.e. less framing bits). To determine the
 * total octets of received frames, including framing characters,
 * multiply PORTn_RX_TTL_FRMS by 8 and add it to this stat (unless
 * otherwise configured, this stat only counts frames that have 8
 * bytes of preamble for each frame). This stat can be configured
 * (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count everything,
 * even the preamble octets of "frames" as small as one byte of preamble.
 * @rx_data_octets: Count of data and padding octets of successfully received
 * frames. Does not include frames received with frame-too-long,
 * FCS, or length errors.
 * @rx_offload_octets: Count of total octets, not including framing
 * characters, of offloaded received frames that are passed
 * to the host.
 * @rx_vld_mcast_frms: Count of successfully received MAC frames containing a
 * nonbroadcast group address. Does not include frames received
 * with frame-too-long, FCS, or length errors.
 * @rx_vld_bcast_frms: Count of successfully received MAC frames containing
 * the broadcast group address. Does not include frames received
 * with frame-too-long, FCS, or length errors.
 * @rx_accepted_ucast_frms: Count of successfully received frames containing
 * a unicast address. Only includes frames that are passed to
 * the system.
 * @rx_accepted_nucast_frms: Count of successfully received frames containing
 * a non-unicast (broadcast or multicast) address. Only includes
 * frames that are passed to the system. Could include, for instance,
 * non-unicast frames that contain FCS errors if the MAC_ERROR_CFG
 * register is set to pass FCS-errored frames to the host.
 * @rx_tagged_frms: Count of received frames containing a VLAN tag.
 * @rx_long_frms: Count of received frames that are longer than RX_MAX_PYLD_LEN
 * + 18 bytes (+ 22 bytes if VLAN-tagged).
 * @rx_usized_frms: Count of received frames of length (including FCS, but not
 * framing bits) less than 64 octets, that are otherwise well-formed.
 * In other words, counts runts.
 * @rx_osized_frms: Count of received frames of length (including FCS, but not
 * framing bits) more than 1518 octets, that are otherwise
 * well-formed. Note: If register XMAC_STATS_GLOBAL_CFG.VLAN_HANDLING
 * is set to 1, then "more than 1518 octets" becomes "more than 1518
 * (1522 if VLAN-tagged) octets".
 * @rx_frag_frms: Count of received frames of length (including FCS, but not
 * framing bits) less than 64 octets that had bad FCS. In other
 * words, counts fragments.
 * @rx_jabber_frms: Count of received frames of length (including FCS, but not
 * framing bits) more than 1518 octets that had bad FCS. In other
 * words, counts jabbers. Note: If register
 * XMAC_STATS_GLOBAL_CFG.VLAN_HANDLING is set to 1, then "more than
 * 1518 octets" becomes "more than 1518 (1522 if VLAN-tagged)
 * octets".
 * @rx_ttl_64_frms: Count of total received MAC frames with length (including
 * FCS, but not framing bits) of exactly 64 octets. Includes frames
 * received with frame-too-long, FCS, or length errors.
 * @rx_ttl_65_127_frms: Count of total received MAC frames with length
 * (including FCS, but not framing bits) of between 65 and 127
 * octets inclusive. Includes frames received with frame-too-long,
 * FCS, or length errors.
 * @rx_ttl_128_255_frms: Count of total received MAC frames with length
 * (including FCS, but not framing bits) of between 128 and 255
 * octets inclusive. Includes frames received with frame-too-long,
 * FCS, or length errors.
 * @rx_ttl_256_511_frms: Count of total received MAC frames with length
 * (including FCS, but not framing bits) of between 256 and 511
 * octets inclusive. Includes frames received with frame-too-long,
 * FCS, or length errors.
 * @rx_ttl_512_1023_frms: Count of total received MAC frames with length
 * (including FCS, but not framing bits) of between 512 and 1023
 * octets inclusive. Includes frames received with frame-too-long,
 * FCS, or length errors.
 * @rx_ttl_1024_1518_frms: Count of total received MAC frames with length
 * (including FCS, but not framing bits) of between 1024 and 1518
 * octets inclusive. Includes frames received with frame-too-long,
 * FCS, or length errors.
 * @rx_ttl_1519_4095_frms: Count of total received MAC frames with length
 * (including FCS, but not framing bits) of between 1519 and 4095
 * octets inclusive. Includes frames received with frame-too-long,
 * FCS, or length errors.
 * @rx_ttl_4096_8191_frms: Count of total received MAC frames with length
 * (including FCS, but not framing bits) of between 4096 and 8191
 * octets inclusive. Includes frames received with frame-too-long,
 * FCS, or length errors.
 * @rx_ttl_8192_max_frms: Count of total received MAC frames with length
 * (including FCS, but not framing bits) of between 8192 and
 * RX_MAX_PYLD_LEN+18 octets inclusive. Includes frames received
 * with frame-too-long, FCS, or length errors.
 * @rx_ttl_gt_max_frms: Count of total received MAC frames with length
 * (including FCS, but not framing bits) exceeding
 * RX_MAX_PYLD_LEN+18 (+22 bytes if VLAN-tagged) octets inclusive.
 * Includes frames received with frame-too-long,
 * FCS, or length errors.
 * @rx_ip: Count of received IP datagrams. Includes errored IP datagrams.
 * @rx_accepted_ip: Count of received IP datagrams that
 * are passed to the system.
 * @rx_ip_octets: Count of number of octets in received IP datagrams. Includes
 * errored IP datagrams.
 * @rx_err_ip: Count of received IP datagrams containing errors. For example,
 * bad IP checksum.
 * @rx_icmp: Count of received ICMP messages. Includes errored ICMP messages.
 * @rx_tcp: Count of received TCP segments. Includes errored TCP segments.
 * Note: This stat contains a count of all received TCP segments,
 * regardless of whether or not they pertain to an established
 * connection.
 * @rx_udp: Count of received UDP datagrams.
 * @rx_err_tcp: Count of received TCP segments containing errors. For example,
 * bad TCP checksum.
 * @rx_pause_count: Count of number of pause quanta that the MAC has been in
 * the paused state. Recall, one pause quantum equates to 512
 * bit times.
 * @rx_pause_ctrl_frms: Count of received MAC PAUSE control frames.
 * @rx_unsup_ctrl_frms: Count of received MAC control frames that do not
 * contain the PAUSE opcode. The sum of RX_PAUSE_CTRL_FRMS and
 * this register is a count of all received MAC control frames.
 * Note: This stat may be configured to count all layer 2 errors
 * (i.e. length errors and FCS errors).
 * @rx_fcs_err_frms: Count of received MAC frames that do not pass FCS. Does
 * not include frames received with frame-too-long or
 * frame-too-short error.
 * @rx_in_rng_len_err_frms: Count of received frames with a length/type field
 * value between 46 (42 for VLAN-tagged frames) and 1500 (also 1500
 * for VLAN-tagged frames), inclusive, that does not match the
 * number of data octets (including pad) received. Also contains
 * a count of received frames with a length/type field less than
 * 46 (42 for VLAN-tagged frames) and the number of data octets
 * (including pad) received is greater than 46 (42 for VLAN-tagged
 * frames).
 * @rx_out_rng_len_err_frms: Count of received frames with length/type field
 * between 1501 and 1535 decimal, inclusive.
 * @rx_drop_frms: Count of received frames that could not be passed to the host.
 * See PORTn_RX_L2_MGMT_DISCARD, PORTn_RX_RPA_DISCARD,
 * PORTn_RX_TRASH_DISCARD, PORTn_RX_RTS_DISCARD, PORTn_RX_RED_DISCARD
 * for a list of reasons. Because the RMAC drops one frame at a time,
 * this stat also indicates the number of drop events.
 * @rx_discarded_frms: Count of received frames containing any error that
 * prevents them from being passed to the system. See
 * PORTn_RX_FCS_DISCARD, PORTn_RX_LEN_DISCARD, and
 * PORTn_RX_SWITCH_DISCARD for a list of reasons.
 * @rx_drop_ip: Count of received IP datagrams that could not be passed to the
 * host. See PORTn_RX_DROP_FRMS for a list of reasons.
 * @rx_drop_udp: Count of received UDP datagrams that are not delivered to the
 * host. See PORTn_RX_DROP_FRMS for a list of reasons.
 * @rx_marker_pdu_frms: Count of valid Marker PDUs received on this Aggregation
 * port.
 * @rx_lacpdu_frms: Count of valid LACPDUs received on this Aggregation port.
 * @rx_unknown_pdu_frms: Count of received frames (on this Aggregation port)
 * that carry the Slow Protocols EtherType, but contain an unknown
 * PDU. Or frames that contain the Slow Protocols group MAC address,
 * but do not carry the Slow Protocols EtherType.
 * @rx_marker_resp_pdu_frms: Count of valid Marker Response PDUs received on
 * this Aggregation port.
 * @rx_fcs_discard: Count of received frames that are discarded because the
 * FCS check failed.
 * @rx_illegal_pdu_frms: Count of received frames (on this Aggregation port)
 * that carry the Slow Protocols EtherType, but contain a badly
 * formed PDU. Or frames that carry the Slow Protocols EtherType,
 * but contain an illegal value of Protocol Subtype.
 * @rx_switch_discard: Count of received frames that are discarded by the
 * internal switch because they did not have an entry in the
 * Filtering Database. This includes frames that had an invalid
 * destination MAC address or VLAN ID. It also includes frames that are
 * discarded because they did not satisfy the length requirements
 * of the target VPATH.
 * @rx_len_discard: Count of received frames that are discarded because of an
 * invalid frame length (includes fragments, oversized frames and
 * mismatch between frame length and length/type field). This stat
 * can be configured
 * (see XMAC_STATS_GLOBAL_CFG.LEN_DISCARD_HANDLING).
 * @rx_rpa_discard: Count of received frames that were discarded because the
 * receive protocol assist (RPA) discovered an error in the frame
 * or was unable to parse the frame.
 * @rx_l2_mgmt_discard: Count of Layer 2 management frames (e.g. pause frames,
 * Link Aggregation Control Protocol (LACP) frames, etc.) that are
 * discarded.
 * @rx_rts_discard: Count of received frames that are discarded by the receive
 * traffic steering (RTS) logic. Includes those frames discarded
 * because the SSC response contradicted the switch table, because
 * the SSC timed out, or because the target queue could not fit the
 * frame.
 * @rx_trash_discard: Count of received frames that are discarded because
 * receive traffic steering (RTS) steered the frame to the trash
 * queue.
 * @rx_buff_full_discard: Count of received frames that are discarded because
 * internal buffers are full. Includes frames discarded because the
 * RTS logic is waiting for an SSC lookup that has no timeout bound.
 * Also, includes frames that are dropped because the MAC2FAU buffer
 * is nearly full -- this can happen if the external receive buffer
 * is full and the receive path is backing up.
 * @rx_red_discard: Count of received frames that are discarded because of RED
 * (Random Early Discard).
 * @rx_xgmii_ctrl_err_cnt: Maintains a count of unexpected or misplaced control
 * characters occurring between times of normal data transmission
 * (i.e. not included in RX_XGMII_DATA_ERR_CNT). This counter is
 * incremented when either -
 * 1) The Reconciliation Sublayer (RS) is expecting one control
 * character and gets another (i.e. is expecting a Start
 * character, but gets another control character).
 * 2) Start control character is not in lane 0
 * Only increments the count by one for each XGMII column.
 * @rx_xgmii_data_err_cnt: Maintains a count of unexpected control characters
 * during normal data transmission. If the Reconciliation Sublayer
 * (RS) receives a control character, other than a terminate control
 * character, during receipt of data octets then this register is
 * incremented. Also increments if the start frame delimiter is not
 * found in the correct location. Only increments the count by one
 * for each XGMII column.
 * @rx_xgmii_char1_match: Maintains a count of the number of XGMII characters
 * that match a pattern that is programmable through register
 * XMAC_STATS_RX_XGMII_CHAR_PORTn. By default, the pattern is set
 * to /E/ (i.e. the error character), thus the statistic tracks the
 * number of Error characters received at any time.
 * @rx_xgmii_err_sym: Count of the number of symbol errors in the received
 * XGMII data (i.e. PHY indicates "Receive Error" on the XGMII).
 * Only includes symbol errors that are observed between the XGMII
 * Start Frame Delimiter and End Frame Delimiter, inclusive. And
 * only increments the count by one for each frame.
 * @rx_xgmii_column1_match: Maintains a count of the number of XGMII columns
 * that match a pattern that is programmable through register
 * XMAC_STATS_RX_XGMII_COLUMN1_PORTn. By default, the pattern is set
 * to 4 x /E/ (i.e. a column containing all error characters), thus
 * the statistic tracks the number of Error columns received at any
 * time.
 * @rx_xgmii_char2_match: Maintains a count of the number of XGMII characters
 * that match a pattern that is programmable through register
 * XMAC_STATS_RX_XGMII_CHAR_PORTn. By default, the pattern is set
 * to /E/ (i.e. the error character), thus the statistic tracks the
 * number of Error characters received at any time.
 * @rx_local_fault: Maintains a count of the number of times that link
 * transitioned from "up" to "down" due to a local fault.
 * @rx_xgmii_column2_match: Maintains a count of the number of XGMII columns
 * that match a pattern that is programmable through register
 * XMAC_STATS_RX_XGMII_COLUMN2_PORTn. By default, the pattern is set
 * to 4 x /E/ (i.e. a column containing all error characters), thus
 * the statistic tracks the number of Error columns received at any
 * time. If XMAC_STATS_RX_XGMII_BEHAV_COLUMN2_PORTn.NEAR_COL1 is set
 * to 1, then this stat increments when COLUMN2 is found within 'n'
 * clocks after COLUMN1. Here, 'n' is defined by
 * XMAC_STATS_RX_XGMII_BEHAV_COLUMN2_PORTn.NUM_COL (if 'n' is set to
 * 0, then it means to search anywhere for COLUMN2).
 * @rx_jettison: Count of received frames that are jettisoned because internal
 * buffers are full.
 * @rx_remote_fault: Maintains a count of the number of times that link
 * transitioned from "up" to "down" due to a remote fault.
 *
 * XMAC Port Statistics.
 */
struct vxge_hw_xmac_port_stats {
/*0x000*/ u64 tx_ttl_frms;
/*0x008*/ u64 tx_ttl_octets;
/*0x010*/ u64 tx_data_octets;
/*0x018*/ u64 tx_mcast_frms;
/*0x020*/ u64 tx_bcast_frms;
/*0x028*/ u64 tx_ucast_frms;
/*0x030*/ u64 tx_tagged_frms;
/*0x038*/ u64 tx_vld_ip;
/*0x040*/ u64 tx_vld_ip_octets;
/*0x048*/ u64 tx_icmp;
/*0x050*/ u64 tx_tcp;
/*0x058*/ u64 tx_rst_tcp;
/*0x060*/ u64 tx_udp;
/*0x068*/ u32 tx_parse_error;
/*0x06c*/ u32 tx_unknown_protocol;
/*0x070*/ u64 tx_pause_ctrl_frms;
/*0x078*/ u32 tx_marker_pdu_frms;
/*0x07c*/ u32 tx_lacpdu_frms;
/*0x080*/ u32 tx_drop_ip;
/*0x084*/ u32 tx_marker_resp_pdu_frms;
/*0x088*/ u32 tx_xgmii_char2_match;
/*0x08c*/ u32 tx_xgmii_char1_match;
/*0x090*/ u32 tx_xgmii_column2_match;
/*0x094*/ u32 tx_xgmii_column1_match;
/*0x098*/ u32 unused1;
/*0x09c*/ u16 tx_any_err_frms;
/*0x09e*/ u16 tx_drop_frms;
/*0x0a0*/ u64 rx_ttl_frms;
/*0x0a8*/ u64 rx_vld_frms;
/*0x0b0*/ u64 rx_offload_frms;
/*0x0b8*/ u64 rx_ttl_octets;
/*0x0c0*/ u64 rx_data_octets;
/*0x0c8*/ u64 rx_offload_octets;
/*0x0d0*/ u64 rx_vld_mcast_frms;
/*0x0d8*/ u64 rx_vld_bcast_frms;
/*0x0e0*/ u64 rx_accepted_ucast_frms;
/*0x0e8*/ u64 rx_accepted_nucast_frms;
/*0x0f0*/ u64 rx_tagged_frms;
/*0x0f8*/ u64 rx_long_frms;
/*0x100*/ u64 rx_usized_frms;
/*0x108*/ u64 rx_osized_frms;
/*0x110*/ u64 rx_frag_frms;
/*0x118*/ u64 rx_jabber_frms;
/*0x120*/ u64 rx_ttl_64_frms;
/*0x128*/ u64 rx_ttl_65_127_frms;
/*0x130*/ u64 rx_ttl_128_255_frms;
/*0x138*/ u64 rx_ttl_256_511_frms;
/*0x140*/ u64 rx_ttl_512_1023_frms;
/*0x148*/ u64 rx_ttl_1024_1518_frms;
/*0x150*/ u64 rx_ttl_1519_4095_frms;
/*0x158*/ u64 rx_ttl_4096_8191_frms;
/*0x160*/ u64 rx_ttl_8192_max_frms;
/*0x168*/ u64 rx_ttl_gt_max_frms;
/*0x170*/ u64 rx_ip;
/*0x178*/ u64 rx_accepted_ip;
/*0x180*/ u64 rx_ip_octets;
/*0x188*/ u64 rx_err_ip;
/*0x190*/ u64 rx_icmp;
/*0x198*/ u64 rx_tcp;
/*0x1a0*/ u64 rx_udp;
/*0x1a8*/ u64 rx_err_tcp;
/*0x1b0*/ u64 rx_pause_count;
/*0x1b8*/ u64 rx_pause_ctrl_frms;
/*0x1c0*/ u64 rx_unsup_ctrl_frms;
/*0x1c8*/ u64 rx_fcs_err_frms;
/*0x1d0*/ u64 rx_in_rng_len_err_frms;
/*0x1d8*/ u64 rx_out_rng_len_err_frms;
/*0x1e0*/ u64 rx_drop_frms;
/*0x1e8*/ u64 rx_discarded_frms;
/*0x1f0*/ u64 rx_drop_ip;
/*0x1f8*/ u64 rx_drop_udp;
/*0x200*/ u32 rx_marker_pdu_frms;
/*0x204*/ u32 rx_lacpdu_frms;
/*0x208*/ u32 rx_unknown_pdu_frms;
/*0x20c*/ u32 rx_marker_resp_pdu_frms;
/*0x210*/ u32 rx_fcs_discard;
/*0x214*/ u32 rx_illegal_pdu_frms;
/*0x218*/ u32 rx_switch_discard;
/*0x21c*/ u32 rx_len_discard;
/*0x220*/ u32 rx_rpa_discard;
/*0x224*/ u32 rx_l2_mgmt_discard;
/*0x228*/ u32 rx_rts_discard;
/*0x22c*/ u32 rx_trash_discard;
/*0x230*/ u32 rx_buff_full_discard;
/*0x234*/ u32 rx_red_discard;
/*0x238*/ u32 rx_xgmii_ctrl_err_cnt;
/*0x23c*/ u32 rx_xgmii_data_err_cnt;
/*0x240*/ u32 rx_xgmii_char1_match;
/*0x244*/ u32 rx_xgmii_err_sym;
/*0x248*/ u32 rx_xgmii_column1_match;
/*0x24c*/ u32 rx_xgmii_char2_match;
/*0x250*/ u32 rx_local_fault;
/*0x254*/ u32 rx_xgmii_column2_match;
/*0x258*/ u32 rx_jettison;
/*0x25c*/ u32 rx_remote_fault;
} __packed;
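
/*
 * Sketch (an editorial addition, not part of the original interface):
 * deriving a simple drop percentage for one wire port from the counters
 * above, using integer math only. div64_u64() is the 64-bit division
 * helper from <linux/math64.h>; the function name here is hypothetical.
 */
static inline u32
example_port_rx_drop_pct(const struct vxge_hw_xmac_port_stats *port)
{
	if (port->rx_ttl_frms == 0)
		return 0;
	return (u32)div64_u64(port->rx_discarded_frms * 100,
			      port->rx_ttl_frms);
}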

/**
 * struct vxge_hw_xmac_vpath_tx_stats - XMAC Vpath Tx Statistics
 *
 * @tx_ttl_eth_frms: Count of successfully transmitted MAC frames.
 * @tx_ttl_eth_octets: Count of total octets of transmitted frames,
 * not including framing characters (i.e. less framing bits).
 * To determine the total octets of transmitted frames, including
 * framing characters, multiply TX_TTL_ETH_FRMS by 8 and add it to
 * this stat (the device always prepends 8 bytes of preamble for
 * each frame).
 * @tx_data_octets: Count of data and padding octets of successfully transmitted
 * frames.
 * @tx_mcast_frms: Count of successfully transmitted frames to a group address
 * other than the broadcast address.
 * @tx_bcast_frms: Count of successfully transmitted frames to the broadcast
 * group address.
 * @tx_ucast_frms: Count of transmitted frames containing a unicast address.
 * Includes discarded frames that are not sent to the network.
 * @tx_tagged_frms: Count of transmitted frames containing a VLAN tag.
 * @tx_vld_ip: Count of transmitted IP datagrams that are passed to the network.
 * @tx_vld_ip_octets: Count of total octets of transmitted IP datagrams that
 * are passed to the network.
 * @tx_icmp: Count of transmitted ICMP messages. Includes messages not sent due
 * to problems within ICMP.
 * @tx_tcp: Count of transmitted TCP segments. Does not include segments
 * containing retransmitted octets.
 * @tx_rst_tcp: Count of transmitted TCP segments containing the RST flag.
 * @tx_udp: Count of transmitted UDP datagrams.
 * @tx_unknown_protocol: Increments when the TPA encounters an unknown protocol,
 * such as a new IPv6 extension header, or an unsupported Routing
 * Type. The packet still has a checksum calculated but it may be
 * incorrect.
 * @tx_lost_ip: Count of transmitted IP datagrams that could not be passed
 * to the network. Increments because of: 1) An internal processing
 * error (such as an uncorrectable ECC error). 2) A frame parsing
 * error during IP checksum calculation.
 * @tx_parse_error: Increments when the TPA is unable to parse a packet. This
 * generally occurs when a packet is corrupt somehow, including
 * packets that have IP version mismatches, invalid Layer 2 control
 * fields, etc. L3/L4 checksums are not offloaded, but the packet
 * is still transmitted.
 * @tx_tcp_offload: For frames belonging to offloaded sessions only, a count
 * of transmitted TCP segments. Does not include segments containing
 * retransmitted octets.
 * @tx_retx_tcp_offload: For frames belonging to offloaded sessions only, the
 * total number of segments retransmitted. Retransmitted segments
 * that are sourced by the host are counted by the host.
 * @tx_lost_ip_offload: For frames belonging to offloaded sessions only, a count
 * of transmitted IP datagrams that could not be passed to the
 * network.
 *
 * XMAC Vpath TX Statistics.
 */
struct vxge_hw_xmac_vpath_tx_stats {
	u64 tx_ttl_eth_frms;
	u64 tx_ttl_eth_octets;
	u64 tx_data_octets;
	u64 tx_mcast_frms;
	u64 tx_bcast_frms;
	u64 tx_ucast_frms;
	u64 tx_tagged_frms;
	u64 tx_vld_ip;
	u64 tx_vld_ip_octets;
	u64 tx_icmp;
	u64 tx_tcp;
	u64 tx_rst_tcp;
	u64 tx_udp;
	u32 tx_unknown_protocol;
	u32 tx_lost_ip;
	u32 unused1;
	u32 tx_parse_error;
	u64 tx_tcp_offload;
	u64 tx_retx_tcp_offload;
	u64 tx_lost_ip_offload;
} __packed;

/**
 * struct vxge_hw_xmac_vpath_rx_stats - XMAC Vpath RX Statistics
 *
 * @rx_ttl_eth_frms: Count of successfully received MAC frames.
 * @rx_vld_frms: Count of successfully received MAC frames. Does not include
 * frames received with frame-too-long, FCS, or length errors.
 * @rx_offload_frms: Count of offloaded received frames that are passed to
 * the host.
 * @rx_ttl_eth_octets: Count of total octets of received frames, not including
 * framing characters (i.e. less framing bits). Only counts octets
 * of frames that are at least 14 bytes (18 bytes for VLAN-tagged)
 * before FCS. To determine the total octets of received frames,
 * including framing characters, multiply RX_TTL_ETH_FRMS by 8 and
 * add it to this stat (the stat RX_TTL_ETH_FRMS only counts frames
 * that have the required 8 bytes of preamble).
 * @rx_data_octets: Count of data and padding octets of successfully received
 * frames. Does not include frames received with frame-too-long,
 * FCS, or length errors.
 * @rx_offload_octets: Count of total octets, not including framing characters,
 * of offloaded received frames that are passed to the host.
 * @rx_vld_mcast_frms: Count of successfully received MAC frames containing a
 * nonbroadcast group address. Does not include frames received with
 * frame-too-long, FCS, or length errors.
 * @rx_vld_bcast_frms: Count of successfully received MAC frames containing the
 * broadcast group address. Does not include frames received with
 * frame-too-long, FCS, or length errors.
 * @rx_accepted_ucast_frms: Count of successfully received frames containing
 * a unicast address. Only includes frames that are passed to the
 * system.
 * @rx_accepted_nucast_frms: Count of successfully received frames containing
 * a non-unicast (broadcast or multicast) address. Only includes
 * frames that are passed to the system. Could include, for instance,
 * non-unicast frames that contain FCS errors if the MAC_ERROR_CFG
 * register is set to pass FCS-errored frames to the host.
 * @rx_tagged_frms: Count of received frames containing a VLAN tag.
 * @rx_long_frms: Count of received frames that are longer than RX_MAX_PYLD_LEN
 * + 18 bytes (+ 22 bytes if VLAN-tagged).
 * @rx_usized_frms: Count of received frames of length (including FCS, but not
 * framing bits) less than 64 octets, that are otherwise well-formed.
 * In other words, counts runts.
 * @rx_osized_frms: Count of received frames of length (including FCS, but not
 * framing bits) more than 1518 octets, that are otherwise
 * well-formed.
 * @rx_frag_frms: Count of received frames of length (including FCS, but not
 * framing bits) less than 64 octets that had bad FCS.
 * In other words, counts fragments.
 * @rx_jabber_frms: Count of received frames of length (including FCS, but not
 * framing bits) more than 1518 octets that had bad FCS. In other
 * words, counts jabbers.
 * @rx_ttl_64_frms: Count of total received MAC frames with length (including
 * FCS, but not framing bits) of exactly 64 octets. Includes frames
 * received with frame-too-long, FCS, or length errors.
 * @rx_ttl_65_127_frms: Count of total received MAC frames with length
 * (including FCS, but not framing bits) of between 65 and 127 octets
 * inclusive. Includes frames received with frame-too-long, FCS,
 * or length errors.
 * @rx_ttl_128_255_frms: Count of total received MAC frames with length
 * (including FCS, but not framing bits) of between 128 and 255 octets
 * inclusive. Includes frames received with frame-too-long, FCS,
 * or length errors.
 * @rx_ttl_256_511_frms: Count of total received MAC frames with length
 * (including FCS, but not framing bits) of between 256 and 511 octets
 * inclusive. Includes frames received with frame-too-long, FCS, or
 * length errors.
 * @rx_ttl_512_1023_frms: Count of total received MAC frames with length
 * (including FCS, but not framing bits) of between 512 and 1023
 * octets inclusive. Includes frames received with frame-too-long,
 * FCS, or length errors.
 * @rx_ttl_1024_1518_frms: Count of total received MAC frames with length
 * (including FCS, but not framing bits) of between 1024 and 1518
 * octets inclusive. Includes frames received with frame-too-long,
 * FCS, or length errors.
 * @rx_ttl_1519_4095_frms: Count of total received MAC frames with length
 * (including FCS, but not framing bits) of between 1519 and 4095
 * octets inclusive. Includes frames received with frame-too-long,
 * FCS, or length errors.
 * @rx_ttl_4096_8191_frms: Count of total received MAC frames with length
 * (including FCS, but not framing bits) of between 4096 and 8191
 * octets inclusive. Includes frames received with frame-too-long,
 * FCS, or length errors.
 * @rx_ttl_8192_max_frms: Count of total received MAC frames with length
 * (including FCS, but not framing bits) of between 8192 and
 * RX_MAX_PYLD_LEN+18 octets inclusive. Includes frames received
 * with frame-too-long, FCS, or length errors.
 * @rx_ttl_gt_max_frms: Count of total received MAC frames with length
 * (including FCS, but not framing bits) exceeding RX_MAX_PYLD_LEN+18
 * (+22 bytes if VLAN-tagged) octets inclusive. Includes frames
 * received with frame-too-long, FCS, or length errors.
 * @rx_ip: Count of received IP datagrams. Includes errored IP datagrams.
 * @rx_accepted_ip: Count of received IP datagrams that
 * are passed to the system.
 * @rx_ip_octets: Count of number of octets in received IP datagrams.
 * Includes errored IP datagrams.
 * @rx_err_ip: Count of received IP datagrams containing errors. For example,
 * bad IP checksum.
 * @rx_icmp: Count of received ICMP messages. Includes errored ICMP messages.
 * @rx_tcp: Count of received TCP segments. Includes errored TCP segments.
 * Note: This stat contains a count of all received TCP segments,
 * regardless of whether or not they pertain to an established
 * connection.
 * @rx_udp: Count of received UDP datagrams.
 * @rx_err_tcp: Count of received TCP segments containing errors. For example,
 * bad TCP checksum.
 * @rx_lost_frms: Count of received frames that could not be passed to the host.
 * See RX_QUEUE_FULL_DISCARD and RX_RED_DISCARD
 * for a list of reasons.
 * @rx_lost_ip: Count of received IP datagrams that could not be passed to
 * the host. See RX_LOST_FRMS for a list of reasons.
 * @rx_lost_ip_offload: For frames belonging to offloaded sessions only, a count
 * of received IP datagrams that could not be passed to the host.
 * See RX_LOST_FRMS for a list of reasons.
 * @rx_various_discard: Count of received frames that are discarded because
 * the target receive queue is full.
 * @rx_sleep_discard: Count of received frames that are discarded because the
 * target VPATH is asleep (a Wake-on-LAN magic packet can be used
 * to awaken the VPATH).
 * @rx_red_discard: Count of received frames that are discarded because of RED
 * (Random Early Discard).
 * @rx_queue_full_discard: Count of received frames that are discarded because
 * the target receive queue is full.
 * @rx_mpa_ok_frms: Count of received frames that pass the MPA checks.
 *
 * XMAC Vpath RX Statistics.
 */
struct vxge_hw_xmac_vpath_rx_stats {
	u64 rx_ttl_eth_frms;
	u64 rx_vld_frms;
	u64 rx_offload_frms;
	u64 rx_ttl_eth_octets;
	u64 rx_data_octets;
	u64 rx_offload_octets;
	u64 rx_vld_mcast_frms;
	u64 rx_vld_bcast_frms;
	u64 rx_accepted_ucast_frms;
	u64 rx_accepted_nucast_frms;
	u64 rx_tagged_frms;
	u64 rx_long_frms;
	u64 rx_usized_frms;
	u64 rx_osized_frms;
	u64 rx_frag_frms;
	u64 rx_jabber_frms;
	u64 rx_ttl_64_frms;
	u64 rx_ttl_65_127_frms;
	u64 rx_ttl_128_255_frms;
	u64 rx_ttl_256_511_frms;
	u64 rx_ttl_512_1023_frms;
	u64 rx_ttl_1024_1518_frms;
	u64 rx_ttl_1519_4095_frms;
	u64 rx_ttl_4096_8191_frms;
	u64 rx_ttl_8192_max_frms;
	u64 rx_ttl_gt_max_frms;
	u64 rx_ip;
	u64 rx_accepted_ip;
	u64 rx_ip_octets;
	u64 rx_err_ip;
	u64 rx_icmp;
	u64 rx_tcp;
	u64 rx_udp;
	u64 rx_err_tcp;
	u64 rx_lost_frms;
	u64 rx_lost_ip;
	u64 rx_lost_ip_offload;
	u16 rx_various_discard;
	u16 rx_sleep_discard;
	u16 rx_red_discard;
	u16 rx_queue_full_discard;
	u64 rx_mpa_ok_frms;
} __packed;

/**
 * struct vxge_hw_xmac_stats - XMAC Statistics
 *
 * @aggr_stats: Statistics on aggregate ports (port 0, port 1)
 * @port_stats: Statistics on ports (wire 0, wire 1, LAG)
 * @vpath_tx_stats: Per vpath XMAC TX stats
 * @vpath_rx_stats: Per vpath XMAC RX stats
 *
 * XMAC Statistics.
 */
struct vxge_hw_xmac_stats {
	struct vxge_hw_xmac_aggr_stats
		aggr_stats[VXGE_HW_MAC_MAX_MAC_PORT_ID];
	struct vxge_hw_xmac_port_stats
		port_stats[VXGE_HW_MAC_MAX_MAC_PORT_ID+1];
	struct vxge_hw_xmac_vpath_tx_stats
		vpath_tx_stats[VXGE_HW_MAX_VIRTUAL_PATHS];
	struct vxge_hw_xmac_vpath_rx_stats
		vpath_rx_stats[VXGE_HW_MAX_VIRTUAL_PATHS];
};
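
/*
 * Sketch (an editorial addition, not part of the original interface):
 * totalling one per-vpath counter across all virtual paths of a filled-in
 * vxge_hw_xmac_stats. VXGE_HW_MAX_VIRTUAL_PATHS is defined earlier in
 * this header; the helper name is hypothetical.
 */
static inline u64
example_total_rx_lost_frms(const struct vxge_hw_xmac_stats *stats)
{
	u64 total = 0;
	u32 i;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
		total += stats->vpath_rx_stats[i].rx_lost_frms;
	return total;
}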

/**
 * struct vxge_hw_vpath_stats_hw_info - Titan vpath hardware statistics.
 * @ini_num_mwr_sent: The number of PCI memory writes initiated by the PIC block
 * for the given VPATH
 * @ini_num_mrd_sent: The number of PCI memory reads initiated by the PIC block
 * @ini_num_cpl_rcvd: The number of PCI read completions received by the
 * PIC block
 * @ini_num_mwr_byte_sent: The number of PCI memory write bytes sent by the PIC
 * block to the host
 * @ini_num_cpl_byte_rcvd: The number of PCI read completion bytes received by
 * the PIC block
 * @wrcrdtarb_xoff: TBD
 * @rdcrdtarb_xoff: TBD
 * @vpath_genstats_count0: TBD
 * @vpath_genstats_count1: TBD
 * @vpath_genstats_count2: TBD
 * @vpath_genstats_count3: TBD
 * @vpath_genstats_count4: TBD
 * @vpath_genstats_count5: TBD
 * @tx_stats: Transmit stats
 * @rx_stats: Receive stats
 * @prog_event_vnum1: Programmable statistic. Increments when internal logic
 * detects a certain event. See register
 * XMAC_STATS_CFG.EVENT_VNUM1_CFG for more information.
 * @prog_event_vnum0: Programmable statistic. Increments when internal logic
 * detects a certain event. See register
 * XMAC_STATS_CFG.EVENT_VNUM0_CFG for more information.
 * @prog_event_vnum3: Programmable statistic. Increments when internal logic
 * detects a certain event. See register
 * XMAC_STATS_CFG.EVENT_VNUM3_CFG for more information.
 * @prog_event_vnum2: Programmable statistic. Increments when internal logic
 * detects a certain event. See register
 * XMAC_STATS_CFG.EVENT_VNUM2_CFG for more information.
 * @rx_multi_cast_frame_discard: TBD
 * @rx_frm_transferred: TBD
 * @rxd_returned: TBD
 * @rx_mpa_len_fail_frms: Count of received frames that fail the MPA
 * length check
 * @rx_mpa_mrk_fail_frms: Count of received frames that fail the MPA
 * marker check
 * @rx_mpa_crc_fail_frms: Count of received frames that fail the MPA CRC check
 * @rx_permitted_frms: Count of frames that pass through the FAU and on to the
 * frame buffer (and subsequently to the host).
 * @rx_vp_reset_discarded_frms: Count of receive frames that are discarded
 * because the VPATH is in reset
 * @rx_wol_frms: Count of received "magic packet" frames. Stat increments
 * whenever the received frame matches the VPATH's Wake-on-LAN
 * signature(s) CRC.
 * @tx_vp_reset_discarded_frms: Count of transmit frames that are discarded
 * because the VPATH is in reset. Includes frames that are discarded
 * because the current VPIN does not match the VPIN of the frame
 *
 * Titan vpath hardware statistics.
 */
struct vxge_hw_vpath_stats_hw_info {
/*0x000*/ u32 ini_num_mwr_sent;
/*0x004*/ u32 unused1;
/*0x008*/ u32 ini_num_mrd_sent;
/*0x00c*/ u32 unused2;
/*0x010*/ u32 ini_num_cpl_rcvd;
/*0x014*/ u32 unused3;
/*0x018*/ u64 ini_num_mwr_byte_sent;
/*0x020*/ u64 ini_num_cpl_byte_rcvd;
/*0x028*/ u32 wrcrdtarb_xoff;
/*0x02c*/ u32 unused4;
/*0x030*/ u32 rdcrdtarb_xoff;
/*0x034*/ u32 unused5;
/*0x038*/ u32 vpath_genstats_count0;
/*0x03c*/ u32 vpath_genstats_count1;
/*0x040*/ u32 vpath_genstats_count2;
/*0x044*/ u32 vpath_genstats_count3;
/*0x048*/ u32 vpath_genstats_count4;
/*0x04c*/ u32 unused6;
/*0x050*/ u32 vpath_genstats_count5;
/*0x054*/ u32 unused7;
/*0x058*/ struct vxge_hw_xmac_vpath_tx_stats tx_stats;
/*0x0e8*/ struct vxge_hw_xmac_vpath_rx_stats rx_stats;
/*0x220*/ u64 unused9;
/*0x228*/ u32 prog_event_vnum1;
/*0x22c*/ u32 prog_event_vnum0;
/*0x230*/ u32 prog_event_vnum3;
/*0x234*/ u32 prog_event_vnum2;
/*0x238*/ u16 rx_multi_cast_frame_discard;
/*0x23a*/ u8 unused10[6];
/*0x240*/ u32 rx_frm_transferred;
/*0x244*/ u32 unused11;
/*0x248*/ u16 rxd_returned;
/*0x24a*/ u8 unused12[6];
/*0x250*/ u16 rx_mpa_len_fail_frms;
/*0x252*/ u16 rx_mpa_mrk_fail_frms;
/*0x254*/ u16 rx_mpa_crc_fail_frms;
/*0x256*/ u16 rx_permitted_frms;
/*0x258*/ u64 rx_vp_reset_discarded_frms;
/*0x260*/ u64 rx_wol_frms;
/*0x268*/ u64 tx_vp_reset_discarded_frms;
} __packed;


/**
 * struct vxge_hw_device_stats_mrpcim_info - Titan mrpcim hardware statistics.
 * @pic.ini_rd_drop 0x0000 4 Number of DMA reads initiated
 * by the adapter that were discarded because the VPATH is out of service
 * @pic.ini_wr_drop 0x0004 4 Number of DMA writes initiated by the
 * adapter that were discarded because the VPATH is out of service
 * @pic.wrcrdtarb_ph_crdt_depleted[vplane0] 0x0008 4 Number of times
 * the posted header credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_ph_crdt_depleted[vplane1] 0x0010 4 Number of times
 * the posted header credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_ph_crdt_depleted[vplane2] 0x0018 4 Number of times
 * the posted header credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_ph_crdt_depleted[vplane3] 0x0020 4 Number of times
 * the posted header credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_ph_crdt_depleted[vplane4] 0x0028 4 Number of times
 * the posted header credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_ph_crdt_depleted[vplane5] 0x0030 4 Number of times
 * the posted header credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_ph_crdt_depleted[vplane6] 0x0038 4 Number of times
 * the posted header credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_ph_crdt_depleted[vplane7] 0x0040 4 Number of times
 * the posted header credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_ph_crdt_depleted[vplane8] 0x0048 4 Number of times
 * the posted header credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_ph_crdt_depleted[vplane9] 0x0050 4 Number of times
 * the posted header credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_ph_crdt_depleted[vplane10] 0x0058 4 Number of times
 * the posted header credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_ph_crdt_depleted[vplane11] 0x0060 4 Number of times
 * the posted header credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_ph_crdt_depleted[vplane12] 0x0068 4 Number of times
 * the posted header credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_ph_crdt_depleted[vplane13] 0x0070 4 Number of times
 * the posted header credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_ph_crdt_depleted[vplane14] 0x0078 4 Number of times
 * the posted header credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_ph_crdt_depleted[vplane15] 0x0080 4 Number of times
 * the posted header credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_ph_crdt_depleted[vplane16] 0x0088 4 Number of times
 * the posted header credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_pd_crdt_depleted[vplane0] 0x0090 4 Number of times
 * the posted data credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_pd_crdt_depleted[vplane1] 0x0098 4 Number of times
 * the posted data credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_pd_crdt_depleted[vplane2] 0x00a0 4 Number of times
 * the posted data credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_pd_crdt_depleted[vplane3] 0x00a8 4 Number of times
 * the posted data credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_pd_crdt_depleted[vplane4] 0x00b0 4 Number of times
 * the posted data credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_pd_crdt_depleted[vplane5] 0x00b8 4 Number of times
 * the posted data credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_pd_crdt_depleted[vplane6] 0x00c0 4 Number of times
 * the posted data credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_pd_crdt_depleted[vplane7] 0x00c8 4 Number of times
 * the posted data credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_pd_crdt_depleted[vplane8] 0x00d0 4 Number of times
 * the posted data credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_pd_crdt_depleted[vplane9] 0x00d8 4 Number of times
 * the posted data credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_pd_crdt_depleted[vplane10] 0x00e0 4 Number of times
 * the posted data credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_pd_crdt_depleted[vplane11] 0x00e8 4 Number of times
 * the posted data credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_pd_crdt_depleted[vplane12] 0x00f0 4 Number of times
 * the posted data credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_pd_crdt_depleted[vplane13] 0x00f8 4 Number of times
 * the posted data credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_pd_crdt_depleted[vplane14] 0x0100 4 Number of times
 * the posted data credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_pd_crdt_depleted[vplane15] 0x0108 4 Number of times
 * the posted data credits for upstream PCI writes were depleted
 * @pic.wrcrdtarb_pd_crdt_depleted[vplane16] 0x0110 4 Number of times
 * the posted data credits for upstream PCI writes were depleted
 * @pic.rdcrdtarb_nph_crdt_depleted[vplane0] 0x0118 4 Number of times
 * the non-posted header credits for upstream PCI reads were depleted
 * @pic.rdcrdtarb_nph_crdt_depleted[vplane1] 0x0120 4 Number of times
 * the non-posted header credits for upstream PCI reads were depleted
 * @pic.rdcrdtarb_nph_crdt_depleted[vplane2] 0x0128 4 Number of times
 * the non-posted header credits for upstream PCI reads were depleted
 * @pic.rdcrdtarb_nph_crdt_depleted[vplane3] 0x0130 4 Number of times
 * the non-posted header credits for upstream PCI reads were depleted
 * @pic.rdcrdtarb_nph_crdt_depleted[vplane4] 0x0138 4 Number of times
 * the non-posted header credits for upstream PCI reads were depleted
 * @pic.rdcrdtarb_nph_crdt_depleted[vplane5] 0x0140 4 Number of times
 * the non-posted header credits for upstream PCI reads were depleted
 * @pic.rdcrdtarb_nph_crdt_depleted[vplane6] 0x0148 4 Number of times
 * the non-posted header credits for upstream PCI reads were depleted
 * @pic.rdcrdtarb_nph_crdt_depleted[vplane7] 0x0150 4 Number of times
 * the non-posted header credits for upstream PCI reads were depleted
 * @pic.rdcrdtarb_nph_crdt_depleted[vplane8] 0x0158 4 Number of times
 * the non-posted header credits for upstream PCI reads were depleted
 * @pic.rdcrdtarb_nph_crdt_depleted[vplane9] 0x0160 4 Number of times
 * the non-posted header credits for upstream PCI reads were depleted
 * @pic.rdcrdtarb_nph_crdt_depleted[vplane10] 0x0168 4 Number of times
 * the non-posted header credits for upstream PCI reads were depleted
 * @pic.rdcrdtarb_nph_crdt_depleted[vplane11] 0x0170 4 Number of times
 * the non-posted header credits for upstream PCI reads were depleted
 * @pic.rdcrdtarb_nph_crdt_depleted[vplane12] 0x0178 4 Number of times
 * the non-posted header credits for upstream PCI reads were depleted
 * @pic.rdcrdtarb_nph_crdt_depleted[vplane13] 0x0180 4 Number of times
 * the non-posted header credits for upstream PCI reads were depleted
 * @pic.rdcrdtarb_nph_crdt_depleted[vplane14] 0x0188 4 Number of times
 * the non-posted header credits for upstream PCI reads were depleted
 * @pic.rdcrdtarb_nph_crdt_depleted[vplane15] 0x0190 4 Number of times
 * the non-posted header credits for upstream PCI reads were depleted
 * @pic.rdcrdtarb_nph_crdt_depleted[vplane16] 0x0198 4 Number of times
 * the non-posted header credits for upstream PCI reads were depleted
 * @pic.ini_rd_vpin_drop 0x01a0 4 Number of DMA reads initiated by
 * the adapter that were discarded because the VPATH instance number does
 * not match
 * @pic.ini_wr_vpin_drop 0x01a4 4 Number of DMA writes initiated
 * by the adapter that were discarded because the VPATH instance number
 * does not match
 * @pic.genstats_count0 0x01a8 4 Configurable statistic #1. Refer
 * to the GENSTATS0_CFG for information on configuring this statistic
 * @pic.genstats_count1 0x01ac 4 Configurable statistic #2. Refer
 * to the GENSTATS1_CFG for information on configuring this statistic
 * @pic.genstats_count2 0x01b0 4 Configurable statistic #3. Refer
 * to the GENSTATS2_CFG for information on configuring this statistic
 * @pic.genstats_count3 0x01b4 4 Configurable statistic #4. Refer
 * to the GENSTATS3_CFG for information on configuring this statistic
 * @pic.genstats_count4 0x01b8 4 Configurable statistic #5. Refer
 * to the GENSTATS4_CFG for information on configuring this statistic
 * @pic.genstats_count5 0x01c0 4 Configurable statistic #6. Refer
 * to the GENSTATS5_CFG for information on configuring this statistic
 * @pci.rstdrop_cpl 0x01c8 4
 * @pci.rstdrop_msg 0x01cc 4
 * @pci.rstdrop_client1 0x01d0 4
 * @pci.rstdrop_client0 0x01d4 4
 * @pci.rstdrop_client2 0x01d8 4
 * @pci.depl_cplh[vplane0] 0x01e2 2 Number of times completion
 * header credits were depleted
 * @pci.depl_nph[vplane0] 0x01e4 2 Number of times non posted
 * header credits were depleted
 * @pci.depl_ph[vplane0] 0x01e6 2 Number of times the posted
 * header credits were depleted
 * @pci.depl_cplh[vplane1] 0x01ea 2
 * @pci.depl_nph[vplane1] 0x01ec 2
 * @pci.depl_ph[vplane1] 0x01ee 2
 * @pci.depl_cplh[vplane2] 0x01f2 2
 * @pci.depl_nph[vplane2] 0x01f4 2
 * @pci.depl_ph[vplane2] 0x01f6 2
 * @pci.depl_cplh[vplane3] 0x01fa 2
 * @pci.depl_nph[vplane3] 0x01fc 2
 * @pci.depl_ph[vplane3] 0x01fe 2
 * @pci.depl_cplh[vplane4] 0x0202 2
 * @pci.depl_nph[vplane4] 0x0204 2
 * @pci.depl_ph[vplane4] 0x0206 2
 * @pci.depl_cplh[vplane5] 0x020a 2
 * @pci.depl_nph[vplane5] 0x020c 2
 * @pci.depl_ph[vplane5] 0x020e 2
 * @pci.depl_cplh[vplane6] 0x0212 2
 * @pci.depl_nph[vplane6] 0x0214 2
 * @pci.depl_ph[vplane6] 0x0216 2
 * @pci.depl_cplh[vplane7] 0x021a 2
 * @pci.depl_nph[vplane7] 0x021c 2
 * @pci.depl_ph[vplane7] 0x021e 2
 * @pci.depl_cplh[vplane8] 0x0222 2
 * @pci.depl_nph[vplane8] 0x0224 2
 * @pci.depl_ph[vplane8] 0x0226 2
 * @pci.depl_cplh[vplane9] 0x022a 2
 * @pci.depl_nph[vplane9] 0x022c 2
 * @pci.depl_ph[vplane9] 0x022e 2
 * @pci.depl_cplh[vplane10] 0x0232 2
 * @pci.depl_nph[vplane10] 0x0234 2
 * @pci.depl_ph[vplane10] 0x0236 2
 * @pci.depl_cplh[vplane11] 0x023a 2
 * @pci.depl_nph[vplane11] 0x023c 2
 * @pci.depl_ph[vplane11] 0x023e 2
 * @pci.depl_cplh[vplane12] 0x0242 2
 * @pci.depl_nph[vplane12] 0x0244 2
 * @pci.depl_ph[vplane12] 0x0246 2
 * @pci.depl_cplh[vplane13] 0x024a 2
 * @pci.depl_nph[vplane13] 0x024c 2
 * @pci.depl_ph[vplane13] 0x024e 2
 * @pci.depl_cplh[vplane14] 0x0252 2
 * @pci.depl_nph[vplane14] 0x0254 2
 * @pci.depl_ph[vplane14] 0x0256 2
 * @pci.depl_cplh[vplane15] 0x025a 2
 * @pci.depl_nph[vplane15] 0x025c 2
 * @pci.depl_ph[vplane15] 0x025e 2
 * @pci.depl_cplh[vplane16] 0x0262 2
 * @pci.depl_nph[vplane16] 0x0264 2
 * @pci.depl_ph[vplane16] 0x0266 2
 * @pci.depl_cpld[vplane0] 0x026a 2 Number of times completion data
 * credits were depleted
 * @pci.depl_npd[vplane0] 0x026c 2 Number of times non posted data
 * credits were depleted
 * @pci.depl_pd[vplane0] 0x026e 2 Number of times the posted data
 * credits were depleted
 * @pci.depl_cpld[vplane1] 0x0272 2
 * @pci.depl_npd[vplane1] 0x0274 2
 * @pci.depl_pd[vplane1] 0x0276 2
 * @pci.depl_cpld[vplane2] 0x027a 2
 * @pci.depl_npd[vplane2] 0x027c 2
 * @pci.depl_pd[vplane2] 0x027e 2
 * @pci.depl_cpld[vplane3] 0x0282 2
 * @pci.depl_npd[vplane3] 0x0284 2
 * @pci.depl_pd[vplane3] 0x0286 2
 * @pci.depl_cpld[vplane4] 0x028a 2
 * @pci.depl_npd[vplane4] 0x028c 2
 * @pci.depl_pd[vplane4] 0x028e 2
 * @pci.depl_cpld[vplane5] 0x0292 2
 * @pci.depl_npd[vplane5] 0x0294 2
 * @pci.depl_pd[vplane5] 0x0296 2
 * @pci.depl_cpld[vplane6] 0x029a 2
 * @pci.depl_npd[vplane6] 0x029c 2
 * @pci.depl_pd[vplane6] 0x029e 2
 * @pci.depl_cpld[vplane7] 0x02a2 2
 * @pci.depl_npd[vplane7] 0x02a4 2
 * @pci.depl_pd[vplane7] 0x02a6 2
 * @pci.depl_cpld[vplane8] 0x02aa 2
 * @pci.depl_npd[vplane8] 0x02ac 2
 * @pci.depl_pd[vplane8] 0x02ae 2
 * @pci.depl_cpld[vplane9] 0x02b2 2
 * @pci.depl_npd[vplane9] 0x02b4 2
 * @pci.depl_pd[vplane9] 0x02b6 2
1424 * @pci.depl_cpld[vplane10] 0x02ba 2
1425 * @pci.depl_npd[vplane10] 0x02bc 2
1426 * @pci.depl_pd[vplane10] 0x02be 2
1427 * @pci.depl_cpld[vplane11] 0x02c2 2
1428 * @pci.depl_npd[vplane11] 0x02c4 2
1429 * @pci.depl_pd[vplane11] 0x02c6 2
1430 * @pci.depl_cpld[vplane12] 0x02ca 2
1431 * @pci.depl_npd[vplane12] 0x02cc 2
1432 * @pci.depl_pd[vplane12] 0x02ce 2
1433 * @pci.depl_cpld[vplane13] 0x02d2 2
1434 * @pci.depl_npd[vplane13] 0x02d4 2
1435 * @pci.depl_pd[vplane13] 0x02d6 2
1436 * @pci.depl_cpld[vplane14] 0x02da 2
1437 * @pci.depl_npd[vplane14] 0x02dc 2
1438 * @pci.depl_pd[vplane14] 0x02de 2
1439 * @pci.depl_cpld[vplane15] 0x02e2 2
1440 * @pci.depl_npd[vplane15] 0x02e4 2
1441 * @pci.depl_pd[vplane15] 0x02e6 2
1442 * @pci.depl_cpld[vplane16] 0x02ea 2
1443 * @pci.depl_npd[vplane16] 0x02ec 2
1444 * @pci.depl_pd[vplane16] 0x02ee 2
 1445 * @xgmac_port[3]: Per-port XMAC statistics. See struct vxge_hw_xmac_port_stats{}.
 1446 * @xgmac_aggr[2]: Per-aggregator XMAC statistics. See struct vxge_hw_xmac_aggr_stats{}.
1447 * @xgmac.global_prog_event_gnum0 0x0ae0 8 Programmable statistic.
1448 * Increments when internal logic detects a certain event. See register
1449 * XMAC_STATS_GLOBAL_CFG.EVENT_GNUM0_CFG for more information.
1450 * @xgmac.global_prog_event_gnum1 0x0ae8 8 Programmable statistic.
1451 * Increments when internal logic detects a certain event. See register
1452 * XMAC_STATS_GLOBAL_CFG.EVENT_GNUM1_CFG for more information.
1453 * @xgmac.orp_lro_events 0x0af8 8
1454 * @xgmac.orp_bs_events 0x0b00 8
1455 * @xgmac.orp_iwarp_events 0x0b08 8
1456 * @xgmac.tx_permitted_frms 0x0b14 4
1457 * @xgmac.port2_tx_any_frms 0x0b1d 1
1458 * @xgmac.port1_tx_any_frms 0x0b1e 1
1459 * @xgmac.port0_tx_any_frms 0x0b1f 1
1460 * @xgmac.port2_rx_any_frms 0x0b25 1
1461 * @xgmac.port1_rx_any_frms 0x0b26 1
1462 * @xgmac.port0_rx_any_frms 0x0b27 1
1463 *
1464 * Titan mrpcim hardware statistics.
1465 */
1466struct vxge_hw_device_stats_mrpcim_info {
1467/*0x0000*/ u32 pic_ini_rd_drop;
1468/*0x0004*/ u32 pic_ini_wr_drop;
1469/*0x0008*/ struct {
1470 /*0x0000*/ u32 pic_wrcrdtarb_ph_crdt_depleted;
1471 /*0x0004*/ u32 unused1;
1472 } pic_wrcrdtarb_ph_crdt_depleted_vplane[17];
1473/*0x0090*/ struct {
1474 /*0x0000*/ u32 pic_wrcrdtarb_pd_crdt_depleted;
1475 /*0x0004*/ u32 unused2;
1476 } pic_wrcrdtarb_pd_crdt_depleted_vplane[17];
1477/*0x0118*/ struct {
1478 /*0x0000*/ u32 pic_rdcrdtarb_nph_crdt_depleted;
1479 /*0x0004*/ u32 unused3;
1480 } pic_rdcrdtarb_nph_crdt_depleted_vplane[17];
1481/*0x01a0*/ u32 pic_ini_rd_vpin_drop;
1482/*0x01a4*/ u32 pic_ini_wr_vpin_drop;
1483/*0x01a8*/ u32 pic_genstats_count0;
1484/*0x01ac*/ u32 pic_genstats_count1;
1485/*0x01b0*/ u32 pic_genstats_count2;
1486/*0x01b4*/ u32 pic_genstats_count3;
1487/*0x01b8*/ u32 pic_genstats_count4;
1488/*0x01bc*/ u32 unused4;
1489/*0x01c0*/ u32 pic_genstats_count5;
1490/*0x01c4*/ u32 unused5;
1491/*0x01c8*/ u32 pci_rstdrop_cpl;
1492/*0x01cc*/ u32 pci_rstdrop_msg;
1493/*0x01d0*/ u32 pci_rstdrop_client1;
1494/*0x01d4*/ u32 pci_rstdrop_client0;
1495/*0x01d8*/ u32 pci_rstdrop_client2;
1496/*0x01dc*/ u32 unused6;
1497/*0x01e0*/ struct {
1498 /*0x0000*/ u16 unused7;
1499 /*0x0002*/ u16 pci_depl_cplh;
1500 /*0x0004*/ u16 pci_depl_nph;
1501 /*0x0006*/ u16 pci_depl_ph;
1502 } pci_depl_h_vplane[17];
1503/*0x0268*/ struct {
1504 /*0x0000*/ u16 unused8;
1505 /*0x0002*/ u16 pci_depl_cpld;
1506 /*0x0004*/ u16 pci_depl_npd;
1507 /*0x0006*/ u16 pci_depl_pd;
1508 } pci_depl_d_vplane[17];
1509/*0x02f0*/ struct vxge_hw_xmac_port_stats xgmac_port[3];
1510/*0x0a10*/ struct vxge_hw_xmac_aggr_stats xgmac_aggr[2];
1511/*0x0ae0*/ u64 xgmac_global_prog_event_gnum0;
1512/*0x0ae8*/ u64 xgmac_global_prog_event_gnum1;
1513/*0x0af0*/ u64 unused7;
1514/*0x0af8*/ u64 unused8;
1515/*0x0b00*/ u64 unused9;
1516/*0x0b08*/ u64 unused10;
1517/*0x0b10*/ u32 unused11;
1518/*0x0b14*/ u32 xgmac_tx_permitted_frms;
1519/*0x0b18*/ u32 unused12;
1520/*0x0b1c*/ u8 unused13;
1521/*0x0b1d*/ u8 xgmac_port2_tx_any_frms;
1522/*0x0b1e*/ u8 xgmac_port1_tx_any_frms;
1523/*0x0b1f*/ u8 xgmac_port0_tx_any_frms;
1524/*0x0b20*/ u32 unused14;
1525/*0x0b24*/ u8 unused15;
1526/*0x0b25*/ u8 xgmac_port2_rx_any_frms;
1527/*0x0b26*/ u8 xgmac_port1_rx_any_frms;
1528/*0x0b27*/ u8 xgmac_port0_rx_any_frms;
1529} __packed;
1530
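
/*
 * The offset comments in the structure above are load-bearing: the
 * adapter DMAs this block verbatim into host memory, so every member
 * must land on its documented byte offset. A minimal compile-time
 * layout check (a sketch, not part of the original driver; it assumes
 * the offsets documented above) could look like:
 */
#include <linux/bug.h>
#include <linux/stddef.h>

static inline void vxge_hw_mrpcim_stats_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct vxge_hw_device_stats_mrpcim_info,
			      pic_ini_rd_vpin_drop) != 0x01a0);
	BUILD_BUG_ON(offsetof(struct vxge_hw_device_stats_mrpcim_info,
			      xgmac_port) != 0x02f0);
	BUILD_BUG_ON(offsetof(struct vxge_hw_device_stats_mrpcim_info,
			      xgmac_port0_rx_any_frms) != 0x0b27);
}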
1531/**
1532 * struct vxge_hw_device_stats_hw_info - Titan hardware statistics.
1533 * @vpath_info: VPath statistics
 1534 * @vpath_info_sav: Saved copy of the VPath statistics
1535 *
1536 * Titan hardware statistics.
1537 */
1538struct vxge_hw_device_stats_hw_info {
1539 struct vxge_hw_vpath_stats_hw_info
1540 *vpath_info[VXGE_HW_MAX_VIRTUAL_PATHS];
1541 struct vxge_hw_vpath_stats_hw_info
1542 vpath_info_sav[VXGE_HW_MAX_VIRTUAL_PATHS];
1543};
1544
1545/**
1546 * struct vxge_hw_vpath_stats_sw_common_info - HW common
1547 * statistics for queues.
1548 * @full_cnt: Number of times the queue was full
 1549 * @usage_cnt: Current usage count.
 1550 * @usage_max: Maximum usage count observed.
1551 * @reserve_free_swaps_cnt: Reserve/free swap counter. Internal usage.
1552 * @total_compl_cnt: Total descriptor completion count.
1553 *
 1554 * HW queue counters.
 1555 * See also: struct vxge_hw_vpath_stats_sw_fifo_info{},
 1556 * struct vxge_hw_vpath_stats_sw_ring_info{}.
1557 */
1558struct vxge_hw_vpath_stats_sw_common_info {
1559 u32 full_cnt;
1560 u32 usage_cnt;
1561 u32 usage_max;
1562 u32 reserve_free_swaps_cnt;
1563 u32 total_compl_cnt;
1564};
1565
1566/**
1567 * struct vxge_hw_vpath_stats_sw_fifo_info - HW fifo statistics
1568 * @common_stats: Common counters for all queues
1569 * @total_posts: Total number of postings on the queue.
1570 * @total_buffers: Total number of buffers posted.
1571 * @txd_t_code_err_cnt: Array of transmit transfer codes. The position
1572 * (index) in this array reflects the transfer code type, for instance
1573 * 0xA - "loss of link".
1574 * Value txd_t_code_err_cnt[i] reflects the
1575 * number of times the corresponding transfer code was encountered.
1576 *
1577 * HW fifo counters
1578 * See also: struct vxge_hw_vpath_stats_sw_common_info{},
 1579 * struct vxge_hw_vpath_stats_sw_ring_info{}.
1580 */
1581struct vxge_hw_vpath_stats_sw_fifo_info {
1582 struct vxge_hw_vpath_stats_sw_common_info common_stats;
1583 u32 total_posts;
1584 u32 total_buffers;
1585 u32 txd_t_code_err_cnt[VXGE_HW_DTR_MAX_T_CODE];
1586};
1587
1588/**
1589 * struct vxge_hw_vpath_stats_sw_ring_info - HW ring statistics
1590 * @common_stats: Common counters for all queues
1591 * @rxd_t_code_err_cnt: Array of receive transfer codes. The position
1592 * (index) in this array reflects the transfer code type,
1593 * for instance
 1594 * 0x7 - "invalid receive buffer size", or 0x8 - an ECC error.
1595 * Value rxd_t_code_err_cnt[i] reflects the
1596 * number of times the corresponding transfer code was encountered.
1597 *
1598 * HW ring counters
1599 * See also: struct vxge_hw_vpath_stats_sw_common_info{},
 1600 * struct vxge_hw_vpath_stats_sw_fifo_info{}.
1601 */
1602struct vxge_hw_vpath_stats_sw_ring_info {
1603 struct vxge_hw_vpath_stats_sw_common_info common_stats;
1604 u32 rxd_t_code_err_cnt[VXGE_HW_DTR_MAX_T_CODE];
1605
1606};
1607
1608/**
1609 * struct vxge_hw_vpath_stats_sw_err - HW vpath error statistics
 1610 * @unknown_alarms: Number of unknown alarm events
 1611 * @network_sustained_fault: Number of sustained network fault events
 1612 * @network_sustained_ok: Number of network fault recovery events
 1613 * @kdfcctl_fifo0_overwrite: Number of KDFC control FIFO 0 overwrite errors
 1614 * @kdfcctl_fifo0_poison: Number of KDFC control FIFO 0 poisoned transactions
 1615 * @kdfcctl_fifo0_dma_error: Number of KDFC control FIFO 0 DMA errors
 1616 * @dblgen_fifo0_overflow: Number of doorbell generator FIFO 0 overflows
 1617 * @statsb_pif_chain_error: Number of statistics block PIF chain errors
 1618 * @statsb_drop_timeout: Number of statistics block drop timeouts
 1619 * @target_illegal_access: Number of illegal target access events
 1620 * @ini_serr_det: Number of system errors (SERR) detected by the initiator
 1621 * @prc_ring_bumps: Number of PRC ring bump events
 1622 * @prc_rxdcm_sc_err: Number of PRC RxDCM scatter code errors
 1623 * @prc_rxdcm_sc_abort: Number of PRC RxDCM scatter code aborts
 1624 * @prc_quanta_size_err: Number of PRC quanta size errors
1625 *
1626 * HW vpath error statistics
1627 */
1628struct vxge_hw_vpath_stats_sw_err {
1629 u32 unknown_alarms;
1630 u32 network_sustained_fault;
1631 u32 network_sustained_ok;
1632 u32 kdfcctl_fifo0_overwrite;
1633 u32 kdfcctl_fifo0_poison;
1634 u32 kdfcctl_fifo0_dma_error;
1635 u32 dblgen_fifo0_overflow;
1636 u32 statsb_pif_chain_error;
1637 u32 statsb_drop_timeout;
1638 u32 target_illegal_access;
1639 u32 ini_serr_det;
1640 u32 prc_ring_bumps;
1641 u32 prc_rxdcm_sc_err;
1642 u32 prc_rxdcm_sc_abort;
1643 u32 prc_quanta_size_err;
1644};
1645
1646/**
1647 * struct vxge_hw_vpath_stats_sw_info - HW vpath sw statistics
1648 * @soft_reset_cnt: Number of times soft reset is done on this vpath.
1649 * @error_stats: error counters for the vpath
1650 * @ring_stats: counters for ring belonging to the vpath
1651 * @fifo_stats: counters for fifo belonging to the vpath
1652 *
1653 * HW vpath sw statistics
 1654 * See also: struct vxge_hw_device_info{}.
1655 */
1656struct vxge_hw_vpath_stats_sw_info {
1657 u32 soft_reset_cnt;
1658 struct vxge_hw_vpath_stats_sw_err error_stats;
1659 struct vxge_hw_vpath_stats_sw_ring_info ring_stats;
1660 struct vxge_hw_vpath_stats_sw_fifo_info fifo_stats;
1661};
1662
1663/**
1664 * struct vxge_hw_device_stats_sw_info - HW own per-device statistics.
1665 *
1666 * @not_traffic_intr_cnt: Number of times the host was interrupted
1667 * without new completions.
1668 * "Non-traffic interrupt counter".
1669 * @traffic_intr_cnt: Number of traffic interrupts for the device.
 1670 * @total_intr_cnt: Total number of interrupts for the device.
1671 * @total_intr_cnt == @traffic_intr_cnt +
1672 * @not_traffic_intr_cnt
1673 * @soft_reset_cnt: Number of times soft reset is done on this device.
 1674 * @vpath_info: Please see struct vxge_hw_vpath_stats_sw_info{}.
1675 * HW per-device statistics.
1676 */
1677struct vxge_hw_device_stats_sw_info {
1678 u32 not_traffic_intr_cnt;
1679 u32 traffic_intr_cnt;
1680 u32 total_intr_cnt;
1681 u32 soft_reset_cnt;
1682 struct vxge_hw_vpath_stats_sw_info
1683 vpath_info[VXGE_HW_MAX_VIRTUAL_PATHS];
1684};
1685
1686/**
1687 * struct vxge_hw_device_stats_sw_err - HW device error statistics.
1688 * @vpath_alarms: Number of vpath alarms
1689 *
1690 * HW Device error stats
1691 */
1692struct vxge_hw_device_stats_sw_err {
1693 u32 vpath_alarms;
1694};
1695
1696/**
1697 * struct vxge_hw_device_stats - Contains HW per-device statistics,
 1698 * including the hardware-maintained counters.
1699 * @devh: HW device handle.
 1700 * @dma_addr: DMA address of the %hw_info. Given to the device to fill in the stats.
1701 * @hw_info_dmah: DMA handle used to map hw statistics onto the device memory
1702 * space.
1703 * @hw_info_dma_acch: One more DMA handle used subsequently to free the
1704 * DMA object. Note that this and the previous handle have
 1705 * physical meaning for Solaris; on Windows and Linux the
 1706 * corresponding value is simply a pointer to the PCI device.
1707 *
1708 * @hw_dev_info_stats: Titan statistics maintained by the hardware.
1709 * @sw_dev_info_stats: HW's "soft" device informational statistics, e.g. number
1710 * of completions per interrupt.
1711 * @sw_dev_err_stats: HW's "soft" device error statistics.
1712 *
1713 * Structure-container of HW per-device statistics. Note that per-channel
1714 * statistics are kept in separate structures under HW's fifo and ring
1715 * channels.
1716 */
1717struct vxge_hw_device_stats {
1718 /* handles */
1719 struct __vxge_hw_device *devh;
1720
1721 /* HW device hardware statistics */
1722 struct vxge_hw_device_stats_hw_info hw_dev_info_stats;
1723
1724 /* HW device "soft" stats */
1725 struct vxge_hw_device_stats_sw_err sw_dev_err_stats;
1726 struct vxge_hw_device_stats_sw_info sw_dev_info_stats;
1727
1728};
1729
1730enum vxge_hw_status vxge_hw_device_hw_stats_enable(
1731 struct __vxge_hw_device *devh);
1732
1733enum vxge_hw_status vxge_hw_device_stats_get(
1734 struct __vxge_hw_device *devh,
1735 struct vxge_hw_device_stats_hw_info *hw_stats);
1736
1737enum vxge_hw_status vxge_hw_driver_stats_get(
1738 struct __vxge_hw_device *devh,
1739 struct vxge_hw_device_stats_sw_info *sw_stats);
1740
1741enum vxge_hw_status vxge_hw_mrpcim_stats_enable(struct __vxge_hw_device *devh);
1742
1743enum vxge_hw_status vxge_hw_mrpcim_stats_disable(struct __vxge_hw_device *devh);
1744
1745enum vxge_hw_status
1746vxge_hw_mrpcim_stats_access(
1747 struct __vxge_hw_device *devh,
1748 u32 operation,
1749 u32 location,
1750 u32 offset,
1751 u64 *stat);
1752
1753enum vxge_hw_status
1754vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *devh, u32 port,
1755 struct vxge_hw_xmac_aggr_stats *aggr_stats);
1756
1757enum vxge_hw_status
1758vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *devh, u32 port,
1759 struct vxge_hw_xmac_port_stats *port_stats);
1760
1761enum vxge_hw_status
1762vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *devh,
1763 struct vxge_hw_xmac_stats *xmac_stats);
1764
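
/*
 * A minimal usage sketch for the statistics getters declared above
 * (assumptions: VXGE_HW_OK is the library's success status, and the
 * structure is heap-allocated because it embeds per-vpath arrays that
 * are too large for the stack):
 */
#include <linux/kernel.h>
#include <linux/slab.h>

static void vxge_show_driver_stats(struct __vxge_hw_device *devh)
{
	struct vxge_hw_device_stats_sw_info *sw;

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return;

	if (vxge_hw_driver_stats_get(devh, sw) == VXGE_HW_OK)
		pr_info("vxge: %u total interrupts, %u soft resets\n",
			sw->total_intr_cnt, sw->soft_reset_cnt);

	kfree(sw);
}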
1765/**
 1766 * enum vxge_hw_mgmt_reg_type - Register types.
1767 *
1768 * @vxge_hw_mgmt_reg_type_legacy: Legacy registers
1769 * @vxge_hw_mgmt_reg_type_toc: TOC Registers
1770 * @vxge_hw_mgmt_reg_type_common: Common Registers
1771 * @vxge_hw_mgmt_reg_type_mrpcim: mrpcim registers
1772 * @vxge_hw_mgmt_reg_type_srpcim: srpcim registers
1773 * @vxge_hw_mgmt_reg_type_vpmgmt: vpath management registers
1774 * @vxge_hw_mgmt_reg_type_vpath: vpath registers
1775 *
 1776 * Register type enumeration
1777 */
1778enum vxge_hw_mgmt_reg_type {
1779 vxge_hw_mgmt_reg_type_legacy = 0,
1780 vxge_hw_mgmt_reg_type_toc = 1,
1781 vxge_hw_mgmt_reg_type_common = 2,
1782 vxge_hw_mgmt_reg_type_mrpcim = 3,
1783 vxge_hw_mgmt_reg_type_srpcim = 4,
1784 vxge_hw_mgmt_reg_type_vpmgmt = 5,
1785 vxge_hw_mgmt_reg_type_vpath = 6
1786};
1787
1788enum vxge_hw_status
1789vxge_hw_mgmt_reg_read(struct __vxge_hw_device *devh,
1790 enum vxge_hw_mgmt_reg_type type,
1791 u32 index,
1792 u32 offset,
1793 u64 *value);
1794
1795enum vxge_hw_status
1796vxge_hw_mgmt_reg_write(struct __vxge_hw_device *devh,
1797 enum vxge_hw_mgmt_reg_type type,
1798 u32 index,
1799 u32 offset,
1800 u64 value);
1801
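
/*
 * Example use of the management register window (a sketch; index 0 and
 * the legacy register space are arbitrary illustrative choices, and
 * VXGE_HW_OK is assumed to be the success status):
 */
static u64 vxge_peek_legacy_reg(struct __vxge_hw_device *devh, u32 offset)
{
	u64 val = 0;

	if (vxge_hw_mgmt_reg_read(devh, vxge_hw_mgmt_reg_type_legacy,
				  0, offset, &val) != VXGE_HW_OK)
		return 0;

	return val;
}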
1802/**
 1803 * enum vxge_hw_rxd_state - Descriptor (RXD) state.
1804 * @VXGE_HW_RXD_STATE_NONE: Invalid state.
1805 * @VXGE_HW_RXD_STATE_AVAIL: Descriptor is available for reservation.
1806 * @VXGE_HW_RXD_STATE_POSTED: Descriptor is posted for processing by the
1807 * device.
1808 * @VXGE_HW_RXD_STATE_FREED: Descriptor is free and can be reused for
1809 * filling-in and posting later.
1810 *
1811 * Titan/HW descriptor states.
1812 *
1813 */
1814enum vxge_hw_rxd_state {
1815 VXGE_HW_RXD_STATE_NONE = 0,
1816 VXGE_HW_RXD_STATE_AVAIL = 1,
1817 VXGE_HW_RXD_STATE_POSTED = 2,
1818 VXGE_HW_RXD_STATE_FREED = 3
1819};
1820
1821/**
1822 * struct vxge_hw_ring_rxd_info - Extended information associated with a
1823 * completed ring descriptor.
1824 * @syn_flag: SYN flag
1825 * @is_icmp: Is ICMP
1826 * @fast_path_eligible: Fast Path Eligible flag
 1827 * @l3_cksum_valid: Set if the L3 checksum is valid
 1828 * @l3_cksum: Result of IP checksum check (by Titan hardware).
 1829 * A value of VXGE_HW_L3_CKSUM_OK in this field means that
 1830 * the checksum is correct; otherwise the datagram is
 1831 * corrupted.
 1832 * @l4_cksum_valid: Set if the L4 checksum is valid
 1833 * @l4_cksum: Result of TCP/UDP checksum check (by Titan hardware).
 1834 * A value of VXGE_HW_L4_CKSUM_OK in this field means that
 1835 * the checksum is correct; otherwise the packet is
 1836 * corrupted.
1837 * @frame: Zero or more of enum vxge_hw_frame_type flags.
1838 * See enum vxge_hw_frame_type{}.
 1839 * @proto: Zero or more of enum vxge_hw_frame_proto flags. Reporting bits for
 1840 * various higher-layer protocols, including (but not restricted to)
 1841 * TCP and UDP. See enum vxge_hw_frame_proto{}.
 1842 * @is_vlan: Set if the VLAN tag is valid
1843 * @vlan: VLAN tag extracted from the received frame.
1844 * @rth_bucket: RTH bucket
 1845 * @rth_it_hit: Set if the RTH hash value calculated by the Titan hardware
 1846 * has a matching entry in the Indirection table.
 1847 * @rth_spdm_hit: Set if the RTH hash value calculated by the Titan hardware
 1848 * has a matching entry in the Socket Pair Direct Match table.
 1849 * @rth_hash_type: Code of the RTH hash function used to calculate the hash.
 1850 * @rth_value: Receive Traffic Hashing (RTH) hash value. Produced by the Titan
 1851 * hardware if RTH is enabled.
1852 */
1853struct vxge_hw_ring_rxd_info {
1854 u32 syn_flag;
1855 u32 is_icmp;
1856 u32 fast_path_eligible;
1857 u32 l3_cksum_valid;
1858 u32 l3_cksum;
1859 u32 l4_cksum_valid;
1860 u32 l4_cksum;
1861 u32 frame;
1862 u32 proto;
1863 u32 is_vlan;
1864 u32 vlan;
1865 u32 rth_bucket;
1866 u32 rth_it_hit;
1867 u32 rth_spdm_hit;
1868 u32 rth_hash_type;
1869 u32 rth_value;
1870};
1871
1872/**
 1873 * enum vxge_hw_ring_hash_type - RTH hash types
1874 * @VXGE_HW_RING_HASH_TYPE_NONE: No Hash
1875 * @VXGE_HW_RING_HASH_TYPE_TCP_IPV4: TCP IPv4
1876 * @VXGE_HW_RING_HASH_TYPE_UDP_IPV4: UDP IPv4
1877 * @VXGE_HW_RING_HASH_TYPE_IPV4: IPv4
1878 * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6: TCP IPv6
1879 * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6: UDP IPv6
1880 * @VXGE_HW_RING_HASH_TYPE_IPV6: IPv6
1881 * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX: TCP IPv6 extension
1882 * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX: UDP IPv6 extension
1883 * @VXGE_HW_RING_HASH_TYPE_IPV6_EX: IPv6 extension
1884 *
1885 * RTH hash types
1886 */
1887enum vxge_hw_ring_hash_type {
1888 VXGE_HW_RING_HASH_TYPE_NONE = 0x0,
1889 VXGE_HW_RING_HASH_TYPE_TCP_IPV4 = 0x1,
1890 VXGE_HW_RING_HASH_TYPE_UDP_IPV4 = 0x2,
1891 VXGE_HW_RING_HASH_TYPE_IPV4 = 0x3,
1892 VXGE_HW_RING_HASH_TYPE_TCP_IPV6 = 0x4,
1893 VXGE_HW_RING_HASH_TYPE_UDP_IPV6 = 0x5,
1894 VXGE_HW_RING_HASH_TYPE_IPV6 = 0x6,
1895 VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX = 0x7,
1896 VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX = 0x8,
1897 VXGE_HW_RING_HASH_TYPE_IPV6_EX = 0x9
1898};
1899
1900enum vxge_hw_status vxge_hw_ring_rxd_reserve(
1901 struct __vxge_hw_ring *ring_handle,
1902 void **rxdh);
1903
1904void
1905vxge_hw_ring_rxd_pre_post(
1906 struct __vxge_hw_ring *ring_handle,
1907 void *rxdh);
1908
1909void
1910vxge_hw_ring_rxd_post_post(
1911 struct __vxge_hw_ring *ring_handle,
1912 void *rxdh);
1913
1914enum vxge_hw_status
1915vxge_hw_ring_replenish(struct __vxge_hw_ring *ring_handle, u16 min_flag);
1916
1917void
1918vxge_hw_ring_rxd_post_post_wmb(
1919 struct __vxge_hw_ring *ring_handle,
1920 void *rxdh);
1921
1922void vxge_hw_ring_rxd_post(
1923 struct __vxge_hw_ring *ring_handle,
1924 void *rxdh);
1925
1926enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
1927 struct __vxge_hw_ring *ring_handle,
1928 void **rxdh,
1929 u8 *t_code);
1930
1931enum vxge_hw_status vxge_hw_ring_handle_tcode(
1932 struct __vxge_hw_ring *ring_handle,
1933 void *rxdh,
1934 u8 t_code);
1935
1936void vxge_hw_ring_rxd_free(
1937 struct __vxge_hw_ring *ring_handle,
1938 void *rxdh);
1939
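
/*
 * The ring entry points above compose into the usual completion loop.
 * A sketch (assumptions: VXGE_HW_OK is the success status and a t_code
 * of zero indicates a clean transfer; error t_codes are routed through
 * vxge_hw_ring_handle_tcode()):
 */
static void vxge_rx_poll_sketch(struct __vxge_hw_ring *ringh)
{
	void *rxdh;
	u8 t_code;

	while (vxge_hw_ring_rxd_next_completed(ringh, &rxdh,
					       &t_code) == VXGE_HW_OK) {
		if (t_code)
			vxge_hw_ring_handle_tcode(ringh, rxdh, t_code);

		/* ... pass the completed buffer up the stack here ... */

		vxge_hw_ring_rxd_free(ringh, rxdh);
	}

	/* Refill the ring with fresh descriptors. */
	vxge_hw_ring_replenish(ringh, 0);
}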
1940/**
 1941 * enum vxge_hw_frame_proto - Higher-layer Ethernet protocols.
1942 * @VXGE_HW_FRAME_PROTO_VLAN_TAGGED: VLAN.
1943 * @VXGE_HW_FRAME_PROTO_IPV4: IPv4.
1944 * @VXGE_HW_FRAME_PROTO_IPV6: IPv6.
1945 * @VXGE_HW_FRAME_PROTO_IP_FRAG: IP fragmented.
1946 * @VXGE_HW_FRAME_PROTO_TCP: TCP.
1947 * @VXGE_HW_FRAME_PROTO_UDP: UDP.
1948 * @VXGE_HW_FRAME_PROTO_TCP_OR_UDP: TCP or UDP.
1949 *
 1950 * Higher-layer Ethernet protocols and options.
1951 */
1952enum vxge_hw_frame_proto {
1953 VXGE_HW_FRAME_PROTO_VLAN_TAGGED = 0x80,
1954 VXGE_HW_FRAME_PROTO_IPV4 = 0x10,
1955 VXGE_HW_FRAME_PROTO_IPV6 = 0x08,
1956 VXGE_HW_FRAME_PROTO_IP_FRAG = 0x04,
1957 VXGE_HW_FRAME_PROTO_TCP = 0x02,
1958 VXGE_HW_FRAME_PROTO_UDP = 0x01,
1959 VXGE_HW_FRAME_PROTO_TCP_OR_UDP = (VXGE_HW_FRAME_PROTO_TCP | \
1960 VXGE_HW_FRAME_PROTO_UDP)
1961};
1962
1963/**
 1964 * enum vxge_hw_fifo_gather_code - Gather codes used in fifo TxD
1965 * @VXGE_HW_FIFO_GATHER_CODE_FIRST: First TxDL
1966 * @VXGE_HW_FIFO_GATHER_CODE_MIDDLE: Middle TxDL
1967 * @VXGE_HW_FIFO_GATHER_CODE_LAST: Last TxDL
1968 * @VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST: First and Last TxDL.
1969 *
1970 * These gather codes are used to indicate the position of a TxD in a TxD list
1971 */
1972enum vxge_hw_fifo_gather_code {
1973 VXGE_HW_FIFO_GATHER_CODE_FIRST = 0x2,
1974 VXGE_HW_FIFO_GATHER_CODE_MIDDLE = 0x0,
1975 VXGE_HW_FIFO_GATHER_CODE_LAST = 0x1,
1976 VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST = 0x3
1977};
1978
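
/*
 * Mapping a fragment's position within a TxD list to its gather code,
 * per the semantics documented above (a sketch of the convention, not
 * driver code):
 */
static enum vxge_hw_fifo_gather_code
vxge_gather_code_for(u32 frag_idx, u32 nfrags)
{
	if (nfrags == 1)
		return VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST;
	if (frag_idx == 0)
		return VXGE_HW_FIFO_GATHER_CODE_FIRST;
	if (frag_idx == nfrags - 1)
		return VXGE_HW_FIFO_GATHER_CODE_LAST;
	return VXGE_HW_FIFO_GATHER_CODE_MIDDLE;
}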
1979/**
 1980 * enum vxge_hw_fifo_tcode - Transfer codes (tcodes) used in the fifo
1981 * @VXGE_HW_FIFO_T_CODE_OK: Transfer OK
1982 * @VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT: PCI read transaction (either TxD or
1983 * frame data) returned with corrupt data.
 1984 * @VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL: PCI read transaction was returned
1985 * with no data.
1986 * @VXGE_HW_FIFO_T_CODE_INVALID_MSS: The host attempted to send either a
1987 * frame or LSO MSS that was too long (>9800B).
1988 * @VXGE_HW_FIFO_T_CODE_LSO_ERROR: Error detected during TCP/UDP Large Send
1989 * Offload operation, due to improper header template,
1990 * unsupported protocol, etc.
1991 * @VXGE_HW_FIFO_T_CODE_UNUSED: Unused
1992 * @VXGE_HW_FIFO_T_CODE_MULTI_ERROR: Set to 1 by the adapter if multiple
1993 * data buffer transfer errors are encountered (see below).
1994 * Otherwise it is set to 0.
1995 *
 1996 * These tcodes are returned by various APIs to report TxD status
1997 */
1998enum vxge_hw_fifo_tcode {
1999 VXGE_HW_FIFO_T_CODE_OK = 0x0,
2000 VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT = 0x1,
2001 VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL = 0x2,
2002 VXGE_HW_FIFO_T_CODE_INVALID_MSS = 0x3,
2003 VXGE_HW_FIFO_T_CODE_LSO_ERROR = 0x4,
2004 VXGE_HW_FIFO_T_CODE_UNUSED = 0x7,
2005 VXGE_HW_FIFO_T_CODE_MULTI_ERROR = 0x8
2006};
2007
2008enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
2009 struct __vxge_hw_fifo *fifoh,
2010 void **txdlh,
2011 void **txdl_priv);
2012
2013void vxge_hw_fifo_txdl_buffer_set(
2014 struct __vxge_hw_fifo *fifo_handle,
2015 void *txdlh,
2016 u32 frag_idx,
2017 dma_addr_t dma_pointer,
2018 u32 size);
2019
2020void vxge_hw_fifo_txdl_post(
2021 struct __vxge_hw_fifo *fifo_handle,
2022 void *txdlh);
2023
2024u32 vxge_hw_fifo_free_txdl_count_get(
2025 struct __vxge_hw_fifo *fifo_handle);
2026
2027enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
2028 struct __vxge_hw_fifo *fifoh,
2029 void **txdlh,
2030 enum vxge_hw_fifo_tcode *t_code);
2031
2032enum vxge_hw_status vxge_hw_fifo_handle_tcode(
2033 struct __vxge_hw_fifo *fifoh,
2034 void *txdlh,
2035 enum vxge_hw_fifo_tcode t_code);
2036
2037void vxge_hw_fifo_txdl_free(
2038 struct __vxge_hw_fifo *fifoh,
2039 void *txdlh);
2040
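
/*
 * Transmit-side sketch built only from the declarations above; 'dma'
 * and 'len' stand for an already-mapped single fragment, and
 * VXGE_HW_OK / VXGE_HW_FIFO_T_CODE_OK are the assumed success values:
 */
static enum vxge_hw_status
vxge_tx_one_frag_sketch(struct __vxge_hw_fifo *fifoh,
			dma_addr_t dma, u32 len)
{
	enum vxge_hw_status status;
	void *txdlh, *txdl_priv;

	status = vxge_hw_fifo_txdl_reserve(fifoh, &txdlh, &txdl_priv);
	if (status != VXGE_HW_OK)
		return status;

	vxge_hw_fifo_txdl_buffer_set(fifoh, txdlh, 0, dma, len);
	vxge_hw_fifo_txdl_post(fifoh, txdlh);

	return VXGE_HW_OK;
}

/* Completion side: reclaim descriptors the hardware has finished with. */
static void vxge_tx_reap_sketch(struct __vxge_hw_fifo *fifoh)
{
	void *txdlh;
	enum vxge_hw_fifo_tcode t_code;

	while (vxge_hw_fifo_txdl_next_completed(fifoh, &txdlh,
						&t_code) == VXGE_HW_OK) {
		if (t_code != VXGE_HW_FIFO_T_CODE_OK)
			vxge_hw_fifo_handle_tcode(fifoh, txdlh, t_code);

		vxge_hw_fifo_txdl_free(fifoh, txdlh);
	}
}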
2041/*
2042 * Device
2043 */
2044
2045#define VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET (VXGE_HW_BLOCK_SIZE-8)
2046#define VXGE_HW_RING_MEMBLOCK_IDX_OFFSET (VXGE_HW_BLOCK_SIZE-16)
2047#define VXGE_HW_RING_MIN_BUFF_ALLOCATION 64
2048
2049/*
2050 * struct __vxge_hw_ring_rxd_priv - Receive descriptor HW-private data.
2051 * @dma_addr: DMA (mapped) address of _this_ descriptor.
2052 * @dma_handle: DMA handle used to map the descriptor onto device.
2053 * @dma_offset: Descriptor's offset in the memory block. HW allocates
2054 * descriptors in memory blocks of %VXGE_HW_BLOCK_SIZE
2055 * bytes. Each memblock is contiguous DMA-able memory. Each
2056 * memblock contains 1 or more 4KB RxD blocks visible to the
2057 * Titan hardware.
2058 * @dma_object: DMA address and handle of the memory block that contains
2059 * the descriptor. This member is used only in the "checked"
2060 * version of the HW (to enforce certain assertions);
2061 * otherwise it gets compiled out.
 2062 * @allocated: 1 if the descriptor is reserved, 0 otherwise. Internal usage.
2063 *
 2064 * Per-receive descriptor HW-private data. HW uses the space to keep DMA
 2065 * information associated with the descriptor. Note that the driver can ask HW
2066 * to allocate additional per-descriptor space for its own (driver-specific)
2067 * purposes.
2068 */
2069struct __vxge_hw_ring_rxd_priv {
2070 dma_addr_t dma_addr;
2071 struct pci_dev *dma_handle;
2072 ptrdiff_t dma_offset;
2073#ifdef VXGE_DEBUG_ASSERT
2074 struct vxge_hw_mempool_dma *dma_object;
2075#endif
2076};
2077
2078/* ========================= RING PRIVATE API ============================= */
2079u64
2080__vxge_hw_ring_first_block_address_get(
2081 struct __vxge_hw_ring *ringh);
2082
2083enum vxge_hw_status
2084__vxge_hw_ring_create(
2085 struct __vxge_hw_vpath_handle *vpath_handle,
2086 struct vxge_hw_ring_attr *attr);
2087
2088enum vxge_hw_status
2089__vxge_hw_ring_abort(
2090 struct __vxge_hw_ring *ringh);
2091
2092enum vxge_hw_status
2093__vxge_hw_ring_reset(
2094 struct __vxge_hw_ring *ringh);
2095
2096enum vxge_hw_status
2097__vxge_hw_ring_delete(
2098 struct __vxge_hw_vpath_handle *vpath_handle);
2099
2100/* ========================= FIFO PRIVATE API ============================= */
2101
2102struct vxge_hw_fifo_attr;
2103
2104enum vxge_hw_status
2105__vxge_hw_fifo_create(
2106 struct __vxge_hw_vpath_handle *vpath_handle,
2107 struct vxge_hw_fifo_attr *attr);
2108
2109enum vxge_hw_status
2110__vxge_hw_fifo_abort(
2111 struct __vxge_hw_fifo *fifoh);
2112
2113enum vxge_hw_status
2114__vxge_hw_fifo_reset(
2115 struct __vxge_hw_fifo *ringh);
2116
2117enum vxge_hw_status
2118__vxge_hw_fifo_delete(
2119 struct __vxge_hw_vpath_handle *vpath_handle);
2120
2121struct vxge_hw_mempool_cbs {
2122 void (*item_func_alloc)(
2123 struct vxge_hw_mempool *mempoolh,
2124 u32 memblock_index,
2125 struct vxge_hw_mempool_dma *dma_object,
2126 u32 index,
2127 u32 is_last);
2128};
2129
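
/*
 * A callback wired into vxge_hw_mempool_cbs must match the
 * item_func_alloc signature above. A do-nothing sketch (purely
 * illustrative; real callbacks initialize per-item DMA state):
 */
static void vxge_item_alloc_sketch(struct vxge_hw_mempool *mempoolh,
				   u32 memblock_index,
				   struct vxge_hw_mempool_dma *dma_object,
				   u32 index, u32 is_last)
{
	if (is_last)
		pr_debug("vxge: item %u closes memblock %u\n",
			 index, memblock_index);
}

static struct vxge_hw_mempool_cbs vxge_sketch_mempool_cbs = {
	.item_func_alloc = vxge_item_alloc_sketch,
};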
2130void
2131__vxge_hw_mempool_destroy(
2132 struct vxge_hw_mempool *mempool);
2133
2134#define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
2135 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
2136
2137enum vxge_hw_status
2138__vxge_hw_vpath_rts_table_get(
2139 struct __vxge_hw_vpath_handle *vpath_handle,
2140 u32 action,
2141 u32 rts_table,
2142 u32 offset,
2143 u64 *data1,
2144 u64 *data2);
2145
2146enum vxge_hw_status
2147__vxge_hw_vpath_rts_table_set(
2148 struct __vxge_hw_vpath_handle *vpath_handle,
2149 u32 action,
2150 u32 rts_table,
2151 u32 offset,
2152 u64 data1,
2153 u64 data2);
2154
2155enum vxge_hw_status
2156__vxge_hw_vpath_reset(
2157 struct __vxge_hw_device *devh,
2158 u32 vp_id);
2159
2160enum vxge_hw_status
2161__vxge_hw_vpath_sw_reset(
2162 struct __vxge_hw_device *devh,
2163 u32 vp_id);
2164
2165enum vxge_hw_status
2166__vxge_hw_vpath_enable(
2167 struct __vxge_hw_device *devh,
2168 u32 vp_id);
2169
2170void
2171__vxge_hw_vpath_prc_configure(
2172 struct __vxge_hw_device *devh,
2173 u32 vp_id);
2174
2175enum vxge_hw_status
2176__vxge_hw_vpath_kdfc_configure(
2177 struct __vxge_hw_device *devh,
2178 u32 vp_id);
2179
2180enum vxge_hw_status
2181__vxge_hw_vpath_mac_configure(
2182 struct __vxge_hw_device *devh,
2183 u32 vp_id);
2184
2185enum vxge_hw_status
2186__vxge_hw_vpath_tim_configure(
2187 struct __vxge_hw_device *devh,
2188 u32 vp_id);
2189
2190enum vxge_hw_status
2191__vxge_hw_vpath_initialize(
2192 struct __vxge_hw_device *devh,
2193 u32 vp_id);
2194
2195enum vxge_hw_status
2196__vxge_hw_vp_initialize(
2197 struct __vxge_hw_device *devh,
2198 u32 vp_id,
2199 struct vxge_hw_vp_config *config);
2200
2201void
2202__vxge_hw_vp_terminate(
2203 struct __vxge_hw_device *devh,
2204 u32 vp_id);
2205
2206enum vxge_hw_status
2207__vxge_hw_vpath_alarm_process(
2208 struct __vxge_hw_virtualpath *vpath,
2209 u32 skip_alarms);
2210
2211void vxge_hw_device_intr_enable(
2212 struct __vxge_hw_device *devh);
2213
2214u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *devh, u32 intr_mode);
2215
2216void vxge_hw_device_intr_disable(
2217 struct __vxge_hw_device *devh);
2218
2219void vxge_hw_device_mask_all(
2220 struct __vxge_hw_device *devh);
2221
2222void vxge_hw_device_unmask_all(
2223 struct __vxge_hw_device *devh);
2224
2225enum vxge_hw_status vxge_hw_device_begin_irq(
2226 struct __vxge_hw_device *devh,
2227 u32 skip_alarms,
2228 u64 *reason);
2229
2230void vxge_hw_device_clear_tx_rx(
2231 struct __vxge_hw_device *devh);
2232
2233/*
2234 * Virtual Paths
2235 */
2236
2237u32 vxge_hw_vpath_id(
2238 struct __vxge_hw_vpath_handle *vpath_handle);
2239
2240enum vxge_hw_vpath_mac_addr_add_mode {
2241 VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE = 0,
2242 VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE = 1,
2243 VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE = 2
2244};
2245
2246enum vxge_hw_status
2247vxge_hw_vpath_mac_addr_add(
2248 struct __vxge_hw_vpath_handle *vpath_handle,
2249 u8 (macaddr)[ETH_ALEN],
2250 u8 (macaddr_mask)[ETH_ALEN],
2251 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode);
2252
2253enum vxge_hw_status
2254vxge_hw_vpath_mac_addr_get(
2255 struct __vxge_hw_vpath_handle *vpath_handle,
2256 u8 (macaddr)[ETH_ALEN],
2257 u8 (macaddr_mask)[ETH_ALEN]);
2258
2259enum vxge_hw_status
2260vxge_hw_vpath_mac_addr_get_next(
2261 struct __vxge_hw_vpath_handle *vpath_handle,
2262 u8 (macaddr)[ETH_ALEN],
2263 u8 (macaddr_mask)[ETH_ALEN]);
2264
2265enum vxge_hw_status
2266vxge_hw_vpath_mac_addr_delete(
2267 struct __vxge_hw_vpath_handle *vpath_handle,
2268 u8 (macaddr)[ETH_ALEN],
2269 u8 (macaddr_mask)[ETH_ALEN]);
2270
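
/*
 * Walking a vpath's MAC address table with the get/get_next pair (a
 * sketch; it assumes the calls return VXGE_HW_OK until the table is
 * exhausted):
 */
static void vxge_walk_mac_table(struct __vxge_hw_vpath_handle *vp)
{
	u8 macaddr[ETH_ALEN], macmask[ETH_ALEN];
	enum vxge_hw_status status;

	status = vxge_hw_vpath_mac_addr_get(vp, macaddr, macmask);
	while (status == VXGE_HW_OK) {
		pr_info("vxge: vpath %u has mac %pM\n",
			vxge_hw_vpath_id(vp), macaddr);
		status = vxge_hw_vpath_mac_addr_get_next(vp, macaddr,
							 macmask);
	}
}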
2271enum vxge_hw_status
2272vxge_hw_vpath_vid_add(
2273 struct __vxge_hw_vpath_handle *vpath_handle,
2274 u64 vid);
2275
2276enum vxge_hw_status
2277vxge_hw_vpath_vid_get(
2278 struct __vxge_hw_vpath_handle *vpath_handle,
2279 u64 *vid);
2280
2281enum vxge_hw_status
2282vxge_hw_vpath_vid_get_next(
2283 struct __vxge_hw_vpath_handle *vpath_handle,
2284 u64 *vid);
2285
2286enum vxge_hw_status
2287vxge_hw_vpath_vid_delete(
2288 struct __vxge_hw_vpath_handle *vpath_handle,
2289 u64 vid);
2290
2291enum vxge_hw_status
2292vxge_hw_vpath_etype_add(
2293 struct __vxge_hw_vpath_handle *vpath_handle,
2294 u64 etype);
2295
2296enum vxge_hw_status
2297vxge_hw_vpath_etype_get(
2298 struct __vxge_hw_vpath_handle *vpath_handle,
2299 u64 *etype);
2300
2301enum vxge_hw_status
2302vxge_hw_vpath_etype_get_next(
2303 struct __vxge_hw_vpath_handle *vpath_handle,
2304 u64 *etype);
2305
2306enum vxge_hw_status
2307vxge_hw_vpath_etype_delete(
2308 struct __vxge_hw_vpath_handle *vpath_handle,
2309 u64 etype);
2310
2311enum vxge_hw_status vxge_hw_vpath_promisc_enable(
2312 struct __vxge_hw_vpath_handle *vpath_handle);
2313
2314enum vxge_hw_status vxge_hw_vpath_promisc_disable(
2315 struct __vxge_hw_vpath_handle *vpath_handle);
2316
2317enum vxge_hw_status vxge_hw_vpath_bcast_enable(
2318 struct __vxge_hw_vpath_handle *vpath_handle);
2319
2320enum vxge_hw_status vxge_hw_vpath_mcast_enable(
2321 struct __vxge_hw_vpath_handle *vpath_handle);
2322
2323enum vxge_hw_status vxge_hw_vpath_mcast_disable(
2324 struct __vxge_hw_vpath_handle *vpath_handle);
2325
2326enum vxge_hw_status vxge_hw_vpath_poll_rx(
2327 struct __vxge_hw_ring *ringh);
2328
2329enum vxge_hw_status vxge_hw_vpath_poll_tx(
2330 struct __vxge_hw_fifo *fifoh,
2331 void **skb_ptr);
2332
2333enum vxge_hw_status vxge_hw_vpath_alarm_process(
2334 struct __vxge_hw_vpath_handle *vpath_handle,
2335 u32 skip_alarms);
2336
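
/*
 * How the interrupt-path entry points fit together in an INTA handler
 * (a sketch; skip_alarms == 0 and the VXGE_HW_OK convention are
 * assumptions, and a real handler dispatches NAPI polling per vpath
 * rather than polling inline):
 */
#include <linux/interrupt.h>

static irqreturn_t vxge_isr_sketch(int irq, void *dev_id)
{
	struct __vxge_hw_device *devh = dev_id;
	u64 reason;

	if (vxge_hw_device_begin_irq(devh, 0, &reason) != VXGE_HW_OK)
		return IRQ_NONE;

	/*
	 * 'reason' flags the vpaths with work pending; this is where
	 * vxge_hw_vpath_poll_rx()/vxge_hw_vpath_poll_tx() would run.
	 */

	vxge_hw_device_clear_tx_rx(devh);

	return IRQ_HANDLED;
}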
2337enum vxge_hw_status
2338vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vpath_handle,
2339 int *tim_msix_id, int alarm_msix_id);
2340
2341void
2342vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
2343 int msix_id);
2344
2345void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
2346
2347void
2348vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vpath_handle,
2349 int msix_id);
2350
2351void
2352vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vpath_handle,
2353 int msix_id);
2354
2355void
2356vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vpath_handle);
2357
2358enum vxge_hw_status vxge_hw_vpath_intr_enable(
2359 struct __vxge_hw_vpath_handle *vpath_handle);
2360
2361enum vxge_hw_status vxge_hw_vpath_intr_disable(
2362 struct __vxge_hw_vpath_handle *vpath_handle);
2363
2364void vxge_hw_vpath_inta_mask_tx_rx(
2365 struct __vxge_hw_vpath_handle *vpath_handle);
2366
2367void vxge_hw_vpath_inta_unmask_tx_rx(
2368 struct __vxge_hw_vpath_handle *vpath_handle);
2369
2370void
2371vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channelh, int msix_id);
2372
2373void
2374vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
2375
2376enum vxge_hw_status
2377vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh);
2378
2379void
2380vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh);
2381
2382void
2383vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
2384 void **dtrh);
2385
2386void
2387vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel);
2388
2389void
2390vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh);
2391
2392int
2393vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
2394
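
/*
 * The channel DTR primitives pair up as alloc/post on the submit side
 * and try_complete/complete/free on the completion side. A lifecycle
 * sketch (assumptions: try_complete leaves NULL in *dtrh when nothing
 * has completed, and VXGE_HW_OK signals success):
 */
static void vxge_channel_dtr_cycle_sketch(struct __vxge_hw_channel *channel)
{
	void *dtrh;

	if (vxge_hw_channel_dtr_alloc(channel, &dtrh) != VXGE_HW_OK)
		return;

	vxge_hw_channel_dtr_post(channel, dtrh);

	vxge_hw_channel_dtr_try_complete(channel, &dtrh);
	if (dtrh) {
		vxge_hw_channel_dtr_complete(channel);
		vxge_hw_channel_dtr_free(channel, dtrh);
	}
}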
2395/* ========================== PRIVATE API ================================= */
2396
2397enum vxge_hw_status
2398__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev);
2399
2400enum vxge_hw_status
2401__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev);
2402
2403enum vxge_hw_status
2404__vxge_hw_device_handle_error(
2405 struct __vxge_hw_device *hldev,
2406 u32 vp_id,
2407 enum vxge_hw_event type);
2408
2409#endif
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
new file mode 100644
index 000000000000..7da02c545ed5
--- /dev/null
+++ b/drivers/net/vxge/vxge-version.h
@@ -0,0 +1,23 @@
1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-version.h: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2009 Neterion Inc.
13 ******************************************************************************/
 14#ifndef VXGE_VERSION_H
 15#define VXGE_VERSION_H
 16
17
18#define VXGE_VERSION_MAJOR "2"
19#define VXGE_VERSION_MINOR "0"
20#define VXGE_VERSION_FIX "1"
21#define VXGE_VERSION_BUILD "17129"
22#define VXGE_VERSION_FOR "k"
23#endif
diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
index 032db815b0f9..f3492110b1ad 100644
--- a/drivers/parport/parport_serial.c
+++ b/drivers/parport/parport_serial.c
@@ -30,6 +30,7 @@ enum parport_pc_pci_cards {
30 titan_210l, 30 titan_210l,
31 netmos_9xx5_combo, 31 netmos_9xx5_combo,
32 netmos_9855, 32 netmos_9855,
33 netmos_9855_2p,
33 avlab_1s1p, 34 avlab_1s1p,
34 avlab_1s2p, 35 avlab_1s2p,
35 avlab_2s1p, 36 avlab_2s1p,
@@ -62,7 +63,7 @@ struct parport_pc_pci {
62 struct parport_pc_pci *card, int failed); 63 struct parport_pc_pci *card, int failed);
63}; 64};
64 65
65static int __devinit netmos_parallel_init(struct pci_dev *dev, struct parport_pc_pci *card, int autoirq, int autodma) 66static int __devinit netmos_parallel_init(struct pci_dev *dev, struct parport_pc_pci *par, int autoirq, int autodma)
66{ 67{
67 /* the rule described below doesn't hold for this device */ 68 /* the rule described below doesn't hold for this device */
68 if (dev->device == PCI_DEVICE_ID_NETMOS_9835 && 69 if (dev->device == PCI_DEVICE_ID_NETMOS_9835 &&
@@ -74,9 +75,17 @@ static int __devinit netmos_parallel_init(struct pci_dev *dev, struct parport_pc
74 * and serial ports. The form is 0x00PS, where <P> is the number of 75 * and serial ports. The form is 0x00PS, where <P> is the number of
75 * parallel ports and <S> is the number of serial ports. 76 * parallel ports and <S> is the number of serial ports.
76 */ 77 */
77 card->numports = (dev->subsystem_device & 0xf0) >> 4; 78 par->numports = (dev->subsystem_device & 0xf0) >> 4;
78 if (card->numports > ARRAY_SIZE(card->addr)) 79 if (par->numports > ARRAY_SIZE(par->addr))
79 card->numports = ARRAY_SIZE(card->addr); 80 par->numports = ARRAY_SIZE(par->addr);
81 /*
82 * This function is currently only called for cards with up to
83 * one parallel port.
 84 * Parallel port BAR is either before or after the serial port BARs;
85 * hence, lo should be either 0 or equal to the number of serial ports.
86 */
87 if (par->addr[0].lo != 0)
88 par->addr[0].lo = dev->subsystem_device & 0xf;
80 return 0; 89 return 0;
81} 90}
82 91
@@ -84,7 +93,8 @@ static struct parport_pc_pci cards[] __devinitdata = {
84 /* titan_110l */ { 1, { { 3, -1 }, } }, 93 /* titan_110l */ { 1, { { 3, -1 }, } },
85 /* titan_210l */ { 1, { { 3, -1 }, } }, 94 /* titan_210l */ { 1, { { 3, -1 }, } },
86 /* netmos_9xx5_combo */ { 1, { { 2, -1 }, }, netmos_parallel_init }, 95 /* netmos_9xx5_combo */ { 1, { { 2, -1 }, }, netmos_parallel_init },
87 /* netmos_9855 */ { 1, { { 2, -1 }, }, netmos_parallel_init }, 96 /* netmos_9855 */ { 1, { { 0, -1 }, }, netmos_parallel_init },
97 /* netmos_9855_2p */ { 2, { { 0, -1 }, { 2, -1 }, } },
88 /* avlab_1s1p */ { 1, { { 1, 2}, } }, 98 /* avlab_1s1p */ { 1, { { 1, 2}, } },
89 /* avlab_1s2p */ { 2, { { 1, 2}, { 3, 4 },} }, 99 /* avlab_1s2p */ { 2, { { 1, 2}, { 3, 4 },} },
90 /* avlab_2s1p */ { 1, { { 2, 3}, } }, 100 /* avlab_2s1p */ { 1, { { 2, 3}, } },
@@ -110,6 +120,10 @@ static struct pci_device_id parport_serial_pci_tbl[] = {
110 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9845, 120 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9845,
111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9xx5_combo }, 121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9xx5_combo },
112 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9855, 122 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9855,
123 0x1000, 0x0020, 0, 0, netmos_9855_2p },
124 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9855,
125 0x1000, 0x0022, 0, 0, netmos_9855_2p },
126 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9855,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9855 }, 127 PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9855 },
114 /* PCI_VENDOR_ID_AVLAB/Intek21 has another bunch of cards ...*/ 128 /* PCI_VENDOR_ID_AVLAB/Intek21 has another bunch of cards ...*/
115 { PCI_VENDOR_ID_AFAVLAB, 0x2110, 129 { PCI_VENDOR_ID_AFAVLAB, 0x2110,
@@ -192,6 +206,12 @@ static struct pciserial_board pci_parport_serial_boards[] __devinitdata = {
192 .uart_offset = 8, 206 .uart_offset = 8,
193 }, 207 },
194 [netmos_9855] = { 208 [netmos_9855] = {
209 .flags = FL_BASE2 | FL_BASE_BARS,
210 .num_ports = 1,
211 .base_baud = 115200,
212 .uart_offset = 8,
213 },
214 [netmos_9855_2p] = {
195 .flags = FL_BASE4 | FL_BASE_BARS, 215 .flags = FL_BASE4 | FL_BASE_BARS,
196 .num_ports = 1, 216 .num_ports = 1,
197 .base_baud = 115200, 217 .base_baud = 115200,
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index 996f64838079..cfe86853feb2 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -94,7 +94,6 @@ struct pnp_dev_node_info node_info;
94 94
95#ifdef CONFIG_HOTPLUG 95#ifdef CONFIG_HOTPLUG
96 96
97static int unloading = 0;
98static struct completion unload_sem; 97static struct completion unload_sem;
99 98
100/* 99/*
@@ -158,7 +157,7 @@ static int pnp_dock_thread(void *unused)
158 int docked = -1, d = 0; 157 int docked = -1, d = 0;
159 158
160 set_freezable(); 159 set_freezable();
161 while (!unloading) { 160 while (1) {
162 int status; 161 int status;
163 162
164 /* 163 /*
@@ -575,8 +574,6 @@ fs_initcall(pnpbios_init);
575 574
576static int __init pnpbios_thread_init(void) 575static int __init pnpbios_thread_init(void)
577{ 576{
578 struct task_struct *task;
579
580#if defined(CONFIG_PPC) 577#if defined(CONFIG_PPC)
581 if (check_legacy_ioport(PNPBIOS_BASE)) 578 if (check_legacy_ioport(PNPBIOS_BASE))
582 return 0; 579 return 0;
@@ -584,10 +581,13 @@ static int __init pnpbios_thread_init(void)
584 if (pnpbios_disabled) 581 if (pnpbios_disabled)
585 return 0; 582 return 0;
586#ifdef CONFIG_HOTPLUG 583#ifdef CONFIG_HOTPLUG
587 init_completion(&unload_sem); 584 {
588 task = kthread_run(pnp_dock_thread, NULL, "kpnpbiosd"); 585 struct task_struct *task;
589 if (!IS_ERR(task)) 586 init_completion(&unload_sem);
590 unloading = 0; 587 task = kthread_run(pnp_dock_thread, NULL, "kpnpbiosd");
588 if (IS_ERR(task))
589 return PTR_ERR(task);
590 }
591#endif 591#endif
592 return 0; 592 return 0;
593} 593}
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 09d5cd33a3f6..56002f7d26bd 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -225,11 +225,11 @@ config RTC_DRV_PCF8583
225 will be called rtc-pcf8583. 225 will be called rtc-pcf8583.
226 226
227config RTC_DRV_M41T80 227config RTC_DRV_M41T80
228 tristate "ST M41T65/M41T80/81/82/83/84/85/87" 228 tristate "ST M41T62/65/M41T80/81/82/83/84/85/87"
229 help 229 help
230 If you say Y here you will get support for the ST M41T60 230 If you say Y here you will get support for the ST M41T60
231 and M41T80 RTC chips series. Currently, the following chips are 231 and M41T80 RTC chips series. Currently, the following chips are
232 supported: M41T65, M41T80, M41T81, M41T82, M41T83, M41ST84, 232 supported: M41T62, M41T65, M41T80, M41T81, M41T82, M41T83, M41ST84,
233 M41ST85, and M41ST87. 233 M41ST85, and M41ST87.
234 234
235 This driver can also be built as a module. If so, the module 235 This driver can also be built as a module. If so, the module
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 893f7dece239..60fe266f0f49 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -64,10 +64,12 @@
64#define M41T80_FEATURE_BL (1 << 1) /* Battery low indicator */ 64#define M41T80_FEATURE_BL (1 << 1) /* Battery low indicator */
65#define M41T80_FEATURE_SQ (1 << 2) /* Squarewave feature */ 65#define M41T80_FEATURE_SQ (1 << 2) /* Squarewave feature */
66#define M41T80_FEATURE_WD (1 << 3) /* Extra watchdog resolution */ 66#define M41T80_FEATURE_WD (1 << 3) /* Extra watchdog resolution */
67#define M41T80_FEATURE_SQ_ALT (1 << 4) /* RSx bits are in reg 4 */
67 68
68#define DRV_VERSION "0.05" 69#define DRV_VERSION "0.05"
69 70
70static const struct i2c_device_id m41t80_id[] = { 71static const struct i2c_device_id m41t80_id[] = {
72 { "m41t62", M41T80_FEATURE_SQ | M41T80_FEATURE_SQ_ALT },
71 { "m41t65", M41T80_FEATURE_HT | M41T80_FEATURE_WD }, 73 { "m41t65", M41T80_FEATURE_HT | M41T80_FEATURE_WD },
72 { "m41t80", M41T80_FEATURE_SQ }, 74 { "m41t80", M41T80_FEATURE_SQ },
73 { "m41t81", M41T80_FEATURE_HT | M41T80_FEATURE_SQ}, 75 { "m41t81", M41T80_FEATURE_HT | M41T80_FEATURE_SQ},
@@ -393,12 +395,15 @@ static ssize_t m41t80_sysfs_show_sqwfreq(struct device *dev,
393{ 395{
394 struct i2c_client *client = to_i2c_client(dev); 396 struct i2c_client *client = to_i2c_client(dev);
395 struct m41t80_data *clientdata = i2c_get_clientdata(client); 397 struct m41t80_data *clientdata = i2c_get_clientdata(client);
396 int val; 398 int val, reg_sqw;
397 399
398 if (!(clientdata->features & M41T80_FEATURE_SQ)) 400 if (!(clientdata->features & M41T80_FEATURE_SQ))
399 return -EINVAL; 401 return -EINVAL;
400 402
401 val = i2c_smbus_read_byte_data(client, M41T80_REG_SQW); 403 reg_sqw = M41T80_REG_SQW;
404 if (clientdata->features & M41T80_FEATURE_SQ_ALT)
405 reg_sqw = M41T80_REG_WDAY;
406 val = i2c_smbus_read_byte_data(client, reg_sqw);
402 if (val < 0) 407 if (val < 0)
403 return -EIO; 408 return -EIO;
404 val = (val >> 4) & 0xf; 409 val = (val >> 4) & 0xf;
@@ -419,7 +424,7 @@ static ssize_t m41t80_sysfs_set_sqwfreq(struct device *dev,
419{ 424{
420 struct i2c_client *client = to_i2c_client(dev); 425 struct i2c_client *client = to_i2c_client(dev);
421 struct m41t80_data *clientdata = i2c_get_clientdata(client); 426 struct m41t80_data *clientdata = i2c_get_clientdata(client);
422 int almon, sqw; 427 int almon, sqw, reg_sqw;
423 int val = simple_strtoul(buf, NULL, 0); 428 int val = simple_strtoul(buf, NULL, 0);
424 429
425 if (!(clientdata->features & M41T80_FEATURE_SQ)) 430 if (!(clientdata->features & M41T80_FEATURE_SQ))
@@ -440,13 +445,16 @@ static ssize_t m41t80_sysfs_set_sqwfreq(struct device *dev,
440 almon = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON); 445 almon = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
441 if (almon < 0) 446 if (almon < 0)
442 return -EIO; 447 return -EIO;
443 sqw = i2c_smbus_read_byte_data(client, M41T80_REG_SQW); 448 reg_sqw = M41T80_REG_SQW;
449 if (clientdata->features & M41T80_FEATURE_SQ_ALT)
450 reg_sqw = M41T80_REG_WDAY;
451 sqw = i2c_smbus_read_byte_data(client, reg_sqw);
444 if (sqw < 0) 452 if (sqw < 0)
445 return -EIO; 453 return -EIO;
446 sqw = (sqw & 0x0f) | (val << 4); 454 sqw = (sqw & 0x0f) | (val << 4);
447 if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, 455 if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON,
448 almon & ~M41T80_ALMON_SQWE) < 0 || 456 almon & ~M41T80_ALMON_SQWE) < 0 ||
449 i2c_smbus_write_byte_data(client, M41T80_REG_SQW, sqw) < 0) 457 i2c_smbus_write_byte_data(client, reg_sqw, sqw) < 0)
450 return -EIO; 458 return -EIO;
451 if (val && i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, 459 if (val && i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON,
452 almon | M41T80_ALMON_SQWE) < 0) 460 almon | M41T80_ALMON_SQWE) < 0)
diff --git a/drivers/rtc/rtc-v3020.c b/drivers/rtc/rtc-v3020.c
index 66955cc9c746..ad164056feb6 100644
--- a/drivers/rtc/rtc-v3020.c
+++ b/drivers/rtc/rtc-v3020.c
@@ -27,17 +27,162 @@
27#include <linux/bcd.h> 27#include <linux/bcd.h>
28#include <linux/rtc-v3020.h> 28#include <linux/rtc-v3020.h>
29#include <linux/delay.h> 29#include <linux/delay.h>
30#include <linux/gpio.h>
30 31
31#include <linux/io.h> 32#include <linux/io.h>
32 33
33#undef DEBUG 34#undef DEBUG
34 35
36struct v3020;
37
38struct v3020_chip_ops {
39 int (*map_io)(struct v3020 *chip, struct platform_device *pdev,
40 struct v3020_platform_data *pdata);
41 void (*unmap_io)(struct v3020 *chip);
42 unsigned char (*read_bit)(struct v3020 *chip);
43 void (*write_bit)(struct v3020 *chip, unsigned char bit);
44};
45
46#define V3020_CS 0
47#define V3020_WR 1
48#define V3020_RD 2
49#define V3020_IO 3
50
51struct v3020_gpio {
52 const char *name;
53 unsigned int gpio;
54};
55
35struct v3020 { 56struct v3020 {
57 /* MMIO access */
36 void __iomem *ioaddress; 58 void __iomem *ioaddress;
37 int leftshift; 59 int leftshift;
60
61 /* GPIO access */
62 struct v3020_gpio *gpio;
63
64 struct v3020_chip_ops *ops;
65
38 struct rtc_device *rtc; 66 struct rtc_device *rtc;
39}; 67};
40 68
69
70static int v3020_mmio_map(struct v3020 *chip, struct platform_device *pdev,
71 struct v3020_platform_data *pdata)
72{
73 if (pdev->num_resources != 1)
74 return -EBUSY;
75
76 if (pdev->resource[0].flags != IORESOURCE_MEM)
77 return -EBUSY;
78
79 chip->leftshift = pdata->leftshift;
80 chip->ioaddress = ioremap(pdev->resource[0].start, 1);
81 if (chip->ioaddress == NULL)
82 return -EBUSY;
83
84 return 0;
85}
86
87static void v3020_mmio_unmap(struct v3020 *chip)
88{
89 iounmap(chip->ioaddress);
90}
91
92static void v3020_mmio_write_bit(struct v3020 *chip, unsigned char bit)
93{
94 writel(bit << chip->leftshift, chip->ioaddress);
95}
96
97static unsigned char v3020_mmio_read_bit(struct v3020 *chip)
98{
99 return readl(chip->ioaddress) & (1 << chip->leftshift);
100}
101
102static struct v3020_chip_ops v3020_mmio_ops = {
103 .map_io = v3020_mmio_map,
104 .unmap_io = v3020_mmio_unmap,
105 .read_bit = v3020_mmio_read_bit,
106 .write_bit = v3020_mmio_write_bit,
107};
108
109static struct v3020_gpio v3020_gpio[] = {
110 { "RTC CS", 0 },
111 { "RTC WR", 0 },
112 { "RTC RD", 0 },
113 { "RTC IO", 0 },
114};
115
116static int v3020_gpio_map(struct v3020 *chip, struct platform_device *pdev,
117 struct v3020_platform_data *pdata)
118{
119 int i, err;
120
121 v3020_gpio[V3020_CS].gpio = pdata->gpio_cs;
122 v3020_gpio[V3020_WR].gpio = pdata->gpio_wr;
123 v3020_gpio[V3020_RD].gpio = pdata->gpio_rd;
124 v3020_gpio[V3020_IO].gpio = pdata->gpio_io;
125
126 for (i = 0; i < ARRAY_SIZE(v3020_gpio); i++) {
127 err = gpio_request(v3020_gpio[i].gpio, v3020_gpio[i].name);
128 if (err)
129 goto err_request;
130
131 gpio_direction_output(v3020_gpio[i].gpio, 1);
132 }
133
134 chip->gpio = v3020_gpio;
135
136 return 0;
137
138err_request:
139 while (--i >= 0)
140 gpio_free(v3020_gpio[i].gpio);
141
142 return err;
143}
144
145static void v3020_gpio_unmap(struct v3020 *chip)
146{
147 int i;
148
149 for (i = 0; i < ARRAY_SIZE(v3020_gpio); i++)
150 gpio_free(v3020_gpio[i].gpio);
151}
152
153static void v3020_gpio_write_bit(struct v3020 *chip, unsigned char bit)
154{
155 gpio_direction_output(chip->gpio[V3020_IO].gpio, bit);
156 gpio_set_value(chip->gpio[V3020_CS].gpio, 0);
157 gpio_set_value(chip->gpio[V3020_WR].gpio, 0);
158 udelay(1);
159 gpio_set_value(chip->gpio[V3020_WR].gpio, 1);
160 gpio_set_value(chip->gpio[V3020_CS].gpio, 1);
161}
162
163static unsigned char v3020_gpio_read_bit(struct v3020 *chip)
164{
165 int bit;
166
167 gpio_direction_input(chip->gpio[V3020_IO].gpio);
168 gpio_set_value(chip->gpio[V3020_CS].gpio, 0);
169 gpio_set_value(chip->gpio[V3020_RD].gpio, 0);
170 udelay(1);
171 bit = !!gpio_get_value(chip->gpio[V3020_IO].gpio);
172 udelay(1);
173 gpio_set_value(chip->gpio[V3020_RD].gpio, 1);
174 gpio_set_value(chip->gpio[V3020_CS].gpio, 1);
175
176 return bit;
177}
178
179static struct v3020_chip_ops v3020_gpio_ops = {
180 .map_io = v3020_gpio_map,
181 .unmap_io = v3020_gpio_unmap,
182 .read_bit = v3020_gpio_read_bit,
183 .write_bit = v3020_gpio_write_bit,
184};
185
41static void v3020_set_reg(struct v3020 *chip, unsigned char address, 186static void v3020_set_reg(struct v3020 *chip, unsigned char address,
42 unsigned char data) 187 unsigned char data)
43{ 188{
@@ -46,7 +191,7 @@ static void v3020_set_reg(struct v3020 *chip, unsigned char address,
46 191
47 tmp = address; 192 tmp = address;
48 for (i = 0; i < 4; i++) { 193 for (i = 0; i < 4; i++) {
49 writel((tmp & 1) << chip->leftshift, chip->ioaddress); 194 chip->ops->write_bit(chip, (tmp & 1));
50 tmp >>= 1; 195 tmp >>= 1;
51 udelay(1); 196 udelay(1);
52 } 197 }
@@ -54,7 +199,7 @@ static void v3020_set_reg(struct v3020 *chip, unsigned char address,
54 /* Commands dont have data */ 199 /* Commands dont have data */
55 if (!V3020_IS_COMMAND(address)) { 200 if (!V3020_IS_COMMAND(address)) {
56 for (i = 0; i < 8; i++) { 201 for (i = 0; i < 8; i++) {
57 writel((data & 1) << chip->leftshift, chip->ioaddress); 202 chip->ops->write_bit(chip, (data & 1));
58 data >>= 1; 203 data >>= 1;
59 udelay(1); 204 udelay(1);
60 } 205 }
@@ -67,14 +212,14 @@ static unsigned char v3020_get_reg(struct v3020 *chip, unsigned char address)
67 int i; 212 int i;
68 213
69 for (i = 0; i < 4; i++) { 214 for (i = 0; i < 4; i++) {
70 writel((address & 1) << chip->leftshift, chip->ioaddress); 215 chip->ops->write_bit(chip, (address & 1));
71 address >>= 1; 216 address >>= 1;
72 udelay(1); 217 udelay(1);
73 } 218 }
74 219
75 for (i = 0; i < 8; i++) { 220 for (i = 0; i < 8; i++) {
76 data >>= 1; 221 data >>= 1;
77 if (readl(chip->ioaddress) & (1 << chip->leftshift)) 222 if (chip->ops->read_bit(chip))
78 data |= 0x80; 223 data |= 0x80;
79 udelay(1); 224 udelay(1);
80 } 225 }
@@ -164,25 +309,23 @@ static int rtc_probe(struct platform_device *pdev)
164 int i; 309 int i;
165 int temp; 310 int temp;
166 311
167 if (pdev->num_resources != 1)
168 return -EBUSY;
169
170 if (pdev->resource[0].flags != IORESOURCE_MEM)
171 return -EBUSY;
172
173 chip = kzalloc(sizeof *chip, GFP_KERNEL); 312 chip = kzalloc(sizeof *chip, GFP_KERNEL);
174 if (!chip) 313 if (!chip)
175 return -ENOMEM; 314 return -ENOMEM;
176 315
177 chip->leftshift = pdata->leftshift; 316 if (pdata->use_gpio)
178 chip->ioaddress = ioremap(pdev->resource[0].start, 1); 317 chip->ops = &v3020_gpio_ops;
179 if (chip->ioaddress == NULL) 318 else
319 chip->ops = &v3020_mmio_ops;
320
321 retval = chip->ops->map_io(chip, pdev, pdata);
322 if (retval)
180 goto err_chip; 323 goto err_chip;
181 324
182 /* Make sure the v3020 expects a communication cycle 325 /* Make sure the v3020 expects a communication cycle
183 * by reading 8 times */ 326 * by reading 8 times */
184 for (i = 0; i < 8; i++) 327 for (i = 0; i < 8; i++)
185 temp = readl(chip->ioaddress); 328 temp = chip->ops->read_bit(chip);
186 329
187 /* Test chip by doing a write/read sequence 330 /* Test chip by doing a write/read sequence
188 * to the chip ram */ 331 * to the chip ram */
@@ -196,10 +339,17 @@ static int rtc_probe(struct platform_device *pdev)
196 * are all disabled */ 339 * are all disabled */
197 v3020_set_reg(chip, V3020_STATUS_0, 0x0); 340 v3020_set_reg(chip, V3020_STATUS_0, 0x0);
198 341
199 dev_info(&pdev->dev, "Chip available at physical address 0x%llx," 342 if (pdata->use_gpio)
200 "data connected to D%d\n", 343 dev_info(&pdev->dev, "Chip available at GPIOs "
201 (unsigned long long)pdev->resource[0].start, 344 "%d, %d, %d, %d\n",
202 chip->leftshift); 345 chip->gpio[V3020_CS].gpio, chip->gpio[V3020_WR].gpio,
346 chip->gpio[V3020_RD].gpio, chip->gpio[V3020_IO].gpio);
347 else
348 dev_info(&pdev->dev, "Chip available at "
349 "physical address 0x%llx,"
350 "data connected to D%d\n",
351 (unsigned long long)pdev->resource[0].start,
352 chip->leftshift);
203 353
204 platform_set_drvdata(pdev, chip); 354 platform_set_drvdata(pdev, chip);
205 355
@@ -214,7 +364,7 @@ static int rtc_probe(struct platform_device *pdev)
214 return 0; 364 return 0;
215 365
216err_io: 366err_io:
217 iounmap(chip->ioaddress); 367 chip->ops->unmap_io(chip);
218err_chip: 368err_chip:
219 kfree(chip); 369 kfree(chip);
220 370
@@ -229,7 +379,7 @@ static int rtc_remove(struct platform_device *dev)
229 if (rtc) 379 if (rtc)
230 rtc_device_unregister(rtc); 380 rtc_device_unregister(rtc);
231 381
232 iounmap(chip->ioaddress); 382 chip->ops->unmap_io(chip);
233 kfree(chip); 383 kfree(chip);
234 384
235 return 0; 385 return 0;
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index aab8123c5966..e8d032b9dfbd 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -94,7 +94,7 @@ static int zfcp_wka_port_get(struct zfcp_wka_port *wka_port)
 
 static void zfcp_wka_port_offline(struct work_struct *work)
 {
-	struct delayed_work *dw = container_of(work, struct delayed_work, work);
+	struct delayed_work *dw = to_delayed_work(work);
 	struct zfcp_wka_port *wka_port =
 		container_of(dw, struct zfcp_wka_port, work);
 
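
The same one-line conversion recurs in every remaining driver in this section: to_delayed_work() is a helper in <linux/workqueue.h> equivalent to the open-coded container_of it replaces:

	/* From <linux/workqueue.h>; this is what the hunks switch to. */
	static inline struct delayed_work *to_delayed_work(struct work_struct *work)
	{
		return container_of(work, struct delayed_work, work);
	}

The helper keeps the struct/member pairing in one place, so delayed-work handlers state their intent without repeating the container_of incantation.
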
diff --git a/drivers/spi/spi_gpio.c b/drivers/spi/spi_gpio.c
index d2866c293dee..26bd03e61855 100644
--- a/drivers/spi/spi_gpio.c
+++ b/drivers/spi/spi_gpio.c
@@ -178,8 +178,10 @@ static void spi_gpio_chipselect(struct spi_device *spi, int is_active)
 	if (is_active)
 		setsck(spi, spi->mode & SPI_CPOL);
 
-	/* SPI is normally active-low */
-	gpio_set_value(cs, (spi->mode & SPI_CS_HIGH) ? is_active : !is_active);
+	if (cs != SPI_GPIO_NO_CHIPSELECT) {
+		/* SPI is normally active-low */
+		gpio_set_value(cs, (spi->mode & SPI_CS_HIGH) ? is_active : !is_active);
+	}
 }
 
 static int spi_gpio_setup(struct spi_device *spi)
@@ -191,15 +193,17 @@ static int spi_gpio_setup(struct spi_device *spi)
 		return -EINVAL;
 
 	if (!spi->controller_state) {
-		status = gpio_request(cs, dev_name(&spi->dev));
-		if (status)
-			return status;
-		status = gpio_direction_output(cs, spi->mode & SPI_CS_HIGH);
+		if (cs != SPI_GPIO_NO_CHIPSELECT) {
+			status = gpio_request(cs, dev_name(&spi->dev));
+			if (status)
+				return status;
+			status = gpio_direction_output(cs, spi->mode & SPI_CS_HIGH);
+		}
 	}
 	if (!status)
 		status = spi_bitbang_setup(spi);
 	if (status) {
-		if (!spi->controller_state)
+		if (!spi->controller_state && cs != SPI_GPIO_NO_CHIPSELECT)
 			gpio_free(cs);
 	}
 	return status;
@@ -209,7 +213,8 @@ static void spi_gpio_cleanup(struct spi_device *spi)
 {
 	unsigned long cs = (unsigned long) spi->controller_data;
 
-	gpio_free(cs);
+	if (cs != SPI_GPIO_NO_CHIPSELECT)
+		gpio_free(cs);
 	spi_bitbang_cleanup(spi);
 }
 
215 220
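
All three spi_gpio hunks guard the chip-select GPIO behind the same sentinel, letting a board whose only slave is permanently selected skip the CS line entirely. A board-code sketch of how a device would opt out (the modalias and bus numbers are hypothetical; as the cleanup hunk shows, spi_gpio reads the CS GPIO number from spi->controller_data):

	static struct spi_board_info board_spi_devices[] __initdata = {
		{
			.modalias	 = "some-spi-chip",	/* hypothetical slave */
			.max_speed_hz	 = 1000000,
			.bus_num	 = 1,
			.chip_select	 = 0,
			/* sentinel: no GPIO is requested or toggled for CS */
			.controller_data = (void *)SPI_GPIO_NO_CHIPSELECT,
		},
	};

Note that skipping the request block leaves status at its initial value, which the following if (!status) relies on to fall through to spi_bitbang_setup().
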
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
index e5752f615e09..80f9cc7137c2 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
@@ -719,7 +719,7 @@ void ieee80211_softmac_scan(struct ieee80211_device *ieee)
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
 void ieee80211_softmac_scan_wq(struct work_struct *work)
 {
-	struct delayed_work *dwork = container_of(work, struct delayed_work, work);
+	struct delayed_work *dwork = to_delayed_work(work);
 	struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, softmac_scan_wq);
 #else
 void ieee80211_softmac_scan_wq(struct ieee80211_device *ieee)
@@ -777,7 +777,7 @@ out:
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
 void ieee80211_softmac_scan_wq(struct work_struct *work)
 {
-	struct delayed_work *dwork = container_of(work, struct delayed_work, work);
+	struct delayed_work *dwork = to_delayed_work(work);
 	struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, softmac_scan_wq);
 #else
 void ieee80211_softmac_scan_wq(struct ieee80211_device *ieee)
@@ -2980,7 +2980,7 @@ void ieee80211_start_monitor_mode(struct ieee80211_device *ieee)
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
 void ieee80211_start_ibss_wq(struct work_struct *work)
 {
-	struct delayed_work *dwork = container_of(work, struct delayed_work, work);
+	struct delayed_work *dwork = to_delayed_work(work);
 	struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, start_ibss_wq);
 #else
 void ieee80211_start_ibss_wq(struct ieee80211_device *ieee)
@@ -3162,7 +3162,7 @@ void ieee80211_disassociate(struct ieee80211_device *ieee)
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
 void ieee80211_associate_retry_wq(struct work_struct *work)
 {
-	struct delayed_work *dwork = container_of(work, struct delayed_work, work);
+	struct delayed_work *dwork = to_delayed_work(work);
 	struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, associate_retry_wq);
 #else
 void ieee80211_associate_retry_wq(struct ieee80211_device *ieee)
diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c
index 66de5cc8ddf1..ff1f23f99f27 100644
--- a/drivers/staging/rtl8187se/r8180_core.c
+++ b/drivers/staging/rtl8187se/r8180_core.c
@@ -5438,7 +5438,7 @@ void rtl8180_hw_wakeup_wq (struct work_struct *work)
 //	struct r8180_priv *priv = container_of(work, struct r8180_priv, watch_dog_wq);
 //	struct ieee80211_device * ieee = (struct ieee80211_device*)
 //	container_of(work, struct ieee80211_device, watch_dog_wq);
-	struct delayed_work *dwork = container_of(work,struct delayed_work,work);
+	struct delayed_work *dwork = to_delayed_work(work);
 	struct ieee80211_device *ieee = container_of(dwork,struct ieee80211_device,hw_wakeup_wq);
 	struct net_device *dev = ieee->dev;
 #else
@@ -5459,7 +5459,7 @@ void rtl8180_hw_sleep_wq (struct work_struct *work)
 //	struct r8180_priv *priv = container_of(work, struct r8180_priv, watch_dog_wq);
 //	struct ieee80211_device * ieee = (struct ieee80211_device*)
 //	container_of(work, struct ieee80211_device, watch_dog_wq);
-	struct delayed_work *dwork = container_of(work,struct delayed_work,work);
+	struct delayed_work *dwork = to_delayed_work(work);
 	struct ieee80211_device *ieee = container_of(dwork,struct ieee80211_device,hw_sleep_wq);
 	struct net_device *dev = ieee->dev;
 #else
@@ -6407,7 +6407,7 @@ priv->txnpring)/8);
 void rtl8180_tx_irq_wq(struct work_struct *work)
 {
 	//struct r8180_priv *priv = container_of(work, struct r8180_priv, reset_wq);
-	struct delayed_work *dwork = container_of(work,struct delayed_work,work);
+	struct delayed_work *dwork = to_delayed_work(work);
 	struct ieee80211_device * ieee = (struct ieee80211_device*)
 		container_of(dwork, struct ieee80211_device, watch_dog_wq);
 	struct net_device *dev = ieee->dev;
@@ -6691,7 +6691,7 @@ lizhaoming--------------------------- RF power on/power off -----------------
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
 void GPIOChangeRFWorkItemCallBack(struct work_struct *work)
 {
-	//struct delayed_work *dwork = container_of(work, struct delayed_work, work);
+	//struct delayed_work *dwork = to_delayed_work(work);
 	struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, GPIOChangeRFWorkItem.work);
 	struct net_device *dev = ieee->dev;
 	struct r8180_priv *priv = ieee80211_priv(dev);
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
index f0aac0cf315a..386eaa22d215 100644
--- a/drivers/usb/wusbcore/devconnect.c
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -471,7 +471,7 @@ static void __wusbhc_keep_alive(struct wusbhc *wusbhc)
  */
 static void wusbhc_keep_alive_run(struct work_struct *ws)
 {
-	struct delayed_work *dw = container_of(ws, struct delayed_work, work);
+	struct delayed_work *dw = to_delayed_work(ws);
 	struct wusbhc *wusbhc = container_of(dw, struct wusbhc, keep_alive_timer);
 
 	mutex_lock(&wusbhc->mutex);
diff --git a/drivers/video/nvidia/nv_setup.c b/drivers/video/nvidia/nv_setup.c
index d9627b57eb4d..135ae18bfce8 100644
--- a/drivers/video/nvidia/nv_setup.c
+++ b/drivers/video/nvidia/nv_setup.c
@@ -362,6 +362,7 @@ int NVCommonSetup(struct fb_info *info)
 	case 0x0186:
 	case 0x0187:
 	case 0x018D:
+	case 0x01D7:
 	case 0x0228:
 	case 0x0286:
 	case 0x028C:
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
index 442bd8bbd4a5..3ebe9726a9e5 100644
--- a/drivers/w1/w1_io.c
+++ b/drivers/w1/w1_io.c
@@ -69,7 +69,7 @@ static u8 w1_touch_bit(struct w1_master *dev, int bit)
 		return w1_read_bit(dev);
 	else {
 		w1_write_bit(dev, 0);
-		return(0);
+		return 0;
 	}
 }
 
@@ -184,17 +184,17 @@ static u8 w1_read_bit(struct w1_master *dev)
  */
 u8 w1_triplet(struct w1_master *dev, int bdir)
 {
-	if ( dev->bus_master->triplet )
-		return(dev->bus_master->triplet(dev->bus_master->data, bdir));
+	if (dev->bus_master->triplet)
+		return dev->bus_master->triplet(dev->bus_master->data, bdir);
 	else {
 		u8 id_bit = w1_touch_bit(dev, 1);
 		u8 comp_bit = w1_touch_bit(dev, 1);
 		u8 retval;
 
-		if ( id_bit && comp_bit )
-			return(0x03); /* error */
+		if (id_bit && comp_bit)
+			return 0x03; /* error */
 
-		if ( !id_bit && !comp_bit ) {
+		if (!id_bit && !comp_bit) {
 			/* Both bits are valid, take the direction given */
 			retval = bdir ? 0x04 : 0;
 		} else {
@@ -203,11 +203,11 @@ u8 w1_triplet(struct w1_master *dev, int bdir)
 			retval = id_bit ? 0x05 : 0x02;
 		}
 
-		if ( dev->bus_master->touch_bit )
+		if (dev->bus_master->touch_bit)
 			w1_touch_bit(dev, bdir);
 		else
 			w1_write_bit(dev, bdir);
-		return(retval);
+		return retval;
 	}
 }
 
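
The w1_triplet() changes are style-only (unparenthesized return values), but the constants they touch encode the triplet result: 0x03 means both sampled bits came back 1 (no device answered), the 0x04 bit records the search direction actually written, and bits 0-1 otherwise carry the sampled id/complement state. A hypothetical caller-side decoder built only on those visible constants, not part of the w1 core:

	/* Hypothetical helper: decode w1_triplet()'s return value. */
	static int w1_triplet_dir(u8 triplet_ret)
	{
		if ((triplet_ret & 0x03) == 0x03)
			return -1;	/* error: no device on this branch */
		return (triplet_ret & 0x04) ? 1 : 0;	/* direction taken */
	}
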