/*
 *  pdc_adma.c - Pacific Digital Corporation ADMA
 *
 *  Maintained by:  Mark Lord <mlord@pobox.com>
 *
 *  Copyright 2005 Mark Lord
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *
 *  Supports ATA disks in single-packet ADMA mode.
 *  Uses PIO for everything else.
 *
 *  TODO:  Use ADMA transfers for ATAPI devices, when possible.
 *  This requires careful attention to a number of quirks of the chip.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <asm/io.h>
#include <linux/libata.h>

#define DRV_NAME	"pdc_adma"
#define DRV_VERSION	"0.03"

/* macro to calculate base address for ATA regs */
#define ADMA_ATA_REGS(base,port_no)	((base) + ((port_no) * 0x40))

/* macro to calculate base address for ADMA regs */
#define ADMA_REGS(base,port_no)	((base) + 0x80 + ((port_no) * 0x20))
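
/*
 * Worked example of the address math above (derived purely from the
 * two macros, not from vendor documentation): for the two ports of
 * this chip,
 *
 *	ADMA_ATA_REGS(base, 0) == base + 0x00	port 0 ATA regs
 *	ADMA_ATA_REGS(base, 1) == base + 0x40	port 1 ATA regs
 *	ADMA_REGS(base, 0)     == base + 0x80	port 0 ADMA regs
 *	ADMA_REGS(base, 1)     == base + 0xa0	port 1 ADMA regs
 */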

enum {
	ADMA_PORTS		= 2,
	ADMA_CPB_BYTES		= 40,
	ADMA_PRD_BYTES		= LIBATA_MAX_PRD * 16,
	ADMA_PKT_BYTES		= ADMA_CPB_BYTES + ADMA_PRD_BYTES,

	ADMA_DMA_BOUNDARY	= 0xffffffff,

	/* global register offsets */
	ADMA_MODE_LOCK		= 0x00c7,

	/* per-channel register offsets */
	ADMA_CONTROL		= 0x0000, /* ADMA control */
	ADMA_STATUS		= 0x0002, /* ADMA status */
	ADMA_CPB_COUNT		= 0x0004, /* CPB count */
	ADMA_CPB_CURRENT	= 0x000c, /* current CPB address */
	ADMA_CPB_NEXT		= 0x000c, /* next CPB address */
	ADMA_CPB_LOOKUP		= 0x0010, /* CPB lookup table */
	ADMA_FIFO_IN		= 0x0014, /* input FIFO threshold */
	ADMA_FIFO_OUT		= 0x0016, /* output FIFO threshold */

	/* ADMA_CONTROL register bits */
	aNIEN			= (1 << 8), /* irq mask: 1==masked */
	aGO			= (1 << 7), /* packet trigger ("Go!") */
	aRSTADM			= (1 << 5), /* ADMA logic reset */
	aPIOMD4			= 0x0003,   /* PIO mode 4 */

	/* ADMA_STATUS register bits */
	aPSD			= (1 << 6),
	aUIRQ			= (1 << 4),
	aPERR			= (1 << 0),

	/* CPB bits */
	cDONE			= (1 << 0),
	cVLD			= (1 << 0),
	cDAT			= (1 << 2),
	cIEN			= (1 << 3),

	/* PRD bits */
	pORD			= (1 << 4),
	pDIRO			= (1 << 5),
	pEND			= (1 << 7),

	/* ATA register flags */
	rIGN			= (1 << 5),
	rEND			= (1 << 7),

	/* ATA register addresses */
	ADMA_REGS_CONTROL	= 0x0e,
	ADMA_REGS_SECTOR_COUNT	= 0x12,
	ADMA_REGS_LBA_LOW	= 0x13,
	ADMA_REGS_LBA_MID	= 0x14,
	ADMA_REGS_LBA_HIGH	= 0x15,
	ADMA_REGS_DEVICE	= 0x16,
	ADMA_REGS_COMMAND	= 0x17,

	/* PCI device IDs */
	board_1841_idx		= 0,	/* ADMA 2-port controller */
};

typedef enum { adma_state_idle, adma_state_pkt, adma_state_mmio } adma_state_t;

struct adma_port_priv {
	u8			*pkt;
	dma_addr_t		pkt_dma;
	adma_state_t		state;
};

static int adma_ata_init_one (struct pci_dev *pdev,
				const struct pci_device_id *ent);
static irqreturn_t adma_intr (int irq, void *dev_instance,
				struct pt_regs *regs);
static int adma_port_start(struct ata_port *ap);
static void adma_host_stop(struct ata_host_set *host_set);
static void adma_port_stop(struct ata_port *ap);
static void adma_phy_reset(struct ata_port *ap);
static void adma_qc_prep(struct ata_queued_cmd *qc);
static int adma_qc_issue(struct ata_queued_cmd *qc);
static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void adma_bmdma_stop(struct ata_queued_cmd *qc);
static u8 adma_bmdma_status(struct ata_port *ap);
static void adma_irq_clear(struct ata_port *ap);
static void adma_eng_timeout(struct ata_port *ap);

static struct scsi_host_template adma_ata_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.eh_strategy_handler	= ata_scsi_error,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.max_sectors		= ATA_MAX_SECTORS,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ENABLE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ADMA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.bios_param		= ata_std_bios_param,
};

static const struct ata_port_operations adma_ata_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.check_atapi_dma	= adma_check_atapi_dma,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,
	.phy_reset		= adma_phy_reset,
	.qc_prep		= adma_qc_prep,
	.qc_issue		= adma_qc_issue,
	.eng_timeout		= adma_eng_timeout,
	.irq_handler		= adma_intr,
	.irq_clear		= adma_irq_clear,
	.port_start		= adma_port_start,
	.port_stop		= adma_port_stop,
	.host_stop		= adma_host_stop,
	.bmdma_stop		= adma_bmdma_stop,
	.bmdma_status		= adma_bmdma_status,
};

static struct ata_port_info adma_port_info[] = {
	/* board_1841_idx */
	{
		.sht		= &adma_ata_sht,
		.host_flags	= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST |
				  ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO,
		.pio_mask	= 0x10, /* pio4 */
		.udma_mask	= 0x1f, /* udma0-4 */
		.port_ops	= &adma_ata_ops,
	},
};

static const struct pci_device_id adma_ata_pci_tbl[] = {
	{ PCI_VENDOR_ID_PDC, 0x1841, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_1841_idx },

	{ }	/* terminate list */
};

static struct pci_driver adma_ata_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= adma_ata_pci_tbl,
	.probe			= adma_ata_init_one,
	.remove			= ata_pci_remove_one,
};

static int adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	return 1;	/* ATAPI DMA not yet supported */
}

static void adma_bmdma_stop(struct ata_queued_cmd *qc)
{
	/* nothing */
}

static u8 adma_bmdma_status(struct ata_port *ap)
{
	return 0;
}

static void adma_irq_clear(struct ata_port *ap)
{
	/* nothing */
}

static void adma_reset_engine(void __iomem *chan)
{
	/* reset ADMA to idle state */
	writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL);
	udelay(2);
	writew(aPIOMD4, chan + ADMA_CONTROL);
	udelay(2);
}

static void adma_reinit_engine(struct ata_port *ap)
{
	struct adma_port_priv *pp = ap->private_data;
	void __iomem *mmio_base = ap->host_set->mmio_base;
	void __iomem *chan = ADMA_REGS(mmio_base, ap->port_no);

	/* mask/clear ATA interrupts */
	writeb(ATA_NIEN, (void __iomem *)ap->ioaddr.ctl_addr);
	ata_check_status(ap);

	/* reset the ADMA engine */
	adma_reset_engine(chan);

	/* set in-FIFO threshold to 0x100 */
	writew(0x100, chan + ADMA_FIFO_IN);

	/* set CPB pointer */
	writel((u32)pp->pkt_dma, chan + ADMA_CPB_NEXT);

	/* set out-FIFO threshold to 0x100 */
	writew(0x100, chan + ADMA_FIFO_OUT);

	/* set CPB count */
	writew(1, chan + ADMA_CPB_COUNT);

	/* read/discard ADMA status */
	readb(chan + ADMA_STATUS);
}

static inline void adma_enter_reg_mode(struct ata_port *ap)
{
	void __iomem *chan = ADMA_REGS(ap->host_set->mmio_base, ap->port_no);

	writew(aPIOMD4, chan + ADMA_CONTROL);
	readb(chan + ADMA_STATUS);	/* flush */
}

static void adma_phy_reset(struct ata_port *ap)
{
	struct adma_port_priv *pp = ap->private_data;

	pp->state = adma_state_idle;
	adma_reinit_engine(ap);
	ata_port_probe(ap);
	ata_bus_reset(ap);
}

static void adma_eng_timeout(struct ata_port *ap)
{
	struct adma_port_priv *pp = ap->private_data;

	if (pp->state != adma_state_idle) /* healthy paranoia */
		pp->state = adma_state_mmio;
	adma_reinit_engine(ap);
	ata_eng_timeout(ap);
}

static int adma_fill_sg(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	struct ata_port *ap = qc->ap;
	struct adma_port_priv *pp = ap->private_data;
	u8  *buf = pp->pkt;
	u32 nelem = 0;	/* PRD entry index, for debug output only */
	int i = (2 + buf[3]) * 8;
	u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? pDIRO : 0);

	ata_for_each_sg(sg, qc) {
		u32 addr;
		u32 len;

		addr = (u32)sg_dma_address(sg);
		*(__le32 *)(buf + i) = cpu_to_le32(addr);
		i += 4;

		len = sg_dma_len(sg) >> 3;
		*(__le32 *)(buf + i) = cpu_to_le32(len);
		i += 4;

		if (ata_sg_is_last(sg, qc))
			pFLAGS |= pEND;
		buf[i++] = pFLAGS;
		buf[i++] = qc->dev->dma_mode & 0xf;
		buf[i++] = 0;	/* pPKLW */
		buf[i++] = 0;	/* reserved */

		*(__le32 *)(buf + i)
			= (pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4);
		i += 4;

		VPRINTK("PRD[%u] = (0x%lX, 0x%X)\n", nelem,
					(unsigned long)addr, len);
	}
	return i;
}
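
/*
 * Sketch of the 16-byte PRD entry emitted above for each scatterlist
 * segment (byte offsets within the entry; reconstructed from the
 * stores in adma_fill_sg(), not from a datasheet):
 *
 *	+0x0  le32	DMA address of the segment
 *	+0x4  le32	segment length, in 8-byte units
 *	+0x8  u8	pFLAGS (pORD, plus pDIRO on writes, pEND on last)
 *	+0x9  u8	DMA mode (qc->dev->dma_mode & 0xf)
 *	+0xa  u8	pPKLW (zero)
 *	+0xb  u8	reserved
 *	+0xc  le32	bus address of the next entry, 0 on the last one
 */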

static void adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct adma_port_priv *pp = qc->ap->private_data;
	u8  *buf = pp->pkt;
	u32 pkt_dma = (u32)pp->pkt_dma;
	int i = 0;

	VPRINTK("ENTER\n");

	adma_enter_reg_mode(qc->ap);
	if (qc->tf.protocol != ATA_PROT_DMA) {
		ata_qc_prep(qc);
		return;
	}

	buf[i++] = 0;	/* Response flags */
	buf[i++] = 0;	/* reserved */
	buf[i++] = cVLD | cDAT | cIEN;
	i++;		/* cLEN, gets filled in below */

	*(__le32 *)(buf+i) = cpu_to_le32(pkt_dma);	/* cNCPB */
	i += 4;		/* cNCPB */
	i += 4;		/* cPRD, gets filled in below */

	buf[i++] = 0;	/* reserved */
	buf[i++] = 0;	/* reserved */
	buf[i++] = 0;	/* reserved */
	buf[i++] = 0;	/* reserved */

	/* ATA (value, register) byte pairs; the pair count must be a
	 * multiple of 4 so cLEN comes out in whole 8-byte units */
	buf[i++] = qc->tf.device;
	buf[i++] = ADMA_REGS_DEVICE;
	if ((qc->tf.flags & ATA_TFLAG_LBA48)) {
		buf[i++] = qc->tf.hob_nsect;
		buf[i++] = ADMA_REGS_SECTOR_COUNT;
		buf[i++] = qc->tf.hob_lbal;
		buf[i++] = ADMA_REGS_LBA_LOW;
		buf[i++] = qc->tf.hob_lbam;
		buf[i++] = ADMA_REGS_LBA_MID;
		buf[i++] = qc->tf.hob_lbah;
		buf[i++] = ADMA_REGS_LBA_HIGH;
	}
	buf[i++] = qc->tf.nsect;
	buf[i++] = ADMA_REGS_SECTOR_COUNT;
	buf[i++] = qc->tf.lbal;
	buf[i++] = ADMA_REGS_LBA_LOW;
	buf[i++] = qc->tf.lbam;
	buf[i++] = ADMA_REGS_LBA_MID;
	buf[i++] = qc->tf.lbah;
	buf[i++] = ADMA_REGS_LBA_HIGH;
	buf[i++] = 0;
	buf[i++] = ADMA_REGS_CONTROL;
	buf[i++] = rIGN;
	buf[i++] = 0;
	buf[i++] = qc->tf.command;
	buf[i++] = ADMA_REGS_COMMAND | rEND;

	buf[3] = (i >> 3) - 2;				/* cLEN */
	*(__le32 *)(buf+8) = cpu_to_le32(pkt_dma + i);	/* cPRD */

	i = adma_fill_sg(qc);
	wmb();	/* flush PRDs and pkt to memory */
#if 0
	/* dump out CPB + PRDs for debug */
	{
		int j, len = 0;
		static char obuf[2048];
		for (j = 0; j < i; ++j) {
			len += sprintf(obuf+len, "%02x ", buf[j]);
			if ((j & 7) == 7) {
				printk("%s\n", obuf);
				len = 0;
			}
		}
		if (len)
			printk("%s\n", obuf);
	}
#endif
}
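
/*
 * Rough layout of the CPB as assembled above (byte offsets into
 * pp->pkt; reconstructed from the code, not from vendor docs):
 *
 *	+0x00	response flags, written back by the chip (cDONE on success)
 *	+0x01	reserved
 *	+0x02	control flags (cVLD | cDAT | cIEN)
 *	+0x03	cLEN: length of the register list, in 8-byte units
 *	+0x04	cNCPB: bus address of the next CPB (points to itself here)
 *	+0x08	cPRD: bus address of the first PRD entry
 *	+0x0c	reserved
 *	+0x10	(value, register) byte pairs, ending with the taskfile
 *		command tagged ADMA_REGS_COMMAND | rEND
 */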

static inline void adma_packet_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *chan = ADMA_REGS(ap->host_set->mmio_base, ap->port_no);

	VPRINTK("ENTER, ap %p\n", ap);

	/* fire up the ADMA engine */
	writew(aPIOMD4 | aGO, chan + ADMA_CONTROL);
}

static int adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct adma_port_priv *pp = qc->ap->private_data;

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		pp->state = adma_state_pkt;
		adma_packet_start(qc);
		return 0;

	case ATA_PROT_ATAPI_DMA:
		BUG();
		break;

	default:
		break;
	}

	pp->state = adma_state_mmio;
	return ata_qc_issue_prot(qc);
}

static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set)
{
	unsigned int handled = 0, port_no;
	u8 __iomem *mmio_base = host_set->mmio_base;

	for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
		struct ata_port *ap = host_set->ports[port_no];
		struct adma_port_priv *pp;
		struct ata_queued_cmd *qc;
		void __iomem *chan = ADMA_REGS(mmio_base, port_no);
		u8 status = readb(chan + ADMA_STATUS);

		if (status == 0)
			continue;
		handled = 1;
		adma_enter_reg_mode(ap);
		if (ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))
			continue;
		pp = ap->private_data;
		if (!pp || pp->state != adma_state_pkt)
			continue;
		qc = ata_qc_from_tag(ap, ap->active_tag);
		if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
			if ((status & (aPERR | aPSD | aUIRQ)))
				qc->err_mask |= AC_ERR_OTHER;
			else if (pp->pkt[0] != cDONE)
				qc->err_mask |= AC_ERR_OTHER;

			ata_qc_complete(qc);
		}
	}
	return handled;
}

static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set)
{
	unsigned int handled = 0, port_no;

	for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
		struct ata_port *ap;
		ap = host_set->ports[port_no];
		if (ap && (!(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR)))) {
			struct ata_queued_cmd *qc;
			struct adma_port_priv *pp = ap->private_data;
			if (!pp || pp->state != adma_state_mmio)
				continue;
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.ctl & ATA_NIEN))) {

				/* check main status, clearing INTRQ */
				u8 status = ata_check_status(ap);
				if ((status & ATA_BUSY))
					continue;
				DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
					ap->id, qc->tf.protocol, status);
		
				/* complete taskfile transaction */
				pp->state = adma_state_idle;
				qc->err_mask |= ac_err_mask(status);
				ata_qc_complete(qc);
				handled = 1;
			}
		}
	}
	return handled;
}

static irqreturn_t adma_intr(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	unsigned int handled = 0;

	VPRINTK("ENTER\n");

	spin_lock(&host_set->lock);
	handled  = adma_intr_pkt(host_set) | adma_intr_mmio(host_set);
	spin_unlock(&host_set->lock);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}

static void adma_ata_setup_port(struct ata_ioports *port, unsigned long base)
{
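	/* taskfile registers sit at 4-byte strides in this chip's MMIO
	 * window; ctl/altstatus lives separately at offset 0x38 */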
	port->cmd_addr		=
	port->data_addr		= base + 0x000;
	port->error_addr	=
	port->feature_addr	= base + 0x004;
	port->nsect_addr	= base + 0x008;
	port->lbal_addr		= base + 0x00c;
	port->lbam_addr		= base + 0x010;
	port->lbah_addr		= base + 0x014;
	port->device_addr	= base + 0x018;
	port->status_addr	=
	port->command_addr	= base + 0x01c;
	port->altstatus_addr	=
	port->ctl_addr		= base + 0x038;
}

static int adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;
	struct adma_port_priv *pp;
	int rc;

	rc = ata_port_start(ap);
	if (rc)
		return rc;
	adma_enter_reg_mode(ap);
	rc = -ENOMEM;
	pp = kcalloc(1, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		goto err_out;
	pp->pkt = dma_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma,
								GFP_KERNEL);
	if (!pp->pkt)
		goto err_out_kfree;
	/* paranoia? */
	if ((pp->pkt_dma & 7) != 0) {
		printk("bad alignment for pp->pkt_dma: %08x\n",
						(u32)pp->pkt_dma);
		dma_free_coherent(dev, ADMA_PKT_BYTES,
						pp->pkt, pp->pkt_dma);
		goto err_out_kfree;
	}
	memset(pp->pkt, 0, ADMA_PKT_BYTES);
	ap->private_data = pp;
	adma_reinit_engine(ap);
	return 0;

err_out_kfree:
	kfree(pp);
err_out:
	ata_port_stop(ap);
	return rc;
}

static void adma_port_stop(struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;
	struct adma_port_priv *pp = ap->private_data;

	adma_reset_engine(ADMA_REGS(ap->host_set->mmio_base, ap->port_no));
	if (pp != NULL) {
		ap->private_data = NULL;
		if (pp->pkt != NULL)
			dma_free_coherent(dev, ADMA_PKT_BYTES,
					pp->pkt, pp->pkt_dma);
		kfree(pp);
	}
	ata_port_stop(ap);
}

static void adma_host_stop(struct ata_host_set *host_set)
{
	unsigned int port_no;

	for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
		adma_reset_engine(ADMA_REGS(host_set->mmio_base, port_no));

	ata_pci_host_stop(host_set);
}

static void adma_host_init(unsigned int chip_id,
				struct ata_probe_ent *probe_ent)
{
	unsigned int port_no;
	void __iomem *mmio_base = probe_ent->mmio_base;

	/* enable/lock aGO operation */
	writeb(7, mmio_base + ADMA_MODE_LOCK);

	/* reset the ADMA logic */
	for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
		adma_reset_engine(ADMA_REGS(mmio_base, port_no));
}

static int adma_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
{
	int rc;

	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			"32-bit DMA enable failed\n");
		return rc;
	}
	rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			"32-bit consistent DMA enable failed\n");
		return rc;
	}
	return 0;
}

static int adma_ata_init_one(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	static int printed_version;
	struct ata_probe_ent *probe_ent = NULL;
	void __iomem *mmio_base;
	unsigned int board_idx = (unsigned int) ent->driver_data;
	int rc, port_no;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;

	if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0) {
		rc = -ENODEV;
		goto err_out_regions;
	}

	mmio_base = pci_iomap(pdev, 4, 0);
	if (mmio_base == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	rc = adma_set_dma_masks(pdev, mmio_base);
	if (rc)
		goto err_out_iounmap;

	probe_ent = kcalloc(1, sizeof(*probe_ent), GFP_KERNEL);
	if (probe_ent == NULL) {
		rc = -ENOMEM;
		goto err_out_iounmap;
	}

	probe_ent->dev = pci_dev_to_dev(pdev);
	INIT_LIST_HEAD(&probe_ent->node);

	probe_ent->sht		= adma_port_info[board_idx].sht;
	probe_ent->host_flags	= adma_port_info[board_idx].host_flags;
	probe_ent->pio_mask	= adma_port_info[board_idx].pio_mask;
	probe_ent->mwdma_mask	= adma_port_info[board_idx].mwdma_mask;
	probe_ent->udma_mask	= adma_port_info[board_idx].udma_mask;
	probe_ent->port_ops	= adma_port_info[board_idx].port_ops;

	probe_ent->irq		= pdev->irq;
	probe_ent->irq_flags	= SA_SHIRQ;
	probe_ent->mmio_base	= mmio_base;
	probe_ent->n_ports	= ADMA_PORTS;

	for (port_no = 0; port_no < probe_ent->n_ports; ++port_no) {
		adma_ata_setup_port(&probe_ent->port[port_no],
			ADMA_ATA_REGS((unsigned long)mmio_base, port_no));
	}

	pci_set_master(pdev);

	/* initialize adapter */
	adma_host_init(board_idx, probe_ent);

	rc = ata_device_add(probe_ent);
	kfree(probe_ent);
	if (rc != ADMA_PORTS)
		goto err_out_iounmap;
	return 0;

err_out_iounmap:
	pci_iounmap(pdev, mmio_base);
err_out_regions:
	pci_release_regions(pdev);
err_out:
	pci_disable_device(pdev);
	return rc;
}

static int __init adma_ata_init(void)
{
	return pci_module_init(&adma_ata_pci_driver);
}

static void __exit adma_ata_exit(void)
{
	pci_unregister_driver(&adma_ata_pci_driver);
}

MODULE_AUTHOR("Mark Lord");
MODULE_DESCRIPTION("Pacific Digital Corporation ADMA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, adma_ata_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(adma_ata_init);
module_exit(adma_ata_exit);
a id='n3078' href='#n3078'>3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590 3591 3592 3593 3594 3595 3596 3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771 3772 3773 3774 3775 3776 3777 3778 3779 3780 3781 3782 
/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice.  This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-config.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *                Virtualized Server Adapter.
 * Copyright(c) 2002-2010 Exar Corp.
 ******************************************************************************/
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>

#include "vxge-traffic.h"
#include "vxge-config.h"
#include "vxge-main.h"

#define VXGE_HW_VPATH_STATS_PIO_READ(offset) {				\
	status = __vxge_hw_vpath_stats_access(vpath,			\
					      VXGE_HW_STATS_OP_READ,	\
					      offset,			\
					      &val64);			\
	if (status != VXGE_HW_OK)					\
		return status;						\
}
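
/*
 * The macro above expects 'vpath', 'status' and 'val64' to already
 * exist in the caller's scope, and it returns from the *calling*
 * function on failure.  A hypothetical caller might look like this
 * (illustrative sketch only; the field name is an example):
 *
 *	enum vxge_hw_status status;
 *	u64 val64;
 *
 *	VXGE_HW_VPATH_STATS_PIO_READ(
 *		offsetof(struct vxge_hw_vpath_stats_hw_info, ini_num_mwr_sent));
 *	stats->ini_num_mwr_sent = (u32)val64;
 */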

static void
vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
{
	u64 val64;

	val64 = readq(&vp_reg->rxmac_vcfg0);
	val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
	writeq(val64, &vp_reg->rxmac_vcfg0);
	val64 = readq(&vp_reg->rxmac_vcfg0);
}

/*
 * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
 */
int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
{
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	struct __vxge_hw_virtualpath *vpath;
	u64 val64, rxd_count, rxd_spat;
	int count = 0, total_count = 0;

	vpath = &hldev->virtual_paths[vp_id];
	vp_reg = vpath->vp_reg;

	vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);

	/* Check that the ring controller for this vpath has enough free RxDs
	 * to send frames to the host.  This is done by reading the
	 * PRC_RXD_DOORBELL_VPn register and comparing the read value to the
	 * RXD_SPAT value for the vpath.
	 */
	val64 = readq(&vp_reg->prc_cfg6);
	rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
	/* Use a factor of 2 when comparing rxd_count against rxd_spat to
	 * allow some leeway.
	 */
	rxd_spat *= 2;

	do {
		mdelay(1);

		rxd_count = readq(&vp_reg->prc_rxd_doorbell);

		/* Check that the ring controller for this vpath does
		 * not have any frame in its pipeline.
		 */
		val64 = readq(&vp_reg->frm_in_progress_cnt);
		if ((rxd_count <= rxd_spat) || (val64 > 0))
			count = 0;
		else
			count++;
		total_count++;
	} while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
			(total_count < VXGE_HW_MAX_POLLING_COUNT));

	if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
		printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n",
			__func__);

	return total_count;
}

/* vxge_hw_device_wait_receive_idle - This function waits until all frames
 * stored in the frame buffer for each vpath assigned to the given
 * function (hldev) have been sent to the host.
 */
void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
{
	int i, total_count = 0;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
		if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
			break;
	}
}

/*
 * __vxge_hw_device_register_poll
 * Poll the given register until the bits in 'mask' read back as zero,
 * or until the timeout expires: roughly 1ms of tight udelay() polling
 * first, then up to max_millis milliseconds of mdelay(1) polling.
 */
static enum vxge_hw_status
__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
{
	u64 val64;
	u32 i = 0;
	enum vxge_hw_status ret = VXGE_HW_FAIL;

	udelay(10);

	do {
		val64 = readq(reg);
		if (!(val64 & mask))
			return VXGE_HW_OK;
		udelay(100);
	} while (++i <= 9);

	i = 0;
	do {
		val64 = readq(reg);
		if (!(val64 & mask))
			return VXGE_HW_OK;
		mdelay(1);
	} while (++i <= max_millis);

	return ret;
}
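
/*
 * __vxge_hw_pio_mem_write64
 * Write a 64-bit value as two 32-bit PIO writes (low half first, with
 * write barriers in between; presumably the device only acts once both
 * halves have landed), then poll the register until the bits in 'mask'
 * clear.
 */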

static inline enum vxge_hw_status
__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
			  u64 mask, u32 max_millis)
{
	__vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
	wmb();
	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
	wmb();

	return __vxge_hw_device_register_poll(addr, mask, max_millis);
}
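
/*
 * vxge_hw_vpath_fw_api
 * Issue a firmware API call through the RTS access steering registers:
 * load data0/data1, write the action/memo/offset plus the strobe bit,
 * poll (and retry) until the strobe clears, then read the results back
 * on success.  Callers are serialized via the vpath lock while the
 * vpath is open.
 */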

static enum vxge_hw_status
vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
		     u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
		     u64 *steer_ctrl)
{
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	enum vxge_hw_status status;
	u64 val64;
	u32 retry = 0, max_retry = 100;

	vp_reg = vpath->vp_reg;

	if (vpath->vp_open) {
		max_retry = 3;
		spin_lock(&vpath->lock);
	}

	writeq(*data0, &vp_reg->rts_access_steer_data0);
	writeq(*data1, &vp_reg->rts_access_steer_data1);
	wmb();

	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
		*steer_ctrl;

	status = __vxge_hw_pio_mem_write64(val64,
					   &vp_reg->rts_access_steer_ctrl,
					   VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
					   VXGE_HW_DEF_DEVICE_POLL_MILLIS);

	/* The __vxge_hw_device_register_poll can udelay for a significant
	 * amount of time, blocking other processes from the CPU.  If it
	 * delays for ~5secs, an NMI error can occur.  A way around this is
	 * to give up the processor via msleep, but msleep is not allowed
	 * while holding the vpath lock.  So, sleep between retries only
	 * when the vpath is not open (20ms per retry, up to 100 retries);
	 * otherwise just re-poll, at most 3 times, until the firmware
	 * operation completes or times out.
	 */
	while ((status != VXGE_HW_OK) && retry++ < max_retry) {
		if (!vpath->vp_open)
			msleep(20);
		status = __vxge_hw_device_register_poll(
					&vp_reg->rts_access_steer_ctrl,
					VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
					VXGE_HW_DEF_DEVICE_POLL_MILLIS);
	}

	if (status != VXGE_HW_OK)
		goto out;

	val64 = readq(&vp_reg->rts_access_steer_ctrl);
	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
		*data0 = readq(&vp_reg->rts_access_steer_data0);
		*data1 = readq(&vp_reg->rts_access_steer_data1);
		*steer_ctrl = val64;
	} else
		status = VXGE_HW_FAIL;

out:
	if (vpath->vp_open)
		spin_unlock(&vpath->lock);
	return status;
}

enum vxge_hw_status
vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
			     u32 *minor, u32 *build)
{
	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status;

	vpath = &hldev->virtual_paths[hldev->first_vp_id];

	status = vxge_hw_vpath_fw_api(vpath,
				      VXGE_HW_FW_UPGRADE_ACTION,
				      VXGE_HW_FW_UPGRADE_MEMO,
				      VXGE_HW_FW_UPGRADE_OFFSET_READ,
				      &data0, &data1, &steer_ctrl);
	if (status != VXGE_HW_OK)
		return status;

	*major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
	*minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
	*build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);

	return status;
}

enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev)
{
	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status;
	u32 ret;

	vpath = &hldev->virtual_paths[hldev->first_vp_id];

	status = vxge_hw_vpath_fw_api(vpath,
				      VXGE_HW_FW_UPGRADE_ACTION,
				      VXGE_HW_FW_UPGRADE_MEMO,
				      VXGE_HW_FW_UPGRADE_OFFSET_COMMIT,
				      &data0, &data1, &steer_ctrl);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__);
		goto exit;
	}

	ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F;
	if (ret != 1) {
		vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d",
				__func__, ret);
		status = VXGE_HW_FAIL;
	}

exit:
	return status;
}

enum vxge_hw_status
vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size)
{
	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status;
	int ret_code, sec_code;

	vpath = &hldev->virtual_paths[hldev->first_vp_id];

	/* send upgrade start command */
	status = vxge_hw_vpath_fw_api(vpath,
				      VXGE_HW_FW_UPGRADE_ACTION,
				      VXGE_HW_FW_UPGRADE_MEMO,
				      VXGE_HW_FW_UPGRADE_OFFSET_START,
				      &data0, &data1, &steer_ctrl);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed",
				__func__);
		return status;
	}

	/* Transfer fw image to adapter 16 bytes at a time */
	for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) {
		steer_ctrl = 0;

		/* The next 128 bits of fwdata to be loaded onto the adapter */
		data0 = *((u64 *)fwdata);
		data1 = *((u64 *)fwdata + 1);

		status = vxge_hw_vpath_fw_api(vpath,
					      VXGE_HW_FW_UPGRADE_ACTION,
					      VXGE_HW_FW_UPGRADE_MEMO,
					      VXGE_HW_FW_UPGRADE_OFFSET_SEND,
					      &data0, &data1, &steer_ctrl);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed",
					__func__);
			goto out;
		}

		ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0);
		switch (ret_code) {
		case VXGE_HW_FW_UPGRADE_OK:
			/* All OK, send next 16 bytes. */
			break;
		case VXGE_FW_UPGRADE_BYTES2SKIP:
			/* skip bytes in the stream */
			fwdata += (data0 >> 8) & 0xFFFFFFFF;
			break;
		case VXGE_HW_FW_UPGRADE_DONE:
			goto out;
		case VXGE_HW_FW_UPGRADE_ERR:
			sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0);
			switch (sec_code) {
			case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1:
			case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7:
				printk(KERN_ERR
				       "corrupted data from .ncf file\n");
				break;
			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3:
			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4:
			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5:
			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6:
			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8:
				printk(KERN_ERR "invalid .ncf file\n");
				break;
			case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW:
				printk(KERN_ERR "buffer overflow\n");
				break;
			case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH:
				printk(KERN_ERR "failed to flash the image\n");
				break;
			case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN:
				printk(KERN_ERR
				       "generic error. Unknown error type\n");
				break;
			default:
				printk(KERN_ERR "Unknown error of type %d\n",
				       sec_code);
				break;
			}
			status = VXGE_HW_FAIL;
			goto out;
		default:
			printk(KERN_ERR "Unknown FW error: %d\n", ret_code);
			status = VXGE_HW_FAIL;
			goto out;
		}
		/* point to next 16 bytes */
		fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE;
	}
out:
	return status;
}

enum vxge_hw_status
vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
				struct eprom_image *img)
{
	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status;
	int i;

	vpath = &hldev->virtual_paths[hldev->first_vp_id];

	for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
		data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i);
		data1 = steer_ctrl = 0;

		status = vxge_hw_vpath_fw_api(vpath,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
			VXGE_HW_FW_API_GET_EPROM_REV,
			0, &data0, &data1, &steer_ctrl);
		if (status != VXGE_HW_OK)
			break;

		img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0);
		img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0);
		img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0);
		img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0);
	}

	return status;
}

/*
 * __vxge_hw_channel_free - Free memory allocated for channel
 * This function deallocates memory from the channel and various arrays
 * in the channel
 */
static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
{
	kfree(channel->work_arr);
	kfree(channel->free_arr);
	kfree(channel->reserve_arr);
	kfree(channel->orig_arr);
	kfree(channel);
}

/*
 * __vxge_hw_channel_initialize - Initialize a channel
 * This function initializes a channel by properly setting the
 * various references
 */
static enum vxge_hw_status
__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
{
	u32 i;
	struct __vxge_hw_virtualpath *vpath;

	vpath = channel->vph->vpath;

	if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
		for (i = 0; i < channel->length; i++)
			channel->orig_arr[i] = channel->reserve_arr[i];
	}

	switch (channel->type) {
	case VXGE_HW_CHANNEL_TYPE_FIFO:
		vpath->fifoh = (struct __vxge_hw_fifo *)channel;
		channel->stats = &((struct __vxge_hw_fifo *)
				channel)->stats->common_stats;
		break;
	case VXGE_HW_CHANNEL_TYPE_RING:
		vpath->ringh = (struct __vxge_hw_ring *)channel;
		channel->stats = &((struct __vxge_hw_ring *)
				channel)->stats->common_stats;
		break;
	default:
		break;
	}

	return VXGE_HW_OK;
}

/*
 * __vxge_hw_channel_reset - Resets a channel
 * This function resets a channel by properly setting the various references
 */
static enum vxge_hw_status
__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
{
	u32 i;

	for (i = 0; i < channel->length; i++) {
		if (channel->reserve_arr != NULL)
			channel->reserve_arr[i] = channel->orig_arr[i];
		if (channel->free_arr != NULL)
			channel->free_arr[i] = NULL;
		if (channel->work_arr != NULL)
			channel->work_arr[i] = NULL;
	}
	channel->free_ptr = channel->length;
	channel->reserve_ptr = channel->length;
	channel->reserve_top = 0;
	channel->post_index = 0;
	channel->compl_index = 0;

	return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_pci_e_init
 * Initialize certain PCI/PCI-X configuration registers
 * with recommended values. Save config space for future hw resets.
 */
static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
{
	u16 cmd = 0;

	/* Set the PErr Response bit and SERR in the PCI command register. */
	pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
	cmd |= 0x140;
	pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);

	pci_save_state(hldev->pdev);
}

/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
 * is in progress
 * This routine polls until the vpath reset-in-progress register reads
 * zero.
 */
static enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
{
	enum vxge_hw_status status;
	status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
			VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
			VXGE_HW_DEF_DEVICE_POLL_MILLIS);
	return status;
}

/*
 * __vxge_hw_legacy_swapper_set
 * Set the swapper bits appropriately for the legacy section.
 */
static enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	val64 = readq(&legacy_reg->toc_swapper_fb);

	wmb();

	switch (val64) {
	case VXGE_HW_SWAPPER_INITIAL_VALUE:
		return status;

	case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_rd_swap_en);
		writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_rd_flip_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_wr_swap_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_wr_flip_en);
		break;

	case VXGE_HW_SWAPPER_BYTE_SWAPPED:
		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_rd_swap_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_wr_swap_en);
		break;

	case VXGE_HW_SWAPPER_BIT_FLIPPED:
		writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_rd_flip_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_wr_flip_en);
		break;
	}

	wmb();

	val64 = readq(&legacy_reg->toc_swapper_fb);

	if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
		status = VXGE_HW_ERR_SWAPPER_CTRL;

	return status;
}

/*
 * __vxge_hw_device_toc_get
 * This routine sets the swapper and reads the toc pointer and returns the
 * memory mapped address of the toc
 */
static struct vxge_hw_toc_reg __iomem *
__vxge_hw_device_toc_get(void __iomem *bar0)
{
	u64 val64;
	struct vxge_hw_toc_reg __iomem *toc = NULL;
	enum vxge_hw_status status;

	struct vxge_hw_legacy_reg __iomem *legacy_reg =
		(struct vxge_hw_legacy_reg __iomem *)bar0;

	status = __vxge_hw_legacy_swapper_set(legacy_reg);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 =	readq(&legacy_reg->toc_first_pointer);
	toc = (struct vxge_hw_toc_reg __iomem *)(bar0+val64);
exit:
	return toc;
}

/*
 * __vxge_hw_device_reg_addr_get
 * This routine sets the swapper, reads the toc pointer and initializes
 * the register location pointers in the device object.  It then waits
 * until the vpath reset-in-progress register clears, indicating that
 * register initialization has completed.
 */
static enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
{
	u64 val64;
	u32 i;
	enum vxge_hw_status status = VXGE_HW_OK;

	hldev->legacy_reg = (struct vxge_hw_legacy_reg __iomem *)hldev->bar0;

	hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
	if (hldev->toc_reg  == NULL) {
		status = VXGE_HW_FAIL;
		goto exit;
	}

	val64 = readq(&hldev->toc_reg->toc_common_pointer);
	hldev->common_reg =
	(struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64);

	val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
	hldev->mrpcim_reg =
		(struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64);

	for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
		hldev->srpcim_reg[i] =
			(struct vxge_hw_srpcim_reg __iomem *)
				(hldev->bar0 + val64);
	}

	for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
		hldev->vpmgmt_reg[i] =
		(struct vxge_hw_vpmgmt_reg __iomem *)(hldev->bar0 + val64);
	}

	for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
		hldev->vpath_reg[i] =
			(struct vxge_hw_vpath_reg __iomem *)
				(hldev->bar0 + val64);
	}

	val64 = readq(&hldev->toc_reg->toc_kdfc);

	switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
	case 0:
		hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
			VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
		break;
	default:
		break;
	}

	status = __vxge_hw_device_vpath_reset_in_prog_check(
			(u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
exit:
	return status;
}

/*
 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
 * This routine returns the Access Rights of the driver
 */
static u32
__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
{
	u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;

	switch (host_type) {
	case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
		if (func_id == 0) {
			access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
					VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		}
		break;
	case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
				VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
				VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
	case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
	case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
		break;
	case VXGE_HW_SR_VH_FUNCTION0:
	case VXGE_HW_VH_NORMAL_FUNCTION:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	}

	return access_rights;
}

/*
 * __vxge_hw_device_is_privilaged
 * This routine checks whether the device function is privileged
 * (i.e. has mrpcim access rights) or not.
 */
enum vxge_hw_status
__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
{
	if (__vxge_hw_device_access_rights_get(host_type,
		func_id) &
		VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
		return VXGE_HW_OK;
	else
		return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
}

/*
 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
 * Returns the function number of the vpath.
 */
static u32
__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
{
	u64 val64;

	val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);

	return
	 (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
}

/*
 * __vxge_hw_device_host_info_get
 * This routine returns the host type assignments
 */
static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
{
	u64 val64;
	u32 i;

	val64 = readq(&hldev->common_reg->host_type_assignments);

	hldev->host_type =
	   (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

	hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!(hldev->vpath_assignments & vxge_mBIT(i)))
			continue;

		hldev->func_id =
			__vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);

		hldev->access_rights = __vxge_hw_device_access_rights_get(
			hldev->host_type, hldev->func_id);

		hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN;
		hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i];

		hldev->first_vp_id = i;
		break;
	}
}

/*
 * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
 * link width and signalling rate.
 */
static enum vxge_hw_status
__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
{
	int exp_cap;
	u16 lnk;

	/* Get the negotiated link width and speed from PCI config space */
	exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
	pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);

	if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
		return VXGE_HW_ERR_INVALID_PCI_INFO;

	switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
	case PCIE_LNK_WIDTH_RESRV:
	case PCIE_LNK_X1:
	case PCIE_LNK_X2:
	case PCIE_LNK_X4:
	case PCIE_LNK_X8:
		break;
	default:
		return VXGE_HW_ERR_INVALID_PCI_INFO;
	}

	return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_initialize
 * Initialize Titan-V hardware.
 */
static enum vxge_hw_status
__vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
				hldev->func_id)) {
		/* Validate the pci-e link width and speed */
		status = __vxge_hw_verify_pci_e_info(hldev);
		if (status != VXGE_HW_OK)
			goto exit;
	}

exit:
	return status;
}

/*
 * __vxge_hw_vpath_fw_ver_get - Get the fw version
 * Returns FW Version
 */
static enum vxge_hw_status
__vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
			   struct vxge_hw_device_hw_info *hw_info)
{
	struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
	struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
	struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
	struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
	u64 data0, data1 = 0, steer_ctrl = 0;
	enum vxge_hw_status status;

	status = vxge_hw_vpath_fw_api(vpath,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
			0, &data0, &data1, &steer_ctrl);
	if (status != VXGE_HW_OK)
		goto exit;

	fw_date->day =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0);
	fw_date->month =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0);
	fw_date->year =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0);

	snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
		 fw_date->month, fw_date->day, fw_date->year);

	fw_version->major =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
	fw_version->minor =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
	fw_version->build =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);

	snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
		 fw_version->major, fw_version->minor, fw_version->build);

	flash_date->day =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1);
	flash_date->month =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1);
	flash_date->year =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1);

	snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
		 flash_date->month, flash_date->day, flash_date->year);

	flash_version->major =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1);
	flash_version->minor =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1);
	flash_version->build =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1);

	snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
		 flash_version->major, flash_version->minor,
		 flash_version->build);

exit:
	return status;
}

/*
 * __vxge_hw_vpath_card_info_get - Get the serial numbers,
 * part number and product description.
 */
static enum vxge_hw_status
__vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath,
			      struct vxge_hw_device_hw_info *hw_info)
{
	enum vxge_hw_status status;
	u64 data0, data1 = 0, steer_ctrl = 0;
	u8 *serial_number = hw_info->serial_number;
	u8 *part_number = hw_info->part_number;
	u8 *product_desc = hw_info->product_desc;
	u32 i, j = 0;

	data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER;

	status = vxge_hw_vpath_fw_api(vpath,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
			0, &data0, &data1, &steer_ctrl);
	if (status != VXGE_HW_OK)
		return status;

	((u64 *)serial_number)[0] = be64_to_cpu(data0);
	((u64 *)serial_number)[1] = be64_to_cpu(data1);
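	/* each READ_MEMO_ENTRY call returns its 16-byte payload in the
	 * two 64-bit data words; the same pattern repeats below for the
	 * part number and the four product-description chunks */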

	data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER;
	data1 = steer_ctrl = 0;

	status = vxge_hw_vpath_fw_api(vpath,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
			0, &data0, &data1, &steer_ctrl);
	if (status != VXGE_HW_OK)
		return status;

	((u64 *)part_number)[0] = be64_to_cpu(data0);
	((u64 *)part_number)[1] = be64_to_cpu(data1);

	for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
	     i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
		data0 = i;
		data1 = steer_ctrl = 0;

		status = vxge_hw_vpath_fw_api(vpath,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
			0, &data0, &data1, &steer_ctrl);
		if (status != VXGE_HW_OK)
			return status;

		((u64 *)product_desc)[j++] = be64_to_cpu(data0);
		((u64 *)product_desc)[j++] = be64_to_cpu(data1);
	}

	return status;
}

/*
 * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
 * Returns pci function mode
 */
static enum vxge_hw_status
__vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath,
				  struct vxge_hw_device_hw_info *hw_info)
{
	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
	enum vxge_hw_status status;

	status = vxge_hw_vpath_fw_api(vpath,
			VXGE_HW_FW_API_GET_FUNC_MODE,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
			0, &data0, &data1, &steer_ctrl);
	if (status != VXGE_HW_OK)
		return status;

	hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0);
	return status;
}

/*
 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
 *               from MAC address table.
 */
static enum vxge_hw_status
__vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath,
			 u8 *macaddr, u8 *macaddr_mask)
{
	u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
	    data0 = 0, data1 = 0, steer_ctrl = 0;
	enum vxge_hw_status status;
	int i;
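	/* walk the DA (direct address) table: LIST_FIRST_ENTRY, then
	 * LIST_NEXT_ENTRY, until an entry with a valid unicast MAC
	 * address is returned */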

	do {
		status = vxge_hw_vpath_fw_api(vpath, action,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &data0, &data1, &steer_ctrl);
		if (status != VXGE_HW_OK)
			goto exit;

		data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
		data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
									data1);

		for (i = ETH_ALEN; i > 0; i--) {
			macaddr[i - 1] = (u8) (data0 & 0xFF);
			data0 >>= 8;

			macaddr_mask[i - 1] = (u8) (data1 & 0xFF);
			data1 >>= 8;
		}

		action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
		data0 = 0;
		data1 = 0;
		steer_ctrl = 0;

	} while (!is_valid_ether_addr(macaddr));
exit:
	return status;
}

/**
 * vxge_hw_device_hw_info_get - Get the hw information
 * Returns the vpath mask that has the bits set for each vpath allocated
 * for the driver, FW version information and the first MAC address for
 * each vpath
 */
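/*
 * Usage sketch (illustrative only; the caller-side names below are not
 * part of this API):
 *
 *	struct vxge_hw_device_hw_info hw_info;
 *
 *	status = vxge_hw_device_hw_info_get(bar0, &hw_info);
 *	if (status == VXGE_HW_OK)
 *		vpath_count = hweight64(hw_info.vpath_mask);
 */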
enum vxge_hw_status __devinit
vxge_hw_device_hw_info_get(void __iomem *bar0,
			   struct vxge_hw_device_hw_info *hw_info)
{
	u32 i;
	u64 val64;
	struct vxge_hw_toc_reg __iomem *toc;
	struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
	struct vxge_hw_common_reg __iomem *common_reg;
	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
	enum vxge_hw_status status;
	struct __vxge_hw_virtualpath vpath;

	memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));

	toc = __vxge_hw_device_toc_get(bar0);
	if (toc == NULL) {
		status = VXGE_HW_ERR_CRITICAL;
		goto exit;
	}

	val64 = readq(&toc->toc_common_pointer);
	common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64);

	status = __vxge_hw_device_vpath_reset_in_prog_check(
		(u64 __iomem *)&common_reg->vpath_rst_in_prog);
	if (status != VXGE_HW_OK)
		goto exit;

	hw_info->vpath_mask = readq(&common_reg->vpath_assignments);

	val64 = readq(&common_reg->host_type_assignments);

	hw_info->host_type =
	   (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
			continue;

		val64 = readq(&toc->toc_vpmgmt_pointer[i]);

		vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
				(bar0 + val64);

		hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
		if (__vxge_hw_device_access_rights_get(hw_info->host_type,
			hw_info->func_id) &
			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {

			val64 = readq(&toc->toc_mrpcim_pointer);

			mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *)
					(bar0 + val64);

			writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
			wmb();
		}

		val64 = readq(&toc->toc_vpath_pointer[i]);

		vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
			       (bar0 + val64);
		vpath.vp_open = 0;

		status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
		if (status != VXGE_HW_OK)
			goto exit;

		status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);
		if (status != VXGE_HW_OK)
			goto exit;

		status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);
		if (status != VXGE_HW_OK)
			goto exit;

		break;
	}

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
			continue;

		val64 = readq(&toc->toc_vpath_pointer[i]);
		vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
			       (bar0 + val64);
		vpath.vp_open = 0;

		status =  __vxge_hw_vpath_addr_get(&vpath,
				hw_info->mac_addrs[i],
				hw_info->mac_addr_masks[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}

/*
 * __vxge_hw_blockpool_destroy - Deallocates the block pool
 */
static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
{
	struct __vxge_hw_device *hldev;
	struct list_head *p, *n;

	if (blockpool == NULL)
		return;

	hldev = blockpool->hldev;

	list_for_each_safe(p, n, &blockpool->free_block_list) {
		pci_unmap_single(hldev->pdev,
			((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
			((struct __vxge_hw_blockpool_entry *)p)->length,
			PCI_DMA_BIDIRECTIONAL);

		vxge_os_dma_free(hldev->pdev,
			((struct __vxge_hw_blockpool_entry *)p)->memblock,
			&((struct __vxge_hw_blockpool_entry *)p)->acc_handle);

		list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
		kfree(p);
		blockpool->pool_size--;
	}

	list_for_each_safe(p, n, &blockpool->free_entry_list) {
		list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
		kfree((void *)p);
	}
}

/*
 * __vxge_hw_blockpool_create - Create block pool
 */
static enum vxge_hw_status
__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
			   struct __vxge_hw_blockpool *blockpool,
			   u32 pool_size,
			   u32 pool_max)
{
	u32 i;
	struct __vxge_hw_blockpool_entry *entry = NULL;
	void *memblock;
	dma_addr_t dma_addr;
	struct pci_dev *dma_handle;
	struct pci_dev *acc_handle;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (blockpool == NULL) {
		status = VXGE_HW_FAIL;
		goto blockpool_create_exit;
	}

	blockpool->hldev = hldev;
	blockpool->block_size = VXGE_HW_BLOCK_SIZE;
	blockpool->pool_size = 0;
	blockpool->pool_max = pool_max;
	blockpool->req_out = 0;

	INIT_LIST_HEAD(&blockpool->free_block_list);
	INIT_LIST_HEAD(&blockpool->free_entry_list);
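	/* pre-seed pool_size + pool_max entry descriptors up front so
	 * later block adds and removes rarely need to allocate inline */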

	for (i = 0; i < pool_size + pool_max; i++) {
		entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
				GFP_KERNEL);
		if (entry == NULL) {
			__vxge_hw_blockpool_destroy(blockpool);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto blockpool_create_exit;
		}
		list_add(&entry->item, &blockpool->free_entry_list);
	}

	for (i = 0; i < pool_size; i++) {
		memblock = vxge_os_dma_malloc(
				hldev->pdev,
				VXGE_HW_BLOCK_SIZE,
				&dma_handle,
				&acc_handle);
		if (memblock == NULL) {
			__vxge_hw_blockpool_destroy(blockpool);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto blockpool_create_exit;
		}

		dma_addr = pci_map_single(hldev->pdev, memblock,
				VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
		if (unlikely(pci_dma_mapping_error(hldev->pdev,
				dma_addr))) {
			vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
			__vxge_hw_blockpool_destroy(blockpool);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto blockpool_create_exit;
		}

		if (!list_empty(&blockpool->free_entry_list))
			entry = (struct __vxge_hw_blockpool_entry *)
				list_first_entry(&blockpool->free_entry_list,
					struct __vxge_hw_blockpool_entry,
					item);

		if (entry == NULL)
			entry =
			    kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
					GFP_KERNEL);
		if (entry != NULL) {
			list_del(&entry->item);
			entry->length = VXGE_HW_BLOCK_SIZE;
			entry->memblock = memblock;
			entry->dma_addr = dma_addr;
			entry->acc_handle = acc_handle;
			entry->dma_handle = dma_handle;
			list_add(&entry->item,
					  &blockpool->free_block_list);
			blockpool->pool_size++;
		} else {
			__vxge_hw_blockpool_destroy(blockpool);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto blockpool_create_exit;
		}
	}

blockpool_create_exit:
	return status;
}

/*
 * __vxge_hw_device_fifo_config_check - Check fifo configuration.
 * Check the fifo configuration
 */
static enum vxge_hw_status
__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
{
	if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
	    (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
		return VXGE_HW_BADCFG_FIFO_BLOCKS;

	return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_vpath_config_check - Check vpath configuration.
 * Check the vpath configuration
 */
static enum vxge_hw_status
__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
{
	enum vxge_hw_status status;

	if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
	    (vp_config->min_bandwidth >	VXGE_HW_VPATH_BANDWIDTH_MAX))
		return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;

	status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
	if (status != VXGE_HW_OK)
		return status;

	if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
		((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
		(vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
		return VXGE_HW_BADCFG_VPATH_MTU;

	if ((vp_config->rpa_strip_vlan_tag !=
		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
		(vp_config->rpa_strip_vlan_tag !=
		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
		(vp_config->rpa_strip_vlan_tag !=
		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
		return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;

	return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_config_check - Check device configuration.
 * Check the device configuration
 */
static enum vxge_hw_status
__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
{
	u32 i;
	enum vxge_hw_status status;

	if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
	    (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
	    (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
	    (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
		return VXGE_HW_BADCFG_INTR_MODE;

	if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
	    (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
		return VXGE_HW_BADCFG_RTS_MAC_EN;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		status = __vxge_hw_device_vpath_config_check(
				&new_config->vp_config[i]);
		if (status != VXGE_HW_OK)
			return status;
	}

	return VXGE_HW_OK;
}

/*
 * vxge_hw_device_initialize - Initialize Titan device.
 * Initialize Titan device. The driver cooperates with the OS to find a new
 * Titan device and to locate its PCI and memory spaces.
 *
 * This routine allocates sizeof(struct __vxge_hw_device) bytes of HW context
 * and, on success, returns it through @devh so the caller can perform the
 * remaining Titan hardware initialization.
 */
enum vxge_hw_status __devinit
vxge_hw_device_initialize(
	struct __vxge_hw_device **devh,
	struct vxge_hw_device_attr *attr,
	struct vxge_hw_device_config *device_config)
{
	u32 i;
	u32 nblocks = 0;
	struct __vxge_hw_device *hldev = NULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	status = __vxge_hw_device_config_check(device_config);
	if (status != VXGE_HW_OK)
		goto exit;

	hldev = vzalloc(sizeof(struct __vxge_hw_device));
	if (hldev == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	hldev->magic = VXGE_HW_DEVICE_MAGIC;

	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);

	/* apply config */
	memcpy(&hldev->config, device_config,
		sizeof(struct vxge_hw_device_config));

	hldev->bar0 = attr->bar0;
	hldev->pdev = attr->pdev;

	hldev->uld_callbacks.link_up = attr->uld_callbacks.link_up;
	hldev->uld_callbacks.link_down = attr->uld_callbacks.link_down;
	hldev->uld_callbacks.crit_err = attr->uld_callbacks.crit_err;

	__vxge_hw_device_pci_e_init(hldev);

	status = __vxge_hw_device_reg_addr_get(hldev);
	if (status != VXGE_HW_OK) {
		vfree(hldev);
		goto exit;
	}

	__vxge_hw_device_host_info_get(hldev);

	/* Incrementing for stats blocks */
	nblocks++;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!(hldev->vpath_assignments & vxge_mBIT(i)))
			continue;

		if (device_config->vp_config[i].ring.enable ==
			VXGE_HW_RING_ENABLE)
			nblocks += device_config->vp_config[i].ring.ring_blocks;

		if (device_config->vp_config[i].fifo.enable ==
			VXGE_HW_FIFO_ENABLE)
			nblocks += device_config->vp_config[i].fifo.fifo_blocks;
		nblocks++;
	}
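
	/* nblocks now covers one stats block plus, per deployed vpath,
	 * its ring blocks, its fifo blocks and one extra block */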

	if (__vxge_hw_blockpool_create(hldev,
		&hldev->block_pool,
		device_config->dma_blockpool_initial + nblocks,
		device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {

		vxge_hw_device_terminate(hldev);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	status = __vxge_hw_device_initialize(hldev);
	if (status != VXGE_HW_OK) {
		vxge_hw_device_terminate(hldev);
		goto exit;
	}

	*devh = hldev;
exit:
	return status;
}

/*
 * vxge_hw_device_terminate - Terminate Titan device.
 * Terminate HW device.
 */
void
vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
{
	vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);

	hldev->magic = VXGE_HW_DEVICE_DEAD;
	__vxge_hw_blockpool_destroy(&hldev->block_pool);
	vfree(hldev);
}

/*
 * __vxge_hw_vpath_stats_access - Get the statistics from the given location
 *                           and offset and perform an operation
 */
static enum vxge_hw_status
__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
			     u32 operation, u32 offset, u64 *stat)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto vpath_stats_access_exit;
	}

	vp_reg = vpath->vp_reg;

	val64 =  VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
		 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
		 VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
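
	/* the helper below writes the command with the strobe bit set,
	 * then polls until the strobe self-clears or the poll interval
	 * (device_poll_millis) expires */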

	status = __vxge_hw_pio_mem_write64(val64,
				&vp_reg->xmac_stats_access_cmd,
				VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
				vpath->hldev->config.device_poll_millis);
	if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
		*stat = readq(&vp_reg->xmac_stats_access_data);
	else
		*stat = 0;

vpath_stats_access_exit:
	return status;
}

/*
 * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
 */
static enum vxge_hw_status
__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
			struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
{
	u64 *val64;
	int i;
	u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
	enum vxge_hw_status status = VXGE_HW_OK;

	val64 = (u64 *)vpath_tx_stats;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
		status = __vxge_hw_vpath_stats_access(vpath,
					VXGE_HW_STATS_OP_READ,
					offset, val64);
		if (status != VXGE_HW_OK)
			goto exit;
		offset++;
		val64++;
	}
exit:
	return status;
}

/*
 * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
 */
static enum vxge_hw_status
__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
			struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
{
	u64 *val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	int i;
	u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
	val64 = (u64 *) vpath_rx_stats;
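
	/* unlike the TX variant above, offsets here are kept in bytes and
	 * converted to a 64-bit word index (offset >> 3) per access */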

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}
	for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
		status = __vxge_hw_vpath_stats_access(vpath,
					VXGE_HW_STATS_OP_READ,
					offset >> 3, val64);
		if (status != VXGE_HW_OK)
			goto exit;

		offset += 8;
		val64++;
	}
exit:
	return status;
}

/*
 * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
 */
static enum vxge_hw_status
__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
			  struct vxge_hw_vpath_stats_hw_info *hw_stats)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}
	vp_reg = vpath->vp_reg;

	val64 = readq(&vp_reg->vpath_debug_stats0);
	hw_stats->ini_num_mwr_sent =
		(u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);

	val64 = readq(&vp_reg->vpath_debug_stats1);
	hw_stats->ini_num_mrd_sent =
		(u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);

	val64 = readq(&vp_reg->vpath_debug_stats2);
	hw_stats->ini_num_cpl_rcvd =
		(u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);

	val64 = readq(&vp_reg->vpath_debug_stats3);
	hw_stats->ini_num_mwr_byte_sent =
		VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);

	val64 = readq(&vp_reg->vpath_debug_stats4);
	hw_stats->ini_num_cpl_byte_rcvd =
		VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);

	val64 = readq(&vp_reg->vpath_debug_stats5);
	hw_stats->wrcrdtarb_xoff =
		(u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);

	val64 = readq(&vp_reg->vpath_debug_stats6);
	hw_stats->rdcrdtarb_xoff =
		(u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);

	val64 = readq(&vp_reg->vpath_genstats_count01);
	hw_stats->vpath_genstats_count0 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count01);
	hw_stats->vpath_genstats_count1 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count23);
	hw_stats->vpath_genstats_count2 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count23);
	hw_stats->vpath_genstats_count3 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count4);
	hw_stats->vpath_genstats_count4 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count5);
	hw_stats->vpath_genstats_count5 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
		val64);

	status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
	if (status != VXGE_HW_OK)
		goto exit;

	VXGE_HW_VPATH_STATS_PIO_READ(
		VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);

	hw_stats->prog_event_vnum0 =
			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);

	hw_stats->prog_event_vnum1 =
			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);

	VXGE_HW_VPATH_STATS_PIO_READ(
		VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);

	hw_stats->prog_event_vnum2 =
			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);

	hw_stats->prog_event_vnum3 =
			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);

	val64 = readq(&vp_reg->rx_multi_cast_stats);
	hw_stats->rx_multi_cast_frame_discard =
		(u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);

	val64 = readq(&vp_reg->rx_frm_transferred);
	hw_stats->rx_frm_transferred =
		(u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);

	val64 = readq(&vp_reg->rxd_returned);
	hw_stats->rxd_returned =
		(u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);

	val64 = readq(&vp_reg->dbg_stats_rx_mpa);
	hw_stats->rx_mpa_len_fail_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
	hw_stats->rx_mpa_mrk_fail_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
	hw_stats->rx_mpa_crc_fail_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);

	val64 = readq(&vp_reg->dbg_stats_rx_fau);
	hw_stats->rx_permitted_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
	hw_stats->rx_vp_reset_discarded_frms =
	(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
	hw_stats->rx_wol_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);

	val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
	hw_stats->tx_vp_reset_discarded_frms =
	(u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
		val64);
exit:
	return status;
}

/*
 * vxge_hw_device_stats_get - Get the device hw statistics.
 * Returns the vpath h/w stats for the device.
 */
enum vxge_hw_status
vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
			struct vxge_hw_device_stats_hw_info *hw_stats)
{
	u32 i;
	enum vxge_hw_status status = VXGE_HW_OK;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
			(hldev->virtual_paths[i].vp_open ==
				VXGE_HW_VP_NOT_OPEN))
			continue;

		memcpy(hldev->virtual_paths[i].hw_stats_sav,
				hldev->virtual_paths[i].hw_stats,
				sizeof(struct vxge_hw_vpath_stats_hw_info));

		status = __vxge_hw_vpath_stats_get(
			&hldev->virtual_paths[i],
			hldev->virtual_paths[i].hw_stats);
	}

	memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
			sizeof(struct vxge_hw_device_stats_hw_info));

	return status;
}

/*
 * vxge_hw_driver_stats_get - Get the device sw statistics.
 * Returns the vpath s/w stats for the device.
 */
enum vxge_hw_status vxge_hw_driver_stats_get(
			struct __vxge_hw_device *hldev,
			struct vxge_hw_device_stats_sw_info *sw_stats)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
		sizeof(struct vxge_hw_device_stats_sw_info));

	return status;
}

/*
 * vxge_hw_mrpcim_stats_access - Access the statistics at the given location
 *                           and offset and perform the requested operation
 */
enum vxge_hw_status
vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
			    u32 operation, u32 location, u32 offset, u64 *stat)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	status = __vxge_hw_device_is_privilaged(hldev->host_type,
			hldev->func_id);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
		VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
		VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
		VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);

	status = __vxge_hw_pio_mem_write64(val64,
				&hldev->mrpcim_reg->xmac_stats_sys_cmd,
				VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
				hldev->config.device_poll_millis);

	if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
		*stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
	else
		*stat = 0;
exit:
	return status;
}

/*
 * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
 * Get the Statistics on aggregate port
 */
static enum vxge_hw_status
vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
				   struct vxge_hw_xmac_aggr_stats *aggr_stats)
{
	u64 *val64;
	int i;
	u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
	enum vxge_hw_status status = VXGE_HW_OK;

	val64 = (u64 *)aggr_stats;

	status = __vxge_hw_device_is_privilaged(hldev->host_type,
			hldev->func_id);
	if (status != VXGE_HW_OK)
		goto exit;

	for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
		status = vxge_hw_mrpcim_stats_access(hldev,
					VXGE_HW_STATS_OP_READ,
					VXGE_HW_STATS_LOC_AGGR,
					((offset + (104 * port)) >> 3), val64);
		if (status != VXGE_HW_OK)
			goto exit;

		offset += 8;
		val64++;
	}
exit:
	return status;
}

/*
 * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
 * Get the Statistics on port
 */
static enum vxge_hw_status
vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
				   struct vxge_hw_xmac_port_stats *port_stats)
{
	u64 *val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	int i;
	u32 offset = 0x0;
	val64 = (u64 *) port_stats;

	status = __vxge_hw_device_is_privilaged(hldev->host_type,
			hldev->func_id);
	if (status != VXGE_HW_OK)
		goto exit;

	for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
		status = vxge_hw_mrpcim_stats_access(hldev,
					VXGE_HW_STATS_OP_READ,
					VXGE_HW_STATS_LOC_AGGR,
					((offset + (608 * port)) >> 3), val64);
		if (status != VXGE_HW_OK)
			goto exit;

		offset += 8;
		val64++;
	}

exit:
	return status;
}

/*
 * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics
 * Get the XMAC Statistics
 */
enum vxge_hw_status
vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
			      struct vxge_hw_xmac_stats *xmac_stats)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	u32 i;

	status = vxge_hw_device_xmac_aggr_stats_get(hldev,
					0, &xmac_stats->aggr_stats[0]);
	if (status != VXGE_HW_OK)
		goto exit;

	status = vxge_hw_device_xmac_aggr_stats_get(hldev,
				1, &xmac_stats->aggr_stats[1]);
	if (status != VXGE_HW_OK)
		goto exit;

	for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {

		status = vxge_hw_device_xmac_port_stats_get(hldev,
					i, &xmac_stats->port_stats[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		status = __vxge_hw_vpath_xmac_tx_stats_get(
					&hldev->virtual_paths[i],
					&xmac_stats->vpath_tx_stats[i]);
		if (status != VXGE_HW_OK)
			goto exit;

		status = __vxge_hw_vpath_xmac_rx_stats_get(
					&hldev->virtual_paths[i],
					&xmac_stats->vpath_rx_stats[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}

/*
 * vxge_hw_device_debug_set - Set the debug module mask and level
 * This routine is used to dynamically change the debug output
 */
void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
			      enum vxge_debug_level level, u32 mask)
{
	if (hldev == NULL)
		return;

#if defined(VXGE_DEBUG_TRACE_MASK) || \
	defined(VXGE_DEBUG_ERR_MASK)
	hldev->debug_module_mask = mask;
	hldev->debug_level = level;
#endif

#if defined(VXGE_DEBUG_ERR_MASK)
	hldev->level_err = level & VXGE_ERR;
#endif

#if defined(VXGE_DEBUG_TRACE_MASK)
	hldev->level_trace = level & VXGE_TRACE;
#endif
}

/*
 * vxge_hw_device_error_level_get - Get the error level
 * This routine returns the current error level set
 */
u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_ERR_MASK)
	if (hldev == NULL)
		return VXGE_ERR;
	else
		return hldev->level_err;
#else
	return 0;
#endif
}

/*
 * vxge_hw_device_trace_level_get - Get the trace level
 * This routine returns the current trace level set
 */
u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_TRACE_MASK)
	if (hldev == NULL)
		return VXGE_TRACE;
	else
		return hldev->level_trace;
#else
	return 0;
#endif
}

/*
 * vxge_hw_device_getpause_data - Pause frame generation and reception.
 * Returns the Pause frame generation and reception capability of the NIC.
 */
enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
						 u32 port, u32 *tx, u32 *rx)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
		status = VXGE_HW_ERR_INVALID_PORT;
		goto exit;
	}

	if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
		status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
		goto exit;
	}

	val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
	if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
		*tx = 1;
	if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
		*rx = 1;
exit:
	return status;
}

/*
 * vxge_hw_device_setpause_data - Set/reset pause frame generation.
 * It can be used to set or reset Pause frame generation or reception
 * support of the NIC.
 */
enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
						 u32 port, u32 tx, u32 rx)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
		status = VXGE_HW_ERR_INVALID_PORT;
		goto exit;
	}

	status = __vxge_hw_device_is_privilaged(hldev->host_type,
			hldev->func_id);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
	if (tx)
		val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
	else
		val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
	if (rx)
		val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
	else
		val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;

	writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
exit:
	return status;
}

u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
{
	int link_width, exp_cap;
	u16 lnk;
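
	/* the negotiated link width lives in bits 9:4 of the Link Status
	 * register, hence the mask-and-shift below */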

	exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
	pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
	link_width = (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
	return link_width;
}

/*
 * __vxge_hw_ring_block_memblock_idx - Return the memblock index
 * This function returns the index of memory block
 */
static inline u32
__vxge_hw_ring_block_memblock_idx(u8 *block)
{
	return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
}

/*
 * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
 * This function sets index to a memory block
 */
static inline void
__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
{
	*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
}

/*
 * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
 * in RxD block
 * Sets the next block pointer in RxD block
 */
static inline void
__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
{
	*((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
}
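
/*
 * Layout note (inferred from the helpers above and below): each RxD
 * block reserves two u64 bookkeeping slots at fixed offsets, one for
 * the owner memblock index and one for the DMA address of the next
 * block, which is what chains the blocks into a ring the hardware can
 * walk.
 */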

/*
 * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
 *             first block
 * Returns the dma address of the first RxD block
 */
static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
{
	struct vxge_hw_mempool_dma *dma_object;

	dma_object = ring->mempool->memblocks_dma_arr;
	vxge_assert(dma_object != NULL);

	return dma_object->addr;
}

/*
 * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
 * This function returns the dma address of a given item
 */
static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
					       void *item)
{
	u32 memblock_idx;
	void *memblock;
	struct vxge_hw_mempool_dma *memblock_dma_object;
	ptrdiff_t dma_item_offset;

	/* get owner memblock index */
	memblock_idx = __vxge_hw_ring_block_memblock_idx(item);

	/* get owner memblock by memblock index */
	memblock = mempoolh->memblocks_arr[memblock_idx];

	/* get memblock DMA object by memblock index */
	memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;

	/* calculate offset in the memblock of this item */
	dma_item_offset = (u8 *)item - (u8 *)memblock;

	return memblock_dma_object->addr + dma_item_offset;
}

/*
 * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
 * This function links the "from" RxD block to the "to" RxD block
 */
static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
					 struct __vxge_hw_ring *ring, u32 from,
					 u32 to)
{
	u8 *to_item, *from_item;
	dma_addr_t to_dma;

	/* get "from" RxD block */
	from_item = mempoolh->items_arr[from];
	vxge_assert(from_item);

	/* get "to" RxD block */
	to_item = mempoolh->items_arr[to];
	vxge_assert(to_item);

	/* get the DMA start address of the "to" RxD block */
	to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);

	/* point the "from" block's next pointer at the "to" block's
	 * DMA start address */
	__vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
}

/*
 * __vxge_hw_ring_mempool_item_alloc - RxD block allocation callback
 * This function is the callback passed to __vxge_hw_mempool_create to
 * format each newly allocated RxD block
 */
static void
__vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
				  u32 memblock_index,
				  struct vxge_hw_mempool_dma *dma_object,
				  u32 index, u32 is_last)
{
	u32 i;
	void *item = mempoolh->items_arr[index];
	struct __vxge_hw_ring *ring =
		(struct __vxge_hw_ring *)mempoolh->userdata;

	/* format rxds array */
	for (i = 0; i < ring->rxds_per_block; i++) {
		void *rxdblock_priv;
		void *uld_priv;
		struct vxge_hw_ring_rxd_1 *rxdp;

		u32 reserve_index = ring->channel.reserve_ptr -
				(index * ring->rxds_per_block + i + 1);
		u32 memblock_item_idx;

		ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
						i * ring->rxd_size;

		/* Note: memblock_item_idx is index of the item within
		 *       the memblock. For instance, in case of three RxD-blocks
		 *       per memblock this value can be 0, 1 or 2. */
		rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
					memblock_index, item,
					&memblock_item_idx);

		rxdp = (struct vxge_hw_ring_rxd_1 *)
				ring->channel.reserve_arr[reserve_index];

		uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);

		/* pre-format Host_Control */
		rxdp->host_control = (u64)(size_t)uld_priv;
	}

	__vxge_hw_ring_block_memblock_idx_set(item, memblock_index);

	if (is_last) {
		/* link last one with first one */
		__vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
	}

	if (index > 0) {
		/* link this RxD block with previous one */
		__vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
	}
}

/*
 * vxge_hw_ring_replenish - Initial replenish of RxDs
 * This function replenishes the RxDs from the reserve array to the work array
 */
enum vxge_hw_status
vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
{
	void *rxd;
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &ring->channel;

	while (vxge_hw_channel_dtr_count(channel) > 0) {

		status = vxge_hw_ring_rxd_reserve(ring, &rxd);

		vxge_assert(status == VXGE_HW_OK);

		if (ring->rxd_init) {
			status = ring->rxd_init(rxd, channel->userdata);
			if (status != VXGE_HW_OK) {
				vxge_hw_ring_rxd_free(ring, rxd);
				goto exit;
			}
		}

		vxge_hw_ring_rxd_post(ring, rxd);
	}
	status = VXGE_HW_OK;
exit:
	return status;
}

/*
 * __vxge_hw_channel_allocate - Allocate memory for channel
 * This function allocates required memory for the channel and various arrays
 * in the channel
 */
static struct __vxge_hw_channel *
__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
			   enum __vxge_hw_channel_type type,
			   u32 length, u32 per_dtr_space,
			   void *userdata)
{
	struct __vxge_hw_channel *channel;
	struct __vxge_hw_device *hldev;
	int size = 0;
	u32 vp_id;

	hldev = vph->vpath->hldev;
	vp_id = vph->vpath->vp_id;

	switch (type) {
	case VXGE_HW_CHANNEL_TYPE_FIFO:
		size = sizeof(struct __vxge_hw_fifo);
		break;
	case VXGE_HW_CHANNEL_TYPE_RING:
		size = sizeof(struct __vxge_hw_ring);
		break;
	default:
		break;
	}

	channel = kzalloc(size, GFP_KERNEL);
	if (channel == NULL)
		goto exit0;
	INIT_LIST_HEAD(&channel->item);

	channel->common_reg = hldev->common_reg;
	channel->first_vp_id = hldev->first_vp_id;
	channel->type = type;
	channel->devh = hldev;
	channel->vph = vph;
	channel->userdata = userdata;
	channel->per_dtr_space = per_dtr_space;
	channel->length = length;
	channel->vp_id = vp_id;
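
	/* four parallel arrays, each 'length' entries deep: work (DTRs
	 * posted to the device), free (completed), reserve (available)
	 * and orig (the initial reserve ordering, kept for channel reset) */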

	channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->work_arr == NULL)
		goto exit1;

	channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->free_arr == NULL)
		goto exit1;
	channel->free_ptr = length;

	channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->reserve_arr == NULL)
		goto exit1;
	channel->reserve_ptr = length;
	channel->reserve_top = 0;

	channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->orig_arr == NULL)
		goto exit1;

	return channel;
exit1:
	__vxge_hw_channel_free(channel);

exit0:
	return NULL;
}

/*
 * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
 * Adds a block to block pool
 */
static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
					void *block_addr,
					u32 length,
					struct pci_dev *dma_h,
					struct pci_dev *acc_handle)
{
	struct __vxge_hw_blockpool *blockpool;
	struct __vxge_hw_blockpool_entry *entry = NULL;
	dma_addr_t dma_addr;
	enum vxge_hw_status status = VXGE_HW_OK;

	blockpool = &devh->block_pool;

	if (block_addr == NULL) {
		blockpool->req_out--;
		status = VXGE_HW_FAIL;
		goto exit;
	}

	dma_addr = pci_map_single(devh->pdev, block_addr, length,
				PCI_DMA_BIDIRECTIONAL);

	if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
		vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
		blockpool->req_out--;
		status = VXGE_HW_FAIL;
		goto exit;
	}

	if (!list_empty(&blockpool->free_entry_list))
		entry = (struct __vxge_hw_blockpool_entry *)
			list_first_entry(&blockpool->free_entry_list,
				struct __vxge_hw_blockpool_entry,
				item);

	if (entry == NULL)
		entry =	vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
	else
		list_del(&entry->item);

	if (entry != NULL) {
		entry->length = length;
		entry->memblock = block_addr;
		entry->dma_addr = dma_addr;
		entry->acc_handle = acc_handle;
		entry->dma_handle = dma_h;
		list_add(&entry->item, &blockpool->free_block_list);
		blockpool->pool_size++;
		status = VXGE_HW_OK;
	} else
		status = VXGE_HW_ERR_OUT_OF_MEMORY;

	blockpool->req_out--;
exit:
	return;
}

static inline void
vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size)
{
	gfp_t flags;
	void *vaddr;

	if (in_interrupt())
		flags = GFP_ATOMIC | GFP_DMA;
	else
		flags = GFP_KERNEL | GFP_DMA;

	vaddr = kmalloc(size, flags);

	vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
}

/*
 * __vxge_hw_blockpool_blocks_add - Request additional blocks
 */
static
void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
{
	u32 nreq = 0, i;

	if ((blockpool->pool_size  +  blockpool->req_out) <
		VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
		nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
		blockpool->req_out += nreq;
	}
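
	/* replenish in fixed-size bursts; req_out counts the async
	 * requests still in flight so back-to-back calls do not
	 * over-request */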

	for (i = 0; i < nreq; i++)
		vxge_os_dma_malloc_async(
			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
			blockpool->hldev, VXGE_HW_BLOCK_SIZE);
}

/*
 * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
 * Allocates a block of memory of given size, either from block pool
 * or by calling vxge_os_dma_malloc()
 */
static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
					struct vxge_hw_mempool_dma *dma_object)
{
	struct __vxge_hw_blockpool_entry *entry = NULL;
	struct __vxge_hw_blockpool  *blockpool;
	void *memblock = NULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	blockpool = &devh->block_pool;
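
	/* only blocks of exactly block_size are pooled; any other size
	 * is allocated directly via vxge_os_dma_malloc and never cached */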

	if (size != blockpool->block_size) {

		memblock = vxge_os_dma_malloc(devh->pdev, size,
						&dma_object->handle,
						&dma_object->acc_handle);

		if (memblock == NULL) {
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto exit;
		}

		dma_object->addr = pci_map_single(devh->pdev, memblock, size,
					PCI_DMA_BIDIRECTIONAL);

		if (unlikely(pci_dma_mapping_error(devh->pdev,
				dma_object->addr))) {
			vxge_os_dma_free(devh->pdev, memblock,
				&dma_object->acc_handle);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto exit;
		}

	} else {

		if (!list_empty(&blockpool->free_block_list))
			entry = (struct __vxge_hw_blockpool_entry *)
				list_first_entry(&blockpool->free_block_list,
					struct __vxge_hw_blockpool_entry,
					item);

		if (entry != NULL) {
			list_del(&entry->item);
			dma_object->addr = entry->dma_addr;
			dma_object->handle = entry->dma_handle;
			dma_object->acc_handle = entry->acc_handle;
			memblock = entry->memblock;

			list_add(&entry->item,
				&blockpool->free_entry_list);
			blockpool->pool_size--;
		}

		if (memblock != NULL)
			__vxge_hw_blockpool_blocks_add(blockpool);
	}
exit:
	return memblock;
}

/*
 * __vxge_hw_blockpool_blocks_remove - Free additional blocks
 */
static void
__vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &blockpool->free_block_list) {

		if (blockpool->pool_size < blockpool->pool_max)
			break;

		pci_unmap_single(
			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
			((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
			((struct __vxge_hw_blockpool_entry *)p)->length,
			PCI_DMA_BIDIRECTIONAL);

		vxge_os_dma_free(
			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
			((struct __vxge_hw_blockpool_entry *)p)->memblock,
			&((struct __vxge_hw_blockpool_entry *)p)->acc_handle);

		list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);

		list_add(p, &blockpool->free_entry_list);

		blockpool->pool_size--;

	}
}

/*
 * __vxge_hw_blockpool_free - Frees the memory allocated with
 *				__vxge_hw_blockpool_malloc
 */
 */
static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
				     void *memblock, u32 size,
				     struct vxge_hw_mempool_dma *dma_object)
{
	struct __vxge_hw_blockpool_entry *entry = NULL;
	struct __vxge_hw_blockpool  *blockpool;
	enum vxge_hw_status status = VXGE_HW_OK;

	blockpool = &devh->block_pool;

	if (size != blockpool->block_size) {
		pci_unmap_single(devh->pdev, dma_object->addr, size,
			PCI_DMA_BIDIRECTIONAL);
		vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
	} else {

		if (!list_empty(&blockpool->free_entry_list))
			entry = (struct __vxge_hw_blockpool_entry *)
				list_first_entry(&blockpool->free_entry_list,
					struct __vxge_hw_blockpool_entry,
					item);

		if (entry == NULL)
			entry =	vmalloc(sizeof(
					struct __vxge_hw_blockpool_entry));
		else
			list_del(&entry->item);

		if (entry != NULL) {
			entry->length = size;
			entry->memblock = memblock;
			entry->dma_addr = dma_object->addr;
			entry->acc_handle = dma_object->acc_handle;
			entry->dma_handle = dma_object->handle;
			list_add(&entry->item,
					&blockpool->free_block_list);
			blockpool->pool_size++;
			status = VXGE_HW_OK;
		} else
			status = VXGE_HW_ERR_OUT_OF_MEMORY;

		if (status == VXGE_HW_OK)
			__vxge_hw_blockpool_blocks_remove(blockpool);
	}
}

/*
 * __vxge_hw_mempool_destroy
 */
static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
{
	u32 i;
	struct __vxge_hw_device *devh = mempool->devh;

	for (i = 0; i < mempool->memblocks_allocated; i++) {
		struct vxge_hw_mempool_dma *dma_object;

		vxge_assert(mempool->memblocks_arr[i]);
		vxge_assert(mempool->memblocks_dma_arr + i);

		dma_object = mempool->memblocks_dma_arr + i;


		vfree(mempool->memblocks_priv_arr[i]);

		__vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
				mempool->memblock_size, dma_object);
	}

	vfree(mempool->items_arr);
	vfree(mempool->memblocks_dma_arr);
	vfree(mempool->memblocks_priv_arr);
	vfree(mempool->memblocks_arr);
	vfree(mempool);
}

/*
 * __vxge_hw_mempool_grow
 * Will grow the mempool by up to %num_allocate memblocks.
 */
static enum vxge_hw_status
__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
		       u32 *num_allocated)
{
	u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
	u32 n_items = mempool->items_per_memblock;
	u32 start_block_idx = mempool->memblocks_allocated;
	u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
	enum vxge_hw_status status = VXGE_HW_OK;

	*num_allocated = 0;

	if (end_block_idx > mempool->memblocks_max) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	for (i = start_block_idx; i < end_block_idx; i++) {
		u32 j;
		u32 is_last = ((end_block_idx - 1) == i);
		struct vxge_hw_mempool_dma *dma_object =
			mempool->memblocks_dma_arr + i;
		void *the_memblock;

		/* allocate memblock's private part. Each DMA memblock
		 * has a space allocated for item's private usage upon
		 * mempool's user request. Each time mempool grows, it will
		 * allocate new memblock and its private part at once.
		 * This helps to minimize memory usage a lot. */
		mempool->memblocks_priv_arr[i] =
				vzalloc(mempool->items_priv_size * n_items);
		if (mempool->memblocks_priv_arr[i] == NULL) {
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto exit;
		}

		/* allocate DMA-capable memblock */
		mempool->memblocks_arr[i] =
			__vxge_hw_blockpool_malloc(mempool->devh,
				mempool->memblock_size, dma_object);
		if (mempool->memblocks_arr[i] == NULL) {
			vfree(mempool->memblocks_priv_arr[i]);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto exit;
		}

		(*num_allocated)++;
		mempool->memblocks_allocated++;

		memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);

		the_memblock = mempool->memblocks_arr[i];

		/* fill the items hash array */
		for (j = 0; j < n_items; j++) {
			u32 index = i * n_items + j;

			if (first_time && index >= mempool->items_initial)
				break;

			mempool->items_arr[index] =
				((char *)the_memblock + j*mempool->item_size);

			/* let caller to do more job on each item */
			if (mempool->item_func_alloc != NULL)
				mempool->item_func_alloc(mempool, i,
					dma_object, index, is_last);

			mempool->items_current = index + 1;
		}

		if (first_time && mempool->items_current ==
					mempool->items_initial)
			break;
	}
exit:
	return status;
}

/*
 * __vxge_hw_mempool_create
 * This function creates a memory pool object. The pool may grow but will
 * never shrink. It consists of a number of dynamically allocated blocks,
 * together large enough to hold %items_initial items. Memory is DMA-able,
 * but the client must map/unmap it before interoperating with the device.
 */
static struct vxge_hw_mempool *
__vxge_hw_mempool_create(struct __vxge_hw_device *devh,
			 u32 memblock_size,
			 u32 item_size,
			 u32 items_priv_size,
			 u32 items_initial,
			 u32 items_max,
			 struct vxge_hw_mempool_cbs *mp_callback,
			 void *userdata)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	u32 memblocks_to_allocate;
	struct vxge_hw_mempool *mempool = NULL;
	u32 allocated;

	if (memblock_size < item_size) {
		status = VXGE_HW_FAIL;
		goto exit;
	}

	mempool = vzalloc(sizeof(struct vxge_hw_mempool));
	if (mempool == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	mempool->devh			= devh;
	mempool->memblock_size		= memblock_size;
	mempool->items_max		= items_max;
	mempool->items_initial		= items_initial;
	mempool->item_size		= item_size;
	mempool->items_priv_size	= items_priv_size;
	mempool->item_func_alloc	= mp_callback->item_func_alloc;
	mempool->userdata		= userdata;

	mempool->memblocks_allocated = 0;

	mempool->items_per_memblock = memblock_size / item_size;

	mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
					mempool->items_per_memblock;

	/* allocate array of memblocks */
	mempool->memblocks_arr =
		vzalloc(sizeof(void *) * mempool->memblocks_max);
	if (mempool->memblocks_arr == NULL) {
		__vxge_hw_mempool_destroy(mempool);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		mempool = NULL;
		goto exit;
	}

	/* allocate array of private parts of items per memblocks */
	mempool->memblocks_priv_arr =
		vzalloc(sizeof(void *) * mempool->memblocks_max);
	if (mempool->memblocks_priv_arr == NULL) {
		__vxge_hw_mempool_destroy(mempool);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		mempool = NULL;
		goto exit;
	}

	/* allocate array of memblocks DMA objects */
	mempool->memblocks_dma_arr =
		vzalloc(sizeof(struct vxge_hw_mempool_dma) *
			mempool->memblocks_max);
	if (mempool->memblocks_dma_arr == NULL) {
		__vxge_hw_mempool_destroy(mempool);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		mempool = NULL;
		goto exit;
	}

	/* allocate hash array of items */
	mempool->items_arr = vzalloc(sizeof(void *) * mempool->items_max);
	if (mempool->items_arr == NULL) {
		__vxge_hw_mempool_destroy(mempool);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		mempool = NULL;
		goto exit;
	}

	/* calculate initial number of memblocks */
	memblocks_to_allocate = (mempool->items_initial +
				 mempool->items_per_memblock - 1) /
						mempool->items_per_memblock;

	/* pre-allocate the mempool */
	status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
					&allocated);
	if (status != VXGE_HW_OK) {
		__vxge_hw_mempool_destroy(mempool);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		mempool = NULL;
		goto exit;
	}

exit:
	return mempool;
}
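
/*
 * Sizing sketch (illustrative): with memblock_size == item_size ==
 * VXGE_HW_BLOCK_SIZE, items_per_memblock works out to 1 and
 * memblocks_max equals items_max, which is how __vxge_hw_ring_create
 * below builds its RxD-block pool.
 */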

/*
 * __vxge_hw_ring_abort - Aborts the outstanding RxDs
 * This function terminates the outstanding RxDs of the ring
 */
static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
{
	void *rxdh;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	for (;;) {
		vxge_hw_channel_dtr_try_complete(channel, &rxdh);

		if (rxdh == NULL)
			break;

		vxge_hw_channel_dtr_complete(channel);

		if (ring->rxd_term)
			ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
				channel->userdata);

		vxge_hw_channel_dtr_free(channel, rxdh);
	}

	return VXGE_HW_OK;
}

/*
 * __vxge_hw_ring_reset - Resets the ring
 * This function resets the ring during vpath reset operation
 */
static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	__vxge_hw_ring_abort(ring);

	status = __vxge_hw_channel_reset(channel);

	if (status != VXGE_HW_OK)
		goto exit;

	if (ring->rxd_init) {
		status = vxge_hw_ring_replenish(ring);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}

/*
 * __vxge_hw_ring_delete - Removes the ring
 * This function frees the memory pool and removes the ring
 */
static enum vxge_hw_status
__vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_ring *ring = vp->vpath->ringh;

	__vxge_hw_ring_abort(ring);

	if (ring->mempool)
		__vxge_hw_mempool_destroy(ring->mempool);

	vp->vpath->ringh = NULL;
	__vxge_hw_channel_free(&ring->channel);

	return VXGE_HW_OK;
}

/*
 * __vxge_hw_ring_create - Create a Ring
 * This function creates Ring and initializes it.
 */
static enum vxge_hw_status
__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
		      struct vxge_hw_ring_attr *attr)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_ring *ring;
	u32 ring_length;
	struct vxge_hw_ring_config *config;
	struct __vxge_hw_device *hldev;
	u32 vp_id;
	struct vxge_hw_mempool_cbs ring_mp_callback;

	if ((vp == NULL) || (attr == NULL)) {
		status = VXGE_HW_FAIL;
		goto exit;
	}

	hldev = vp->vpath->hldev;
	vp_id = vp->vpath->vp_id;

	config = &hldev->config.vp_config[vp_id].ring;

	ring_length = config->ring_blocks *
			vxge_hw_ring_rxds_per_block_get(config->buffer_mode);

	ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
						VXGE_HW_CHANNEL_TYPE_RING,
						ring_length,
						attr->per_rxd_space,
						attr->userdata);
	if (ring == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	vp->vpath->ringh = ring;
	ring->vp_id = vp_id;
	ring->vp_reg = vp->vpath->vp_reg;
	ring->common_reg = hldev->common_reg;
	ring->stats = &vp->vpath->sw_stats->ring_stats;
	ring->config = config;
	ring->callback = attr->callback;
	ring->rxd_init = attr->rxd_init;
	ring->rxd_term = attr->rxd_term;
	ring->buffer_mode = config->buffer_mode;
	ring->rxds_limit = config->rxds_limit;

	ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
	ring->rxd_priv_size =
		sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
	ring->per_rxd_space = attr->per_rxd_space;

	ring->rxd_priv_size =
		((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
		VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;

	/* how many RxDs can fit into one block. Depends on configured
	 * buffer_mode. */
	ring->rxds_per_block =
		vxge_hw_ring_rxds_per_block_get(config->buffer_mode);

	/* calculate actual RxD block private size */
	ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
	ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
	ring->mempool = __vxge_hw_mempool_create(hldev,
				VXGE_HW_BLOCK_SIZE,
				VXGE_HW_BLOCK_SIZE,
				ring->rxdblock_priv_size,
				ring->config->ring_blocks,
				ring->config->ring_blocks,
				&ring_mp_callback,
				ring);
	if (ring->mempool == NULL) {
		__vxge_hw_ring_delete(vp);
		return VXGE_HW_ERR_OUT_OF_MEMORY;
	}

	status = __vxge_hw_channel_initialize(&ring->channel);
	if (status != VXGE_HW_OK) {
		__vxge_hw_ring_delete(vp);
		goto exit;
	}

	/* Note:
	 * Specifying rxd_init callback means two things:
	 * 1) rxds need to be initialized by driver at channel-open time;
	 * 2) rxds need to be posted at channel-open time
	 *    (that's what the initial_replenish() below does)
	 * Currently we don't have a case when the 1) is done without the 2).
	 */
	if (ring->rxd_init) {
		status = vxge_hw_ring_replenish(ring);
		if (status != VXGE_HW_OK) {
			__vxge_hw_ring_delete(vp);
			goto exit;
		}
	}

	/* initial replenish will increment the counter in its post() routine,
	 * we have to reset it */
	ring->stats->common_stats.usage_cnt = 0;
exit:
	return status;
}

/*
 * vxge_hw_device_config_default_get - Initialize device config with defaults.
 * Initialize Titan device config with default values.
 */
enum vxge_hw_status __devinit
vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
{
	u32 i;

	device_config->dma_blockpool_initial =
					VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
	device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
	device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
	device_config->rth_en = VXGE_HW_RTH_DEFAULT;
	device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
	device_config->device_poll_millis =  VXGE_HW_DEF_DEVICE_POLL_MILLIS;
	device_config->rts_mac_en =  VXGE_HW_RTS_MAC_DEFAULT;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		device_config->vp_config[i].vp_id = i;

		device_config->vp_config[i].min_bandwidth =
				VXGE_HW_VPATH_BANDWIDTH_DEFAULT;

		device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;

		device_config->vp_config[i].ring.ring_blocks =
				VXGE_HW_DEF_RING_BLOCKS;

		device_config->vp_config[i].ring.buffer_mode =
				VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;

		device_config->vp_config[i].ring.scatter_mode =
				VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;

		device_config->vp_config[i].ring.rxds_limit =
				VXGE_HW_DEF_RING_RXDS_LIMIT;

		device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;

		device_config->vp_config[i].fifo.fifo_blocks =
				VXGE_HW_MIN_FIFO_BLOCKS;

		device_config->vp_config[i].fifo.max_frags =
				VXGE_HW_MAX_FIFO_FRAGS;

		device_config->vp_config[i].fifo.memblock_size =
				VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;

		device_config->vp_config[i].fifo.alignment_size =
				VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;

		device_config->vp_config[i].fifo.intr =
				VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;

		device_config->vp_config[i].fifo.no_snoop_bits =
				VXGE_HW_FIFO_NO_SNOOP_DEFAULT;
		device_config->vp_config[i].tti.intr_enable =
				VXGE_HW_TIM_INTR_DEFAULT;

		device_config->vp_config[i].tti.btimer_val =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.timer_ac_en =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.timer_ci_en =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.timer_ri_en =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.rtimer_val =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.util_sel =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.ltimer_val =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.urange_a =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.uec_a =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.urange_b =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.uec_b =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.urange_c =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.uec_c =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].tti.uec_d =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.intr_enable =
				VXGE_HW_TIM_INTR_DEFAULT;

		device_config->vp_config[i].rti.btimer_val =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.timer_ac_en =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.timer_ci_en =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.timer_ri_en =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.rtimer_val =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.util_sel =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.ltimer_val =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.urange_a =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.uec_a =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.urange_b =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.uec_b =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.urange_c =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.uec_c =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].rti.uec_d =
				VXGE_HW_USE_FLASH_DEFAULT;

		device_config->vp_config[i].mtu =
				VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;

		device_config->vp_config[i].rpa_strip_vlan_tag =
			VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
	}

	return VXGE_HW_OK;
}
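
/*
 * Example (hypothetical caller, not from this file): fetch the defaults
 * and override selected fields before initializing the device; the
 * VXGE_HW_INTR_MODE_MSIX enumerator is assumed from vxge-config.h:
 *
 *	struct vxge_hw_device_config cfg;
 *
 *	vxge_hw_device_config_default_get(&cfg);
 *	cfg.intr_mode = VXGE_HW_INTR_MODE_MSIX;
 */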

/*
 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
 * Set the swapper bits appropriately for the vpath.
 */
static enum vxge_hw_status
__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
#ifndef __BIG_ENDIAN
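	/* little-endian host: turn on the Titan byte swapper for this
	 * vpath; on big-endian hosts the register byte order already
	 * matches and no swap is needed
	 */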
	u64 val64;

	val64 = readq(&vpath_reg->vpath_general_cfg1);
	wmb();
	val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
	writeq(val64, &vpath_reg->vpath_general_cfg1);
	wmb();
#endif
	return VXGE_HW_OK;
}

/*
 * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
 * Set the swapper bits appropriately for the KDFC FIFOs of the vpath.
 */
static enum vxge_hw_status
__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
			   struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
	u64 val64;

	val64 = readq(&legacy_reg->pifm_wr_swap_en);

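	/* mirror the legacy PIFM write-swap setting onto this vpath's
	 * three KDFC FIFOs so doorbell writes are swapped consistently
	 */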
	if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
		val64 = readq(&vpath_reg->kdfcctl_cfg0);
		wmb();

		val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0	|
			VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1	|
			VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;

		writeq(val64, &vpath_reg->kdfcctl_cfg0);
		wmb();
	}

	return VXGE_HW_OK;
}

/*
 * vxge_hw_mgmt_reg_read - Read Titan register.
 */
enum vxge_hw_status
vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
		      enum vxge_hw_mgmt_reg_type type,
		      u32 index, u32 offset, u64 *value)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	switch (type) {
	case vxge_hw_mgmt_reg_type_legacy:
		if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->legacy_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_toc:
		if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->toc_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_common:
		if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->common_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_mrpcim:
		if (!(hldev->access_rights &
			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
			break;
		}
		if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->mrpcim_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_srpcim:
		if (!(hldev->access_rights &
			VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
			break;
		}
		if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->srpcim_reg[index] +
				offset);
		break;
	case vxge_hw_mgmt_reg_type_vpmgmt:
		if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
			(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
				offset);
		break;
	case vxge_hw_mgmt_reg_type_vpath:
		if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
			(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		*value = readq((void __iomem *)hldev->vpath_reg[index] +
				offset);
		break;
	default:
		status = VXGE_HW_ERR_INVALID_TYPE;
		break;
	}

exit:
	return status;
}

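/*
 * Example (hypothetical caller, not from this file): read the first
 * quadword of the common register space:
 *
 *	u64 val;
 *
 *	if (vxge_hw_mgmt_reg_read(hldev, vxge_hw_mgmt_reg_type_common,
 *				  0, 0, &val) == VXGE_HW_OK)
 *		pr_info("common reg 0: 0x%llx\n", val);
 */
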
/*
 * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
 */
enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
{
	struct vxge_hw_vpmgmt_reg       __iomem *vpmgmt_reg;
	enum vxge_hw_status status = VXGE_HW_OK;
	int i = 0, j = 0;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!((vpath_mask) & vxge_mBIT(i)))
			continue;
		vpmgmt_reg = hldev->vpmgmt_reg[i];
		for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
			if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
			& VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
				return VXGE_HW_FAIL;
		}
	}
	return status;
}

/*
 * vxge_hw_mgmt_reg_write - Write Titan register.
 */
enum vxge_hw_status
vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
		      enum vxge_hw_mgmt_reg_type type,
		      u32 index, u32 offset, u64 value)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	switch (type) {
	case vxge_hw_mgmt_reg_type_legacy:
		if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->legacy_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_toc:
		if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->toc_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_common:
		if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->common_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_mrpcim:
		if (!(hldev->access_rights &
			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
			break;
		}
		if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
		break;
	case vxge_hw_mgmt_reg_type_srpcim:
		if (!(hldev->access_rights &
			VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
			break;
		}
		if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
			offset);

		break;
	case vxge_hw_mgmt_reg_type_vpmgmt:
		if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
			(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
			offset);
		break;
	case vxge_hw_mgmt_reg_type_vpath:
		if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES-1) ||
			(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
			status = VXGE_HW_ERR_INVALID_INDEX;
			break;
		}
		if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
			status = VXGE_HW_ERR_INVALID_OFFSET;
			break;
		}
		writeq(value, (void __iomem *)hldev->vpath_reg[index] +
			offset);
		break;
	default:
		status = VXGE_HW_ERR_INVALID_TYPE;
		break;
	}
exit:
	return status;
}

/*
 * __vxge_hw_fifo_abort - Return outstanding TxDs to the driver
 * This function completes and frees all TxDs still posted to the fifo.
 */
static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
{
	void *txdlh;

	for (;;) {
		vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);

		if (txdlh == NULL)
			break;

		vxge_hw_channel_dtr_complete(&fifo->channel);

		if (fifo->txdl_term) {
			fifo->txdl_term(txdlh,
			VXGE_HW_TXDL_STATE_POSTED,
			fifo->channel.userdata);
		}

		vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
	}

	return VXGE_HW_OK;
}

/*
 * __vxge_hw_fifo_reset - Resets the fifo
 * This function resets the fifo during vpath reset operation
 */
static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	__vxge_hw_fifo_abort(fifo);
	status = __vxge_hw_channel_reset(&fifo->channel);

	return status;
}

/*
 * __vxge_hw_fifo_delete - Removes the FIFO
 * This function frees up the memory pool and removes the FIFO.
 */
static enum vxge_hw_status
__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;

	__vxge_hw_fifo_abort(fifo);

	if (fifo->mempool)
		__vxge_hw_mempool_destroy(fifo->mempool);

	vp->vpath->fifoh = NULL;

	__vxge_hw_channel_free(&fifo->channel);

	return VXGE_HW_OK;
}

/*
 * __vxge_hw_fifo_mempool_item_alloc - TxD list block allocation callback
 * This function is the callback passed to __vxge_hw_mempool_create to
 * create the memory pool for the TxD lists.
 */
static void
__vxge_hw_fifo_mempool_item_alloc(
	struct vxge_hw_mempool *mempoolh,
	u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
	u32 index, u32 is_last)
{
	u32 memblock_item_idx;
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	struct vxge_hw_fifo_txd *txdp =
		(struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
	struct __vxge_hw_fifo *fifo =
			(struct __vxge_hw_fifo *)mempoolh->userdata;
	void *memblock = mempoolh->memblocks_arr[memblock_index];

	vxge_assert(txdp);

	txdp->host_control = (u64) (size_t)
	__vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
					&memblock_item_idx);

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);

	vxge_assert(txdl_priv);

	fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;

	/* pre-format HW's TxDL's private */
	txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
	txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
	txdl_priv->dma_handle = dma_object->handle;
	txdl_priv->memblock   = memblock;
	txdl_priv->first_txdp = txdp;
	txdl_priv->next_txdl_priv = NULL;
	txdl_priv->alloc_frags = 0;
}

/*
 * __vxge_hw_fifo_create - Create a FIFO
 * This function creates a FIFO and initializes it.
 */
static enum vxge_hw_status
__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
		      struct vxge_hw_fifo_attr *attr)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_fifo *fifo;
	struct vxge_hw_fifo_config *config;
	u32 txdl_size, txdl_per_memblock;
	struct vxge_hw_mempool_cbs fifo_mp_callback;
	struct __vxge_hw_virtualpath *vpath;

	if ((vp == NULL) || (attr == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}
	vpath = vp->vpath;
	config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;

	txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);

	txdl_per_memblock = config->memblock_size / txdl_size;

	fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
					VXGE_HW_CHANNEL_TYPE_FIFO,
					config->fifo_blocks * txdl_per_memblock,
					attr->per_txdl_space, attr->userdata);

	if (fifo == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	vpath->fifoh = fifo;
	fifo->nofl_db = vpath->nofl_db;

	fifo->vp_id = vpath->vp_id;
	fifo->vp_reg = vpath->vp_reg;
	fifo->stats = &vpath->sw_stats->fifo_stats;

	fifo->config = config;

	/* apply "interrupts per txdl" attribute */
	fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;

	if (fifo->config->intr)
		fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;

	fifo->no_snoop_bits = config->no_snoop_bits;

	/*
	 * FIFO memory management strategy:
	 *
	 * TxDL split into three independent parts:
	 *	- set of TxD's
	 *	- TxD HW private part
	 *	- driver private part
	 *
	 * Adaptive memory allocation is used, i.e. memory is allocated on
	 * demand with a size that fits into one memory block.
	 * One memory block may contain more than one TxDL.
	 *
	 * During "reserve" operations more memory can be allocated on demand,
	 * for example due to a FIFO-full condition.
	 *
	 * The pool of memblocks never shrinks except in __vxge_hw_fifo_delete()
	 * which essentially stops the channel and frees the resources.
	 */

	/* TxDL common private size == TxDL private  +  driver private */
	fifo->priv_size =
		sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
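	/* round the combined private area up to whole cachelines
	 * (presumably so neighbouring TxDLs' private data do not share
	 * a cacheline)
	 */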
	fifo->priv_size = ((fifo->priv_size  +  VXGE_CACHE_LINE_SIZE - 1) /
			VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;

	fifo->per_txdl_space = attr->per_txdl_space;

	/* record the TxDL geometry computed above */
	fifo->txdl_size = txdl_size;
	fifo->txdl_per_memblock = txdl_per_memblock;

	fifo->txdl_term = attr->txdl_term;
	fifo->callback = attr->callback;

	if (fifo->txdl_per_memblock == 0) {
		__vxge_hw_fifo_delete(vp);
		status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
		goto exit;
	}

	fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;

	fifo->mempool =
		__vxge_hw_mempool_create(vpath->hldev,
			fifo->config->memblock_size,
			fifo->txdl_size,
			fifo->priv_size,
			(fifo->config->fifo_blocks * fifo->txdl_per_memblock),
			(fifo->config->fifo_blocks * fifo->txdl_per_memblock),
			&fifo_mp_callback,
			fifo);

	if (fifo->mempool == NULL) {
		__vxge_hw_fifo_delete(vp);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	status = __vxge_hw_channel_initialize(&fifo->channel);
	if (status != VXGE_HW_OK) {
		__vxge_hw_fifo_delete(vp);
		goto exit;
	}

	vxge_assert(fifo->channel.reserve_ptr);
exit:
	return status;
}

/*
 * __vxge_hw_vpath_pci_read - Read the content of a given address
 *                          in PCI config space.
 * Read from the vpath PCI config space.
 */
static enum vxge_hw_status
__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
			 u32 phy_func_0, u32 offset, u32 *val)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;

	val64 =	VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);

	if (phy_func_0)
		val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;

	writeq(val64, &vp_reg->pci_config_access_cfg1);
	wmb();
	writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
			&vp_reg->pci_config_access_cfg2);
	wmb();

	status = __vxge_hw_device_register_poll(
			&vp_reg->pci_config_access_cfg2,
			VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);

	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&vp_reg->pci_config_access_status);

	if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
		status = VXGE_HW_FAIL;
		*val = 0;
	} else
		*val = (u32)vxge_bVALn(val64, 32, 32);
exit:
	return status;
}

/**
 * vxge_hw_device_flick_link_led - Flick (blink) link LED.
 * @hldev: HW device.
 * @on_off: TRUE to turn flickering on, FALSE to turn it off
 *
 * Flicker the link LED.
 */
enum vxge_hw_status
vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off)
{
	struct __vxge_hw_virtualpath *vpath;
	u64 data0, data1 = 0, steer_ctrl = 0;
	enum vxge_hw_status status;

	if (hldev == NULL) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	vpath = &hldev->virtual_paths[hldev->first_vp_id];

	data0 = on_off;
	status = vxge_hw_vpath_fw_api(vpath,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
			0, &data0, &data1, &steer_ctrl);
exit:
	return status;
}

/*
 * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
 */
enum vxge_hw_status
__vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
			      u32 action, u32 rts_table, u32 offset,
			      u64 *data0, u64 *data1)
{
	enum vxge_hw_status status;
	u64 steer_ctrl = 0;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	if ((rts_table ==
	     VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
	    (rts_table ==
	     VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
	    (rts_table ==
	     VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
	    (rts_table ==
	     VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
		steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
	}

	status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
				      data0, data1, &steer_ctrl);
	if (status != VXGE_HW_OK)
		goto exit;

	if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
	    (rts_table !=
	     VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
		*data1 = 0;
exit:
	return status;
}

/*
 * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
 */
enum vxge_hw_status
__vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action,
			      u32 rts_table, u32 offset, u64 steer_data0,
			      u64 steer_data1)
{
	u64 data0, data1 = 0, steer_ctrl = 0;
	enum vxge_hw_status status;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	data0 = steer_data0;

	if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
	    (rts_table ==
	     VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
		data1 = steer_data1;

	status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
				      &data0, &data1, &steer_ctrl);
exit:
	return status;
}

/*
 * vxge_hw_vpath_rts_rth_set - Set/configure RTS hashing.
 */
enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
			struct __vxge_hw_vpath_handle *vp,
			enum vxge_hw_rth_algoritms algorithm,
			struct vxge_hw_rth_hash_types *hash_type,
			u16 bucket_size)
{
	u64 data0, data1;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
		     VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
		     VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
			0, &data0, &data1);
	if (status != VXGE_HW_OK)
		goto exit;

	data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));

	data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
	VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
	VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);

	if (hash_type->hash_type_tcpipv4_en)
		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;

	if (hash_type->hash_type_ipv4_en)
		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;

	if (hash_type->hash_type_tcpipv6_en)
		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;

	if (hash_type->hash_type_ipv6_en)
		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;

	if (hash_type->hash_type_tcpipv6ex_en)
		data0 |=
		VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;

	if (hash_type->hash_type_ipv6ex_en)
		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;

	if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
		data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
	else
		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;

	status = __vxge_hw_vpath_rts_table_set(vp,
		VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
		0, data0, 0);
exit:
	return status;
}

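/*
 * Example (hypothetical caller, not from this file): enable RTH with
 * Jenkins hashing of TCP/IPv4 flows and a 2^8 = 256 bucket space; the
 * RTH_ALG_JENKINS enumerator is assumed from vxge-config.h:
 *
 *	struct vxge_hw_rth_hash_types ht = { .hash_type_tcpipv4_en = 1 };
 *
 *	vxge_hw_vpath_rts_rth_set(vp, RTH_ALG_JENKINS, &ht, 8);
 */
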
static void
vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
				u16 flag, u8 *itable)
{
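	/* flag selects which of the four per-write bucket slots (ITEM0 and
	 * ITEM1 of data0 and data1) the entry for bucket j is packed into;
	 * cases 2 and 4 OR into a word already holding an ITEM0 entry
	 */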
	switch (flag) {
	case 1:
		*data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
			itable[j]);
		return;
	case 2:
		*data0 |=
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
			itable[j]);
		return;
	case 3:
		*data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
			itable[j]);
		return;
	case 4:
		*data1 |=
			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
			itable[j]);
		return;
	default:
		return;
	}
}

/*
 * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
 */
enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
			struct __vxge_hw_vpath_handle **vpath_handles,
			u32 vpath_count,
			u8 *mtable,
			u8 *itable,
			u32 itable_size)
{
	u32 i, j, action, rts_table;
	u64 data0;
	u64 data1;
	u32 max_entries;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_vpath_handle *vp = vpath_handles[0];

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	max_entries = (((u32)1) << itable_size);

	if (vp->vpath->hldev->config.rth_it_type
				== VXGE_HW_RTH_IT_TYPE_SOLO_IT) {
		action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
		rts_table =
			VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT;

		for (j = 0; j < max_entries; j++) {

			data1 = 0;

			data0 =
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
				itable[j]);

			status = __vxge_hw_vpath_rts_table_set(vpath_handles[0],
				action, rts_table, j, data0, data1);

			if (status != VXGE_HW_OK)
				goto exit;
		}

		for (j = 0; j < max_entries; j++) {

			data1 = 0;

			data0 =
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN |
			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
				itable[j]);

			status = __vxge_hw_vpath_rts_table_set(
				vpath_handles[mtable[itable[j]]], action,
				rts_table, j, data0, data1);

			if (status != VXGE_HW_OK)
				goto exit;
		}
	} else {
		action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
		rts_table =
			VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
		for (i = 0; i < vpath_count; i++) {

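			/* pack up to four consecutive buckets owned by
			 * vpath i into one data0/data1 pair, then post them
			 * with a single steering write
			 */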
			for (j = 0; j < max_entries;) {

				data0 = 0;
				data1 = 0;

				while (j < max_entries) {
					if (mtable[itable[j]] != i) {
						j++;
						continue;
					}
					vxge_hw_rts_rth_data0_data1_get(j,
						&data0, &data1, 1, itable);
					j++;
					break;
				}

				while (j < max_entries) {
					if (mtable[itable[j]] != i) {
						j++;
						continue;
					}
					vxge_hw_rts_rth_data0_data1_get(j,
						&data0, &data1, 2, itable);
					j++;
					break;
				}

				while (j < max_entries) {
					if (mtable[itable[j]] != i) {
						j++;
						continue;
					}
					vxge_hw_rts_rth_data0_data1_get(j,
						&data0, &data1, 3, itable);
					j++;
					break;
				}

				while (j < max_entries) {
					if (mtable[itable[j]] != i) {
						j++;
						continue;
					}
					vxge_hw_rts_rth_data0_data1_get(j,
						&data0, &data1, 4, itable);
					j++;
					break;
				}

				if (data0 != 0) {
					status = __vxge_hw_vpath_rts_table_set(
							vpath_handles[i],
							action, rts_table,
							0, data0, data1);

					if (status != VXGE_HW_OK)
						goto exit;
				}
			}
		}
	}
exit:
	return status;
}

/**
 * vxge_hw_vpath_check_leak - Check for memory leak
 * @ringh: Handle to the ring object used for receive
 *
 * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is greater than or equal to
 * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred.
 * Returns: VXGE_HW_FAIL, if a leak has occurred.
 *
 */
enum vxge_hw_status
vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	u64 rxd_new_count, rxd_spat;

	if (ring == NULL)
		return status;

	rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
	rxd_spat = readq(&ring->vp_reg->prc_cfg6);
	rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);

	if (rxd_new_count >= rxd_spat)
		status = VXGE_HW_FAIL;

	return status;
}

/*
 * __vxge_hw_vpath_mgmt_read
 * This routine reads the vpath_mgmt registers
 */
static enum vxge_hw_status
__vxge_hw_vpath_mgmt_read(
	struct __vxge_hw_device *hldev,
	struct __vxge_hw_virtualpath *vpath)
{
	u32 i, mtu = 0, max_pyld = 0;
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {

		val64 = readq(&vpath->vpmgmt_reg->
				rxmac_cfg0_port_vpmgmt_clone[i]);
		max_pyld =
			(u32)
			VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
			(val64);
		if (mtu < max_pyld)
			mtu = max_pyld;
	}

	vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;

	val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (val64 & vxge_mBIT(i))
			vpath->vsport_number = i;
	}

	val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);

	if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
		VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
	else
		VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);

	return status;
}

/*
 * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
 * This routine checks the vpath_rst_in_prog register to see if the
 * adapter has completed the reset process for the vpath.
 */
static enum vxge_hw_status
__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
{
	enum vxge_hw_status status;

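	/* vpath bits in vpath_rst_in_prog are numbered from the high end
	 * of the field, hence the (16 - vp_id) shift; the same encoding
	 * is used by __vxge_hw_vpath_reset() below
	 */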
	status = __vxge_hw_device_register_poll(
			&vpath->hldev->common_reg->vpath_rst_in_prog,
			VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
				1 << (16 - vpath->vp_id)),
			vpath->hldev->config.device_poll_millis);

	return status;
}

/*
 * __vxge_hw_vpath_reset
 * This routine resets the vpath on the device
 */
static enum vxge_hw_status
__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
				&hldev->common_reg->cmn_rsthdlr_cfg0);

	return status;
}

/*
 * __vxge_hw_vpath_sw_reset
 * This routine resets the vpath structures
 */
static enum vxge_hw_status
__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;

	vpath = &hldev->virtual_paths[vp_id];

	if (vpath->ringh) {
		status = __vxge_hw_ring_reset(vpath->ringh);
		if (status != VXGE_HW_OK)
			goto exit;
	}

	if (vpath->fifoh)
		status = __vxge_hw_fifo_reset(vpath->fifoh);
exit:
	return status;
}

/*
 * __vxge_hw_vpath_prc_configure
 * This routine configures the PRC registers of the virtual path using
 * the config passed.
 */
static void
__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vp_config *vp_config;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	vpath = &hldev->virtual_paths[vp_id];
	vp_reg = vpath->vp_reg;
	vp_config = vpath->vp_config;

	if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
		return;

	val64 = readq(&vp_reg->prc_cfg1);
	val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
	writeq(val64, &vp_reg->prc_cfg1);

	val64 = readq(&vpath->vp_reg->prc_cfg6);
	val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
	writeq(val64, &vpath->vp_reg->prc_cfg6);

	val64 = readq(&vp_reg->prc_cfg7);

	if (vpath->vp_config->ring.scatter_mode !=
		VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) {

		val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);

		switch (vpath->vp_config->ring.scatter_mode) {
		case VXGE_HW_RING_SCATTER_MODE_A:
			val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
					VXGE_HW_PRC_CFG7_SCATTER_MODE_A);
			break;
		case VXGE_HW_RING_SCATTER_MODE_B:
			val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
					VXGE_HW_PRC_CFG7_SCATTER_MODE_B);
			break;
		case VXGE_HW_RING_SCATTER_MODE_C:
			val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
					VXGE_HW_PRC_CFG7_SCATTER_MODE_C);
			break;
		}
	}

	writeq(val64, &vp_reg->prc_cfg7);

	writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
				__vxge_hw_ring_first_block_address_get(
					vpath->ringh) >> 3), &vp_reg->prc_cfg5);

	val64 = readq(&vp_reg->prc_cfg4);
	val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
	val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);

	val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
			VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);

	if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
		val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
	else
		val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;

	writeq(val64, &vp_reg->prc_cfg4);
}

/*
 * __vxge_hw_vpath_kdfc_configure
 * This routine configures the KDFC registers of the virtual path using
 * the config passed.
 */
static enum vxge_hw_status
__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	u64 vpath_stride;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	vpath = &hldev->virtual_paths[vp_id];
	vp_reg = vpath->vp_reg;
	status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);

	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&vp_reg->kdfc_drbl_triplet_total);

	vpath->max_kdfc_db =
		(u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
			val64+1)/2;

	if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {

		vpath->max_nofl_db = vpath->max_kdfc_db;

		if (vpath->max_nofl_db <
			((vpath->vp_config->fifo.memblock_size /
			(vpath->vp_config->fifo.max_frags *
			sizeof(struct vxge_hw_fifo_txd))) *
			vpath->vp_config->fifo.fifo_blocks)) {

			return VXGE_HW_BADCFG_FIFO_BLOCKS;
		}
		val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
				(vpath->max_nofl_db*2)-1);
	}

	writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);

	writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
		&vp_reg->kdfc_fifo_trpl_ctrl);

	val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);

	val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
		   VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));

	val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
		 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
#ifndef __BIG_ENDIAN
		 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
#endif
		 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);

	writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
	writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
	wmb();
	vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);

	vpath->nofl_db =
		(struct __vxge_hw_non_offload_db_wrapper __iomem *)
		(hldev->kdfc + (vp_id *
		VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
					vpath_stride)));
exit:
	return status;
}

/*
 * __vxge_hw_vpath_mac_configure
 * This routine configures the MAC of the virtual path using the config passed.
 */
static enum vxge_hw_status
__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vp_config *vp_config;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	vpath = &hldev->virtual_paths[vp_id];
	vp_reg = vpath->vp_reg;
	vp_config = vpath->vp_config;

	writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
			vpath->vsport_number), &vp_reg->xmac_vsport_choice);

	if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {

		val64 = readq(&vp_reg->xmac_rpa_vcfg);

		if (vp_config->rpa_strip_vlan_tag !=
			VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) {
			if (vp_config->rpa_strip_vlan_tag)
				val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
			else
				val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
		}

		writeq(val64, &vp_reg->xmac_rpa_vcfg);
		val64 = readq(&vp_reg->rxmac_vcfg0);

		if (vp_config->mtu !=
				VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) {
			val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
			if ((vp_config->mtu  +
				VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
				val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
					vp_config->mtu  +
					VXGE_HW_MAC_HEADER_MAX_SIZE);
			else
				val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
					vpath->max_mtu);
		}

		writeq(val64, &vp_reg->rxmac_vcfg0);

		val64 = readq(&vp_reg->rxmac_vcfg1);

		val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
			VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);

		if (hldev->config.rth_it_type ==
				VXGE_HW_RTH_IT_TYPE_MULTI_IT) {
			val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
				0x2) |
				VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE;
		}

		writeq(val64, &vp_reg->rxmac_vcfg1);
	}
	return status;
}

/*
 * __vxge_hw_vpath_tim_configure
 * This routine configures the TIM registers of the virtual path using
 * the config passed.
 */
static enum vxge_hw_status
__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	struct vxge_hw_vp_config *config;

	vpath = &hldev->virtual_paths[vp_id];
	vp_reg = vpath->vp_reg;
	config = vpath->vp_config;

	writeq(0, &vp_reg->tim_dest_addr);
	writeq(0, &vp_reg->tim_vpath_map);
	writeq(0, &vp_reg->tim_bitmap);
	writeq(0, &vp_reg->tim_remap);

	if (config->ring.enable == VXGE_HW_RING_ENABLE)
		writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
			(vp_id * VXGE_HW_MAX_INTR_PER_VP) +
			VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);

	val64 = readq(&vp_reg->tim_pci_cfg);
	val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
	writeq(val64, &vp_reg->tim_pci_cfg);

	if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {

		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);

		if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
				0x3ffffff);
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
					config->tti.btimer_val);
		}

		val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;

		if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
			if (config->tti.timer_ac_en)
				val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
			else
				val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
		}

		if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
			if (config->tti.timer_ci_en)
				val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
			else
				val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
		}

		if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
					config->tti.urange_a);
		}

		if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
					config->tti.urange_b);
		}

		if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
					config->tti.urange_c);
		}

		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
		val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);

		if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
						config->tti.uec_a);
		}

		if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
						config->tti.uec_b);
		}

		if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
						config->tti.uec_c);
		}

		if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
						config->tti.uec_d);
		}

		writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
		val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);

		if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
			if (config->tti.timer_ri_en)
				val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
			else
				val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
		}

		if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
					0x3ffffff);
			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
					config->tti.rtimer_val);
		}

		if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
		}

		if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
					0x3ffffff);
			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
					config->tti.ltimer_val);
		}

		writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
	}

	if (config->ring.enable == VXGE_HW_RING_ENABLE) {

		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);

		if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
					0x3ffffff);
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
					config->rti.btimer_val);
		}

		val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;

		if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
			if (config->rti.timer_ac_en)
				val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
			else
				val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
		}

		if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
			if (config->rti.timer_ci_en)
				val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
			else
				val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
		}

		if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
					config->rti.urange_a);
		}

		if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
					config->rti.urange_b);
		}

		if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
					config->rti.urange_c);
		}

		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
		val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);

		if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
						config->rti.uec_a);
		}

		if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
						config->rti.uec_b);
		}

		if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
						config->rti.uec_c);
		}

		if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
						config->rti.uec_d);
		}

		writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
		val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);

		if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
			if (config->rti.timer_ri_en)
				val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
			else
				val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
		}

		if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
					0x3ffffff);
			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
					config->rti.rtimer_val);
		}

		if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
		}

		if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
					0x3ffffff);
			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
					config->rti.ltimer_val);
		}

		writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
	}

	val64 = 0;
	writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
	writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
	writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
	writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
	writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
	writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);

	val64 = VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(150);
	val64 |= VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(0);
	val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3);
	writeq(val64, &vp_reg->tim_wrkld_clc);

	return status;
}

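/*
 * vxge_hw_vpath_tti_ci_set - Set the TX timer continuous-interrupt bit.
 * Turn on TIMER_CI for the vpath's TX interrupt if the fifo is enabled
 * and the bit is not already set in the vpath configuration.
 */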
void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
{
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	struct vxge_hw_vp_config *config;
	u64 val64;

	vpath = &hldev->virtual_paths[vp_id];
	vp_reg = vpath->vp_reg;
	config = vpath->vp_config;

	if (config->fifo.enable == VXGE_HW_FIFO_ENABLE &&
	    config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
		config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
		val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
	}
}

/*
 * __vxge_hw_vpath_initialize
 * This routine is the final phase of init which initializes the
 * registers of the vpath using the configuration passed.
 */
static enum vxge_hw_status
__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	u32 val32;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	vpath = &hldev->virtual_paths[vp_id];

	if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
		status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
		goto exit;
	}
	vp_reg = vpath->vp_reg;

	status =  __vxge_hw_vpath_swapper_set(vpath->vp_reg);
	if (status != VXGE_HW_OK)
		goto exit;

	status =  __vxge_hw_vpath_mac_configure(hldev, vp_id);
	if (status != VXGE_HW_OK)
		goto exit;

	status =  __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);

	/* Get MRRS value from device control */
	status  = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
	if (status == VXGE_HW_OK) {
		val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
		val64 &=
		    ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
		val64 |=
		    VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);

		val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
	}

	val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
	val64 |=
	    VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
		    VXGE_HW_MAX_PAYLOAD_SIZE_512);

	val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
	writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);

exit:
	return status;
}

/*
 * __vxge_hw_vp_terminate - Terminate Virtual Path structure
 * This routine closes all channels it opened and frees up the memory.
 */
static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
{
	struct __vxge_hw_virtualpath *vpath;

	vpath = &hldev->virtual_paths[vp_id];

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
		goto exit;

	VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
		vpath->hldev->tim_int_mask1, vpath->vp_id);
	hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;

	memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
exit:
	return;
}

/*
 * __vxge_hw_vp_initialize - Initialize Virtual Path structure
 * This routine is the initial phase of init which resets the vpath and
 * initializes the software support structures.
 */
static enum vxge_hw_status
__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
			struct vxge_hw_vp_config *config)
{
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
		status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
		goto exit;
	}

	vpath = &hldev->virtual_paths[vp_id];

	spin_lock_init(&hldev->virtual_paths[vp_id].lock);
	vpath->vp_id = vp_id;
	vpath->vp_open = VXGE_HW_VP_OPEN;
	vpath->hldev = hldev;
	vpath->vp_config = config;
	vpath->vp_reg = hldev->vpath_reg[vp_id];
	vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];

	__vxge_hw_vpath_reset(hldev, vp_id);

	status = __vxge_hw_vpath_reset_check(vpath);
	if (status != VXGE_HW_OK) {
		memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
		goto exit;
	}

	status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
	if (status != VXGE_HW_OK) {
		memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
		goto exit;
	}

	INIT_LIST_HEAD(&vpath->vpath_handles);

	vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
		hldev->tim_int_mask1, vp_id);

	status = __vxge_hw_vpath_initialize(hldev, vp_id);
	if (status != VXGE_HW_OK)
		__vxge_hw_vp_terminate(hldev, vp_id);
exit:
	return status;
}

/*
 * vxge_hw_vpath_mtu_set - Set MTU.
 * Set new MTU value. For example, to use jumbo frames:
 * vxge_hw_vpath_mtu_set(my_device, 9600);
 */
enum vxge_hw_status
vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}
	vpath = vp->vpath;

	new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;

	if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu)) {
		status = VXGE_HW_ERR_INVALID_MTU_SIZE;
		goto exit;
	}

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
	val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);

	writeq(val64, &vpath->vp_reg->rxmac_vcfg0);

	vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;

exit:
	return status;
}

/*
 * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
 * Enable the DMA vpath statistics. Call this function to re-enable the
 * adapter's updating of statistics into host memory.
 */
static enum vxge_hw_status
vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;

	vpath = vp->vpath;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	memcpy(vpath->hw_stats_sav, vpath->hw_stats,
			sizeof(struct vxge_hw_vpath_stats_hw_info));

	status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
exit:
	return status;
}

/*
 * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
 * This function allocates a block from the block pool or from the system.
 */
static struct __vxge_hw_blockpool_entry *
__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
{
	struct __vxge_hw_blockpool_entry *entry = NULL;
	struct __vxge_hw_blockpool  *blockpool;

	blockpool = &devh->block_pool;

	if (size == blockpool->block_size) {

		if (!list_empty(&blockpool->free_block_list))
			entry = (struct __vxge_hw_blockpool_entry *)
				list_first_entry(&blockpool->free_block_list,
					struct __vxge_hw_blockpool_entry,
					item);

		if (entry != NULL) {
			list_del(&entry->item);
			blockpool->pool_size--;
		}
	}

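	/* taking a block may leave the pool short, so give it a chance
	 * to replenish itself from the system
	 */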
	if (entry != NULL)
		__vxge_hw_blockpool_blocks_add(blockpool);

	return entry;
}

/*
 * vxge_hw_vpath_open - Open a virtual path on a given adapter
 * This function is used to open access to a virtual path of an
 * adapter for offload and GRO operations. This function returns
 * synchronously.
 */
enum vxge_hw_status
vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
		   struct vxge_hw_vpath_attr *attr,
		   struct __vxge_hw_vpath_handle **vpath_handle)
{
	struct __vxge_hw_virtualpath *vpath;
	struct __vxge_hw_vpath_handle *vp;
	enum vxge_hw_status status;

	vpath = &hldev->virtual_paths[attr->vp_id];

	if (vpath->vp_open == VXGE_HW_VP_OPEN) {
		status = VXGE_HW_ERR_INVALID_STATE;
		goto vpath_open_exit1;
	}

	status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
			&hldev->config.vp_config[attr->vp_id]);
	if (status != VXGE_HW_OK)
		goto vpath_open_exit1;

	vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle));
	if (vp == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto vpath_open_exit2;
	}

	vp->vpath = vpath;

	if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
		status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
		if (status != VXGE_HW_OK)
			goto vpath_open_exit6;
	}

	if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
		status = __vxge_hw_ring_create(vp, &attr->ring_attr);
		if (status != VXGE_HW_OK)
			goto vpath_open_exit7;

		__vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
	}

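	/* note: fifoh is dereferenced unconditionally here, i.e. the fifo
	 * is assumed to be enabled for every opened vpath
	 */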
	vpath->fifoh->tx_intr_num =
		(attr->vp_id * VXGE_HW_MAX_INTR_PER_VP)  +
			VXGE_HW_VPATH_INTR_TX;

	vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
				VXGE_HW_BLOCK_SIZE);
	if (vpath->stats_block == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto vpath_open_exit8;
	}

	vpath->hw_stats = (struct vxge_hw_vpath_stats_hw_info *)
			vpath->stats_block->memblock;
	memset(vpath->hw_stats, 0,
		sizeof(struct vxge_hw_vpath_stats_hw_info));

	hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
						vpath->hw_stats;

	vpath->hw_stats_sav =
		&hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
	memset(vpath->hw_stats_sav, 0,
			sizeof(struct vxge_hw_vpath_stats_hw_info));

	writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);

	status = vxge_hw_vpath_stats_enable(vp);
	if (status != VXGE_HW_OK)
		goto vpath_open_exit8;

	list_add(&vp->item, &vpath->vpath_handles);

	hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);

	*vpath_handle = vp;

	attr->fifo_attr.userdata = vpath->fifoh;
	attr->ring_attr.userdata = vpath->ringh;

	return VXGE_HW_OK;

vpath_open_exit8:
	if (vpath->ringh != NULL)
		__vxge_hw_ring_delete(vp);
vpath_open_exit7:
	if (vpath->fifoh != NULL)
		__vxge_hw_fifo_delete(vp);
vpath_open_exit6:
	vfree(vp);
vpath_open_exit2:
	__vxge_hw_vp_terminate(hldev, attr->vp_id);
vpath_open_exit1:

	return status;
}

/**
 * vxge_hw_vpath_rx_doorbell_init - Post the initial RxD doorbell count
 * @vp: Handle got from previous vpath open
 *
 * This function posts the initial receive-descriptor qword count to the
 * PRC doorbell and clamps the ring's rxds_limit to what the hardware
 * can absorb.
 */
void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_virtualpath *vpath = vp->vpath;
	struct __vxge_hw_ring *ring = vpath->ringh;
	struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
	u64 new_count, val64, val164;

	if (vdev->titan1) {
		new_count = readq(&vpath->vp_reg->rxdmem_size);
		new_count &= 0x1fff;
	} else
		new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8;

	val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count);

	writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
		&vpath->vp_reg->prc_rxd_doorbell);
	readl(&vpath->vp_reg->prc_rxd_doorbell);

	val164 /= 2;
	val64 = readq(&vpath->vp_reg->prc_cfg6);
	val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
	val64 &= 0x1ff;

	/*
	 * Each RxD is of 4 qwords
	 */
	new_count -= (val64 + 1);
	val64 = min(val164, new_count) / 4;

	ring->rxds_limit = min(ring->rxds_limit, val64);
	if (ring->rxds_limit < 4)
		ring->rxds_limit = 4;
}

/*
 * __vxge_hw_blockpool_block_free - Frees a block from block pool
 * @devh: Hal device
 * @entry: Entry of block to be freed
 *
 * This function frees a block from block pool
 */
static void
__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
			       struct __vxge_hw_blockpool_entry *entry)
{
	struct __vxge_hw_blockpool  *blockpool;

	blockpool = &devh->block_pool;

	if (entry->length == blockpool->block_size) {
		list_add(&entry->item, &blockpool->free_block_list);
		blockpool->pool_size++;
	}

	__vxge_hw_blockpool_blocks_remove(blockpool);
}

/*
 * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open
 * This function is used to close access to a virtual path opened
 * earlier.
 */
enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_virtualpath *vpath = NULL;
	struct __vxge_hw_device *devh = NULL;
	u32 vp_id = vp->vpath->vp_id;
	u32 is_empty = TRUE;
	enum vxge_hw_status status = VXGE_HW_OK;

	vpath = vp->vpath;
	devh = vpath->hldev;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto vpath_close_exit;
	}

	list_del(&vp->item);

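	/* only the last handle on the vpath actually tears it down; if
	 * other handles remain, put this one back on the list and fail
	 * the close
	 */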
	if (!list_empty(&vpath->vpath_handles)) {
		list_add(&vp->item, &vpath->vpath_handles);
		is_empty = FALSE;
	}

	if (!is_empty) {
		status = VXGE_HW_FAIL;
		goto vpath_close_exit;
	}

	devh->vpaths_deployed &= ~vxge_mBIT(vp_id);

	if (vpath->ringh != NULL)
		__vxge_hw_ring_delete(vp);

	if (vpath->fifoh != NULL)
		__vxge_hw_fifo_delete(vp);

	if (vpath->stats_block != NULL)
		__vxge_hw_blockpool_block_free(devh, vpath->stats_block);

	vfree(vp);

	__vxge_hw_vp_terminate(devh, vp_id);

	spin_lock(&vpath->lock);
	vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
	spin_unlock(&vpath->lock);

vpath_close_exit:
	return status;
}

/*
 * vxge_hw_vpath_reset - Resets vpath
 * This function is used to request a reset of the vpath.
 */
enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
{
	enum vxge_hw_status status;
	u32 vp_id;
	struct __vxge_hw_virtualpath *vpath = vp->vpath;

	vp_id = vpath->vp_id;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
	if (status == VXGE_HW_OK)
		vpath->sw_stats->soft_reset_cnt++;
exit:
	return status;
}

/*
 * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
 * This function poll's for the vpath reset completion and re initializes
 * the vpath.
 */
enum vxge_hw_status
vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_virtualpath *vpath = NULL;
	enum vxge_hw_status status;
	struct __vxge_hw_device *hldev;
	u32 vp_id;

	vp_id = vp->vpath->vp_id;
	vpath = vp->vpath;
	hldev = vpath->hldev;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	status = __vxge_hw_vpath_reset_check(vpath);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_initialize(hldev, vp_id);
	if (status != VXGE_HW_OK)
		goto exit;

	if (vpath->ringh != NULL)
		__vxge_hw_vpath_prc_configure(hldev, vp_id);

	memset(vpath->hw_stats, 0,
		sizeof(struct vxge_hw_vpath_stats_hw_info));

	memset(vpath->hw_stats_sav, 0,
		sizeof(struct vxge_hw_vpath_stats_hw_info));

	writeq(vpath->stats_block->dma_addr,
		&vpath->vp_reg->stats_cfg);

	status = vxge_hw_vpath_stats_enable(vp);

exit:
	return status;
}

/*
 * vxge_hw_vpath_enable - Enable vpath.
 * This routine clears the vpath reset thereby enabling a vpath
 * to start forwarding frames and generating interrupts.
 */
void
vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_device *hldev;
	u64 val64;

	hldev = vp->vpath->hldev;

	val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
		1 << (16 - vp->vpath->vp_id));

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
		&hldev->common_reg->cmn_rsthdlr_cfg1);
}