aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/hrtimer.h
blob: d19a5c2d2270ebb9bbe01a167a97875e255da357 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
/*
 *  include/linux/hrtimer.h
 *
 *  hrtimers - High-resolution kernel timers
 *
 *   Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
 *   Copyright(C) 2005, Red Hat, Inc., Ingo Molnar
 *
 *  data type definitions, declarations, prototypes
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  For licencing details see kernel-base/COPYING
 */
#ifndef _LINUX_HRTIMER_H
#define _LINUX_HRTIMER_H

#include <linux/rbtree.h>
#include <linux/ktime.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/percpu.h>
#include <linux/timer.h>
#include <linux/timerqueue.h>

struct hrtimer_clock_base;
struct hrtimer_cpu_base;

/*
 * Mode arguments of xxx_hrtimer functions:
 */
enum hrtimer_mode {
	HRTIMER_MODE_ABS = 0x0,		/* Time value is absolute */
	HRTIMER_MODE_REL = 0x1,		/* Time value is relative to now */
	HRTIMER_MODE_PINNED = 0x02,	/* Timer is bound to CPU */
	HRTIMER_MODE_ABS_PINNED = 0x02,	/* ABS | PINNED (bit 0 clear = absolute) */
	HRTIMER_MODE_REL_PINNED = 0x03,	/* REL | PINNED */
};

/*
 * Return values for the callback function
 */
enum hrtimer_restart {
	HRTIMER_NORESTART,	/* Timer is not restarted */
	HRTIMER_RESTART,	/* Timer must be restarted (callback rearmed it) */
};

/*
 * Values to track state of the timer
 *
 * Possible states:
 *
 * 0x00		inactive
 * 0x01		enqueued into rbtree
 * 0x02		callback function running
 * 0x04		timer is migrated to another cpu
 *
 * Special cases:
 * 0x03		callback function running and enqueued
 *		(was requeued on another CPU)
 * 0x05		timer was migrated on CPU hotunplug
 *
 * The "callback function running and enqueued" status is only possible on
 * SMP. It happens for example when a posix timer expired and the callback
 * queued a signal. Between dropping the lock which protects the posix timer
 * and reacquiring the base lock of the hrtimer, another CPU can deliver the
 * signal and rearm the timer. We have to preserve the callback running state,
 * as otherwise the timer could be removed before the softirq code finishes
 * the handling of the timer.
 *
 * The HRTIMER_STATE_ENQUEUED bit is always or'ed to the current state
 * to preserve the HRTIMER_STATE_CALLBACK in the above scenario. This
 * also affects HRTIMER_STATE_MIGRATE where the preservation is not
 * necessary. HRTIMER_STATE_MIGRATE is cleared after the timer is
 * enqueued on the new cpu.
 *
 * All state transitions are protected by cpu_base->lock.
 */
#define HRTIMER_STATE_INACTIVE	0x00
#define HRTIMER_STATE_ENQUEUED	0x01
#define HRTIMER_STATE_CALLBACK	0x02
#define HRTIMER_STATE_MIGRATE	0x04

/**
 * struct hrtimer - the basic hrtimer structure
 * @node:	timerqueue node, which also manages node.expires,
 *		the absolute expiry time in the hrtimers internal
 *		representation. The time is related to the clock on
 *		which the timer is based. Is setup by adding
 *		slack to the _softexpires value. For non range timers
 *		identical to _softexpires.
 * @_softexpires: the absolute earliest expiry time of the hrtimer.
 *		The time which was given as expiry time when the timer
 *		was armed.
 * @function:	timer expiry callback function
 * @base:	pointer to the timer base (per cpu and per clock)
 * @state:	state information (See bit values above)
 * @start_site:	timer statistics field to store the site where the timer
 *		was started
 * @start_comm: timer statistics field to store the name of the process which
 *		started the timer
 * @start_pid: timer statistics field to store the pid of the task which
 *		started the timer
 *
 * The hrtimer structure must be initialized by hrtimer_init()
 */
struct hrtimer {
	struct timerqueue_node		node;		/* queue node; node.expires is the hard expiry */
	ktime_t				_softexpires;	/* earliest (soft) expiry as given when armed */
	enum hrtimer_restart		(*function)(struct hrtimer *);	/* expiry callback */
	struct hrtimer_clock_base	*base;		/* per cpu / per clock timer base */
	unsigned long			state;		/* HRTIMER_STATE_* bits (see above) */
#ifdef CONFIG_TIMER_STATS
	int				start_pid;	/* stats: pid of the task which started the timer */
	void				*start_site;	/* stats: call site where the timer was started */
	char				start_comm[16];	/* stats: name of the starting process */
#endif
};

/**
 * struct hrtimer_sleeper - simple sleeper structure
 * @timer:	embedded timer structure
 * @task:	task to wake up
 *
 * task is set to NULL, when the timer expires.
 */
struct hrtimer_sleeper {
	struct hrtimer timer;		/* embedded timer */
	struct task_struct *task;	/* task to wake up; set to NULL on expiry */
};

/**
 * struct hrtimer_clock_base - the timer base for a specific clock
 * @cpu_base:		per cpu clock base
 * @index:		clock type index for per_cpu support when moving a
 *			timer to a base on another cpu.
 * @clockid:		clock id for per_cpu support
 * @active:		red black tree root node for the active timers
 * @resolution:		the resolution of the clock, in nanoseconds
 * @get_time:		function to retrieve the current time of the clock
 * @softirq_time:	the time when running the hrtimer queue in the softirq
 * @offset:		offset of this clock to the monotonic base
 */
struct hrtimer_clock_base {
	struct hrtimer_cpu_base	*cpu_base;	/* per cpu base this clock base belongs to */
	int			index;		/* clock type index (enum hrtimer_base_type) */
	clockid_t		clockid;	/* clock id for per_cpu support */
	struct timerqueue_head	active;		/* active timers (rbtree based timerqueue) */
	ktime_t			resolution;	/* clock resolution, in nanoseconds */
	ktime_t			(*get_time)(void);	/* current time of this clock */
	ktime_t			softirq_time;	/* time when running the queue in the softirq */
	ktime_t			offset;		/* offset of this clock to the monotonic base */
};

/* Indices into hrtimer_cpu_base->clock_base[], one per supported clock. */
enum  hrtimer_base_type {
	HRTIMER_BASE_MONOTONIC,
	HRTIMER_BASE_REALTIME,
	HRTIMER_BASE_BOOTTIME,
	HRTIMER_BASE_TAI,
	HRTIMER_MAX_CLOCK_BASES,	/* array size, not a base itself */
};

/*
 * struct hrtimer_cpu_base - the per cpu clock bases
 * @lock:		lock protecting the base and associated clock bases
 *			and timers
 * @active_bases:	Bitfield to mark bases with active timers
 * @clock_was_set:	Indicates that clock was set from irq context.
 * @expires_next:	absolute time of the next event which was scheduled
 *			via clock_set_next_event()
 * @hres_active:	State of high resolution mode
 * @hang_detected:	The last hrtimer interrupt detected a hang
 * @nr_events:		Total number of hrtimer interrupt events
 * @nr_retries:		Total number of hrtimer interrupt retries
 * @nr_hangs:		Total number of hrtimer interrupt hangs
 * @max_hang_time:	Maximum time spent in hrtimer_interrupt
 * @clock_base:		array of clock bases for this cpu
 */
struct hrtimer_cpu_base {
	raw_spinlock_t			lock;		/* protects this base and its clock bases/timers */
	unsigned int			active_bases;	/* bitfield of bases with active timers */
	unsigned int			clock_was_set;	/* clock was set from irq context */
#ifdef CONFIG_HIGH_RES_TIMERS
	ktime_t				expires_next;	/* absolute time of the next scheduled event */
	int				hres_active;	/* state of high resolution mode */
	int				hang_detected;	/* last hrtimer interrupt detected a hang */
	unsigned long			nr_events;	/* total hrtimer interrupt events */
	unsigned long			nr_retries;	/* total hrtimer interrupt retries */
	unsigned long			nr_hangs;	/* total hrtimer interrupt hangs */
	ktime_t				max_hang_time;	/* max time spent in hrtimer_interrupt */
#endif
	struct hrtimer_clock_base	clock_base[HRTIMER_MAX_CLOCK_BASES];
};

/* Collapse the expiry window to a point: soft and hard expiry both @time. */
static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
{
	timer->_softexpires = time;
	timer->node.expires = time;
}

/* Soft expiry is @time; hard expiry trails it by @delta (overflow safe). */
static inline void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time, ktime_t delta)
{
	timer->node.expires = ktime_add_safe(time, delta);
	timer->_softexpires = time;
}

/* Like hrtimer_set_expires_range(), but the slack is given in nanoseconds. */
static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, unsigned long delta)
{
	timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta));
	timer->_softexpires = time;
}

/* Set both expiry times from a raw scalar (tv64) nanosecond value. */
static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
{
	timer->_softexpires.tv64 = tv64;
	timer->node.expires.tv64 = tv64;
}

/* Advance both soft and hard expiry by @time (saturating add). */
static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
{
	timer->_softexpires = ktime_add_safe(timer->_softexpires, time);
	timer->node.expires = ktime_add_safe(timer->node.expires, time);
}

/* Advance both soft and hard expiry by @ns nanoseconds. */
static inline void hrtimer_add_expires_ns(struct hrtimer *timer, u64 ns)
{
	timer->_softexpires = ktime_add_ns(timer->_softexpires, ns);
	timer->node.expires = ktime_add_ns(timer->node.expires, ns);
}

/* Return the hard expiry time (node.expires) of the timer. */
static inline ktime_t hrtimer_get_expires(const struct hrtimer *timer)
{
	return timer->node.expires;
}

/* Return the soft (earliest) expiry time of the timer. */
static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
{
	return timer->_softexpires;
}

/* Return the hard expiry time as a raw scalar (tv64). */
static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
{
	return timer->node.expires.tv64;
}
/* Return the soft expiry time as a raw scalar (tv64). */
static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
{
	return timer->_softexpires.tv64;
}

/* Return the hard expiry time converted to nanoseconds. */
static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
{
	return ktime_to_ns(timer->node.expires);
}

/*
 * Return time until hard expiry, relative to the timer's own clock.
 * Negative if the timer already expired.
 */
static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
{
	return ktime_sub(timer->node.expires, timer->base->get_time());
}

#ifdef CONFIG_HIGH_RES_TIMERS
struct clock_event_device;

extern void hrtimer_interrupt(struct clock_event_device *dev);

/*
 * In high resolution mode the time reference must be read accurate
 */
static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
{
	/* High resolution mode: read the clock directly for accuracy. */
	return timer->base->get_time();
}

/* Nonzero when the timer's cpu base is in high resolution mode. */
static inline int hrtimer_is_hres_active(struct hrtimer *timer)
{
	return timer->base->cpu_base->hres_active;
}

extern void hrtimer_peek_ahead_timers(void);

/*
 * The resolution of the clocks. The resolution value is returned in
 * the clock_getres() system call to give application programmers an
 * idea of the (in)accuracy of timers. Timer values are rounded up to
 * this resolution value.
 */
# define HIGH_RES_NSEC		1
# define KTIME_HIGH_RES		(ktime_t) { .tv64 = HIGH_RES_NSEC }
# define MONOTONIC_RES_NSEC	HIGH_RES_NSEC
# define KTIME_MONOTONIC_RES	KTIME_HIGH_RES

extern void clock_was_set_delayed(void);

#else

# define MONOTONIC_RES_NSEC	LOW_RES_NSEC
# define KTIME_MONOTONIC_RES	KTIME_LOW_RES

static inline void hrtimer_peek_ahead_timers(void) { }

/*
 * In non high resolution mode the time reference is taken from
 * the base softirq time variable.
 */
static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
{
	/* Low resolution mode: use the cached softirq base time. */
	return timer->base->softirq_time;
}

static inline int hrtimer_is_hres_active(struct hrtimer *timer)
{
	/* High resolution timers are compiled out: never active. */
	return 0;
}

static inline void clock_was_set_delayed(void) { }

#endif

extern void clock_was_set(void);
#ifdef CONFIG_TIMERFD
extern void timerfd_clock_was_set(void);
#else
static inline void timerfd_clock_was_set(void) { }
#endif
extern void hrtimers_resume(void);

extern ktime_t ktime_get(void);
extern ktime_t ktime_get_real(void);
extern ktime_t ktime_get_boottime(void);
extern ktime_t ktime_get_monotonic_offset(void);
extern ktime_t ktime_get_clocktai(void);
extern ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot,
					 ktime_t *offs_tai);

DECLARE_PER_CPU(struct tick_device, tick_cpu_device);


/* Exported timer functions: */

/* Initialize timers: */
extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock,
			 enum hrtimer_mode mode);

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
extern void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t which_clock,
				  enum hrtimer_mode mode);

extern void destroy_hrtimer_on_stack(struct hrtimer *timer);
#else
/*
 * Without CONFIG_DEBUG_OBJECTS_TIMERS there is no debug object tracking:
 * on-stack init is a plain hrtimer_init() and destruction is a no-op.
 */
static inline void hrtimer_init_on_stack(struct hrtimer *timer,
					 clockid_t which_clock,
					 enum hrtimer_mode mode)
{
	hrtimer_init(timer, which_clock, mode);
}
static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
#endif

/* Basic timer operations: */
extern int hrtimer_start(struct hrtimer *timer, ktime_t tim,
			 const enum hrtimer_mode mode);
extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
			unsigned long range_ns, const enum hrtimer_mode mode);
extern int
__hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
			 unsigned long delta_ns,
			 const enum hrtimer_mode mode, int wakeup);

extern int hrtimer_cancel(struct hrtimer *timer);
extern int hrtimer_try_to_cancel(struct hrtimer *timer);

/*
 * (Re)arm the timer using the expiry window already stored in it:
 * start at the soft expiry, with the soft->hard gap as slack.
 */
static inline int hrtimer_start_expires(struct hrtimer *timer,
						enum hrtimer_mode mode)
{
	ktime_t earliest = hrtimer_get_softexpires(timer);
	ktime_t latest = hrtimer_get_expires(timer);
	unsigned long slack = ktime_to_ns(ktime_sub(latest, earliest));

	return hrtimer_start_range_ns(timer, earliest, slack, mode);
}

/* Restart a timer with its previously stored absolute expiry window. */
static inline int hrtimer_restart(struct hrtimer *timer)
{
	return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}

/* Query timers: */
extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);

extern ktime_t hrtimer_get_next_event(void);

/*
 * A timer is active, when it is enqueued into the rbtree or the
 * callback function is running or it's in the state of being migrated
 * to another cpu.
 */
static inline int hrtimer_active(const struct hrtimer *timer)
{
	/* Any nonzero state bit (enqueued/callback/migrate) means active. */
	return timer->state != HRTIMER_STATE_INACTIVE;
}

/*
 * Helper function to check, whether the timer is on one of the queues
 */
static inline int hrtimer_is_queued(struct hrtimer *timer)
{
	/* Nonzero when the timer is enqueued on a timerqueue. */
	return timer->state & HRTIMER_STATE_ENQUEUED;
}

/*
 * Helper function to check, whether the timer is running the callback
 * function
 */
static inline int hrtimer_callback_running(struct hrtimer *timer)
{
	/* Nonzero while the expiry callback is executing. */
	return timer->state & HRTIMER_STATE_CALLBACK;
}

/* Forward a hrtimer so it expires after now: */
extern u64
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);

/* Forward a hrtimer so it expires after the hrtimer's current now */
static inline u64 hrtimer_forward_now(struct hrtimer *timer,
				      ktime_t interval)
{
	/* "now" is read from the timer's own clock base. */
	return hrtimer_forward(timer, timer->base->get_time(), interval);
}

/* Precise sleep: */
extern long hrtimer_nanosleep(struct timespec *rqtp,
			      struct timespec __user *rmtp,
			      const enum hrtimer_mode mode,
			      const clockid_t clockid);
extern long hrtimer_nanosleep_restart(struct restart_block *restart_block);

extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
				 struct task_struct *tsk);

extern int schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
						const enum hrtimer_mode mode);
extern int schedule_hrtimeout_range_clock(ktime_t *expires,
		unsigned long delta, const enum hrtimer_mode mode, int clock);
extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);

/* Soft interrupt function to run the hrtimer queues: */
extern void hrtimer_run_queues(void);
extern void hrtimer_run_pending(void);

/* Bootup initialization: */
extern void __init hrtimers_init(void);

#if BITS_PER_LONG < 64
extern u64 ktime_divns(const ktime_t kt, s64 div);
#else /* BITS_PER_LONG < 64 */
# define ktime_divns(kt, div)		(u64)((kt).tv64 / (div))
#endif

/* Show pending timers: */
extern void sysrq_timer_list_show(void);

#endif
='#n1328'>1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226








































































































































































































































































































































































































































































































































































































































































































































                                                                                                        
                                                           

































                                                                                             

































































































































































































































































































































































































































































































































                                                                                                                                                                                                                                                                             
                                                                                                    












































                                                                                                                     
                                                                           











                                                                                 
                                                     



















































































                                                                                                                                                                                                   
                                                                                       




                                                                                            
                                                                                         






































































































                                                                                                             
                                                                              




                                                                                 
                                                                                














































                                                                                                                                                             
                                                                              



                                                                                                          
                                                                                



























































































































































































































































































































































































































































































































































































































































                                                                                                                                                                                                                                                                                                                      
                                                    












                                                                          
/*
 * QLogic ISP2x00 SCSI-FCP
 * Written by Erik H. Moe, ehm@cris.com
 * Copyright 1995, Erik H. Moe
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

/* Renamed and updated to 1.3.x by Michael Griffith <grif@cs.ucr.edu> */

/* This is a version of the isp1020 driver which was modified by
 * Chris Loveland <cwl@iol.unh.edu> to support the isp2100 and isp2200
 *
 * Big endian support and dynamic DMA mapping added
 * by Jakub Jelinek <jakub@redhat.com>.
 *
 * Conversion to final pci64 DMA interfaces
 * by David S. Miller <davem@redhat.com>.
 */

/*
 * $Date: 1995/09/22 02:23:15 $
 * $Revision: 0.5 $
 *
 * $Log: isp1020.c,v $
 * Revision 0.5  1995/09/22  02:23:15  root
 * do auto request sense
 *
 * Revision 0.4  1995/08/07  04:44:33  root
 * supply firmware with driver.
 * numerous bug fixes/general cleanup of code.
 *
 * Revision 0.3  1995/07/16  16:15:39  root
 * added reset/abort code.
 *
 * Revision 0.2  1995/06/29  03:14:19  root
 * fixed biosparam.
 * added queue protocol.
 *
 * Revision 0.1  1995/06/25  01:55:45  root
 * Initial release.
 *
 */

#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/unistd.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include <asm/irq.h>
#include "scsi.h"
#include <scsi/scsi_host.h>

#define pci64_dma_hi32(a) ((u32) (0xffffffff & (((u64)(a))>>32)))
#define pci64_dma_lo32(a) ((u32) (0xffffffff & (((u64)(a)))))
#define pci64_dma_build(hi,lo) \
	((dma_addr_t)(((u64)(lo))|(((u64)(hi))<<32)))

/*
 * With the qlogic interface, every queue slot can hold a SCSI
 * command with up to 2 scatter/gather entries.  If we need more
 * than 2 entries, continuation entries can be used that hold
 * another 5 entries each.  Unlike for other drivers, this means
 * that the maximum number of scatter/gather entries we can
 * support at any given time is a function of the number of queue
 * slots available.  That is, host->can_queue and host->sg_tablesize
 * are dynamic and _not_ independent.  This all works fine because
 * requests are queued serially and the scatter/gather limit is
 * determined for each queue request anew.
 */

#define DATASEGS_PER_COMMAND 2
#define DATASEGS_PER_CONT 5

#define QLOGICFC_REQ_QUEUE_LEN 255     /* must be power of two - 1 */
#define QLOGICFC_MAX_SG(ql)	(DATASEGS_PER_COMMAND + (((ql) > 0) ? DATASEGS_PER_CONT*((ql) - 1) : 0))
#define QLOGICFC_CMD_PER_LUN    8

/* Configuration section **************************************************** */

/* Set the following macro to 1 to reload the ISP2x00's firmware.  This is
   version 1.17.30 of the isp2100's firmware and version 2.00.40 of the 
   isp2200's firmware. 
*/

#define USE_NVRAM_DEFAULTS      1

#define ISP2x00_PORTDB          1

/* Set the following to 1 to include fabric support, fabric support is 
 * currently not as well tested as the other aspects of the driver */

#define ISP2x00_FABRIC          1

/*  Macros used for debugging */
#define DEBUG_ISP2x00		0
#define DEBUG_ISP2x00_INT	0
#define DEBUG_ISP2x00_INTR	0
#define DEBUG_ISP2x00_SETUP	0
#define DEBUG_ISP2x00_FABRIC    0
#define TRACE_ISP 		0 


#define DEFAULT_LOOP_COUNT	1000000000

#define ISP_TIMEOUT (2*HZ)
/* End Configuration section ************************************************ */

#include <linux/module.h>

#if TRACE_ISP

#define TRACE_BUF_LEN	(32*1024)

/* Debug ring buffer filled by the TRACE() macro; wraps at TRACE_BUF_LEN. */
struct {
	u_long next;			/* next slot to fill (masked by TRACE_BUF_LEN - 1) */
	struct {
		u_long time;		/* jiffies at log time */
		u_int index;		/* caller-supplied index */
		u_int addr;		/* caller-supplied address/value */
		u_char *name;		/* caller-supplied tag string */
	} buf[TRACE_BUF_LEN];
} trace;

/*
 * Log one event into the trace ring buffer with interrupts disabled.
 * Wrapped in do { } while (0) so the macro behaves as a single statement:
 * the original bare-brace form broke "if (cond) TRACE(...); else ..."
 * (the trailing semicolon terminated the if before the else).
 */
#define TRACE(w, i, a)						\
do {								\
	unsigned long flags;					\
								\
	save_flags(flags);					\
	cli();							\
	trace.buf[trace.next].name  = (w);			\
	trace.buf[trace.next].time  = jiffies;			\
	trace.buf[trace.next].index = (i);			\
	trace.buf[trace.next].addr  = (long) (a);		\
	trace.next = (trace.next + 1) & (TRACE_BUF_LEN - 1);	\
	restore_flags(flags);					\
} while (0)

#else
#define TRACE(w, i, a)
#endif

#if DEBUG_ISP2x00_FABRIC
#define DEBUG_FABRIC(x)	x
#else
#define DEBUG_FABRIC(x)
#endif				/* DEBUG_ISP2x00_FABRIC */


#if DEBUG_ISP2x00
#define ENTER(x)	printk("isp2x00 : entering %s()\n", x);
#define LEAVE(x)	printk("isp2x00 : leaving %s()\n", x);
#define DEBUG(x)	x
#else
#define ENTER(x)
#define LEAVE(x)
#define DEBUG(x)
#endif				/* DEBUG_ISP2x00 */

#if DEBUG_ISP2x00_INTR
#define ENTER_INTR(x)	printk("isp2x00 : entering %s()\n", x);
#define LEAVE_INTR(x)	printk("isp2x00 : leaving %s()\n", x);
#define DEBUG_INTR(x)	x
#else
#define ENTER_INTR(x)
#define LEAVE_INTR(x)
#define DEBUG_INTR(x)
#endif				/* DEBUG ISP2x00_INTR */


#define ISP2100_REV_ID1	       1
#define ISP2100_REV_ID3        3
#define ISP2200_REV_ID5        5

/* host configuration and control registers */
#define HOST_HCCR	0xc0	/* host command and control */

/* pci bus interface registers */
#define FLASH_BIOS_ADDR	0x00
#define FLASH_BIOS_DATA	0x02
#define ISP_CTRL_STATUS	0x06	/* configuration register #1 */
#define PCI_INTER_CTL	0x08	/* pci interrupt control */
#define PCI_INTER_STS	0x0a	/* pci interrupt status */
#define PCI_SEMAPHORE	0x0c	/* pci semaphore */
#define PCI_NVRAM	0x0e	/* pci nvram interface */

/* mailbox registers */
#define MBOX0		0x10	/* mailbox 0 */
#define MBOX1		0x12	/* mailbox 1 */
#define MBOX2		0x14	/* mailbox 2 */
#define MBOX3		0x16	/* mailbox 3 */
#define MBOX4		0x18	/* mailbox 4 */
#define MBOX5		0x1a	/* mailbox 5 */
#define MBOX6		0x1c	/* mailbox 6 */
#define MBOX7		0x1e	/* mailbox 7 */

/* mailbox command complete status codes */
#define MBOX_COMMAND_COMPLETE		0x4000
#define INVALID_COMMAND			0x4001
#define HOST_INTERFACE_ERROR		0x4002
#define TEST_FAILED			0x4003
#define COMMAND_ERROR			0x4005
#define COMMAND_PARAM_ERROR		0x4006
#define PORT_ID_USED                    0x4007
#define LOOP_ID_USED                    0x4008
#define ALL_IDS_USED                    0x4009

/* async event status codes */
#define RESET_DETECTED  		0x8001
#define SYSTEM_ERROR			0x8002
#define REQUEST_TRANSFER_ERROR		0x8003
#define RESPONSE_TRANSFER_ERROR		0x8004
#define REQUEST_QUEUE_WAKEUP		0x8005
#define LIP_OCCURRED                     0x8010
#define LOOP_UP                         0x8011
#define LOOP_DOWN                       0x8012
#define LIP_RECEIVED                    0x8013
#define PORT_DB_CHANGED                 0x8014
#define CHANGE_NOTIFICATION             0x8015
#define SCSI_COMMAND_COMPLETE           0x8020
#define POINT_TO_POINT_UP               0x8030
#define CONNECTION_MODE                 0x8036

/*
 * Common 4-byte header that starts every entry placed on the request or
 * response queue.
 */
struct Entry_header {
	u_char entry_type;	/* one of the ENTRY_* type codes below */
	u_char entry_cnt;	/* number of queue entries making up this command */
	u_char sys_def_1;	/* system defined field */
	u_char flags;		/* EFLAG_* bits */
};

/* entry header type commands */
#define ENTRY_COMMAND		0x19
#define ENTRY_CONTINUATION	0x0a

#define ENTRY_STATUS		0x03
#define ENTRY_MARKER		0x04


/* entry header flag definitions */
#define EFLAG_BUSY		2
#define EFLAG_BAD_HEADER	4
#define EFLAG_BAD_PAYLOAD	8

/*
 * One DMA data segment: a 64-bit bus address split into low/high halves
 * plus a byte count (filled from the scatterlist in queuecommand).
 */
struct dataseg {
	u_int d_base;		/* low 32 bits of the segment bus address */
	u_int d_base_hi;	/* high 32 bits of the segment bus address */
	u_int d_count;		/* segment length in bytes */
};

/*
 * I/O command entry (ENTRY_COMMAND) placed on the request queue.
 * Multi-byte fields are stored little-endian for the ISP (see the
 * cpu_to_le*() conversions in isp2x00_queuecommand()).
 */
struct Command_Entry {
	struct Entry_header hdr;
	u_int handle;		/* driver-chosen token, echoed back in the Status_Entry */
	u_char target_lun;	/* LUN (low byte) */
	u_char target_id;	/* loop id of the target port */
	u_short expanded_lun;	/* full 16-bit LUN */
	u_short control_flags;	/* CFLAG_* bits */
	u_short rsvd2;
	u_short time_out;	/* command timeout; this driver sets 0 */
	u_short segment_cnt;	/* total number of data segments */
	u_char cdb[16];		/* SCSI command descriptor block */
	u_int total_byte_cnt;	/* total transfer length in bytes */
	struct dataseg dataseg[DATASEGS_PER_COMMAND];	/* first segments; overflow goes into continuation entries */
};

/* command entry control flag definitions */
#define CFLAG_NODISC		0x01
#define CFLAG_HEAD_TAG		0x02
#define CFLAG_ORDERED_TAG	0x04
#define CFLAG_SIMPLE_TAG	0x08
#define CFLAG_TAR_RTN		0x10
#define CFLAG_READ		0x20
#define CFLAG_WRITE		0x40

/*
 * Continuation entry (ENTRY_CONTINUATION) carrying additional data
 * segments for a preceding Command_Entry with a large scatterlist.
 */
struct Continuation_Entry {
	struct Entry_header hdr;
	struct dataseg dataseg[DATASEGS_PER_CONT];
};

/*
 * Marker entry (ENTRY_MARKER), queued by isp2x00_queuecommand() when
 * hostdata->send_marker is set; the modifier selects the sync scope.
 */
struct Marker_Entry {
	struct Entry_header hdr;
	u_int reserved;
	u_char target_lun;	/* LUN, relevant for SYNC_DEVICE */
	u_char target_id;	/* loop id, relevant for SYNC_DEVICE/SYNC_TARGET */
	u_char modifier;	/* SYNC_DEVICE / SYNC_TARGET / SYNC_ALL */
	u_char expanded_lun;
	u_char rsvds[52];	/* pads the entry to a full 64-byte queue slot */
};

/* marker entry modifier definitions */
#define SYNC_DEVICE	0
#define SYNC_TARGET	1
#define SYNC_ALL	2

/*
 * Completion status entry returned by the ISP on the response queue.
 */
struct Status_Entry {
	struct Entry_header hdr;
	u_int handle;		/* matches the Command_Entry.handle of the completed command */
	u_short scsi_status;	/* SCSI status from the target */
	u_short completion_status;	/* CS_* firmware completion code */
	u_short state_flags;	/* SF_* bits */
	u_short status_flags;	/* STF_* bits */
	u_short res_info_len;
	u_short req_sense_len;	/* valid byte count in req_sense_data[] */
	u_int residual;		/* untransferred byte count */
	u_char res_info[8];
	u_char req_sense_data[32];	/* sense data returned with the status */
};

/* status entry completion status definitions */
#define CS_COMPLETE			0x0000
#define CS_DMA_ERROR			0x0002
#define CS_RESET_OCCURRED		0x0004
#define CS_ABORTED			0x0005
#define CS_TIMEOUT			0x0006
#define CS_DATA_OVERRUN			0x0007
#define CS_DATA_UNDERRUN		0x0015
#define CS_QUEUE_FULL			0x001c
#define CS_PORT_UNAVAILABLE             0x0028
#define CS_PORT_LOGGED_OUT              0x0029
#define CS_PORT_CONFIG_CHANGED		0x002a

/* status entry state flag definitions */
#define SF_SENT_CDB			0x0400
#define SF_TRANSFERRED_DATA		0x0800
#define SF_GOT_STATUS			0x1000

/* status entry status flag definitions */
#define STF_BUS_RESET			0x0008
#define STF_DEVICE_RESET		0x0010
#define STF_ABORTED			0x0020
#define STF_TIMEOUT			0x0040

/* interrupt control commands */
#define ISP_EN_INT			0x8000
#define ISP_EN_RISC			0x0008

/* host control commands */
#define HCCR_NOP			0x0000
#define HCCR_RESET			0x1000
#define HCCR_PAUSE			0x2000
#define HCCR_RELEASE			0x3000
#define HCCR_SINGLE_STEP		0x4000
#define HCCR_SET_HOST_INTR		0x5000
#define HCCR_CLEAR_HOST_INTR		0x6000
#define HCCR_CLEAR_RISC_INTR		0x7000
#define HCCR_BP_ENABLE			0x8000
#define HCCR_BIOS_DISABLE		0x9000
#define HCCR_TEST_MODE			0xf000

#define RISC_BUSY			0x0004

/* mailbox commands */
#define MBOX_NO_OP			0x0000
#define MBOX_LOAD_RAM			0x0001
#define MBOX_EXEC_FIRMWARE		0x0002
#define MBOX_DUMP_RAM			0x0003
#define MBOX_WRITE_RAM_WORD		0x0004
#define MBOX_READ_RAM_WORD		0x0005
#define MBOX_MAILBOX_REG_TEST		0x0006
#define MBOX_VERIFY_CHECKSUM		0x0007
#define MBOX_ABOUT_FIRMWARE		0x0008
#define MBOX_LOAD_RISC_RAM              0x0009
#define MBOX_DUMP_RISC_RAM              0x000a
#define MBOX_CHECK_FIRMWARE		0x000e
#define MBOX_INIT_REQ_QUEUE		0x0010
#define MBOX_INIT_RES_QUEUE		0x0011
#define MBOX_EXECUTE_IOCB		0x0012
#define MBOX_WAKE_UP			0x0013
#define MBOX_STOP_FIRMWARE		0x0014
#define MBOX_ABORT_IOCB			0x0015
#define MBOX_ABORT_DEVICE		0x0016
#define MBOX_ABORT_TARGET		0x0017
#define MBOX_BUS_RESET			0x0018
#define MBOX_STOP_QUEUE			0x0019
#define MBOX_START_QUEUE		0x001a
#define MBOX_SINGLE_STEP_QUEUE		0x001b
#define MBOX_ABORT_QUEUE		0x001c
#define MBOX_GET_DEV_QUEUE_STATUS	0x001d
#define MBOX_GET_FIRMWARE_STATUS	0x001f
#define MBOX_GET_INIT_SCSI_ID		0x0020
#define MBOX_GET_RETRY_COUNT		0x0022
#define MBOX_GET_TARGET_PARAMS		0x0028
#define MBOX_GET_DEV_QUEUE_PARAMS	0x0029
#define MBOX_SET_RETRY_COUNT		0x0032
#define MBOX_SET_TARGET_PARAMS		0x0038
#define MBOX_SET_DEV_QUEUE_PARAMS	0x0039
#define MBOX_EXECUTE_IOCB64             0x0054
#define MBOX_INIT_FIRMWARE              0x0060
#define MBOX_GET_INIT_CB                0x0061
#define MBOX_INIT_LIP			0x0062
#define MBOX_GET_POS_MAP                0x0063
#define MBOX_GET_PORT_DB                0x0064
#define MBOX_CLEAR_ACA                  0x0065
#define MBOX_TARGET_RESET               0x0066
#define MBOX_CLEAR_TASK_SET             0x0067
#define MBOX_ABORT_TASK_SET             0x0068
#define MBOX_GET_FIRMWARE_STATE         0x0069
#define MBOX_GET_PORT_NAME              0x006a
#define MBOX_SEND_SNS                   0x006e
#define MBOX_PORT_LOGIN                 0x006f
#define MBOX_SEND_CHANGE_REQUEST        0x0070
#define MBOX_PORT_LOGOUT                0x0071

/*
 *	Firmware if needed (note this is a hack, it belongs in a separate
 *	module.
 */
 
#ifdef CONFIG_SCSI_QLOGIC_FC_FIRMWARE
#include "qlogicfc_asm.c"
#else
static unsigned short risc_code_addr01 = 0x1000 ;
#endif

/* Each element in mbox_param is an 8 bit bitmap where each bit indicates
   if that mbox should be copied as input.  For example 0x2 would mean
   only copy mbox1. */

/* Indexed by mailbox command number; 0x00 marks an unsupported command. */
static const u_char mbox_param[] =
{
	0x01,			/* MBOX_NO_OP */
	0x1f,			/* MBOX_LOAD_RAM */
	0x03,			/* MBOX_EXEC_FIRMWARE */
	0x1f,			/* MBOX_DUMP_RAM */
	0x07,			/* MBOX_WRITE_RAM_WORD */
	0x03,			/* MBOX_READ_RAM_WORD */
	0xff,			/* MBOX_MAILBOX_REG_TEST */
	0x03,			/* MBOX_VERIFY_CHECKSUM */
	0x01,			/* MBOX_ABOUT_FIRMWARE */
	0xff,			/* MBOX_LOAD_RISC_RAM */
	0xff,			/* MBOX_DUMP_RISC_RAM */
	0x00,			/* 0x000b */
	0x00,			/* 0x000c */
	0x00,			/* 0x000d */
	0x01,			/* MBOX_CHECK_FIRMWARE */
	0x00,			/* 0x000f */
	0x1f,			/* MBOX_INIT_REQ_QUEUE */
	0x2f,			/* MBOX_INIT_RES_QUEUE */
	0x0f,			/* MBOX_EXECUTE_IOCB */
	0x03,			/* MBOX_WAKE_UP */
	0x01,			/* MBOX_STOP_FIRMWARE */
	0x0f,			/* MBOX_ABORT_IOCB */
	0x03,			/* MBOX_ABORT_DEVICE */
	0x07,			/* MBOX_ABORT_TARGET */
	0x03,			/* MBOX_BUS_RESET */
	0x03,			/* MBOX_STOP_QUEUE */
	0x03,			/* MBOX_START_QUEUE */
	0x03,			/* MBOX_SINGLE_STEP_QUEUE */
	0x03,			/* MBOX_ABORT_QUEUE */
	0x03,			/* MBOX_GET_DEV_QUEUE_STATUS */
	0x00,			/* 0x001e */
	0x01,			/* MBOX_GET_FIRMWARE_STATUS */
	0x01,			/* MBOX_GET_INIT_SCSI_ID */
	0x00,			/* 0x0021 */
	0x01,			/* MBOX_GET_RETRY_COUNT */
	0x00,			/* 0x0023 */
	0x00,			/* 0x0024 */
	0x00,			/* 0x0025 */
	0x00,			/* 0x0026 */
	0x00,			/* 0x0027 */
	0x03,			/* MBOX_GET_TARGET_PARAMS */
	0x03,			/* MBOX_GET_DEV_QUEUE_PARAMS */
	0x00,			/* 0x002a */
	0x00,			/* 0x002b */
	0x00,			/* 0x002c */
	0x00,			/* 0x002d */
	0x00,			/* 0x002e */
	0x00,			/* 0x002f */
	0x00,			/* 0x0030 */
	0x00,			/* 0x0031 */
	0x07,			/* MBOX_SET_RETRY_COUNT */
	0x00,			/* 0x0033 */
	0x00,			/* 0x0034 */
	0x00,			/* 0x0035 */
	0x00,			/* 0x0036 */
	0x00,			/* 0x0037 */
	0x0f,			/* MBOX_SET_TARGET_PARAMS */
	0x0f,			/* MBOX_SET_DEV_QUEUE_PARAMS */
	0x00,			/* 0x003a */
	0x00,			/* 0x003b */
	0x00,			/* 0x003c */
	0x00,			/* 0x003d */
	0x00,			/* 0x003e */
	0x00,			/* 0x003f */
	0x00,			/* 0x0040 */
	0x00,			/* 0x0041 */
	0x00,			/* 0x0042 */
	0x00,			/* 0x0043 */
	0x00,			/* 0x0044 */
	0x00,			/* 0x0045 */
	0x00,			/* 0x0046 */
	0x00,			/* 0x0047 */
	0x00,			/* 0x0048 */
	0x00,			/* 0x0049 */
	0x00,			/* 0x004a */
	0x00,			/* 0x004b */
	0x00,			/* 0x004c */
	0x00,			/* 0x004d */
	0x00,			/* 0x004e */
	0x00,			/* 0x004f */
	0x00,			/* 0x0050 */
	0x00,			/* 0x0051 */
	0x00,			/* 0x0052 */
	0x00,			/* 0x0053 */
	0xcf,			/* MBOX_EXECUTE_IOCB64 */
	0x00,			/* 0x0055 */
	0x00,			/* 0x0056 */
	0x00,			/* 0x0057 */
	0x00,			/* 0x0058 */
	0x00,			/* 0x0059 */
	0x00,			/* 0x005a */
	0x00,			/* 0x005b */
	0x00,			/* 0x005c */
	0x00,			/* 0x005d */
	0x00,			/* 0x005e */
	0x00,			/* 0x005f */
	0xff,			/* MBOX_INIT_FIRMWARE */
	0xcd,			/* MBOX_GET_INIT_CB */
	0x01,			/* MBOX_INIT_LIP */
	0xcd,			/* MBOX_GET_POS_MAP */
	0xcf,			/* MBOX_GET_PORT_DB */
	0x03,			/* MBOX_CLEAR_ACA */
	0x03,			/* MBOX_TARGET_RESET */
	0x03,			/* MBOX_CLEAR_TASK_SET */
	0x03,			/* MBOX_ABORT_TASK_SET */
	0x01,			/* MBOX_GET_FIRMWARE_STATE */
	0x03,			/* MBOX_GET_PORT_NAME */
	0x00,			/* 0x006b */
	0x00,			/* 0x006c */
	0x00,			/* 0x006d */
	0xcf,			/* MBOX_SEND_SNS */
	0x0f,			/* MBOX_PORT_LOGIN */
	0x03,			/* MBOX_SEND_CHANGE_REQUEST */
	0x03,			/* MBOX_PORT_LOGOUT */
};

#define MAX_MBOX_COMMAND	(sizeof(mbox_param)/sizeof(u_short))


/*
 * Maps a port world wide name to the loop id assigned to it.  A wwn of
 * zero marks an unused slot (see the port_db merging in
 * isp2x00_make_portdb()).
 */
struct id_name_map {
	u64 wwn;		/* port world wide name; 0 = unused slot */
	u_char loop_id;		/* loop id used as Command_Entry.target_id */
};

/*
 * Simple Name Server command block handed to the firmware via
 * MBOX_SEND_SNS; the response buffer follows this struct in the same
 * DMA-consistent allocation (see isp2x00_init_fabric()).
 */
struct sns_cb {
	u_short len;		/* response buffer length in 16-bit words */
	u_short res1;
	u_int response_low;	/* low 32 bits of response buffer bus address */
	u_int response_high;	/* high 32 bits of response buffer bus address */
	u_short sub_len;	/* length of the SNS request itself, in 16-bit words */
	u_short res2;
	u_char data[44];	/* SNS request payload (command code + arguments) */
};

/* address of instance of this struct is passed to adapter to initialize things
 */
struct init_cb {
	u_char version;		/* control block format version; driver sets 1 */
	u_char reseverd1[1];	/* reserved padding (field-name typo kept; renaming would touch no behavior but churn callers) */
	u_short firm_opts;	/* firmware options word; detect() sets 0x800e */
	u_short max_frame_len;	/* largest frame length in bytes; detect() sets 2048 */
	u_short max_iocb;	/* max outstanding IOCBs */
	u_short exec_throttle;	/* execution throttle (set from QLOGICFC_CMD_PER_LUN) */
	u_char retry_cnt;
	u_char retry_delay;
	u_short node_name[4];	/* node world wide name, four 16-bit words */
	u_short hard_addr;	/* hard loop address; detect() sets 0x0003 */
	u_char reserved2[10];
	u_short req_queue_out;	/* request queue out index */
	u_short res_queue_in;	/* response queue in index */
	u_short req_queue_len;	/* request queue entry count */
	u_short res_queue_len;	/* response queue entry count */
	u_int req_queue_addr_lo;	/* request queue bus address, low 32 bits */
	u_int req_queue_addr_high;	/* request queue bus address, high 32 bits */
	u_int res_queue_addr_lo;	/* response queue bus address, low 32 bits */
	u_int res_queue_addr_high;	/* response queue bus address, high 32 bits */
        /* the rest of this structure only applies to the isp2200 */
        u_short lun_enables;
        u_char cmd_resource_cnt;
        u_char notify_resource_cnt;
        u_short timeout;
        u_short reserved3;
        u_short add_firm_opts;	/* additional firmware options; detect() ORs in CONNECTION_PREFERENCE<<4 */
        u_char res_accum_timer;
        u_char irq_delay_timer;
        u_short special_options;
        u_short reserved4[13];
};

/*
 * The result queue can be quite a bit smaller since continuation entries
 * do not show up there:
 */
#define RES_QUEUE_LEN		((QLOGICFC_REQ_QUEUE_LEN + 1) / 8 - 1)
#define QUEUE_ENTRY_LEN		64

#if ISP2x00_FABRIC
#define QLOGICFC_MAX_ID    0xff
#else
#define QLOGICFC_MAX_ID    0x7d
#endif

#define QLOGICFC_MAX_LUN	128
#define QLOGICFC_MAX_LOOP_ID	0x7d

/* the following connection options only apply to the 2200.  i have only
 * had success with LOOP_ONLY and P2P_ONLY.
 */

#define LOOP_ONLY              0
#define P2P_ONLY               1
#define LOOP_PREFERED          2
#define P2P_PREFERED           3

#define CONNECTION_PREFERENCE  LOOP_ONLY

/* adapter_state values */
#define AS_FIRMWARE_DEAD      -1
#define AS_LOOP_DOWN           0
#define AS_LOOP_GOOD           1
#define AS_REDO_FABRIC_PORTDB  2
#define AS_REDO_LOOP_PORTDB    4

#define RES_SIZE	((RES_QUEUE_LEN + 1)*QUEUE_ENTRY_LEN)
#define REQ_SIZE	((QLOGICFC_REQ_QUEUE_LEN + 1)*QUEUE_ENTRY_LEN)

/* Per-adapter driver state, carried in Scsi_Host.hostdata. */
struct isp2x00_hostdata {
	u_char revision;	/* chip revision (ISP2100_REV_* / ISP2200_REV_*) */
	struct pci_dev *pci_dev;
	/* result and request queues (shared with isp2x00): */
	u_int req_in_ptr;	/* index of next request slot */
	u_int res_out_ptr;	/* index of next result slot */

	/* this is here so the queues are nicely aligned */
	long send_marker;	/* do we need to send a marker? */

	char * res;		/* response queue; start of the combined DMA allocation */
	char * req;		/* request queue; follows res in the same allocation */
	struct init_cb control_block;	/* initialization control block passed to the firmware */
	int adapter_state;	/* one of the AS_* values above */
	unsigned long int tag_ages[QLOGICFC_MAX_ID + 1];
	Scsi_Cmnd *handle_ptrs[QLOGICFC_REQ_QUEUE_LEN + 1];	/* outstanding commands, indexed by IOCB handle */
	unsigned long handle_serials[QLOGICFC_REQ_QUEUE_LEN + 1];	/* serial numbers paired with handle_ptrs */
	struct id_name_map port_db[QLOGICFC_MAX_ID + 1];	/* persistent scsi-id -> wwn/loop-id map */
	u_char mbox_done;	/* mailbox command completion flag */
	u64 wwn;		/* this adapter's world wide name */
	u_int port_id;		/* our 24-bit fibre channel port id */
	u_char queued;		/* diagnostic count printed when handle slots run out */
	u_char host_id;		/* index used in "qlogicfc%d" messages */
        struct timer_list explore_timer;
	struct id_name_map tempmap[QLOGICFC_MAX_ID + 1];	/* scratch map rebuilt by isp2x00_make_portdb() */
};


/* queue lengths _must_ be a power of two: the in/out difference is
   reduced with a bitwise AND of the length mask (ql), which handles
   wraparound only for 2^n - 1 masks.  Arguments are fully parenthesized
   so expression arguments (e.g. in+1) expand correctly. */
#define QUEUE_DEPTH(in, out, ql)	(((in) - (out)) & (ql))
#define REQ_QUEUE_DEPTH(in, out)	QUEUE_DEPTH(in, out, 		     \
						    QLOGICFC_REQ_QUEUE_LEN)
#define RES_QUEUE_DEPTH(in, out)	QUEUE_DEPTH(in, out, RES_QUEUE_LEN)

static void isp2x00_enable_irqs(struct Scsi_Host *);
static void isp2x00_disable_irqs(struct Scsi_Host *);
static int isp2x00_init(struct Scsi_Host *);
static int isp2x00_reset_hardware(struct Scsi_Host *);
static int isp2x00_mbox_command(struct Scsi_Host *, u_short[]);
static int isp2x00_return_status(Scsi_Cmnd *, struct Status_Entry *);
static void isp2x00_intr_handler(int, void *, struct pt_regs *);
static irqreturn_t do_isp2x00_intr_handler(int, void *, struct pt_regs *);
static int isp2x00_make_portdb(struct Scsi_Host *);

#if ISP2x00_FABRIC
static int isp2x00_init_fabric(struct Scsi_Host *, struct id_name_map *, int);
#endif

#if USE_NVRAM_DEFAULTS
static int isp2x00_get_nvram_defaults(struct Scsi_Host *, struct init_cb *);
static u_short isp2x00_read_nvram_word(struct Scsi_Host *, u_short);
#endif

#if DEBUG_ISP2x00
static void isp2x00_print_scsi_cmd(Scsi_Cmnd *);
#endif

#if DEBUG_ISP2x00_INTR
static void isp2x00_print_status_entry(struct Status_Entry *);
#endif

static inline void isp2x00_enable_irqs(struct Scsi_Host *host)
{
	/* Unmask both the PCI interrupt and RISC-to-host interrupts. */
	unsigned short enable_bits = ISP_EN_INT | ISP_EN_RISC;

	outw(enable_bits, host->io_port + PCI_INTER_CTL);
}


static inline void isp2x00_disable_irqs(struct Scsi_Host *host)
{
	/* Writing zero to the interrupt control register masks every
	 * interrupt source on this adapter. */
	outw(0x0, host->io_port + PCI_INTER_CTL);
}


/*
 * Probe for ISP2100/ISP2200 boards, register a Scsi_Host for each one,
 * initialize its firmware and wait for the FC loop to come up.
 * Returns the number of hosts successfully attached.  Any board that
 * fails a step is skipped, with everything acquired for it released.
 */
static int isp2x00_detect(struct scsi_host_template * tmpt)
{
	int hosts = 0;
	unsigned long wait_time;
	struct Scsi_Host *host = NULL;
	struct isp2x00_hostdata *hostdata;
	struct pci_dev *pdev;
	unsigned short device_ids[2];
	dma_addr_t busaddr;
	int i;


	ENTER("isp2x00_detect");

       	device_ids[0] = PCI_DEVICE_ID_QLOGIC_ISP2100;
	device_ids[1] = PCI_DEVICE_ID_QLOGIC_ISP2200;

	tmpt->proc_name = "isp2x00";

	/* one scan pass per supported device id */
	for (i=0; i<2; i++){
		pdev = NULL;
	        while ((pdev = pci_find_device(PCI_VENDOR_ID_QLOGIC, device_ids[i], pdev))) {
			if (pci_enable_device(pdev))
				continue;

			/* Try to configure DMA attributes. */
			/* prefer a full 64-bit mask, fall back to 32-bit;
			   skip the board if neither can be set */
			if (pci_set_dma_mask(pdev, 0xffffffffffffffffULL) &&
			    pci_set_dma_mask(pdev, 0xffffffffULL))
					continue;

		        host = scsi_register(tmpt, sizeof(struct isp2x00_hostdata));
			if (!host) {
			        printk("qlogicfc%d : could not register host.\n", hosts);
				continue;
			}
			host->max_id = QLOGICFC_MAX_ID + 1;
			host->max_lun = QLOGICFC_MAX_LUN;
			hostdata = (struct isp2x00_hostdata *) host->hostdata;

			memset(hostdata, 0, sizeof(struct isp2x00_hostdata));
			hostdata->pci_dev = pdev;
			/* one DMA-consistent allocation holds the response
			   queue followed by the request queue */
			hostdata->res = pci_alloc_consistent(pdev, RES_SIZE + REQ_SIZE, &busaddr);

			if (!hostdata->res){
			        printk("qlogicfc%d : could not allocate memory for request and response queue.\n", hosts);
			        scsi_unregister(host);
				continue;
			}
			hostdata->req = hostdata->res + (RES_QUEUE_LEN + 1)*QUEUE_ENTRY_LEN;
			hostdata->queued = 0;
			/* set up the control block */
			hostdata->control_block.version = 0x1;
			hostdata->control_block.firm_opts = cpu_to_le16(0x800e);
			hostdata->control_block.max_frame_len = cpu_to_le16(2048);
			hostdata->control_block.max_iocb = cpu_to_le16(QLOGICFC_REQ_QUEUE_LEN);
			hostdata->control_block.exec_throttle = cpu_to_le16(QLOGICFC_CMD_PER_LUN);
			hostdata->control_block.retry_delay = 5;
			hostdata->control_block.retry_cnt = 1;
			hostdata->control_block.node_name[0] = cpu_to_le16(0x0020);
			hostdata->control_block.node_name[1] = cpu_to_le16(0xE000);
			hostdata->control_block.node_name[2] = cpu_to_le16(0x008B);
			hostdata->control_block.node_name[3] = cpu_to_le16(0x0000);
			hostdata->control_block.hard_addr = cpu_to_le16(0x0003);
			hostdata->control_block.req_queue_len = cpu_to_le16(QLOGICFC_REQ_QUEUE_LEN + 1);
			hostdata->control_block.res_queue_len = cpu_to_le16(RES_QUEUE_LEN + 1);
			hostdata->control_block.res_queue_addr_lo = cpu_to_le32(pci64_dma_lo32(busaddr));
			hostdata->control_block.res_queue_addr_high = cpu_to_le32(pci64_dma_hi32(busaddr));
			hostdata->control_block.req_queue_addr_lo = cpu_to_le32(pci64_dma_lo32(busaddr + RES_SIZE));
			hostdata->control_block.req_queue_addr_high = cpu_to_le32(pci64_dma_hi32(busaddr + RES_SIZE));


			hostdata->control_block.add_firm_opts |= cpu_to_le16(CONNECTION_PREFERENCE<<4);
			hostdata->adapter_state = AS_LOOP_DOWN;
			hostdata->explore_timer.data = 1;
			hostdata->host_id = hosts;

			if (isp2x00_init(host) || isp2x00_reset_hardware(host)) {
				pci_free_consistent (pdev, RES_SIZE + REQ_SIZE, hostdata->res, busaddr);
			        scsi_unregister(host);
				continue;
			}
			host->this_id = 0;

			if (request_irq(host->irq, do_isp2x00_intr_handler, SA_INTERRUPT | SA_SHIRQ, "qlogicfc", host)) {
			        printk("qlogicfc%d : interrupt %d already in use\n",
				       hostdata->host_id, host->irq);
				pci_free_consistent (pdev, RES_SIZE + REQ_SIZE, hostdata->res, busaddr);
				scsi_unregister(host);
				continue;
			}
			if (!request_region(host->io_port, 0xff, "qlogicfc")) {
			        printk("qlogicfc%d : i/o region 0x%lx-0x%lx already "
				       "in use\n",
				       hostdata->host_id, host->io_port, host->io_port + 0xff);
				free_irq(host->irq, host);
				pci_free_consistent (pdev, RES_SIZE + REQ_SIZE, hostdata->res, busaddr);
				scsi_unregister(host);
				continue;
			}

			/* clear any pending RISC interrupt, then unmask */
			outw(0x0, host->io_port + PCI_SEMAPHORE);
			outw(HCCR_CLEAR_RISC_INTR, host->io_port + HOST_HCCR);
			isp2x00_enable_irqs(host);
			/* wait for the loop to come up */
			for (wait_time = jiffies + 10 * HZ; time_before(jiffies, wait_time) && hostdata->adapter_state == AS_LOOP_DOWN;) {
			        barrier();
				cpu_relax();
			}
			if (hostdata->adapter_state == AS_LOOP_DOWN) {
			        printk("qlogicfc%d : link is not up\n", hostdata->host_id);
			}
			hosts++;
			hostdata->explore_timer.data = 0;
		}
	}


	/* this busy loop should not be needed but the isp2x00 seems to need 
	   some time before recognizing it is attached to a fabric */

#if ISP2x00_FABRIC
	if (hosts) {
		for (wait_time = jiffies + 5 * HZ; time_before(jiffies, wait_time);) {
			barrier();
			cpu_relax();
		}
	}
#endif

	LEAVE("isp2x00_detect");

	return hosts;
}


static int isp2x00_make_portdb(struct Scsi_Host *host)
{

	short param[8];
	int i, j;
	struct isp2x00_hostdata *hostdata;

	isp2x00_disable_irqs(host);

	hostdata = (struct isp2x00_hostdata *) host->hostdata;
	memset(hostdata->tempmap, 0, sizeof(hostdata->tempmap));

#if ISP2x00_FABRIC
	for (i = 0x81; i < QLOGICFC_MAX_ID; i++) {
		param[0] = MBOX_PORT_LOGOUT;
		param[1] = i << 8;
		param[2] = 0;
		param[3] = 0;

		isp2x00_mbox_command(host, param);

		if (param[0] != MBOX_COMMAND_COMPLETE) {

			DEBUG_FABRIC(printk("qlogicfc%d : logout failed %x  %x\n", hostdata->host_id, i, param[0]));
		}
	}
#endif


	param[0] = MBOX_GET_INIT_SCSI_ID;

	isp2x00_mbox_command(host, param);

	if (param[0] == MBOX_COMMAND_COMPLETE) {
		hostdata->port_id = ((u_int) param[3]) << 16;
		hostdata->port_id |= param[2];
		hostdata->tempmap[0].loop_id = param[1];
		hostdata->tempmap[0].wwn = hostdata->wwn;
	}
	else {
	        printk("qlogicfc%d : error getting scsi id.\n", hostdata->host_id);
	}

        for (i = 0; i <=QLOGICFC_MAX_ID; i++)
                hostdata->tempmap[i].loop_id = hostdata->tempmap[0].loop_id;
   
        for (i = 0, j = 1; i <= QLOGICFC_MAX_LOOP_ID; i++) {
                param[0] = MBOX_GET_PORT_NAME;
		param[1] = (i << 8) & 0xff00;

		isp2x00_mbox_command(host, param);

		if (param[0] == MBOX_COMMAND_COMPLETE) {
			hostdata->tempmap[j].loop_id = i;
			hostdata->tempmap[j].wwn = ((u64) (param[2] & 0xff)) << 56;
			hostdata->tempmap[j].wwn |= ((u64) ((param[2] >> 8) & 0xff)) << 48;
			hostdata->tempmap[j].wwn |= ((u64) (param[3] & 0xff)) << 40;
			hostdata->tempmap[j].wwn |= ((u64) ((param[3] >> 8) & 0xff)) << 32;
			hostdata->tempmap[j].wwn |= ((u64) (param[6] & 0xff)) << 24;
			hostdata->tempmap[j].wwn |= ((u64) ((param[6] >> 8) & 0xff)) << 16;
			hostdata->tempmap[j].wwn |= ((u64) (param[7] & 0xff)) << 8;
			hostdata->tempmap[j].wwn |= ((u64) ((param[7] >> 8) & 0xff));

			j++;

		}
	}


#if ISP2x00_FABRIC
	isp2x00_init_fabric(host, hostdata->tempmap, j);
#endif

	for (i = 0; i <= QLOGICFC_MAX_ID; i++) {
		if (hostdata->tempmap[i].wwn != hostdata->port_db[i].wwn) {
			for (j = 0; j <= QLOGICFC_MAX_ID; j++) {
				if (hostdata->tempmap[j].wwn == hostdata->port_db[i].wwn) {
					hostdata->port_db[i].loop_id = hostdata->tempmap[j].loop_id;
					break;
				}
			}
			if (j == QLOGICFC_MAX_ID + 1)
				hostdata->port_db[i].loop_id = hostdata->tempmap[0].loop_id;

			for (j = 0; j <= QLOGICFC_MAX_ID; j++) {
				if (hostdata->port_db[j].wwn == hostdata->tempmap[i].wwn || !hostdata->port_db[j].wwn) {
					break;
				}
			}
			if (j == QLOGICFC_MAX_ID + 1)
				printk("qlogicfc%d : Too many scsi devices, no more room in port map.\n", hostdata->host_id);
			if (!hostdata->port_db[j].wwn) {
				hostdata->port_db[j].loop_id = hostdata->tempmap[i].loop_id;
				hostdata->port_db[j].wwn = hostdata->tempmap[i].wwn;
			}
		} else
			hostdata->port_db[i].loop_id = hostdata->tempmap[i].loop_id;

	}

	isp2x00_enable_irqs(host);

	return 0;
}


#if ISP2x00_FABRIC

#define FABRIC_PORT          0x7e
#define FABRIC_CONTROLLER    0x7f
#define FABRIC_SNS           0x80

int isp2x00_init_fabric(struct Scsi_Host *host, struct id_name_map *port_db, int cur_scsi_id)
{

	u_short param[8];
	u64 wwn;
	int done = 0;
	u_short loop_id = 0x81;
	u_short scsi_id = cur_scsi_id;
	u_int port_id;
	struct sns_cb *req;
	u_char *sns_response;
	dma_addr_t busaddr;
	struct isp2x00_hostdata *hostdata;

	hostdata = (struct isp2x00_hostdata *) host->hostdata;
	
	DEBUG_FABRIC(printk("qlogicfc%d : Checking for a fabric.\n", hostdata->host_id));
	param[0] = MBOX_GET_PORT_NAME;
	param[1] = (u16)FABRIC_PORT << 8;

	isp2x00_mbox_command(host, param);

	if (param[0] != MBOX_COMMAND_COMPLETE) {
		DEBUG_FABRIC(printk("qlogicfc%d : fabric check result %x\n", hostdata->host_id, param[0]));
		return 0;
	}
	printk("qlogicfc%d : Fabric found.\n", hostdata->host_id);

	req = (struct sns_cb *)pci_alloc_consistent(hostdata->pci_dev, sizeof(*req) + 608, &busaddr);
	
	if (!req){
		printk("qlogicfc%d : Could not allocate DMA resources for fabric initialization\n", hostdata->host_id);
		return 0;
	}
	sns_response = (u_char *)(req + 1);

	if (hostdata->adapter_state & AS_REDO_LOOP_PORTDB){
	        memset(req, 0, sizeof(*req));
	
		req->len = cpu_to_le16(8);
		req->response_low = cpu_to_le32(pci64_dma_lo32(busaddr + sizeof(*req)));
		req->response_high = cpu_to_le32(pci64_dma_hi32(busaddr + sizeof(*req)));
		req->sub_len = cpu_to_le16(22);
		req->data[0] = 0x17;
		req->data[1] = 0x02;
		req->data[8] = (u_char) (hostdata->port_id & 0xff);
		req->data[9] = (u_char) (hostdata->port_id >> 8 & 0xff);
		req->data[10] = (u_char) (hostdata->port_id >> 16 & 0xff);
		req->data[13] = 0x01;
		param[0] = MBOX_SEND_SNS;
		param[1] = 30;
		param[2] = pci64_dma_lo32(busaddr) >> 16;
		param[3] = pci64_dma_lo32(busaddr);
		param[6] = pci64_dma_hi32(busaddr) >> 16;
		param[7] = pci64_dma_hi32(busaddr);

		isp2x00_mbox_command(host, param);
	
		if (param[0] != MBOX_COMMAND_COMPLETE)
		        printk("qlogicfc%d : error sending RFC-4\n", hostdata->host_id);
	}

	port_id = hostdata->port_id;
	while (!done) {
		memset(req, 0, sizeof(*req));

		req->len = cpu_to_le16(304);
		req->response_low = cpu_to_le32(pci64_dma_lo32(busaddr + sizeof(*req)));
		req->response_high = cpu_to_le32(pci64_dma_hi32(busaddr + sizeof(*req)));
		req->sub_len = cpu_to_le16(6);
		req->data[0] = 0x00;
		req->data[1] = 0x01;
		req->data[8] = (u_char) (port_id & 0xff);
		req->data[9] = (u_char) (port_id >> 8 & 0xff);
		req->data[10] = (u_char) (port_id >> 16 & 0xff);

		param[0] = MBOX_SEND_SNS;
		param[1] = 14;
		param[2] = pci64_dma_lo32(busaddr) >> 16;
		param[3] = pci64_dma_lo32(busaddr);
		param[6] = pci64_dma_hi32(busaddr) >> 16;
		param[7] = pci64_dma_hi32(busaddr);

		isp2x00_mbox_command(host, param);

		if (param[0] == MBOX_COMMAND_COMPLETE) {
			DEBUG_FABRIC(printk("qlogicfc%d : found node %02x%02x%02x%02x%02x%02x%02x%02x ", hostdata->host_id, sns_response[20], sns_response[21], sns_response[22], sns_response[23], sns_response[24], sns_response[25], sns_response[26], sns_response[27]));
			DEBUG_FABRIC(printk("  port id: %02x%02x%02x\n", sns_response[17], sns_response[18], sns_response[19]));
			port_id = ((u_int) sns_response[17]) << 16;
			port_id |= ((u_int) sns_response[18]) << 8;
			port_id |= ((u_int) sns_response[19]);
			wwn = ((u64) sns_response[20]) << 56;
			wwn |= ((u64) sns_response[21]) << 48;
			wwn |= ((u64) sns_response[22]) << 40;
			wwn |= ((u64) sns_response[23]) << 32;
			wwn |= ((u64) sns_response[24]) << 24;
			wwn |= ((u64) sns_response[25]) << 16;
			wwn |= ((u64) sns_response[26]) << 8;
			wwn |= ((u64) sns_response[27]);
			if (hostdata->port_id >> 8 != port_id >> 8) {
				DEBUG_FABRIC(printk("qlogicfc%d : adding a fabric port: %x\n", hostdata->host_id, port_id));
				param[0] = MBOX_PORT_LOGIN;
				param[1] = loop_id << 8;
				param[2] = (u_short) (port_id >> 16);
				param[3] = (u_short) (port_id);

				isp2x00_mbox_command(host, param);

				if (param[0] == MBOX_COMMAND_COMPLETE) {
					port_db[scsi_id].wwn = wwn;
					port_db[scsi_id].loop_id = loop_id;
					loop_id++;
					scsi_id++;
				} else {
					printk("qlogicfc%d : Error performing port login %x\n", hostdata->host_id, param[0]);
					DEBUG_FABRIC(printk("qlogicfc%d : loop_id: %x\n", hostdata->host_id, loop_id));
					param[0] = MBOX_PORT_LOGOUT;
					param[1] = loop_id << 8;
					param[2] = 0;
					param[3] = 0;

					isp2x00_mbox_command(host, param);
					
				}

			}
			if (hostdata->port_id == port_id)
				done = 1;
		} else {
			printk("qlogicfc%d : Get All Next failed %x.\n", hostdata->host_id, param[0]);
			pci_free_consistent(hostdata->pci_dev, sizeof(*req) + 608, req, busaddr);
			return 0;
		}
	}

	pci_free_consistent(hostdata->pci_dev, sizeof(*req) + 608, req, busaddr);
	return 1;
}

#endif				/* ISP2x00_FABRIC */


/*
 * Undo isp2x00_detect() for one host: mask the adapter's interrupts,
 * release the irq and i/o region, and free the shared queue memory.
 */
static int isp2x00_release(struct Scsi_Host *host)
{
	struct isp2x00_hostdata *hostdata;
	dma_addr_t busaddr;

	ENTER("isp2x00_release");

	hostdata = (struct isp2x00_hostdata *) host->hostdata;

	/* mask all interrupt sources before tearing down the handler */
	outw(0x0, host->io_port + PCI_INTER_CTL);
	free_irq(host->irq, host);

	release_region(host->io_port, 0xff);

	/* the response queue address stored in the control block is the
	   start of the combined RES+REQ allocation made in detect() */
	busaddr = pci64_dma_build(le32_to_cpu(hostdata->control_block.res_queue_addr_high),
				  le32_to_cpu(hostdata->control_block.res_queue_addr_lo));
	pci_free_consistent(hostdata->pci_dev, RES_SIZE + REQ_SIZE, hostdata->res, busaddr);

	LEAVE("isp2x00_release");

	return 0;
}


/*
 * Return a one-line description of the adapter for the SCSI mid-layer.
 *
 * Fix: use snprintf instead of sprintf — on 64-bit systems the worst
 * case formatted length (16 hex digits for the %lx io_port plus the
 * fixed text) can exceed the 80-byte buffer.  The returned pointer is
 * to a static buffer, so the result is only valid until the next call.
 */
static const char *isp2x00_info(struct Scsi_Host *host)
{
	static char buf[80];
	struct isp2x00_hostdata *hostdata;
	ENTER("isp2x00_info");

	hostdata = (struct isp2x00_hostdata *) host->hostdata;
	snprintf(buf, sizeof(buf),
		"QLogic ISP%04x SCSI on PCI bus %02x device %02x irq %d base 0x%lx",
		hostdata->pci_dev->device, hostdata->pci_dev->bus->number, hostdata->pci_dev->devfn, host->irq,
		host->io_port);


	LEAVE("isp2x00_info");

	return buf;
}


/*
 * The middle SCSI layer ensures that queuecommand never gets invoked
 * concurrently with itself or the interrupt handler (though the
 * interrupt handler may call this routine as part of
 * request-completion handling).
 */
static int isp2x00_queuecommand(Scsi_Cmnd * Cmnd, void (*done) (Scsi_Cmnd *))
{
	int i, sg_count, n, num_free;
	u_int in_ptr, out_ptr;
	struct dataseg *ds;
	struct scatterlist *sg;
	struct Command_Entry *cmd;
	struct Continuation_Entry *cont;
	struct Scsi_Host *host;
	struct isp2x00_hostdata *hostdata;

	ENTER("isp2x00_queuecommand");

	host = Cmnd->device->host;
	hostdata = (struct isp2x00_hostdata *) host->hostdata;
	Cmnd->scsi_done = done;

	DEBUG(isp2x00_print_scsi_cmd(Cmnd));

	if (hostdata->adapter_state & AS_REDO_FABRIC_PORTDB || hostdata->adapter_state & AS_REDO_LOOP_PORTDB) {
		isp2x00_make_portdb(host);
		hostdata->adapter_state = AS_LOOP_GOOD;
		printk("qlogicfc%d : Port Database\n", hostdata->host_id);
		for (i = 0; hostdata->port_db[i].wwn != 0; i++) {
			printk("wwn: %08x%08x  scsi_id: %x  loop_id: ", (u_int) (hostdata->port_db[i].wwn >> 32), (u_int) hostdata->port_db[i].wwn, i);
			if (hostdata->port_db[i].loop_id != hostdata->port_db[0].loop_id || i == 0)
			        printk("%x", hostdata->port_db[i].loop_id);
			else
			        printk("Not Available");
			printk("\n");
		}
	}
	if (hostdata->adapter_state == AS_FIRMWARE_DEAD) {
		printk("qlogicfc%d : The firmware is dead, just return.\n", hostdata->host_id);
		host->max_id = 0;
		return 0;
	}

	out_ptr = inw(host->io_port + MBOX4);
	in_ptr = hostdata->req_in_ptr;

	DEBUG(printk("qlogicfc%d : request queue depth %d\n", hostdata->host_id,
		     REQ_QUEUE_DEPTH(in_ptr, out_ptr)));

	cmd = (struct Command_Entry *) &hostdata->req[in_ptr*QUEUE_ENTRY_LEN];
	in_ptr = (in_ptr + 1) & QLOGICFC_REQ_QUEUE_LEN;
	if (in_ptr == out_ptr) {
		DEBUG(printk("qlogicfc%d : request queue overflow\n", hostdata->host_id));
		return 1;
	}
	if (hostdata->send_marker) {
		struct Marker_Entry *marker;

		TRACE("queue marker", in_ptr, 0);

		DEBUG(printk("qlogicfc%d : adding marker entry\n", hostdata->host_id));
		marker = (struct Marker_Entry *) cmd;
		memset(marker, 0, sizeof(struct Marker_Entry));

		marker->hdr.entry_type = ENTRY_MARKER;
		marker->hdr.entry_cnt = 1;
		marker->modifier = SYNC_ALL;

		hostdata->send_marker = 0;

		if (((in_ptr + 1) & QLOGICFC_REQ_QUEUE_LEN) == out_ptr) {
			outw(in_ptr, host->io_port + MBOX4);
			hostdata->req_in_ptr = in_ptr;
			DEBUG(printk("qlogicfc%d : request queue overflow\n", hostdata->host_id));
			return 1;
		}
		cmd = (struct Command_Entry *) &hostdata->req[in_ptr*QUEUE_ENTRY_LEN];
		in_ptr = (in_ptr + 1) & QLOGICFC_REQ_QUEUE_LEN;
	}
	TRACE("queue command", in_ptr, Cmnd);

	memset(cmd, 0, sizeof(struct Command_Entry));

	/* find a free handle mapping slot */
	for (i = in_ptr; i != (in_ptr - 1) && hostdata->handle_ptrs[i]; i = ((i + 1) % (QLOGICFC_REQ_QUEUE_LEN + 1)));

	if (!hostdata->handle_ptrs[i]) {
		cmd->handle = cpu_to_le32(i);
		hostdata->handle_ptrs[i] = Cmnd;
		hostdata->handle_serials[i] = Cmnd->serial_number;
	} else {
		printk("qlogicfc%d : no handle slots, this should not happen.\n", hostdata->host_id);
		printk("hostdata->queued is %x, in_ptr: %x\n", hostdata->queued, in_ptr);
		for (i = 0; i <= QLOGICFC_REQ_QUEUE_LEN; i++){
			if (!hostdata->handle_ptrs[i]){
				printk("slot %d has %p\n", i, hostdata->handle_ptrs[i]);
			}
		}
		return 1;
	}

	cmd->hdr.entry_type = ENTRY_COMMAND;
	cmd->hdr.entry_cnt = 1;
	cmd->target_lun = Cmnd->device->lun;
	cmd->expanded_lun = cpu_to_le16(Cmnd->device->lun);
#if ISP2x00_PORTDB
	cmd->target_id = hostdata->port_db[Cmnd->device->id].loop_id;
#else
	cmd->target_id = Cmnd->target;
#endif
	cmd->total_byte_cnt = cpu_to_le32(Cmnd->request_bufflen);
	cmd->time_out = 0;
	memcpy(cmd->cdb, Cmnd->cmnd, Cmnd->cmd_len);

	if (Cmnd->use_sg) {
		sg = (struct scatterlist *) Cmnd->request_buffer;
		sg_count = pci_map_sg(hostdata->pci_dev, sg, Cmnd->use_sg, Cmnd->sc_data_direction);
		cmd->segment_cnt = cpu_to_le16(sg_count);
		ds = cmd->dataseg;
		/* fill in first two sg entries: */
		n = sg_count;
		if (n > DATASEGS_PER_COMMAND)
			n = DATASEGS_PER_COMMAND;

		for (i = 0; i < n; i++) {
			ds[i].d_base = cpu_to_le32(pci64_dma_lo32(sg_dma_address(sg)));
			ds[i].d_base_hi = cpu_to_le32(pci64_dma_hi32(sg_dma_address(sg)));
			ds[i].d_count = cpu_to_le32(sg_dma_len(sg));
			++sg;
		}
		sg_count -= DATASEGS_PER_COMMAND;

		while (sg_count > 0) {
			++cmd->hdr.entry_cnt;
			cont = (struct Continuation_Entry *)
			    &hostdata->req[in_ptr*QUEUE_ENTRY_LEN];
			memset(cont, 0, sizeof(struct Continuation_Entry));
			in_ptr = (in_ptr + 1) & QLOGICFC_REQ_QUEUE_LEN;
			if (in_ptr == out_ptr) {
				DEBUG(printk("qlogicfc%d : unexpected request queue overflow\n", hostdata->host_id));
				return 1;
			}
			TRACE("queue continuation", in_ptr, 0);
			cont->hdr.entry_type = ENTRY_CONTINUATION;
			ds = cont->dataseg;
			n = sg_count;
			if (n > DATASEGS_PER_CONT)
				n = DATASEGS_PER_CONT;
			for (i = 0; i < n; ++i) {
				ds[i].d_base = cpu_to_le32(pci64_dma_lo32(sg_dma_address(sg)));
				ds[i].d_base_hi = cpu_to_le32(pci64_dma_hi32(sg_dma_address(sg)));
				ds[i].d_count = cpu_to_le32(sg_dma_len(sg));
				++sg;
			}
			sg_count -= n;
		}
	} else if (Cmnd->request_bufflen && Cmnd->sc_data_direction != PCI_DMA_NONE) {
		struct page *page = virt_to_page(Cmnd->request_buffer);
		unsigned long offset = offset_in_page(Cmnd->request_buffer);
		dma_addr_t busaddr = pci_map_page(hostdata->pci_dev,
						  page, offset,
						  Cmnd->request_bufflen,
						  Cmnd->sc_data_direction);
		Cmnd->SCp.dma_handle = busaddr;

		cmd->dataseg[0].d_base = cpu_to_le32(pci64_dma_lo32(busaddr));
		cmd->dataseg[0].d_base_hi = cpu_to_le32(pci64_dma_hi32(busaddr));
		cmd->dataseg[0].d_count = cpu_to_le32(Cmnd->request_bufflen);
		cmd->segment_cnt = cpu_to_le16(1);
	} else {
		cmd->dataseg[0].d_base = 0;
		cmd->dataseg[0].d_base_hi = 0;
		cmd->segment_cnt = cpu_to_le16(1); /* Shouldn't this be 0? */
	}

	if (Cmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->control_flags = cpu_to_le16(CFLAG_WRITE);
	else 
		cmd->control_flags = cpu_to_le16(CFLAG_READ);

	if (Cmnd->device->tagged_supported) {
		if ((jiffies - hostdata->tag_ages[Cmnd->device->id]) > (2 * ISP_TIMEOUT)) {
			cmd->control_flags |= cpu_to_le16(CFLAG_ORDERED_TAG);
			hostdata->tag_ages[Cmnd->device->id] = jiffies;
		} else
			switch (Cmnd->tag) {
			case HEAD_OF_QUEUE_TAG:
				cmd->control_flags |= cpu_to_le16(CFLAG_HEAD_TAG);
				break;
			case ORDERED_QUEUE_TAG:
				cmd->control_flags |= cpu_to_le16(CFLAG_ORDERED_TAG);
				break;
			default:
				cmd->control_flags |= cpu_to_le16(CFLAG_SIMPLE_TAG);
				break;
		}
	}
	/*
	 * TEST_UNIT_READY commands from scsi_scan will fail due to "overlapped
	 * commands attempted" unless we setup at least a simple queue (midlayer 
	 * will embelish this once it can do an INQUIRY command to the device)
	 */
	else
		cmd->control_flags |= cpu_to_le16(CFLAG_SIMPLE_TAG);
	outw(in_ptr, host->io_port + MBOX4);
	hostdata->req_in_ptr = in_ptr;

	hostdata->queued++;

	num_free = QLOGICFC_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr);
	num_free = (num_free > 2) ? num_free - 2 : 0;
       host->can_queue = host->host_busy + num_free;
	if (host->can_queue > QLOGICFC_REQ_QUEUE_LEN)
		host->can_queue = QLOGICFC_REQ_QUEUE_LEN;
	host->sg_tablesize = QLOGICFC_MAX_SG(num_free);

	LEAVE("isp2x00_queuecommand");

	return 0;
}


/* we have received an event, such as a lip or an RSCN, which may mean that
 * our port database is incorrect so the port database must be recreated.
 */
static void redo_port_db(unsigned long arg)
{
	/*
	 * Timer callback (armed from the interrupt handler): rebuild the
	 * port database after a link event (LIP/RSCN) and complete any
	 * outstanding commands whose ports have disappeared.
	 * 'arg' is the Scsi_Host pointer cast to unsigned long, as stored
	 * in explore_timer.data when the timer was armed.
	 */
        struct Scsi_Host * host = (struct Scsi_Host *) arg;
	struct isp2x00_hostdata * hostdata;
	unsigned long flags;
	int i;

	hostdata = (struct isp2x00_hostdata *) host->hostdata;
	/* .data == 0 marks the timer idle so the interrupt handler knows it
	 * may re-arm it for the next link event. */
	hostdata->explore_timer.data = 0;
	del_timer(&hostdata->explore_timer);

	spin_lock_irqsave(host->host_lock, flags);

	if (hostdata->adapter_state & AS_REDO_FABRIC_PORTDB || hostdata->adapter_state & AS_REDO_LOOP_PORTDB) {
		/* re-scan the loop/fabric, then dump the resulting database */
		isp2x00_make_portdb(host);
		printk("qlogicfc%d : Port Database\n", hostdata->host_id);
		for (i = 0; hostdata->port_db[i].wwn != 0; i++) {
			printk("wwn: %08x%08x  scsi_id: %x  loop_id: ", (u_int) (hostdata->port_db[i].wwn >> 32), (u_int) hostdata->port_db[i].wwn, i);
			/* a loop_id equal to entry 0's (for i != 0) marks a
			 * port without a currently valid loop id */
			if (hostdata->port_db[i].loop_id != hostdata->port_db[0].loop_id || i == 0)
			        printk("%x", hostdata->port_db[i].loop_id);
			else
			        printk("Not Available");
			printk("\n");
		}
		
		/*
		 * Fail outstanding commands whose port now has an invalid
		 * loop id (or all of them after a full loop rebuild),
		 * undoing their DMA mappings and completing them with
		 * DID_SOFT_ERROR so the midlayer retries.
		 * NOTE(review): handles are allocated elsewhere in the range
		 * 0..QLOGICFC_REQ_QUEUE_LEN inclusive, but this loop stops
		 * at QLOGICFC_REQ_QUEUE_LEN - 1 - confirm whether the last
		 * slot is intentionally skipped.
		 */
	        for (i = 0; i < QLOGICFC_REQ_QUEUE_LEN; i++){ 
		        if (hostdata->handle_ptrs[i] && (hostdata->port_db[hostdata->handle_ptrs[i]->device->id].loop_id > QLOGICFC_MAX_LOOP_ID || hostdata->adapter_state & AS_REDO_LOOP_PORTDB)){
                                if (hostdata->port_db[hostdata->handle_ptrs[i]->device->id].loop_id != hostdata->port_db[0].loop_id){
					Scsi_Cmnd *Cmnd = hostdata->handle_ptrs[i];

					 /* release the DMA mapping set up in queuecommand */
					 if (Cmnd->use_sg)
						 pci_unmap_sg(hostdata->pci_dev,
							      (struct scatterlist *)Cmnd->buffer,
							      Cmnd->use_sg,
							      Cmnd->sc_data_direction);
					 else if (Cmnd->request_bufflen &&
						  Cmnd->sc_data_direction != PCI_DMA_NONE) {
						 pci_unmap_page(hostdata->pci_dev,
								Cmnd->SCp.dma_handle,
								Cmnd->request_bufflen,
								Cmnd->sc_data_direction);
					 }

					 hostdata->handle_ptrs[i]->result = DID_SOFT_ERROR << 16;

					 if (hostdata->handle_ptrs[i]->scsi_done){
					   (*hostdata->handle_ptrs[i]->scsi_done) (hostdata->handle_ptrs[i]);
					 }
					 else printk("qlogicfc%d : done is null?\n", hostdata->host_id);
					 /* free the handle slot */
					 hostdata->handle_ptrs[i] = NULL;
					 hostdata->handle_serials[i] = 0;
				}
			}
		}
		
		hostdata->adapter_state = AS_LOOP_GOOD;
	}

	spin_unlock_irqrestore(host->host_lock, flags);

}

#define ASYNC_EVENT_INTERRUPT	0x01

irqreturn_t do_isp2x00_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	/*
	 * Locked wrapper around the real interrupt service routine: all
	 * work happens in isp2x00_intr_handler(), under the host lock.
	 */
	struct Scsi_Host *shost = dev_id;
	unsigned long iflags;

	spin_lock_irqsave(shost->host_lock, iflags);
	isp2x00_intr_handler(irq, dev_id, regs);
	spin_unlock_irqrestore(shost->host_lock, iflags);

	return IRQ_HANDLED;
}

void isp2x00_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	/*
	 * Core interrupt service routine.  Handles two classes of events:
	 * async/mailbox events (link up/down, RSCN, LIP, mailbox command
	 * completion, fast-post command completion) signalled through the
	 * PCI semaphore register, and normal response-queue completions.
	 * Called with the host lock held (see do_isp2x00_intr_handler).
	 */
	Scsi_Cmnd *Cmnd;
	struct Status_Entry *sts;
	struct Scsi_Host *host = dev_id;
	struct isp2x00_hostdata *hostdata;
	u_int in_ptr, out_ptr, handle, num_free;
	u_short status;

	ENTER_INTR("isp2x00_intr_handler");

	hostdata = (struct isp2x00_hostdata *) host->hostdata;

	DEBUG_INTR(printk("qlogicfc%d : interrupt on line %d\n", hostdata->host_id, irq));

	/* bit 0x08 of PCI_INTER_STS indicates a RISC interrupt is pending;
	 * if it is clear this interrupt is not ours (shared line). */
	if (!(inw(host->io_port + PCI_INTER_STS) & 0x08)) {
		/* spurious interrupts can happen legally */
		DEBUG_INTR(printk("qlogicfc%d : got spurious interrupt\n", hostdata->host_id));
		return;
	}
	/* MBOX5 holds the firmware's response-queue "in" pointer; out_ptr
	 * is how far we have consumed it. */
	in_ptr = inw(host->io_port + MBOX5);
	out_ptr = hostdata->res_out_ptr;

	if ((inw(host->io_port + PCI_SEMAPHORE) & ASYNC_EVENT_INTERRUPT)) {
		/* Async event or mailbox completion: the code is in MBOX0. */
		status = inw(host->io_port + MBOX0);

		DEBUG_INTR(printk("qlogicfc%d : mbox completion status: %x\n",
				  hostdata->host_id, status));

		switch (status) {
		case LOOP_UP:
		case POINT_TO_POINT_UP:
		        printk("qlogicfc%d : Link is Up\n", hostdata->host_id);
			/* link came (back) up: rebuild both loop and fabric
			 * databases via the explore timer below */
			hostdata->adapter_state = AS_REDO_FABRIC_PORTDB | AS_REDO_LOOP_PORTDB;
			break;
		case LOOP_DOWN:
		        printk("qlogicfc%d : Link is Down\n", hostdata->host_id);
			hostdata->adapter_state = AS_LOOP_DOWN;
			break;
		case CONNECTION_MODE:
		        printk("received CONNECTION_MODE irq %x\n", inw(host->io_port + MBOX1));
			break;
		case CHANGE_NOTIFICATION:
			/* RSCN: fabric membership changed */
		        printk("qlogicfc%d : RSCN Received\n", hostdata->host_id);
			if (hostdata->adapter_state == AS_LOOP_GOOD)
				hostdata->adapter_state = AS_REDO_FABRIC_PORTDB;
			break;		        
		case LIP_OCCURRED:
		case LIP_RECEIVED:
		        printk("qlogicfc%d : Loop Reinitialized\n", hostdata->host_id);
			if (hostdata->adapter_state == AS_LOOP_GOOD)
				hostdata->adapter_state = AS_REDO_LOOP_PORTDB;
			break;
		case SYSTEM_ERROR:
			printk("qlogicfc%d : The firmware just choked.\n", hostdata->host_id);
			hostdata->adapter_state = AS_FIRMWARE_DEAD;
			break;
		case SCSI_COMMAND_COMPLETE:
			/* "fast post" completion: the 32-bit handle is split
			 * across MBOX1 (low) and MBOX2 (high), no status
			 * entry is queued and the result is success. */
			handle = inw(host->io_port + MBOX1) | (inw(host->io_port + MBOX2) << 16);
			Cmnd = hostdata->handle_ptrs[handle];
			hostdata->handle_ptrs[handle] = NULL;
			hostdata->handle_serials[handle] = 0;
			hostdata->queued--;
			if (Cmnd != NULL) {
				/* release the DMA mapping made in queuecommand */
				if (Cmnd->use_sg)
					pci_unmap_sg(hostdata->pci_dev,
						     (struct scatterlist *)Cmnd->buffer,
						     Cmnd->use_sg,
						     Cmnd->sc_data_direction);
				else if (Cmnd->request_bufflen &&
					 Cmnd->sc_data_direction != PCI_DMA_NONE)
					pci_unmap_page(hostdata->pci_dev,
						       Cmnd->SCp.dma_handle,
						       Cmnd->request_bufflen,
						       Cmnd->sc_data_direction);
				Cmnd->result = 0x0;
				(*Cmnd->scsi_done) (Cmnd);
			} else
				printk("qlogicfc%d.c : got a null value out of handle_ptrs, this sucks\n", hostdata->host_id);
			break;
		case MBOX_COMMAND_COMPLETE:
		case INVALID_COMMAND:
		case HOST_INTERFACE_ERROR:
		case TEST_FAILED:
		case COMMAND_ERROR:
		case COMMAND_PARAM_ERROR:
		case PORT_ID_USED:
		case LOOP_ID_USED:
		case ALL_IDS_USED:
			/* completion of a synchronous mailbox command: flag
			 * it for the waiter and return early.  Note the PCI
			 * semaphore is not cleared here - presumably the
			 * mailbox-command path consumes it; confirm. */
			hostdata->mbox_done = 1;
			outw(HCCR_CLEAR_RISC_INTR, host->io_port + HOST_HCCR);
			return;
		default:
			printk("qlogicfc%d : got an unknown status? %x\n", hostdata->host_id, status);
		}
		/* If a port database rebuild is pending and the explore
		 * timer is idle (.data == 0), schedule redo_port_db() in a
		 * quarter of a second.
		 * NOTE(review): the fields are assigned before init_timer()
		 * is called - this relies on init_timer() not clearing
		 * function/data/expires; confirm for this kernel version. */
		if ((hostdata->adapter_state & AS_REDO_LOOP_PORTDB || hostdata->adapter_state & AS_REDO_FABRIC_PORTDB) && hostdata->explore_timer.data == 0){
                        hostdata->explore_timer.function = redo_port_db;
			hostdata->explore_timer.data = (unsigned long)host;
			hostdata->explore_timer.expires = jiffies + (HZ/4);
			init_timer(&hostdata->explore_timer);
			add_timer(&hostdata->explore_timer);
		}
		/* acknowledge the async event */
		outw(0x0, host->io_port + PCI_SEMAPHORE);
	} else {
		/* Response-queue completions: walk entries from out_ptr up
		 * to the firmware's in_ptr, each QUEUE_ENTRY_LEN bytes. */
		DEBUG_INTR(printk("qlogicfc%d : response queue update\n", hostdata->host_id));
		DEBUG_INTR(printk("qlogicfc%d : response queue depth %d\n", hostdata->host_id, RES_QUEUE_DEPTH(in_ptr, out_ptr)));

		while (out_ptr != in_ptr) {
			unsigned le_hand;
			sts = (struct Status_Entry *) &hostdata->res[out_ptr*QUEUE_ENTRY_LEN];
			out_ptr = (out_ptr + 1) & RES_QUEUE_LEN;
                 
			/* NOTE(review): Cmnd may be uninitialized here on the
			 * first iteration; TRACE is normally compiled out -
			 * confirm. */
			TRACE("done", out_ptr, Cmnd);
			DEBUG_INTR(isp2x00_print_status_entry(sts));
			le_hand = le32_to_cpu(sts->handle);
			if (sts->hdr.entry_type == ENTRY_STATUS && (Cmnd = hostdata->handle_ptrs[le_hand])) {
				Cmnd->result = isp2x00_return_status(Cmnd, sts);
				hostdata->queued--;

				/* release the DMA mapping made in queuecommand */
				if (Cmnd->use_sg)
					pci_unmap_sg(hostdata->pci_dev,
						     (struct scatterlist *)Cmnd->buffer, Cmnd->use_sg,
						     Cmnd->sc_data_direction);
				else if (Cmnd->request_bufflen && Cmnd->sc_data_direction != PCI_DMA_NONE)
					pci_unmap_page(hostdata->pci_dev,
						       Cmnd->SCp.dma_handle,
						       Cmnd->request_bufflen,
						       Cmnd->sc_data_direction);

				/* 
				 * if any of the following are true we do not
				 * call scsi_done.  if the status is CS_ABORTED
				 * we don't have to call done because the upper
				 * level should already know its aborted.
				 */
				if (hostdata->handle_serials[le_hand] != Cmnd->serial_number 
				    || le16_to_cpu(sts->completion_status) == CS_ABORTED){
					hostdata->handle_serials[le_hand] = 0;
					hostdata->handle_ptrs[le_hand] = NULL;
					outw(out_ptr, host->io_port + MBOX5);
					continue;
				}
				/*
				 * if we get back an error indicating the port
				 * is not there or if the link is down and 
				 * this is a device that used to be there 
				 * allow the command to timeout.
				 * the device may well be back in a couple of
				 * seconds.
				 */
				if ((hostdata->adapter_state == AS_LOOP_DOWN || sts->completion_status == cpu_to_le16(CS_PORT_UNAVAILABLE) || sts->completion_status == cpu_to_le16(CS_PORT_LOGGED_OUT) || sts->completion_status == cpu_to_le16(CS_PORT_CONFIG_CHANGED)) && hostdata->port_db[Cmnd->device->id].wwn){
					outw(out_ptr, host->io_port + MBOX5);
					continue;
				}
			} else {
				/* not a status entry or stale handle: just
				 * consume the queue entry */
				outw(out_ptr, host->io_port + MBOX5);
				continue;
			}

			hostdata->handle_ptrs[le_hand] = NULL;

			/* a bus reset invalidates firmware sync - queue a
			 * marker entry before the next command */
			if (sts->completion_status == cpu_to_le16(CS_RESET_OCCURRED)
			    || (sts->status_flags & cpu_to_le16(STF_BUS_RESET)))
				hostdata->send_marker = 1;

			/* bit 0x0200 of scsi_status = sense data valid */
			if (le16_to_cpu(sts->scsi_status) & 0x0200)
				memcpy(Cmnd->sense_buffer, sts->req_sense_data,
				       sizeof(Cmnd->sense_buffer));

			/* tell the firmware how far we have consumed */
			outw(out_ptr, host->io_port + MBOX5);

			if (Cmnd->scsi_done != NULL) {
				(*Cmnd->scsi_done) (Cmnd);
			} else
				printk("qlogicfc%d : Ouch, scsi done is NULL\n", hostdata->host_id);
		}
		hostdata->res_out_ptr = out_ptr;
	}


	/* Re-derive the midlayer throttle (can_queue, sg_tablesize) from the
	 * current request-queue occupancy, keeping 2 entries in reserve. */
	out_ptr = inw(host->io_port + MBOX4);
	in_ptr = hostdata->req_in_ptr;

	num_free = QLOGICFC_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr);
	num_free = (num_free > 2) ? num_free - 2 : 0;
       host->can_queue = host->host_busy + num_free;
	if (host->can_queue > QLOGICFC_REQ_QUEUE_LEN)
		host->can_queue = QLOGICFC_REQ_QUEUE_LEN;
	host->sg_tablesize = QLOGICFC_MAX_SG(num_free);

	/* ack the RISC interrupt */
	outw(HCCR_CLEAR_RISC_INTR, host->io_port + HOST_HCCR);
	LEAVE_INTR("isp2x00_intr_handler");
}


static int isp2x00_return_status(Scsi_Cmnd *Cmnd, struct Status_Entry *sts)
{
	/*
	 * Translate a firmware status entry into a midlayer result word:
	 * low byte is the SCSI status, bits 16..23 carry the DID_* host
	 * code derived from the firmware completion status.
	 */
	u_short cs = le16_to_cpu(sts->completion_status);
	int host_status;
#if DEBUG_ISP2x00_INTR
	static char *reason[] =
	{
		"DID_OK",
		"DID_NO_CONNECT",
		"DID_BUS_BUSY",
		"DID_TIME_OUT",
		"DID_BAD_TARGET",
		"DID_ABORT",
		"DID_PARITY",
		"DID_ERROR",
		"DID_RESET",
		"DID_BAD_INTR"
	};
#endif				/* DEBUG_ISP2x00_INTR */

	ENTER("isp2x00_return_status");

	DEBUG(printk("qlogicfc : completion status = 0x%04x\n",
		     le16_to_cpu(sts->completion_status)));

	switch (cs) {
	case CS_COMPLETE:
		host_status = DID_OK;
		break;
	case CS_RESET_OCCURRED:
		host_status = DID_RESET;
		break;
	case CS_ABORTED:
		host_status = DID_ABORT;
		break;
	case CS_TIMEOUT:
		host_status = DID_TIME_OUT;
		break;
	case CS_DATA_UNDERRUN:
		/* an underrun is fine as long as the transferred byte count
		 * still meets the caller's stated minimum (underflow) */
	        if (Cmnd->underflow <= (Cmnd->request_bufflen - le32_to_cpu(sts->residual)))
		        host_status = DID_OK;
		else
		        host_status = DID_ERROR;
		break;
	case CS_PORT_UNAVAILABLE:
	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHANGED:
		host_status = DID_BAD_TARGET;
		break;
	case CS_DMA_ERROR:
	case CS_DATA_OVERRUN:
	case CS_QUEUE_FULL:
		host_status = DID_ERROR;
		break;
	default:
		printk("qlogicfc : unknown completion status 0x%04x\n",
		       le16_to_cpu(sts->completion_status));
		host_status = DID_ERROR;
		break;
	}

	DEBUG_INTR(printk("qlogicfc : host status (%s) scsi status %x\n",
			  reason[host_status], le16_to_cpu(sts->scsi_status)));

	LEAVE("isp2x00_return_status");

	return (le16_to_cpu(sts->scsi_status) & STATUS_MASK) | (host_status << 16);
}


static int isp2x00_abort(Scsi_Cmnd * Cmnd)
{
	/*
	 * Error-handler abort entry point: find the outstanding handle for
	 * this command and ask the firmware to abort the IOCB.  Returns
	 * SUCCESS if the command is no longer outstanding or the abort
	 * completed, FAILED otherwise.
	 */
	u_short param[8];
	int i;
	struct Scsi_Host *host;
	struct isp2x00_hostdata *hostdata;
	int return_status = SUCCESS;

	ENTER("isp2x00_abort");

	host = Cmnd->device->host;
	hostdata = (struct isp2x00_hostdata *) host->hostdata;

	/* Handles are allocated modulo (QLOGICFC_REQ_QUEUE_LEN + 1) in
	 * queuecommand, so valid slots run 0..QLOGICFC_REQ_QUEUE_LEN
	 * inclusive.  The old search stopped one slot short and could miss
	 * a command parked in the last slot. */
	for (i = 0; i <= QLOGICFC_REQ_QUEUE_LEN; i++)
		if (hostdata->handle_ptrs[i] == Cmnd)
			break;

	/* not outstanding any more - nothing to abort */
	if (i > QLOGICFC_REQ_QUEUE_LEN){
		return SUCCESS;
	}

	isp2x00_disable_irqs(host);

	/* MBOX_ABORT_IOCB takes (loop_id << 8 | lun) and the 32-bit handle
	 * split across param[2] (low) and param[3] (high). */
	param[0] = MBOX_ABORT_IOCB;
#if ISP2x00_PORTDB
	param[1] = (((u_short) hostdata->port_db[Cmnd->device->id].loop_id) << 8) | Cmnd->device->lun;
#else
	param[1] = (((u_short) Cmnd->target) << 8) | Cmnd->lun;
#endif
	param[2] = i & 0xffff;
	param[3] = i >> 16;

	isp2x00_mbox_command(host, param);

	if (param[0] != MBOX_COMMAND_COMPLETE) {
		printk("qlogicfc%d : scsi abort failure: %x\n", hostdata->host_id, param[0]);
		if (param[0] == 0x4005)
			Cmnd->result = DID_ERROR << 16;
		if (param[0] == 0x4006)
			Cmnd->result = DID_BAD_TARGET << 16;
		return_status = FAILED;
	}

	if (return_status != SUCCESS){
		/* abort failed: dump firmware state for diagnosis */
		param[0] = MBOX_GET_FIRMWARE_STATE;
		isp2x00_mbox_command(host, param);
		printk("qlogicfc%d : abort failed\n", hostdata->host_id);
		printk("qlogicfc%d : firmware status is %x %x\n", hostdata->host_id, param[0], param[1]);
	}

	isp2x00_enable_irqs(host);

	LEAVE("isp2x00_abort");

	return return_status;
}


static int isp2x00_biosparam(struct scsi_device *sdev, struct block_device *n,
		sector_t capacity, int ip[])
{
	/*
	 * Report a synthetic BIOS geometry for partitioning tools:
	 * 64 heads / 32 sectors for small disks, 255/63 when the cylinder
	 * count would otherwise exceed 1024.  ip[] receives
	 * {heads, sectors, cylinders}.
	 */
	int size = capacity;
	int heads = 64;
	int sectors = 32;
	int cylinders;

	ENTER("isp2x00_biosparam");

	cylinders = size >> 11;		/* size / (64 * 32) */
	if (cylinders > 1024) {
		heads = 255;
		sectors = 63;
		cylinders = size / (heads * sectors);
	}
	ip[0] = heads;
	ip[1] = sectors;
	ip[2] = cylinders;

	LEAVE("isp2x00_biosparam");

	return 0;
}

static int isp2x00_reset_hardware(struct Scsi_Host *host)
{
	u_short param[8];
	struct isp2x00_hostdata *hostdata;
	int loop_count;
	dma_addr_t busaddr;

	ENTER("isp2x00_reset_hardware");

	hostdata = (struct isp2x00_hostdata *) host->hostdata;

	/*
	 *	This cannot be right - PCI writes are posted
	 *	(apparently this is hardware design flaw not software ?)
	 */
	 
	outw(0x01, host->io_port + ISP_CTRL_STATUS);
	udelay(100);
	outw(HCCR_RESET, host->io_port + HOST_HCCR);
	udelay(100);
	outw(HCCR_RELEASE, host->io_port + HOST_HCCR);
	outw(HCCR_BIOS_DISABLE, host->io_port + HOST_HCCR);

	loop_count = DEFAULT_LOOP_COUNT;
	while (--loop_count && inw(host->io_port + HOST_HCCR) == RISC_BUSY) {
		barrier();
		cpu_relax();
	}
	if (!loop_count)
		printk("qlogicfc%d : reset_hardware loop timeout\n", hostdata->host_id);



#if DEBUG_ISP2x00
	printk("qlogicfc%d : mbox 0 0x%04x \n", hostdata->host_id,  inw(host->io_port + MBOX0));
	printk("qlogicfc%d : mbox 1 0x%04x \n", hostdata->host_id,  inw(host->io_port + MBOX1));
	printk("qlogicfc%d : mbox 2 0x%04x \n", hostdata->host_id,  inw(host->io_port + MBOX2));
	printk("qlogicfc%d : mbox 3 0x%04x \n", hostdata->host_id,  inw(host->io_port + MBOX3));
	printk("qlogicfc%d : mbox 4 0x%04x \n", hostdata->host_id,  inw(host->io_port + MBOX4));
	printk("qlogicfc%d : mbox 5 0x%04x \n", hostdata->host_id,  inw(host->io_port + MBOX5));
	printk("qlogicfc%d : mbox 6 0x%04x \n", hostdata->host_id,  inw(host->io_port + MBOX6));
	printk("qlogicfc%d : mbox 7 0x%04x \n", hostdata->host_id,  inw(host->io_port + MBOX7));
#endif				/* DEBUG_ISP2x00 */

	DEBUG(printk("qlogicfc%d : verifying checksum\n", hostdata->host_id));

#if defined(CONFIG_SCSI_QLOGIC_FC_FIRMWARE)
	{
		int i;
		unsigned short * risc_code = NULL;
		unsigned short risc_code_len = 0;
		if (hostdata->pci_dev->device == PCI_DEVICE_ID_QLOGIC_ISP2100){
		        risc_code = risc_code2100;
			risc_code_len = risc_code_length2100;
		}
		else if (hostdata->pci_dev->device == PCI_DEVICE_ID_QLOGIC_ISP2200){
		        risc_code = risc_code2200;
			risc_code_len = risc_code_length2200;
		}

		for (i = 0; i < risc_code_len; i++) {
			param[0] = MBOX_WRITE_RAM_WORD;
			param[1] = risc_code_addr01 + i;
			param[2] = risc_code[i];