path: root/drivers/md/raid1.c
author	NeilBrown <neilb@suse.com>	2016-11-18 00:16:12 -0500
committer	Shaohua Li <shli@fb.com>	2016-11-22 12:13:18 -0500
commit	2e52d449bcec31cb66d80aa8c798b15f76f1f5e0 (patch)
tree	bebd10d39bc47f8603e78c8dfb99d41d1476f673 /drivers/md/raid1.c
parent	46533ff7fefb7e9e3539494f5873b00091caa8eb (diff)
md/raid1: add failfast handling for reads.
If a device is marked FailFast and it is not the only device we can read from, we mark the bio with REQ_FAILFAST_* flags. If this does fail, we don't try read repair but just allow failure. If it was the last device it doesn't fail of course, so the retry happens on the same device - this time without FAILFAST. A subsequent failure will not retry but will just pass up the error.

During resync we may use FAILFAST requests, and on a failure we will simply use the other device(s).

During recovery we will only use FAILFAST in the unusual case where there are multiple places to read from - i.e. if there are > 2 devices. If we get a failure we will fail the device and complete the resync/recovery with the remaining devices.

The new R1BIO_FailFast flag is set on a read request to suggest that a FAILFAST request might be acceptable. The rdev needs to have FailFast set as well for the read to actually use REQ_FAILFAST_*.

We need to know there are at least two working devices before we can set R1BIO_FailFast, so we mustn't stop looking at the first device we find. So change the "min_pending == 0" handling to not exit early, but to always choose the best_pending_disk if min_pending == 0.

The spinlocked region in raid1_error() is enlarged to ensure that if two bios, reading from two different devices, fail at the same time, there is no risk that both devices will be marked faulty, leaving zero "In_sync" devices.

Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Shaohua Li <shli@fb.com>
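The gating described above comes down to two independent bits: read_balance() records in the r1_bio whether more than one in-sync device could have served the read, and the submit path adds the fail-fast flag to the bio only when both that per-request bit and the per-device FailFast flag are set. The standalone sketch below models just that decision; it is illustrative user-space C, not the kernel code, and the names DEV_FAILFAST, R1_FAILFAST, pick_disk() and bio_flags() are invented stand-ins for rdev->flags, R1BIO_FailFast, read_balance() and the MD_FAILFAST bio flag.

/*
 * Standalone sketch (not kernel code) of the two-flag gating described
 * in the commit message.  All names here are invented for illustration.
 */
#include <stdio.h>

#define DEV_FAILFAST	(1u << 0)	/* per-device flag (rdev FailFast)   */
#define R1_FAILFAST	(1u << 0)	/* per-request flag (R1BIO_FailFast) */
#define BIO_FAILFAST	(1u << 0)	/* flag OR-ed into the submitted bio */

struct dev  { unsigned flags; int in_sync; };
struct r1io { unsigned state; };

/* read_balance() analogue: allow fail-fast only if >= 2 readable devices */
static int pick_disk(struct dev *devs, int ndev, struct r1io *io)
{
	int i, best = -1, readable = 0;

	io->state &= ~R1_FAILFAST;
	for (i = 0; i < ndev; i++) {
		if (!devs[i].in_sync)
			continue;
		readable++;
		if (best < 0)
			best = i;
	}
	if (readable >= 2)
		io->state |= R1_FAILFAST;
	return best;
}

/* submission analogue: both the device and the request must agree */
static unsigned bio_flags(struct dev *d, struct r1io *io)
{
	unsigned f = 0;

	if ((d->flags & DEV_FAILFAST) && (io->state & R1_FAILFAST))
		f |= BIO_FAILFAST;
	return f;
}

int main(void)
{
	struct dev devs[2] = {
		{ .flags = DEV_FAILFAST, .in_sync = 1 },
		{ .flags = 0,            .in_sync = 1 },
	};
	struct r1io io = { 0 };
	int d = pick_disk(devs, 2, &io);

	printf("disk %d, failfast %u\n", d, bio_flags(&devs[d], &io));

	devs[1].in_sync = 0;		/* only one readable device left */
	d = pick_disk(devs, 2, &io);
	printf("disk %d, failfast %u\n", d, bio_flags(&devs[d], &io));
	return 0;
}

With only one readable device left, the request-side bit is never set, so the retry is issued without fail-fast - the same behaviour the patch gives the last working mirror.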
Diffstat (limited to 'drivers/md/raid1.c')
-rw-r--r--	drivers/md/raid1.c	52
1 file changed, 42 insertions(+), 10 deletions(-)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4006a9be2eab..1f22df0e5f3d 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -329,6 +329,11 @@ static void raid1_end_read_request(struct bio *bio)
 
 	if (uptodate)
 		set_bit(R1BIO_Uptodate, &r1_bio->state);
+	else if (test_bit(FailFast, &rdev->flags) &&
+		 test_bit(R1BIO_FailFast, &r1_bio->state))
+		/* This was a fail-fast read so we definitely
+		 * want to retry */
+		;
 	else {
 		/* If all other devices have failed, we want to return
 		 * the error upwards rather than fail the last device.
@@ -535,6 +540,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
 	best_good_sectors = 0;
 	has_nonrot_disk = 0;
 	choose_next_idle = 0;
+	clear_bit(R1BIO_FailFast, &r1_bio->state);
 
 	if ((conf->mddev->recovery_cp < this_sector + sectors) ||
 	    (mddev_is_clustered(conf->mddev) &&
@@ -608,6 +614,10 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
 		} else
 			best_good_sectors = sectors;
 
+		if (best_disk >= 0)
+			/* At least two disks to choose from so failfast is OK */
+			set_bit(R1BIO_FailFast, &r1_bio->state);
+
 		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
 		has_nonrot_disk |= nonrot;
 		pending = atomic_read(&rdev->nr_pending);
@@ -646,11 +656,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
 			}
 			break;
 		}
-		/* If device is idle, use it */
-		if (pending == 0) {
-			best_disk = disk;
-			break;
-		}
 
 		if (choose_next_idle)
 			continue;
@@ -673,7 +678,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
 	 * mixed ratation/non-rotational disks depending on workload.
 	 */
 	if (best_disk == -1) {
-		if (has_nonrot_disk)
+		if (has_nonrot_disk || min_pending == 0)
 			best_disk = best_pending_disk;
 		else
 			best_disk = best_dist_disk;
@@ -1167,6 +1172,9 @@ read_again:
 		read_bio->bi_bdev = mirror->rdev->bdev;
 		read_bio->bi_end_io = raid1_end_read_request;
 		bio_set_op_attrs(read_bio, op, do_sync);
+		if (test_bit(FailFast, &mirror->rdev->flags) &&
+		    test_bit(R1BIO_FailFast, &r1_bio->state))
+			read_bio->bi_opf |= MD_FAILFAST;
 		read_bio->bi_private = r1_bio;
 
 		if (mddev->gendisk)
@@ -1464,6 +1472,7 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
 	 * next level up know.
 	 * else mark the drive as failed
 	 */
+	spin_lock_irqsave(&conf->device_lock, flags);
 	if (test_bit(In_sync, &rdev->flags)
 	    && (conf->raid_disks - mddev->degraded) == 1) {
 		/*
@@ -1473,10 +1482,10 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
 		 * it is very likely to fail.
 		 */
 		conf->recovery_disabled = mddev->recovery_disabled;
+		spin_unlock_irqrestore(&conf->device_lock, flags);
 		return;
 	}
 	set_bit(Blocked, &rdev->flags);
-	spin_lock_irqsave(&conf->device_lock, flags);
 	if (test_and_clear_bit(In_sync, &rdev->flags)) {
 		mddev->degraded++;
 		set_bit(Faulty, &rdev->flags);
@@ -1815,12 +1824,24 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
 	sector_t sect = r1_bio->sector;
 	int sectors = r1_bio->sectors;
 	int idx = 0;
+	struct md_rdev *rdev;
+
+	rdev = conf->mirrors[r1_bio->read_disk].rdev;
+	if (test_bit(FailFast, &rdev->flags)) {
+		/* Don't try recovering from here - just fail it
+		 * ... unless it is the last working device of course */
+		md_error(mddev, rdev);
+		if (test_bit(Faulty, &rdev->flags))
+			/* Don't try to read from here, but make sure
+			 * put_buf does it's thing
+			 */
+			bio->bi_end_io = end_sync_write;
+	}
 
 	while(sectors) {
 		int s = sectors;
 		int d = r1_bio->read_disk;
 		int success = 0;
-		struct md_rdev *rdev;
 		int start;
 
 		if (s > (PAGE_SIZE>>9))
@@ -2331,7 +2352,9 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 	bio_put(bio);
 	r1_bio->bios[r1_bio->read_disk] = NULL;
 
-	if (mddev->ro == 0) {
+	rdev = conf->mirrors[r1_bio->read_disk].rdev;
+	if (mddev->ro == 0
+	    && !test_bit(FailFast, &rdev->flags)) {
 		freeze_array(conf, 1);
 		fix_read_error(conf, r1_bio->read_disk,
 			       r1_bio->sector, r1_bio->sectors);
@@ -2340,7 +2363,7 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 		r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
 	}
 
-	rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
+	rdev_dec_pending(rdev, conf->mddev);
 
 read_more:
 	disk = read_balance(conf, r1_bio, &max_sectors);
@@ -2365,6 +2388,9 @@ read_more:
 		bio->bi_bdev = rdev->bdev;
 		bio->bi_end_io = raid1_end_read_request;
 		bio_set_op_attrs(bio, REQ_OP_READ, do_sync);
+		if (test_bit(FailFast, &rdev->flags) &&
+		    test_bit(R1BIO_FailFast, &r1_bio->state))
+			bio->bi_opf |= MD_FAILFAST;
 		bio->bi_private = r1_bio;
 		if (max_sectors < r1_bio->sectors) {
 			/* Drat - have to split this up more */
@@ -2653,6 +2679,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 			bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
 			bio->bi_bdev = rdev->bdev;
 			bio->bi_private = r1_bio;
+			if (test_bit(FailFast, &rdev->flags))
+				bio->bi_opf |= MD_FAILFAST;
 		}
 	}
 	rcu_read_unlock();
@@ -2783,6 +2811,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 			if (bio->bi_end_io == end_sync_read) {
 				read_targets--;
 				md_sync_acct(bio->bi_bdev, nr_sectors);
+				if (read_targets == 1)
+					bio->bi_opf &= ~MD_FAILFAST;
 				generic_make_request(bio);
 			}
 		}
@@ -2790,6 +2820,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 		atomic_set(&r1_bio->remaining, 1);
 		bio = r1_bio->bios[r1_bio->read_disk];
 		md_sync_acct(bio->bi_bdev, nr_sectors);
+		if (read_targets == 1)
+			bio->bi_opf &= ~MD_FAILFAST;
 		generic_make_request(bio);
 
 	}
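The raid1_error() hunks above widen the device_lock region so that the "is this the last working device?" test and the transition to Faulty happen atomically; previously two reads failing on different devices at the same moment could each pass the test outside the lock and then each mark its device faulty, leaving no In_sync members. A rough user-space analogue of the fixed ordering, written with pthreads and invented names (struct array, fail_device()) rather than the kernel's conf->device_lock and md_error(), is sketched below.

/*
 * User-space sketch (pthreads, not kernel code) of why raid1_error()
 * now takes device_lock before the "last working device" check.
 * The struct array, struct dev and fail_device() names are invented.
 */
#include <pthread.h>
#include <stdio.h>

struct dev   { int faulty; };
struct array { struct dev d[2]; int in_sync; pthread_mutex_t lock; };

/* raid1_error() analogue: check and degrade under the same lock,
 * so two concurrent failures cannot both remove the last device. */
static void fail_device(struct array *a, int i)
{
	pthread_mutex_lock(&a->lock);
	if (a->in_sync == 1) {		/* last device: refuse to fail it */
		pthread_mutex_unlock(&a->lock);
		return;
	}
	if (!a->d[i].faulty) {
		a->d[i].faulty = 1;
		a->in_sync--;
	}
	pthread_mutex_unlock(&a->lock);
}

static void *failer(void *p)
{
	struct array *a = p;
	static int next;

	fail_device(a, __sync_fetch_and_add(&next, 1));
	return NULL;
}

int main(void)
{
	struct array a = { .in_sync = 2, .lock = PTHREAD_MUTEX_INITIALIZER };
	pthread_t t[2];
	int i;

	for (i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, failer, &a);
	for (i = 0; i < 2; i++)
		pthread_join(t[i], NULL);

	/* With the check inside the lock at least one device survives. */
	printf("in_sync devices left: %d\n", a.in_sync);
	return 0;
}

Because the check runs under the same mutex as the decrement, one of the two racing failers always observes in_sync == 1 and backs off, which is exactly the invariant the enlarged spinlocked region preserves.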