Diffstat (limited to 'drivers')
 drivers/atm/fore200e.c                    |   10
 drivers/char/drm/drm_stub.c               |    2
 drivers/connector/connector.c             |    3
 drivers/infiniband/hw/mthca/mthca_main.c  |   45
 drivers/infiniband/ulp/ipoib/ipoib_main.c |    4
 drivers/mfd/ucb1x00-core.c                |    2
 drivers/mfd/ucb1x00.h                     |    2
 drivers/net/Kconfig                       |    8
 drivers/net/bonding/bond_main.c           |  285
 drivers/net/bonding/bonding.h             |    4
 drivers/net/ibm_emac/ibm_emac_core.c      |   31
 drivers/net/ns83820.c                     |    2
 drivers/net/pcmcia/smc91c92_cs.c          |    2
 drivers/net/skge.c                        |   24
 drivers/net/starfire.c                    |   46
 drivers/net/sungem.h                      |    3
 drivers/net/tokenring/ibmtr.c             |    5
 drivers/net/tulip/21142.c                 |    2
 drivers/net/wireless/orinoco.c            |   14
 drivers/s390/net/qeth.h                   |    2
 drivers/s390/net/qeth_main.c              |   37
 drivers/scsi/ahci.c                       |   31
 drivers/scsi/libata-core.c                |  433
 drivers/scsi/libata-scsi.c                |  689
 drivers/scsi/libata.h                     |   16
 drivers/scsi/megaraid/megaraid_sas.c      |    1
 drivers/scsi/sata_mv.c                    | 1142
 drivers/scsi/sata_nv.c                    |   16
 drivers/scsi/sata_promise.c               |    6
 drivers/scsi/sata_sis.c                   |    2
 drivers/scsi/sata_uli.c                   |    2
 drivers/scsi/sata_via.c                   |    2
 drivers/serial/sunsu.c                    |    4
 33 files changed, 1972 insertions(+), 905 deletions(-)
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 2bf723a7b6e6..6f1a83c9d9e0 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -178,14 +178,12 @@ fore200e_irq_itoa(int irq)
 
 
 static void*
-fore200e_kmalloc(int size, int flags)
+fore200e_kmalloc(int size, unsigned int __nocast flags)
 {
-    void* chunk = kmalloc(size, flags);
+    void *chunk = kzalloc(size, flags);
 
-    if (chunk)
-        memset(chunk, 0x00, size);
-    else
-        printk(FORE200E "kmalloc() failed, requested size = %d, flags = 0x%x\n", size, flags);
+    if (!chunk)
+        printk(FORE200E "kmalloc() failed, requested size = %d, flags = 0x%x\n", size, flags);
 
     return chunk;
 }
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c
index 95a976c96eb8..70458cb061c6 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/char/drm/drm_stub.c
@@ -47,7 +47,7 @@ MODULE_PARM_DESC(cards_limit, "Maximum number of graphics cards");
 MODULE_PARM_DESC(debug, "Enable debug output");
 
 module_param_named(cards_limit, drm_cards_limit, int, 0444);
-module_param_named(debug, drm_debug, int, 0666);
+module_param_named(debug, drm_debug, int, 0600);
 
 drm_head_t **drm_heads;
 struct drm_sysfs_class *drm_class;
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index bb0b3a8de14b..1422285d537c 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -69,7 +69,8 @@ int cn_already_initialized = 0;
  * a new message.
  *
  */
-int cn_netlink_send(struct cn_msg *msg, u32 __group, int gfp_mask)
+int cn_netlink_send(struct cn_msg *msg, u32 __group,
+                    unsigned int __nocast gfp_mask)
 {
         struct cn_callback_entry *__cbq;
         unsigned int size;
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index ffbcd40418d5..23a3f56c7899 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -503,6 +503,25 @@ err_free_aux:
         return err;
 }
 
+static void mthca_free_icms(struct mthca_dev *mdev)
+{
+        u8 status;
+
+        mthca_free_icm_table(mdev, mdev->mcg_table.table);
+        if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
+                mthca_free_icm_table(mdev, mdev->srq_table.table);
+        mthca_free_icm_table(mdev, mdev->cq_table.table);
+        mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
+        mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
+        mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
+        mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
+        mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
+        mthca_unmap_eq_icm(mdev);
+
+        mthca_UNMAP_ICM_AUX(mdev, &status);
+        mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
+}
+
 static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
 {
         struct mthca_dev_lim dev_lim;
@@ -580,18 +599,7 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
         return 0;
 
 err_free_icm:
-        if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
-                mthca_free_icm_table(mdev, mdev->srq_table.table);
-        mthca_free_icm_table(mdev, mdev->cq_table.table);
-        mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
-        mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
-        mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
-        mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
-        mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
-        mthca_unmap_eq_icm(mdev);
-
-        mthca_UNMAP_ICM_AUX(mdev, &status);
-        mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
+        mthca_free_icms(mdev);
 
 err_stop_fw:
         mthca_UNMAP_FA(mdev, &status);
@@ -611,18 +619,7 @@ static void mthca_close_hca(struct mthca_dev *mdev)
         mthca_CLOSE_HCA(mdev, 0, &status);
 
         if (mthca_is_memfree(mdev)) {
-                if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
-                        mthca_free_icm_table(mdev, mdev->srq_table.table);
-                mthca_free_icm_table(mdev, mdev->cq_table.table);
-                mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
-                mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
-                mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
-                mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
-                mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
-                mthca_unmap_eq_icm(mdev);
-
-                mthca_UNMAP_ICM_AUX(mdev, &status);
-                mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
+                mthca_free_icms(mdev);
 
                 mthca_UNMAP_FA(mdev, &status);
                 mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 704f48e0b6a7..6c5bf07489f4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -474,7 +474,7 @@ err:
         spin_unlock(&priv->lock);
 }
 
-static void path_lookup(struct sk_buff *skb, struct net_device *dev)
+static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
 {
         struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
 
@@ -569,7 +569,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
         if (skb->dst && skb->dst->neighbour) {
                 if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
-                        path_lookup(skb, dev);
+                        ipoib_path_lookup(skb, dev);
                         goto out;
                 }
 
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index 10f6ce1bc0ab..612564ac6f7b 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -642,8 +642,6 @@ static void __exit ucb1x00_exit(void)
 module_init(ucb1x00_init);
 module_exit(ucb1x00_exit);
 
-EXPORT_SYMBOL(ucb1x00_class);
-
 EXPORT_SYMBOL(ucb1x00_io_set_dir);
 EXPORT_SYMBOL(ucb1x00_io_write);
 EXPORT_SYMBOL(ucb1x00_io_read);
diff --git a/drivers/mfd/ucb1x00.h b/drivers/mfd/ucb1x00.h
index 6b632644f59a..9c9a647d8b7b 100644
--- a/drivers/mfd/ucb1x00.h
+++ b/drivers/mfd/ucb1x00.h
@@ -106,8 +106,6 @@ struct ucb1x00_irq {
         void (*fn)(int, void *);
 };
 
-extern struct class ucb1x00_class;
-
 struct ucb1x00 {
         spinlock_t lock;
         struct mcp *mcp;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 2a908c4690a7..c748b0e16419 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1655,7 +1655,7 @@ config LAN_SAA9730
 
 config NET_POCKET
         bool "Pocket and portable adapters"
-        depends on NET_ETHERNET && ISA
+        depends on NET_ETHERNET && PARPORT
         ---help---
           Cute little network (Ethernet) devices which attach to the parallel
           port ("pocket adapters"), commonly used with laptops. If you have
@@ -1679,7 +1679,7 @@ config NET_POCKET
 
 config ATP
         tristate "AT-LAN-TEC/RealTek pocket adapter support"
-        depends on NET_POCKET && ISA && X86
+        depends on NET_POCKET && PARPORT && X86
         select CRC32
         ---help---
           This is a network (Ethernet) device which attaches to your parallel
@@ -1694,7 +1694,7 @@ config ATP
 
 config DE600
         tristate "D-Link DE600 pocket adapter support"
-        depends on NET_POCKET && ISA
+        depends on NET_POCKET && PARPORT
         ---help---
           This is a network (Ethernet) device which attaches to your parallel
           port. Read <file:Documentation/networking/DLINK.txt> as well as the
@@ -1709,7 +1709,7 @@ config DE600
 
 config DE620
         tristate "D-Link DE620 pocket adapter support"
-        depends on NET_POCKET && ISA
+        depends on NET_POCKET && PARPORT
         ---help---
           This is a network (Ethernet) device which attaches to your parallel
           port. Read <file:Documentation/networking/DLINK.txt> as well as the
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index bf81cd45e4d4..f0a5b772a386 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -487,6 +487,8 @@
  *	* Added xmit_hash_policy_layer34()
  *	- Modified by Jay Vosburgh <fubar@us.ibm.com> to also support mode 4.
  *	  Set version to 2.6.3.
+ * 2005/09/26 - Jay Vosburgh <fubar@us.ibm.com>
+ *	- Removed backwards compatibility for old ifenslaves.  Version 2.6.4.
  */
 
 //#define BONDING_DEBUG 1
@@ -595,14 +597,7 @@ static int arp_ip_count = 0;
 static int bond_mode	= BOND_MODE_ROUNDROBIN;
 static int xmit_hashtype= BOND_XMIT_POLICY_LAYER2;
 static int lacp_fast	= 0;
-static int app_abi_ver	= 0;
-static int orig_app_abi_ver = -1; /* This is used to save the first ABI version
-				   * we receive from the application. Once set,
-				   * it won't be changed, and the module will
-				   * refuse to enslave/release interfaces if the
-				   * command comes from an application using
-				   * another ABI version.
-				   */
+
 struct bond_parm_tbl {
 	char *modename;
 	int mode;
@@ -1294,12 +1289,13 @@ static void bond_mc_list_destroy(struct bonding *bond)
 /*
  * Copy all the Multicast addresses from src to the bonding device dst
  */
-static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond, int gpf_flag)
+static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond,
+			     unsigned int __nocast gfp_flag)
 {
 	struct dev_mc_list *dmi, *new_dmi;
 
 	for (dmi = mc_list; dmi; dmi = dmi->next) {
-		new_dmi = kmalloc(sizeof(struct dev_mc_list), gpf_flag);
+		new_dmi = kmalloc(sizeof(struct dev_mc_list), gfp_flag);
 
 		if (!new_dmi) {
 			/* FIXME: Potential memory leak !!! */
@@ -1702,51 +1698,29 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
 		}
 	}
 
-	if (app_abi_ver >= 1) {
-		/* The application is using an ABI, which requires the
-		 * slave interface to be closed.
-		 */
-		if ((slave_dev->flags & IFF_UP)) {
-			printk(KERN_ERR DRV_NAME
-			       ": Error: %s is up\n",
-			       slave_dev->name);
-			res = -EPERM;
-			goto err_undo_flags;
-		}
-
-		if (slave_dev->set_mac_address == NULL) {
-			printk(KERN_ERR DRV_NAME
-			       ": Error: The slave device you specified does "
-			       "not support setting the MAC address.\n");
-			printk(KERN_ERR
-			       "Your kernel likely does not support slave "
-			       "devices.\n");
-
-			res = -EOPNOTSUPP;
-			goto err_undo_flags;
-		}
-	} else {
-		/* The application is not using an ABI, which requires the
-		 * slave interface to be open.
-		 */
-		if (!(slave_dev->flags & IFF_UP)) {
-			printk(KERN_ERR DRV_NAME
-			       ": Error: %s is not running\n",
-			       slave_dev->name);
-			res = -EINVAL;
-			goto err_undo_flags;
-		}
-
-		if ((bond->params.mode == BOND_MODE_8023AD) ||
-		    (bond->params.mode == BOND_MODE_TLB) ||
-		    (bond->params.mode == BOND_MODE_ALB)) {
-			printk(KERN_ERR DRV_NAME
-			       ": Error: to use %s mode, you must upgrade "
-			       "ifenslave.\n",
-			       bond_mode_name(bond->params.mode));
-			res = -EOPNOTSUPP;
-			goto err_undo_flags;
-		}
-	}
+	/*
+	 * Old ifenslave binaries are no longer supported.  These can
+	 * be identified with moderate accuracy by the state of the slave:
+	 * the current ifenslave will set the interface down prior to
+	 * enslaving it; the old ifenslave will not.
+	 */
+	if ((slave_dev->flags & IFF_UP)) {
+		printk(KERN_ERR DRV_NAME ": %s is up. "
+		       "This may be due to an out of date ifenslave.\n",
+		       slave_dev->name);
+		res = -EPERM;
+		goto err_undo_flags;
+	}
+
+	if (slave_dev->set_mac_address == NULL) {
+		printk(KERN_ERR DRV_NAME
+		       ": Error: The slave device you specified does "
+		       "not support setting the MAC address.\n");
+		printk(KERN_ERR
+		       "Your kernel likely does not support slave devices.\n");
+
+		res = -EOPNOTSUPP;
+		goto err_undo_flags;
+	}
 
 	new_slave = kmalloc(sizeof(struct slave), GFP_KERNEL);
@@ -1762,41 +1736,36 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
 	 */
 	new_slave->original_flags = slave_dev->flags;
 
-	if (app_abi_ver >= 1) {
-		/* save slave's original ("permanent") mac address for
-		 * modes that needs it, and for restoring it upon release,
-		 * and then set it to the master's address
-		 */
-		memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN);
-
-		/* set slave to master's mac address
-		 * The application already set the master's
-		 * mac address to that of the first slave
-		 */
-		memcpy(addr.sa_data, bond_dev->dev_addr, bond_dev->addr_len);
-		addr.sa_family = slave_dev->type;
-		res = dev_set_mac_address(slave_dev, &addr);
-		if (res) {
-			dprintk("Error %d calling set_mac_address\n", res);
-			goto err_free;
-		}
-
-		/* open the slave since the application closed it */
-		res = dev_open(slave_dev);
-		if (res) {
-			dprintk("Openning slave %s failed\n", slave_dev->name);
-			goto err_restore_mac;
-		}
-	}
+	/*
+	 * Save slave's original ("permanent") mac address for modes
+	 * that need it, and for restoring it upon release, and then
+	 * set it to the master's address
+	 */
+	memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN);
+
+	/*
+	 * Set slave to master's mac address.  The application already
+	 * set the master's mac address to that of the first slave
+	 */
+	memcpy(addr.sa_data, bond_dev->dev_addr, bond_dev->addr_len);
+	addr.sa_family = slave_dev->type;
+	res = dev_set_mac_address(slave_dev, &addr);
+	if (res) {
+		dprintk("Error %d calling set_mac_address\n", res);
+		goto err_free;
+	}
+
+	/* open the slave since the application closed it */
+	res = dev_open(slave_dev);
+	if (res) {
+		dprintk("Openning slave %s failed\n", slave_dev->name);
+		goto err_restore_mac;
+	}
 
 	res = netdev_set_master(slave_dev, bond_dev);
 	if (res) {
 		dprintk("Error %d calling netdev_set_master\n", res);
-		if (app_abi_ver < 1) {
-			goto err_free;
-		} else {
-			goto err_close;
-		}
+		goto err_close;
 	}
 
 	new_slave->dev = slave_dev;
@@ -1997,39 +1966,6 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
 
 	write_unlock_bh(&bond->lock);
 
-	if (app_abi_ver < 1) {
-		/*
-		 * !!! This is to support old versions of ifenslave.
-		 * We can remove this in 2.5 because our ifenslave takes
-		 * care of this for us.
-		 * We check to see if the master has a mac address yet.
-		 * If not, we'll give it the mac address of our slave device.
-		 */
-		int ndx = 0;
-
-		for (ndx = 0; ndx < bond_dev->addr_len; ndx++) {
-			dprintk("Checking ndx=%d of bond_dev->dev_addr\n",
-				ndx);
-			if (bond_dev->dev_addr[ndx] != 0) {
-				dprintk("Found non-zero byte at ndx=%d\n",
-					ndx);
-				break;
-			}
-		}
-
-		if (ndx == bond_dev->addr_len) {
-			/*
-			 * We got all the way through the address and it was
-			 * all 0's.
-			 */
-			dprintk("%s doesn't have a MAC address yet. \n",
-				bond_dev->name);
-			dprintk("Going to give assign it from %s.\n",
-				slave_dev->name);
-			bond_sethwaddr(bond_dev, slave_dev);
-		}
-	}
-
 	printk(KERN_INFO DRV_NAME
 	       ": %s: enslaving %s as a%s interface with a%s link.\n",
 	       bond_dev->name, slave_dev->name,
@@ -2227,12 +2163,10 @@ static int bond_release(struct net_device *bond_dev, struct net_device *slave_de
 	/* close slave before restoring its mac address */
 	dev_close(slave_dev);
 
-	if (app_abi_ver >= 1) {
-		/* restore original ("permanent") mac address */
-		memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
-		addr.sa_family = slave_dev->type;
-		dev_set_mac_address(slave_dev, &addr);
-	}
+	/* restore original ("permanent") mac address */
+	memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
+	addr.sa_family = slave_dev->type;
+	dev_set_mac_address(slave_dev, &addr);
 
 	/* restore the original state of the
 	 * IFF_NOARP flag that might have been
@@ -2320,12 +2254,10 @@ static int bond_release_all(struct net_device *bond_dev)
 	/* close slave before restoring its mac address */
 	dev_close(slave_dev);
 
-	if (app_abi_ver >= 1) {
-		/* restore original ("permanent") mac address*/
-		memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
-		addr.sa_family = slave_dev->type;
-		dev_set_mac_address(slave_dev, &addr);
-	}
+	/* restore original ("permanent") mac address*/
+	memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
+	addr.sa_family = slave_dev->type;
+	dev_set_mac_address(slave_dev, &addr);
 
 	/* restore the original state of the IFF_NOARP flag that might have
 	 * been set by bond_set_slave_inactive_flags()
@@ -2423,57 +2355,6 @@ static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_devi
 	return res;
 }
 
-static int bond_ethtool_ioctl(struct net_device *bond_dev, struct ifreq *ifr)
-{
-	struct ethtool_drvinfo info;
-	void __user *addr = ifr->ifr_data;
-	uint32_t cmd;
-
-	if (get_user(cmd, (uint32_t __user *)addr)) {
-		return -EFAULT;
-	}
-
-	switch (cmd) {
-	case ETHTOOL_GDRVINFO:
-		if (copy_from_user(&info, addr, sizeof(info))) {
-			return -EFAULT;
-		}
-
-		if (strcmp(info.driver, "ifenslave") == 0) {
-			int new_abi_ver;
-			char *endptr;
-
-			new_abi_ver = simple_strtoul(info.fw_version,
-						     &endptr, 0);
-			if (*endptr) {
-				printk(KERN_ERR DRV_NAME
-				       ": Error: got invalid ABI "
-				       "version from application\n");
-
-				return -EINVAL;
-			}
-
-			if (orig_app_abi_ver == -1) {
-				orig_app_abi_ver = new_abi_ver;
-			}
-
-			app_abi_ver = new_abi_ver;
-		}
-
-		strncpy(info.driver, DRV_NAME, 32);
-		strncpy(info.version, DRV_VERSION, 32);
-		snprintf(info.fw_version, 32, "%d", BOND_ABI_VERSION);
-
-		if (copy_to_user(addr, &info, sizeof(info))) {
-			return -EFAULT;
-		}
-
-		return 0;
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
 static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
 {
 	struct bonding *bond = bond_dev->priv;
@@ -3442,16 +3323,11 @@ static void bond_info_show_slave(struct seq_file *seq, const struct slave *slave
 	seq_printf(seq, "Link Failure Count: %d\n",
 		   slave->link_failure_count);
 
-	if (app_abi_ver >= 1) {
-		seq_printf(seq,
-			   "Permanent HW addr: %02x:%02x:%02x:%02x:%02x:%02x\n",
-			   slave->perm_hwaddr[0],
-			   slave->perm_hwaddr[1],
-			   slave->perm_hwaddr[2],
-			   slave->perm_hwaddr[3],
-			   slave->perm_hwaddr[4],
-			   slave->perm_hwaddr[5]);
-	}
+	seq_printf(seq,
+		   "Permanent HW addr: %02x:%02x:%02x:%02x:%02x:%02x\n",
+		   slave->perm_hwaddr[0], slave->perm_hwaddr[1],
+		   slave->perm_hwaddr[2], slave->perm_hwaddr[3],
+		   slave->perm_hwaddr[4], slave->perm_hwaddr[5]);
 
 	if (bond->params.mode == BOND_MODE_8023AD) {
 		const struct aggregator *agg
@@ -4010,15 +3886,12 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
 	struct ifslave k_sinfo;
 	struct ifslave __user *u_sinfo = NULL;
 	struct mii_ioctl_data *mii = NULL;
-	int prev_abi_ver = orig_app_abi_ver;
 	int res = 0;
 
 	dprintk("bond_ioctl: master=%s, cmd=%d\n",
 		bond_dev->name, cmd);
 
 	switch (cmd) {
-	case SIOCETHTOOL:
-		return bond_ethtool_ioctl(bond_dev, ifr);
 	case SIOCGMIIPHY:
 		mii = if_mii(ifr);
 		if (!mii) {
@@ -4090,21 +3963,6 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
 		return -EPERM;
 	}
 
-	if (orig_app_abi_ver == -1) {
-		/* no orig_app_abi_ver was provided yet, so we'll use the
-		 * current one from now on, even if it's 0
-		 */
-		orig_app_abi_ver = app_abi_ver;
-
-	} else if (orig_app_abi_ver != app_abi_ver) {
-		printk(KERN_ERR DRV_NAME
-		       ": Error: already using ifenslave ABI version %d; to "
-		       "upgrade ifenslave to version %d, you must first "
-		       "reload bonding.\n",
-		       orig_app_abi_ver, app_abi_ver);
-		return -EINVAL;
-	}
-
 	slave_dev = dev_get_by_name(ifr->ifr_slave);
 
 	dprintk("slave_dev=%p: \n", slave_dev);
@@ -4137,14 +3995,6 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
 		dev_put(slave_dev);
 	}
 
-	if (res < 0) {
-		/* The ioctl failed, so there's no point in changing the
-		 * orig_app_abi_ver. We'll restore it's value just in case
-		 * we've changed it earlier in this function.
-		 */
-		orig_app_abi_ver = prev_abi_ver;
-	}
-
 	return res;
 }
 
@@ -4578,9 +4428,18 @@ static inline void bond_set_mode_ops(struct bonding *bond, int mode)
 	}
 }
 
+static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
+				     struct ethtool_drvinfo *drvinfo)
+{
+	strncpy(drvinfo->driver, DRV_NAME, 32);
+	strncpy(drvinfo->version, DRV_VERSION, 32);
+	snprintf(drvinfo->fw_version, 32, "%d", BOND_ABI_VERSION);
+}
+
 static struct ethtool_ops bond_ethtool_ops = {
 	.get_tx_csum		= ethtool_op_get_tx_csum,
 	.get_sg			= ethtool_op_get_sg,
+	.get_drvinfo		= bond_ethtool_get_drvinfo,
 };
 
 /*
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 388196980862..bbf9da8af624 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -40,8 +40,8 @@
 #include "bond_3ad.h"
 #include "bond_alb.h"
 
-#define DRV_VERSION	"2.6.3"
-#define DRV_RELDATE	"June 8, 2005"
+#define DRV_VERSION	"2.6.4"
+#define DRV_RELDATE	"September 26, 2005"
 #define DRV_NAME	"bonding"
 #define DRV_DESCRIPTION	"Ethernet Channel Bonding Driver"
 
diff --git a/drivers/net/ibm_emac/ibm_emac_core.c b/drivers/net/ibm_emac/ibm_emac_core.c
index 0de3bb906174..14e9b6315f20 100644
--- a/drivers/net/ibm_emac/ibm_emac_core.c
+++ b/drivers/net/ibm_emac/ibm_emac_core.c
@@ -1875,6 +1875,9 @@ static int emac_init_device(struct ocp_device *ocpdev, struct ibm_ocp_mal *mal)
 		rc = -ENODEV;
 		goto bail;
 	}
+
+	/* Disable any PHY features not supported by the platform */
+	ep->phy_mii.def->features &= ~emacdata->phy_feat_exc;
 
 	/* Setup initial PHY config & startup aneg */
 	if (ep->phy_mii.def->ops->init)
@@ -1882,6 +1885,34 @@ static int emac_init_device(struct ocp_device *ocpdev, struct ibm_ocp_mal *mal)
 	netif_carrier_off(ndev);
 	if (ep->phy_mii.def->features & SUPPORTED_Autoneg)
 		ep->want_autoneg = 1;
+	else {
+		ep->want_autoneg = 0;
+
+		/* Select highest supported speed/duplex */
+		if (ep->phy_mii.def->features & SUPPORTED_1000baseT_Full) {
+			ep->phy_mii.speed = SPEED_1000;
+			ep->phy_mii.duplex = DUPLEX_FULL;
+		} else if (ep->phy_mii.def->features &
+			   SUPPORTED_1000baseT_Half) {
+			ep->phy_mii.speed = SPEED_1000;
+			ep->phy_mii.duplex = DUPLEX_HALF;
+		} else if (ep->phy_mii.def->features &
+			   SUPPORTED_100baseT_Full) {
+			ep->phy_mii.speed = SPEED_100;
+			ep->phy_mii.duplex = DUPLEX_FULL;
+		} else if (ep->phy_mii.def->features &
+			   SUPPORTED_100baseT_Half) {
+			ep->phy_mii.speed = SPEED_100;
+			ep->phy_mii.duplex = DUPLEX_HALF;
+		} else if (ep->phy_mii.def->features &
+			   SUPPORTED_10baseT_Full) {
+			ep->phy_mii.speed = SPEED_10;
+			ep->phy_mii.duplex = DUPLEX_FULL;
+		} else {
+			ep->phy_mii.speed = SPEED_10;
+			ep->phy_mii.duplex = DUPLEX_HALF;
+		}
+	}
 	emac_start_link(ep, NULL);
 
 	/* read the MAC Address */
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index e64df4d0800b..83334db2921c 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -584,7 +584,7 @@ static inline int ns83820_add_rx_skb(struct ns83820 *dev, struct sk_buff *skb)
 	return 0;
 }
 
-static inline int rx_refill(struct net_device *ndev, int gfp)
+static inline int rx_refill(struct net_device *ndev, unsigned int __nocast gfp)
 {
 	struct ns83820 *dev = PRIV(ndev);
 	unsigned i;
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index d652e1eddb45..c7cca842e5ee 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -1832,7 +1832,7 @@ static void fill_multicast_tbl(int count, struct dev_mc_list *addrs,
 {
 	struct dev_mc_list *mc_addr;
 
-	for (mc_addr = addrs; mc_addr && --count > 0; mc_addr = mc_addr->next) {
+	for (mc_addr = addrs; mc_addr && count-- > 0; mc_addr = mc_addr->next) {
 		u_int position = ether_crc(6, mc_addr->dmi_addr);
 #ifndef final_version		/* Verify multicast address. */
 		if ((mc_addr->dmi_addr[0] & 1) == 0)
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index fd398da4993b..c2e6484ef138 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -2837,21 +2837,29 @@ static void skge_netpoll(struct net_device *dev)
 static int skge_set_mac_address(struct net_device *dev, void *p)
 {
 	struct skge_port *skge = netdev_priv(dev);
-	struct sockaddr *addr = p;
-	int err = 0;
+	struct skge_hw *hw = skge->hw;
+	unsigned port = skge->port;
+	const struct sockaddr *addr = p;
 
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 
-	skge_down(dev);
+	spin_lock_bh(&hw->phy_lock);
 	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
-	memcpy_toio(skge->hw->regs + B2_MAC_1 + skge->port*8,
+	memcpy_toio(hw->regs + B2_MAC_1 + port*8,
 		    dev->dev_addr, ETH_ALEN);
-	memcpy_toio(skge->hw->regs + B2_MAC_2 + skge->port*8,
+	memcpy_toio(hw->regs + B2_MAC_2 + port*8,
 		    dev->dev_addr, ETH_ALEN);
-	if (dev->flags & IFF_UP)
-		err = skge_up(dev);
-	return err;
+
+	if (hw->chip_id == CHIP_ID_GENESIS)
+		xm_outaddr(hw, port, XM_SA, dev->dev_addr);
+	else {
+		gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
+		gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
+	}
+	spin_unlock_bh(&hw->phy_lock);
+
+	return 0;
 }
 
 static const struct {
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 88b89dc95c77..efdb179ecc8c 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -133,14 +133,18 @@
 	- finally added firmware (GPL'ed by Adaptec)
 	- removed compatibility code for 2.2.x
 
+	LK1.4.2.1 (Ion Badulescu)
+	- fixed 32/64 bit issues on i386 + CONFIG_HIGHMEM
+	- added 32-bit padding to outgoing skb's, removed previous workaround
+
 TODO:	- fix forced speed/duplexing code (broken a long time ago, when
 	somebody converted the driver to use the generic MII code)
 	- fix VLAN support
 */
 
 #define DRV_NAME	"starfire"
-#define DRV_VERSION	"1.03+LK1.4.2"
-#define DRV_RELDATE	"January 19, 2005"
+#define DRV_VERSION	"1.03+LK1.4.2.1"
+#define DRV_RELDATE	"October 3, 2005"
 
 #include <linux/config.h>
 #include <linux/version.h>
@@ -165,6 +169,14 @@ TODO: - fix forced speed/duplexing code (broken a long time ago, when
  * of length 1. If and when this is fixed, the #define below can be removed.
  */
 #define HAS_BROKEN_FIRMWARE
+
+/*
+ * If using the broken firmware, data must be padded to the next 32-bit boundary.
+ */
+#ifdef HAS_BROKEN_FIRMWARE
+#define PADDING_MASK 3
+#endif
+
 /*
  * Define this if using the driver with the zero-copy patch
  */
@@ -257,9 +269,10 @@ static int full_duplex[MAX_UNITS] = {0, };
  * This SUCKS.
  * We need a much better method to determine if dma_addr_t is 64-bit.
  */
-#if (defined(__i386__) && defined(CONFIG_HIGHMEM) && (LINUX_VERSION_CODE > 0x20500 || defined(CONFIG_HIGHMEM64G))) || defined(__x86_64__) || defined (__ia64__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR))
+#if (defined(__i386__) && defined(CONFIG_HIGHMEM64G)) || defined(__x86_64__) || defined (__ia64__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR))
 /* 64-bit dma_addr_t */
 #define ADDR_64BITS /* This chip uses 64 bit addresses. */
+#define netdrv_addr_t u64
 #define cpu_to_dma(x) cpu_to_le64(x)
 #define dma_to_cpu(x) le64_to_cpu(x)
 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
@@ -268,6 +281,7 @@ static int full_duplex[MAX_UNITS] = {0, };
 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
 #else  /* 32-bit dma_addr_t */
+#define netdrv_addr_t u32
 #define cpu_to_dma(x) cpu_to_le32(x)
 #define dma_to_cpu(x) le32_to_cpu(x)
 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
@@ -1333,21 +1347,10 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
 	}
 
 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
-	{
-		int has_bad_length = 0;
-
-		if (skb_first_frag_len(skb) == 1)
-			has_bad_length = 1;
-		else {
-			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-				if (skb_shinfo(skb)->frags[i].size == 1) {
-					has_bad_length = 1;
-					break;
-				}
-		}
-
-		if (has_bad_length)
-			skb_checksum_help(skb, 0);
+	if (skb->ip_summed == CHECKSUM_HW) {
+		skb = skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK);
+		if (skb == NULL)
+			return NETDEV_TX_OK;
 	}
 #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
 
@@ -2127,13 +2130,12 @@ static int __init starfire_init (void)
 #endif
 #endif
 
-#ifndef ADDR_64BITS
 	/* we can do this test only at run-time... sigh */
-	if (sizeof(dma_addr_t) == sizeof(u64)) {
-		printk("This driver has not been ported to this 64-bit architecture yet\n");
+	if (sizeof(dma_addr_t) != sizeof(netdrv_addr_t)) {
+		printk("This driver has dma_addr_t issues, please send email to maintainer\n");
 		return -ENODEV;
 	}
-#endif /* not ADDR_64BITS */
+
 	return pci_module_init (&starfire_driver);
 }
 
diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h
index ff8ae5f79970..16edbb1a4a7a 100644
--- a/drivers/net/sungem.h
+++ b/drivers/net/sungem.h
@@ -1035,7 +1035,8 @@ struct gem {
 
 #define ALIGNED_RX_SKB_ADDR(addr) \
         ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))
-static __inline__ struct sk_buff *gem_alloc_skb(int size, int gfp_flags)
+static __inline__ struct sk_buff *gem_alloc_skb(int size,
+						unsigned int __nocast gfp_flags)
 {
 	struct sk_buff *skb = alloc_skb(size + 64, gfp_flags);
 
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index e7b001017b9a..32057e65808b 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -531,7 +531,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
 		if (!time_after(jiffies, timeout)) continue;
 		DPRINTK( "Hardware timeout during initialization.\n");
 		iounmap(t_mmio);
-		kfree(ti);
 		return -ENODEV;
 	}
 	ti->sram_phys =
@@ -645,7 +644,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
 		DPRINTK("Unknown shared ram paging info %01X\n",
 			ti->shared_ram_paging);
 		iounmap(t_mmio);
-		kfree(ti);
 		return -ENODEV;
 		break;
 	} /*end switch shared_ram_paging */
@@ -675,7 +673,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
675 "driver limit (%05x), adapter not started.\n", 673 "driver limit (%05x), adapter not started.\n",
676 chk_base, ibmtr_mem_base + IBMTR_SHARED_RAM_SIZE); 674 chk_base, ibmtr_mem_base + IBMTR_SHARED_RAM_SIZE);
677 iounmap(t_mmio); 675 iounmap(t_mmio);
678 kfree(ti);
679 return -ENODEV; 676 return -ENODEV;
680 } else { /* seems cool, record what we have figured out */ 677 } else { /* seems cool, record what we have figured out */
681 ti->sram_base = new_base >> 12; 678 ti->sram_base = new_base >> 12;
@@ -690,7 +687,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
 		DPRINTK("Could not grab irq %d. Halting Token Ring driver.\n",
 			irq);
 		iounmap(t_mmio);
-		kfree(ti);
 		return -ENODEV;
 	}
 	/*?? Now, allocate some of the PIO PORTs for this driver.. */
@@ -699,7 +695,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
 		DPRINTK("Could not grab PIO range. Halting driver.\n");
 		free_irq(dev->irq, dev);
 		iounmap(t_mmio);
-		kfree(ti);
 		return -EBUSY;
 	}
 
diff --git a/drivers/net/tulip/21142.c b/drivers/net/tulip/21142.c
index 5db694c4eb02..683f14b01c06 100644
--- a/drivers/net/tulip/21142.c
+++ b/drivers/net/tulip/21142.c
@@ -172,7 +172,7 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
 		int i;
 		for (i = 0; i < tp->mtable->leafcount; i++)
 			if (tp->mtable->mleaf[i].media == dev->if_port) {
-				int startup = ! ((tp->chip_id == DC21143 && tp->revision == 65));
+				int startup = ! ((tp->chip_id == DC21143 && (tp->revision == 48 || tp->revision == 65)));
 				tp->cur_index = i;
 				tulip_select_media(dev, startup);
 				setup_done = 1;
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index 6deb7cc810cc..cf3daaa1b369 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -503,9 +503,14 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
 		return 0;
 	}
 
-	/* Length of the packet body */
-	/* FIXME: what if the skb is smaller than this? */
-	len = max_t(int,skb->len - ETH_HLEN, ETH_ZLEN - ETH_HLEN);
+	/* Check packet length, pad short packets, round up odd length */
+	len = max_t(int, ALIGN(skb->len, 2), ETH_ZLEN);
+	if (skb->len < len) {
+		skb = skb_padto(skb, len);
+		if (skb == NULL)
+			goto fail;
+	}
+	len -= ETH_HLEN;
 
 	eh = (struct ethhdr *)skb->data;
 
@@ -557,8 +562,7 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
 		p = skb->data;
 	}
 
-	/* Round up for odd length packets */
-	err = hermes_bap_pwrite(hw, USER_BAP, p, ALIGN(data_len, 2),
+	err = hermes_bap_pwrite(hw, USER_BAP, p, data_len,
 				txfid, data_off);
 	if (err) {
 		printk(KERN_ERR "%s: Error %d writing packet to BAP\n",
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h
index 2ad4797ce024..9963479ba89f 100644
--- a/drivers/s390/net/qeth.h
+++ b/drivers/s390/net/qeth.h
@@ -686,6 +686,7 @@ struct qeth_seqno {
 	__u32 pdu_hdr;
 	__u32 pdu_hdr_ack;
 	__u16 ipa;
+	__u32 pkt_seqno;
 };
 
 struct qeth_reply {
@@ -848,6 +849,7 @@ qeth_realloc_headroom(struct qeth_card *card, struct sk_buff **skb, int size)
848 "on interface %s", QETH_CARD_IFNAME(card)); 849 "on interface %s", QETH_CARD_IFNAME(card));
849 return -ENOMEM; 850 return -ENOMEM;
850 } 851 }
852 kfree_skb(*skb);
851 *skb = new_skb; 853 *skb = new_skb;
852 } 854 }
853 return 0; 855 return 0;
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 71de834ece1a..bd28e2438d7f 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -511,7 +511,7 @@ static int
 __qeth_set_offline(struct ccwgroup_device *cgdev, int recovery_mode)
 {
 	struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
-	int rc = 0;
+	int rc = 0, rc2 = 0, rc3 = 0;
 	enum qeth_card_states recover_flag;
 
 	QETH_DBF_TEXT(setup, 3, "setoffl");
@@ -523,11 +523,13 @@ __qeth_set_offline(struct ccwgroup_device *cgdev, int recovery_mode)
 			CARD_BUS_ID(card));
 		return -ERESTARTSYS;
 	}
-	if ((rc = ccw_device_set_offline(CARD_DDEV(card))) ||
-	    (rc = ccw_device_set_offline(CARD_WDEV(card))) ||
-	    (rc = ccw_device_set_offline(CARD_RDEV(card)))) {
+	rc  = ccw_device_set_offline(CARD_DDEV(card));
+	rc2 = ccw_device_set_offline(CARD_WDEV(card));
+	rc3 = ccw_device_set_offline(CARD_RDEV(card));
+	if (!rc)
+		rc = (rc2) ? rc2 : rc3;
+	if (rc)
 		QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
-	}
 	if (recover_flag == CARD_STATE_UP)
 		card->state = CARD_STATE_RECOVER;
 	qeth_notify_processes();
@@ -1046,6 +1048,7 @@ qeth_setup_card(struct qeth_card *card)
 	spin_lock_init(&card->vlanlock);
 	card->vlangrp = NULL;
 #endif
+	spin_lock_init(&card->lock);
 	spin_lock_init(&card->ip_lock);
 	spin_lock_init(&card->thread_mask_lock);
 	card->thread_start_mask = 0;
@@ -1626,16 +1629,6 @@ qeth_cmd_timeout(unsigned long data)
 	spin_unlock_irqrestore(&reply->card->lock, flags);
 }
 
-static void
-qeth_reset_ip_addresses(struct qeth_card *card)
-{
-	QETH_DBF_TEXT(trace, 2, "rstipadd");
-
-	qeth_clear_ip_list(card, 0, 1);
-	/* this function will also schedule the SET_IP_THREAD */
-	qeth_set_multicast_list(card->dev);
-}
-
 static struct qeth_ipa_cmd *
 qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
 {
@@ -1664,9 +1657,8 @@ qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
1664 "IP address reset.\n", 1657 "IP address reset.\n",
1665 QETH_CARD_IFNAME(card), 1658 QETH_CARD_IFNAME(card),
1666 card->info.chpid); 1659 card->info.chpid);
1667 card->lan_online = 1;
1668 netif_carrier_on(card->dev); 1660 netif_carrier_on(card->dev);
1669 qeth_reset_ip_addresses(card); 1661 qeth_schedule_recovery(card);
1670 return NULL; 1662 return NULL;
1671 case IPA_CMD_REGISTER_LOCAL_ADDR: 1663 case IPA_CMD_REGISTER_LOCAL_ADDR:
1672 QETH_DBF_TEXT(trace,3, "irla"); 1664 QETH_DBF_TEXT(trace,3, "irla");
@@ -2387,6 +2379,7 @@ qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
 		skb_pull(skb, VLAN_HLEN);
 	}
 #endif
+	*((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
 	return vlan_id;
 }
 
@@ -3014,7 +3007,7 @@ qeth_alloc_buffer_pool(struct qeth_card *card)
 		return -ENOMEM;
 	}
 	for(j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j){
-		ptr = (void *) __get_free_page(GFP_KERNEL);
+		ptr = (void *) __get_free_page(GFP_KERNEL|GFP_DMA);
 		if (!ptr) {
 			while (j > 0)
 				free_page((unsigned long)
@@ -3058,7 +3051,8 @@ qeth_alloc_qdio_buffers(struct qeth_card *card)
 	if (card->qdio.state == QETH_QDIO_ALLOCATED)
 		return 0;
 
-	card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q), GFP_KERNEL);
+	card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q),
+				  GFP_KERNEL|GFP_DMA);
 	if (!card->qdio.in_q)
 		return - ENOMEM;
 	QETH_DBF_TEXT(setup, 2, "inq");
@@ -3083,7 +3077,7 @@ qeth_alloc_qdio_buffers(struct qeth_card *card)
 	}
 	for (i = 0; i < card->qdio.no_out_queues; ++i){
 		card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q),
-					       GFP_KERNEL);
+					       GFP_KERNEL|GFP_DMA);
 		if (!card->qdio.out_qs[i]){
 			while (i > 0)
 				kfree(card->qdio.out_qs[--i]);
@@ -6470,6 +6464,9 @@ qeth_query_ipassists_cb(struct qeth_card *card, struct qeth_reply *reply,
 	if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
 		card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
 		card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
+		/* Disable IPV6 support hard coded for Hipersockets */
+		if(card->info.type == QETH_CARD_TYPE_IQD)
+			card->options.ipa4.supported_funcs &= ~IPA_IPV6;
 	} else {
 #ifdef CONFIG_QETH_IPV6
 		card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index c2c8fa828e24..5ec866b00479 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -672,17 +672,36 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
 
 	for (i = 0; i < host_set->n_ports; i++) {
 		struct ata_port *ap;
-		u32 tmp;
 
-		VPRINTK("port %u\n", i);
+		if (!(irq_stat & (1 << i)))
+			continue;
+
 		ap = host_set->ports[i];
-		tmp = irq_stat & (1 << i);
-		if (tmp && ap) {
+		if (ap) {
 			struct ata_queued_cmd *qc;
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (ahci_host_intr(ap, qc))
-				irq_ack |= (1 << i);
+			if (!ahci_host_intr(ap, qc))
+				if (ata_ratelimit()) {
+					struct pci_dev *pdev =
+						to_pci_dev(ap->host_set->dev);
+					printk(KERN_WARNING
+					       "ahci(%s): unhandled interrupt on port %u\n",
+					       pci_name(pdev), i);
+				}
+
+			VPRINTK("port %u\n", i);
+		} else {
+			VPRINTK("port %u (no irq)\n", i);
+			if (ata_ratelimit()) {
+				struct pci_dev *pdev =
+					to_pci_dev(ap->host_set->dev);
+				printk(KERN_WARNING
+				       "ahci(%s): interrupt on disabled port %u\n",
+				       pci_name(pdev), i);
+			}
 		}
+
+		irq_ack |= (1 << i);
 	}
 
 	if (irq_ack) {
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index e5b01997117a..d568914c4344 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -48,6 +48,7 @@
 #include <linux/completion.h>
 #include <linux/suspend.h>
 #include <linux/workqueue.h>
+#include <linux/jiffies.h>
 #include <scsi/scsi.h>
 #include "scsi.h"
 #include "scsi_priv.h"
@@ -62,6 +63,7 @@
 static unsigned int ata_busy_sleep (struct ata_port *ap,
 				    unsigned long tmout_pat,
 				    unsigned long tmout);
+static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev);
 static void ata_set_mode(struct ata_port *ap);
 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
 static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift);
@@ -69,7 +71,6 @@ static int fgb(u32 bitmap);
 static int ata_choose_xfer_mode(struct ata_port *ap,
 				u8 *xfer_mode_out,
 				unsigned int *xfer_shift_out);
-static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat);
 static void __ata_qc_complete(struct ata_queued_cmd *qc);
 
 static unsigned int ata_unique_id = 1;
@@ -1131,7 +1132,7 @@ static inline void ata_dump_id(struct ata_device *dev)
 static void ata_dev_identify(struct ata_port *ap, unsigned int device)
 {
 	struct ata_device *dev = &ap->device[device];
-	unsigned int i;
+	unsigned int major_version;
 	u16 tmp;
 	unsigned long xfer_modes;
 	u8 status;
@@ -1229,9 +1230,9 @@ retry:
 	 * common ATA, ATAPI feature tests
 	 */
 
-	/* we require LBA and DMA support (bits 8 & 9 of word 49) */
-	if (!ata_id_has_dma(dev->id) || !ata_id_has_lba(dev->id)) {
-		printk(KERN_DEBUG "ata%u: no dma/lba\n", ap->id);
+	/* we require DMA support (bits 8 of word 49) */
+	if (!ata_id_has_dma(dev->id)) {
+		printk(KERN_DEBUG "ata%u: no dma\n", ap->id);
 		goto err_out_nosup;
 	}
 
@@ -1251,32 +1252,69 @@ retry:
1251 if (!ata_id_is_ata(dev->id)) /* sanity check */ 1252 if (!ata_id_is_ata(dev->id)) /* sanity check */
1252 goto err_out_nosup; 1253 goto err_out_nosup;
1253 1254
1255 /* get major version */
1254 tmp = dev->id[ATA_ID_MAJOR_VER]; 1256 tmp = dev->id[ATA_ID_MAJOR_VER];
1255 for (i = 14; i >= 1; i--) 1257 for (major_version = 14; major_version >= 1; major_version--)
1256 if (tmp & (1 << i)) 1258 if (tmp & (1 << major_version))
1257 break; 1259 break;
1258 1260
1259 /* we require at least ATA-3 */ 1261 /*
1260 if (i < 3) { 1262 * The exact sequence expected by certain pre-ATA4 drives is:
1261 printk(KERN_DEBUG "ata%u: no ATA-3\n", ap->id); 1263 * SRST RESET
1262 goto err_out_nosup; 1264 * IDENTIFY
1263 } 1265 * INITIALIZE DEVICE PARAMETERS
1266 * anything else..
1267 * Some drives were very specific about that exact sequence.
1268 */
1269 if (major_version < 4 || (!ata_id_has_lba(dev->id)))
1270 ata_dev_init_params(ap, dev);
1271
1272 if (ata_id_has_lba(dev->id)) {
1273 dev->flags |= ATA_DFLAG_LBA;
1274
1275 if (ata_id_has_lba48(dev->id)) {
1276 dev->flags |= ATA_DFLAG_LBA48;
1277 dev->n_sectors = ata_id_u64(dev->id, 100);
1278 } else {
1279 dev->n_sectors = ata_id_u32(dev->id, 60);
1280 }
1281
1282 /* print device info to dmesg */
1283 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n",
1284 ap->id, device,
1285 major_version,
1286 ata_mode_string(xfer_modes),
1287 (unsigned long long)dev->n_sectors,
1288 dev->flags & ATA_DFLAG_LBA48 ? " LBA48" : " LBA");
1289 } else {
1290 /* CHS */
1291
1292 /* Default translation */
1293 dev->cylinders = dev->id[1];
1294 dev->heads = dev->id[3];
1295 dev->sectors = dev->id[6];
1296 dev->n_sectors = dev->cylinders * dev->heads * dev->sectors;
1297
1298 if (ata_id_current_chs_valid(dev->id)) {
1299 /* Current CHS translation is valid. */
1300 dev->cylinders = dev->id[54];
1301 dev->heads = dev->id[55];
1302 dev->sectors = dev->id[56];
1303
1304 dev->n_sectors = ata_id_u32(dev->id, 57);
1305 }
1306
1307 /* print device info to dmesg */
1308 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors: CHS %d/%d/%d\n",
1309 ap->id, device,
1310 major_version,
1311 ata_mode_string(xfer_modes),
1312 (unsigned long long)dev->n_sectors,
1313 (int)dev->cylinders, (int)dev->heads, (int)dev->sectors);
1264 1314
1265 if (ata_id_has_lba48(dev->id)) {
1266 dev->flags |= ATA_DFLAG_LBA48;
1267 dev->n_sectors = ata_id_u64(dev->id, 100);
1268 } else {
1269 dev->n_sectors = ata_id_u32(dev->id, 60);
1270 } 1315 }
1271 1316
1272 ap->host->max_cmd_len = 16; 1317 ap->host->max_cmd_len = 16;
1273
1274 /* print device info to dmesg */
1275 printk(KERN_INFO "ata%u: dev %u ATA, max %s, %Lu sectors:%s\n",
1276 ap->id, device,
1277 ata_mode_string(xfer_modes),
1278 (unsigned long long)dev->n_sectors,
1279 dev->flags & ATA_DFLAG_LBA48 ? " lba48" : "");
1280 } 1318 }
1281 1319
1282 /* ATAPI-specific feature tests */ 1320 /* ATAPI-specific feature tests */
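[Editor's note] The identify path above now derives capacity three ways: the LBA48 total (words 100-103), the LBA28 total (words 60-61), or the product of the default CHS geometry words. A minimal sketch of the same arithmetic, assuming a hypothetical raw id[] buffer in place of the ata_id_u32()/ata_id_u64() helpers the driver actually uses (words are little-endian, low word first):

	/* Illustration only: capacity from IDENTIFY DEVICE data. */
	static u64 id_capacity(const u16 *id, int has_lba, int has_lba48)
	{
		if (has_lba48)	/* words 100-103 */
			return (u64)id[100] | ((u64)id[101] << 16) |
			       ((u64)id[102] << 32) | ((u64)id[103] << 48);
		if (has_lba)	/* words 60-61 */
			return (u32)id[60] | ((u32)id[61] << 16);
		return (u64)id[1] * id[3] * id[6];	/* default CHS */
	}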
@@ -2144,6 +2182,54 @@ static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
2144} 2182}
2145 2183
2146/** 2184/**
2185 * ata_dev_init_params - Issue INIT DEV PARAMS command
2186 * @ap: Port associated with device @dev
2187 * @dev: Device to which command will be sent
2188 *
2189 * LOCKING: Kernel thread context (may sleep).
2190 */
2191
2192static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev)
2193{
2194 DECLARE_COMPLETION(wait);
2195 struct ata_queued_cmd *qc;
2196 int rc;
2197 unsigned long flags;
2198 u16 sectors = dev->id[6];
2199 u16 heads = dev->id[3];
2200
2201 /* Number of sectors per track 1-255. Number of heads 1-16 */
2202 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2203 return;
2204
2205 /* set up init dev params taskfile */
2206 DPRINTK("init dev params\n");
2207
2208 qc = ata_qc_new_init(ap, dev);
2209 BUG_ON(qc == NULL);
2210
2211 qc->tf.command = ATA_CMD_INIT_DEV_PARAMS;
2212 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2213 qc->tf.protocol = ATA_PROT_NODATA;
2214 qc->tf.nsect = sectors;
2215 qc->tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2216
2217 qc->waiting = &wait;
2218 qc->complete_fn = ata_qc_complete_noop;
2219
2220 spin_lock_irqsave(&ap->host_set->lock, flags);
2221 rc = ata_qc_issue(qc);
2222 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2223
2224 if (rc)
2225 ata_port_disable(ap);
2226 else
2227 wait_for_completion(&wait);
2228
2229 DPRINTK("EXIT\n");
2230}
2231
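[Editor's note] Worked register values for the taskfile built above (illustrative geometry, not taken from the patch): a common 16-head, 63-sectors-per-track translation from IDENTIFY words 3 and 6 encodes as

	/* Illustration only: INIT DEVICE PARAMETERS register contents. */
	u16 sectors = 63;			/* dev->id[6] */
	u16 heads   = 16;			/* dev->id[3] */

	qc->tf.nsect   = sectors;		/* sector count = 0x3f */
	qc->tf.device |= (heads - 1) & 0x0f;	/* max head     = 0x0f */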
2232/**
2147 * ata_sg_clean - Unmap DMA memory associated with command 2233 * ata_sg_clean - Unmap DMA memory associated with command
2148 * @qc: Command containing DMA memory to be released 2234 * @qc: Command containing DMA memory to be released
2149 * 2235 *
@@ -2425,20 +2511,20 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
2425static unsigned long ata_pio_poll(struct ata_port *ap) 2511static unsigned long ata_pio_poll(struct ata_port *ap)
2426{ 2512{
2427 u8 status; 2513 u8 status;
2428 unsigned int poll_state = PIO_ST_UNKNOWN; 2514 unsigned int poll_state = HSM_ST_UNKNOWN;
2429 unsigned int reg_state = PIO_ST_UNKNOWN; 2515 unsigned int reg_state = HSM_ST_UNKNOWN;
2430 const unsigned int tmout_state = PIO_ST_TMOUT; 2516 const unsigned int tmout_state = HSM_ST_TMOUT;
2431 2517
2432 switch (ap->pio_task_state) { 2518 switch (ap->hsm_task_state) {
2433 case PIO_ST: 2519 case HSM_ST:
2434 case PIO_ST_POLL: 2520 case HSM_ST_POLL:
2435 poll_state = PIO_ST_POLL; 2521 poll_state = HSM_ST_POLL;
2436 reg_state = PIO_ST; 2522 reg_state = HSM_ST;
2437 break; 2523 break;
2438 case PIO_ST_LAST: 2524 case HSM_ST_LAST:
2439 case PIO_ST_LAST_POLL: 2525 case HSM_ST_LAST_POLL:
2440 poll_state = PIO_ST_LAST_POLL; 2526 poll_state = HSM_ST_LAST_POLL;
2441 reg_state = PIO_ST_LAST; 2527 reg_state = HSM_ST_LAST;
2442 break; 2528 break;
2443 default: 2529 default:
2444 BUG(); 2530 BUG();
@@ -2448,14 +2534,14 @@ static unsigned long ata_pio_poll(struct ata_port *ap)
2448 status = ata_chk_status(ap); 2534 status = ata_chk_status(ap);
2449 if (status & ATA_BUSY) { 2535 if (status & ATA_BUSY) {
2450 if (time_after(jiffies, ap->pio_task_timeout)) { 2536 if (time_after(jiffies, ap->pio_task_timeout)) {
2451 ap->pio_task_state = tmout_state; 2537 ap->hsm_task_state = tmout_state;
2452 return 0; 2538 return 0;
2453 } 2539 }
2454 ap->pio_task_state = poll_state; 2540 ap->hsm_task_state = poll_state;
2455 return ATA_SHORT_PAUSE; 2541 return ATA_SHORT_PAUSE;
2456 } 2542 }
2457 2543
2458 ap->pio_task_state = reg_state; 2544 ap->hsm_task_state = reg_state;
2459 return 0; 2545 return 0;
2460} 2546}
2461 2547
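[Editor's note] The PIO_ST_* to HSM_ST_* renaming throughout these hunks reflects the move toward a generic host state machine. A sketch of the state set as inferred from this patch alone; the real enum lives in the libata headers and may differ:

	enum hsm_task_states {
		HSM_ST_UNKNOWN,		/* state unknown */
		HSM_ST_IDLE,		/* no outstanding command */
		HSM_ST,			/* (waiting for) data transfer */
		HSM_ST_LAST,		/* command done, check status */
		HSM_ST_POLL,		/* HSM_ST with timeout polling */
		HSM_ST_LAST_POLL,	/* HSM_ST_LAST with timeout polling */
		HSM_ST_TMOUT,		/* timeout */
		HSM_ST_ERR,		/* error */
	};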
@@ -2480,14 +2566,14 @@ static int ata_pio_complete (struct ata_port *ap)
2480 * we enter, BSY will be cleared in a chk-status or two. If not, 2566 * we enter, BSY will be cleared in a chk-status or two. If not,
2481 * the drive is probably seeking or something. Snooze for a couple 2567 * the drive is probably seeking or something. Snooze for a couple
2482 * msecs, then chk-status again. If still busy, fall back to 2568 * msecs, then chk-status again. If still busy, fall back to
2483 * PIO_ST_POLL state. 2569 * HSM_ST_POLL state.
2484 */ 2570 */
2485 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); 2571 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2486 if (drv_stat & (ATA_BUSY | ATA_DRQ)) { 2572 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2487 msleep(2); 2573 msleep(2);
2488 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); 2574 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2489 if (drv_stat & (ATA_BUSY | ATA_DRQ)) { 2575 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2490 ap->pio_task_state = PIO_ST_LAST_POLL; 2576 ap->hsm_task_state = HSM_ST_LAST_POLL;
2491 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO; 2577 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2492 return 0; 2578 return 0;
2493 } 2579 }
@@ -2495,14 +2581,14 @@ static int ata_pio_complete (struct ata_port *ap)
2495 2581
2496 drv_stat = ata_wait_idle(ap); 2582 drv_stat = ata_wait_idle(ap);
2497 if (!ata_ok(drv_stat)) { 2583 if (!ata_ok(drv_stat)) {
2498 ap->pio_task_state = PIO_ST_ERR; 2584 ap->hsm_task_state = HSM_ST_ERR;
2499 return 0; 2585 return 0;
2500 } 2586 }
2501 2587
2502 qc = ata_qc_from_tag(ap, ap->active_tag); 2588 qc = ata_qc_from_tag(ap, ap->active_tag);
2503 assert(qc != NULL); 2589 assert(qc != NULL);
2504 2590
2505 ap->pio_task_state = PIO_ST_IDLE; 2591 ap->hsm_task_state = HSM_ST_IDLE;
2506 2592
2507 ata_poll_qc_complete(qc, drv_stat); 2593 ata_poll_qc_complete(qc, drv_stat);
2508 2594
@@ -2662,7 +2748,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
2662 unsigned char *buf; 2748 unsigned char *buf;
2663 2749
2664 if (qc->cursect == (qc->nsect - 1)) 2750 if (qc->cursect == (qc->nsect - 1))
2665 ap->pio_task_state = PIO_ST_LAST; 2751 ap->hsm_task_state = HSM_ST_LAST;
2666 2752
2667 page = sg[qc->cursg].page; 2753 page = sg[qc->cursg].page;
2668 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE; 2754 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
@@ -2712,7 +2798,7 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
2712 unsigned int offset, count; 2798 unsigned int offset, count;
2713 2799
2714 if (qc->curbytes + bytes >= qc->nbytes) 2800 if (qc->curbytes + bytes >= qc->nbytes)
2715 ap->pio_task_state = PIO_ST_LAST; 2801 ap->hsm_task_state = HSM_ST_LAST;
2716 2802
2717next_sg: 2803next_sg:
2718 if (unlikely(qc->cursg >= qc->n_elem)) { 2804 if (unlikely(qc->cursg >= qc->n_elem)) {
@@ -2734,7 +2820,7 @@ next_sg:
2734 for (i = 0; i < words; i++) 2820 for (i = 0; i < words; i++)
2735 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write); 2821 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
2736 2822
2737 ap->pio_task_state = PIO_ST_LAST; 2823 ap->hsm_task_state = HSM_ST_LAST;
2738 return; 2824 return;
2739 } 2825 }
2740 2826
@@ -2815,7 +2901,7 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
2815err_out: 2901err_out:
2816 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n", 2902 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
2817 ap->id, dev->devno); 2903 ap->id, dev->devno);
2818 ap->pio_task_state = PIO_ST_ERR; 2904 ap->hsm_task_state = HSM_ST_ERR;
2819} 2905}
2820 2906
2821/** 2907/**
@@ -2837,14 +2923,14 @@ static void ata_pio_block(struct ata_port *ap)
2837 * a chk-status or two. If not, the drive is probably seeking 2923 * a chk-status or two. If not, the drive is probably seeking
2838 * or something. Snooze for a couple msecs, then 2924 * or something. Snooze for a couple msecs, then
2839 * chk-status again. If still busy, fall back to 2925 * chk-status again. If still busy, fall back to
2840 * PIO_ST_POLL state. 2926 * HSM_ST_POLL state.
2841 */ 2927 */
2842 status = ata_busy_wait(ap, ATA_BUSY, 5); 2928 status = ata_busy_wait(ap, ATA_BUSY, 5);
2843 if (status & ATA_BUSY) { 2929 if (status & ATA_BUSY) {
2844 msleep(2); 2930 msleep(2);
2845 status = ata_busy_wait(ap, ATA_BUSY, 10); 2931 status = ata_busy_wait(ap, ATA_BUSY, 10);
2846 if (status & ATA_BUSY) { 2932 if (status & ATA_BUSY) {
2847 ap->pio_task_state = PIO_ST_POLL; 2933 ap->hsm_task_state = HSM_ST_POLL;
2848 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO; 2934 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2849 return; 2935 return;
2850 } 2936 }
@@ -2856,7 +2942,7 @@ static void ata_pio_block(struct ata_port *ap)
2856 if (is_atapi_taskfile(&qc->tf)) { 2942 if (is_atapi_taskfile(&qc->tf)) {
2857 /* no more data to transfer or unsupported ATAPI command */ 2943 /* no more data to transfer or unsupported ATAPI command */
2858 if ((status & ATA_DRQ) == 0) { 2944 if ((status & ATA_DRQ) == 0) {
2859 ap->pio_task_state = PIO_ST_LAST; 2945 ap->hsm_task_state = HSM_ST_LAST;
2860 return; 2946 return;
2861 } 2947 }
2862 2948
@@ -2864,7 +2950,7 @@ static void ata_pio_block(struct ata_port *ap)
2864 } else { 2950 } else {
2865 /* handle BSY=0, DRQ=0 as error */ 2951 /* handle BSY=0, DRQ=0 as error */
2866 if ((status & ATA_DRQ) == 0) { 2952 if ((status & ATA_DRQ) == 0) {
2867 ap->pio_task_state = PIO_ST_ERR; 2953 ap->hsm_task_state = HSM_ST_ERR;
2868 return; 2954 return;
2869 } 2955 }
2870 2956
@@ -2884,7 +2970,7 @@ static void ata_pio_error(struct ata_port *ap)
2884 printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n", 2970 printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n",
2885 ap->id, drv_stat); 2971 ap->id, drv_stat);
2886 2972
2887 ap->pio_task_state = PIO_ST_IDLE; 2973 ap->hsm_task_state = HSM_ST_IDLE;
2888 2974
2889 ata_poll_qc_complete(qc, drv_stat | ATA_ERR); 2975 ata_poll_qc_complete(qc, drv_stat | ATA_ERR);
2890} 2976}
@@ -2899,25 +2985,25 @@ fsm_start:
2899 timeout = 0; 2985 timeout = 0;
2900 qc_completed = 0; 2986 qc_completed = 0;
2901 2987
2902 switch (ap->pio_task_state) { 2988 switch (ap->hsm_task_state) {
2903 case PIO_ST_IDLE: 2989 case HSM_ST_IDLE:
2904 return; 2990 return;
2905 2991
2906 case PIO_ST: 2992 case HSM_ST:
2907 ata_pio_block(ap); 2993 ata_pio_block(ap);
2908 break; 2994 break;
2909 2995
2910 case PIO_ST_LAST: 2996 case HSM_ST_LAST:
2911 qc_completed = ata_pio_complete(ap); 2997 qc_completed = ata_pio_complete(ap);
2912 break; 2998 break;
2913 2999
2914 case PIO_ST_POLL: 3000 case HSM_ST_POLL:
2915 case PIO_ST_LAST_POLL: 3001 case HSM_ST_LAST_POLL:
2916 timeout = ata_pio_poll(ap); 3002 timeout = ata_pio_poll(ap);
2917 break; 3003 break;
2918 3004
2919 case PIO_ST_TMOUT: 3005 case HSM_ST_TMOUT:
2920 case PIO_ST_ERR: 3006 case HSM_ST_ERR:
2921 ata_pio_error(ap); 3007 ata_pio_error(ap);
2922 return; 3008 return;
2923 } 3009 }
@@ -2928,52 +3014,6 @@ fsm_start:
2928 goto fsm_start; 3014 goto fsm_start;
2929} 3015}
2930 3016
2931static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
2932 struct scsi_cmnd *cmd)
2933{
2934 DECLARE_COMPLETION(wait);
2935 struct ata_queued_cmd *qc;
2936 unsigned long flags;
2937 int rc;
2938
2939 DPRINTK("ATAPI request sense\n");
2940
2941 qc = ata_qc_new_init(ap, dev);
2942 BUG_ON(qc == NULL);
2943
2944 /* FIXME: is this needed? */
2945 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
2946
2947 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
2948 qc->dma_dir = DMA_FROM_DEVICE;
2949
2950 memset(&qc->cdb, 0, ap->cdb_len);
2951 qc->cdb[0] = REQUEST_SENSE;
2952 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2953
2954 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2955 qc->tf.command = ATA_CMD_PACKET;
2956
2957 qc->tf.protocol = ATA_PROT_ATAPI;
2958 qc->tf.lbam = (8 * 1024) & 0xff;
2959 qc->tf.lbah = (8 * 1024) >> 8;
2960 qc->nbytes = SCSI_SENSE_BUFFERSIZE;
2961
2962 qc->waiting = &wait;
2963 qc->complete_fn = ata_qc_complete_noop;
2964
2965 spin_lock_irqsave(&ap->host_set->lock, flags);
2966 rc = ata_qc_issue(qc);
2967 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2968
2969 if (rc)
2970 ata_port_disable(ap);
2971 else
2972 wait_for_completion(&wait);
2973
2974 DPRINTK("EXIT\n");
2975}
2976
2977/** 3017/**
2978 * ata_qc_timeout - Handle timeout of queued command 3018 * ata_qc_timeout - Handle timeout of queued command
2979 * @qc: Command that timed out 3019 * @qc: Command that timed out
@@ -3091,14 +3131,14 @@ void ata_eng_timeout(struct ata_port *ap)
3091 DPRINTK("ENTER\n"); 3131 DPRINTK("ENTER\n");
3092 3132
3093 qc = ata_qc_from_tag(ap, ap->active_tag); 3133 qc = ata_qc_from_tag(ap, ap->active_tag);
3094 if (!qc) { 3134 if (qc)
3135 ata_qc_timeout(qc);
3136 else {
3095 printk(KERN_ERR "ata%u: BUG: timeout without command\n", 3137 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
3096 ap->id); 3138 ap->id);
3097 goto out; 3139 goto out;
3098 } 3140 }
3099 3141
3100 ata_qc_timeout(qc);
3101
3102out: 3142out:
3103 DPRINTK("EXIT\n"); 3143 DPRINTK("EXIT\n");
3104} 3144}
@@ -3156,14 +3196,18 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3156 3196
3157 ata_tf_init(ap, &qc->tf, dev->devno); 3197 ata_tf_init(ap, &qc->tf, dev->devno);
3158 3198
3159 if (dev->flags & ATA_DFLAG_LBA48) 3199 if (dev->flags & ATA_DFLAG_LBA) {
3160 qc->tf.flags |= ATA_TFLAG_LBA48; 3200 qc->tf.flags |= ATA_TFLAG_LBA;
3201
3202 if (dev->flags & ATA_DFLAG_LBA48)
3203 qc->tf.flags |= ATA_TFLAG_LBA48;
3204 }
3161 } 3205 }
3162 3206
3163 return qc; 3207 return qc;
3164} 3208}
3165 3209
3166static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat) 3210int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat)
3167{ 3211{
3168 return 0; 3212 return 0;
3169} 3213}
@@ -3360,7 +3404,7 @@ int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3360 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */ 3404 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
3361 ata_qc_set_polling(qc); 3405 ata_qc_set_polling(qc);
3362 ata_tf_to_host_nolock(ap, &qc->tf); 3406 ata_tf_to_host_nolock(ap, &qc->tf);
3363 ap->pio_task_state = PIO_ST; 3407 ap->hsm_task_state = HSM_ST;
3364 queue_work(ata_wq, &ap->pio_task); 3408 queue_work(ata_wq, &ap->pio_task);
3365 break; 3409 break;
3366 3410
@@ -3586,7 +3630,7 @@ u8 ata_bmdma_status(struct ata_port *ap)
3586 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; 3630 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3587 host_stat = readb(mmio + ATA_DMA_STATUS); 3631 host_stat = readb(mmio + ATA_DMA_STATUS);
3588 } else 3632 } else
3589 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); 3633 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
3590 return host_stat; 3634 return host_stat;
3591} 3635}
3592 3636
@@ -3806,7 +3850,7 @@ static void atapi_packet_task(void *_data)
3806 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1); 3850 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
3807 3851
3808 /* PIO commands are handled by polling */ 3852 /* PIO commands are handled by polling */
3809 ap->pio_task_state = PIO_ST; 3853 ap->hsm_task_state = HSM_ST;
3810 queue_work(ata_wq, &ap->pio_task); 3854 queue_work(ata_wq, &ap->pio_task);
3811 } 3855 }
3812 3856
@@ -4113,7 +4157,7 @@ int ata_device_add(struct ata_probe_ent *ent)
4113 for (i = 0; i < count; i++) { 4157 for (i = 0; i < count; i++) {
4114 struct ata_port *ap = host_set->ports[i]; 4158 struct ata_port *ap = host_set->ports[i];
4115 4159
4116 scsi_scan_host(ap->host); 4160 ata_scsi_scan_host(ap);
4117 } 4161 }
4118 4162
4119 dev_set_drvdata(dev, host_set); 4163 dev_set_drvdata(dev, host_set);
@@ -4273,85 +4317,87 @@ void ata_pci_host_stop (struct ata_host_set *host_set)
4273 * ata_pci_init_native_mode - Initialize native-mode driver 4317 * ata_pci_init_native_mode - Initialize native-mode driver
4274 * @pdev: pci device to be initialized 4318 * @pdev: pci device to be initialized
4275 * @port: array[2] of pointers to port info structures. 4319 * @port: array[2] of pointers to port info structures.
4320 * @ports: bitmap of ports present
4276 * 4321 *
4277 * Utility function which allocates and initializes an 4322 * Utility function which allocates and initializes an
4278 * ata_probe_ent structure for a standard dual-port 4323 * ata_probe_ent structure for a standard dual-port
4279 * PIO-based IDE controller. The returned ata_probe_ent 4324 * PIO-based IDE controller. The returned ata_probe_ent
4280 * structure can be passed to ata_device_add(). The returned 4325 * structure can be passed to ata_device_add(). The returned
4281 * ata_probe_ent structure should then be freed with kfree(). 4326 * ata_probe_ent structure should then be freed with kfree().
4327 *
 4328 * The caller need only pass the address of the primary port; the
 4329 * secondary will be deduced automatically. If the device has
 4330 * non-standard secondary port mappings, this function can be called
 4331 * twice, once for each interface.
4282 */ 4332 */
4283 4333
4284struct ata_probe_ent * 4334struct ata_probe_ent *
4285ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port) 4335ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
4286{ 4336{
4287 struct ata_probe_ent *probe_ent = 4337 struct ata_probe_ent *probe_ent =
4288 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]); 4338 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
4339 int p = 0;
4340
4289 if (!probe_ent) 4341 if (!probe_ent)
4290 return NULL; 4342 return NULL;
4291 4343
4292 probe_ent->n_ports = 2;
4293 probe_ent->irq = pdev->irq; 4344 probe_ent->irq = pdev->irq;
4294 probe_ent->irq_flags = SA_SHIRQ; 4345 probe_ent->irq_flags = SA_SHIRQ;
4295 4346
4296 probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0); 4347 if (ports & ATA_PORT_PRIMARY) {
4297 probe_ent->port[0].altstatus_addr = 4348 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
4298 probe_ent->port[0].ctl_addr = 4349 probe_ent->port[p].altstatus_addr =
4299 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS; 4350 probe_ent->port[p].ctl_addr =
4300 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4); 4351 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
4301 4352 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4);
4302 probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2); 4353 ata_std_ports(&probe_ent->port[p]);
4303 probe_ent->port[1].altstatus_addr = 4354 p++;
4304 probe_ent->port[1].ctl_addr = 4355 }
4305 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
4306 probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8;
4307 4356
4308 ata_std_ports(&probe_ent->port[0]); 4357 if (ports & ATA_PORT_SECONDARY) {
4309 ata_std_ports(&probe_ent->port[1]); 4358 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
4359 probe_ent->port[p].altstatus_addr =
4360 probe_ent->port[p].ctl_addr =
4361 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
4362 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8;
4363 ata_std_ports(&probe_ent->port[p]);
4364 p++;
4365 }
4310 4366
4367 probe_ent->n_ports = p;
4311 return probe_ent; 4368 return probe_ent;
4312} 4369}
4313 4370
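[Editor's note] With the new @ports bitmap, a driver whose channels are mapped unusually can build one probe_ent per interface instead of always getting a dual-port one. A hedged usage sketch (hypothetical call site; ATA_PORT_PRIMARY/ATA_PORT_SECONDARY as used above):

	probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
	if (!probe_ent)
		return -ENOMEM;
	/* ...call again with ATA_PORT_SECONDARY for a split mapping... */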
4314static struct ata_probe_ent * 4371static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, struct ata_port_info **port, int port_num)
4315ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port,
4316 struct ata_probe_ent **ppe2)
4317{ 4372{
4318 struct ata_probe_ent *probe_ent, *probe_ent2; 4373 struct ata_probe_ent *probe_ent;
4319 4374
4320 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]); 4375 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
4321 if (!probe_ent) 4376 if (!probe_ent)
4322 return NULL; 4377 return NULL;
4323 probe_ent2 = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[1]);
4324 if (!probe_ent2) {
4325 kfree(probe_ent);
4326 return NULL;
4327 }
4328 4378
4329 probe_ent->n_ports = 1; 4379
4330 probe_ent->irq = 14;
4331
4332 probe_ent->hard_port_no = 0;
4333 probe_ent->legacy_mode = 1; 4380 probe_ent->legacy_mode = 1;
4334 4381 probe_ent->n_ports = 1;
4335 probe_ent2->n_ports = 1; 4382 probe_ent->hard_port_no = port_num;
4336 probe_ent2->irq = 15; 4383
 4337 4384 switch (port_num)
4338 probe_ent2->hard_port_no = 1; 4385 {
4339 probe_ent2->legacy_mode = 1; 4386 case 0:
4340 4387 probe_ent->irq = 14;
4341 probe_ent->port[0].cmd_addr = 0x1f0; 4388 probe_ent->port[0].cmd_addr = 0x1f0;
4342 probe_ent->port[0].altstatus_addr = 4389 probe_ent->port[0].altstatus_addr =
4343 probe_ent->port[0].ctl_addr = 0x3f6; 4390 probe_ent->port[0].ctl_addr = 0x3f6;
4344 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4); 4391 break;
4345 4392 case 1:
4346 probe_ent2->port[0].cmd_addr = 0x170; 4393 probe_ent->irq = 15;
4347 probe_ent2->port[0].altstatus_addr = 4394 probe_ent->port[0].cmd_addr = 0x170;
4348 probe_ent2->port[0].ctl_addr = 0x376; 4395 probe_ent->port[0].altstatus_addr =
4349 probe_ent2->port[0].bmdma_addr = pci_resource_start(pdev, 4)+8; 4396 probe_ent->port[0].ctl_addr = 0x376;
4350 4397 break;
4398 }
4399 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4) + 8 * port_num;
4351 ata_std_ports(&probe_ent->port[0]); 4400 ata_std_ports(&probe_ent->port[0]);
4352 ata_std_ports(&probe_ent2->port[0]);
4353
4354 *ppe2 = probe_ent2;
4355 return probe_ent; 4401 return probe_ent;
4356} 4402}
4357 4403
@@ -4380,7 +4426,7 @@ ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port,
4380int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, 4426int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4381 unsigned int n_ports) 4427 unsigned int n_ports)
4382{ 4428{
4383 struct ata_probe_ent *probe_ent, *probe_ent2 = NULL; 4429 struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
4384 struct ata_port_info *port[2]; 4430 struct ata_port_info *port[2];
4385 u8 tmp8, mask; 4431 u8 tmp8, mask;
4386 unsigned int legacy_mode = 0; 4432 unsigned int legacy_mode = 0;
@@ -4397,7 +4443,7 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4397 4443
4398 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0 4444 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
4399 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) { 4445 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
4400 /* TODO: support transitioning to native mode? */ 4446 /* TODO: What if one channel is in native mode ... */
4401 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8); 4447 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
4402 mask = (1 << 2) | (1 << 0); 4448 mask = (1 << 2) | (1 << 0);
4403 if ((tmp8 & mask) != mask) 4449 if ((tmp8 & mask) != mask)
@@ -4405,11 +4451,20 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4405 } 4451 }
4406 4452
4407 /* FIXME... */ 4453 /* FIXME... */
4408 if ((!legacy_mode) && (n_ports > 1)) { 4454 if ((!legacy_mode) && (n_ports > 2)) {
4409 printk(KERN_ERR "ata: BUG: native mode, n_ports > 1\n"); 4455 printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
4410 return -EINVAL; 4456 n_ports = 2;
4457 /* For now */
4411 } 4458 }
4412 4459
4460 /* FIXME: Really for ATA it isn't safe because the device may be
4461 multi-purpose and we want to leave it alone if it was already
 4462 enabled. Secondly, for shared use, as Arjan says, we want refcounting.
4463
4464 Checking dev->is_enabled is insufficient as this is not set at
 4465 boot for the primary video, which is BIOS-enabled.
4466 */
4467
4413 rc = pci_enable_device(pdev); 4468 rc = pci_enable_device(pdev);
4414 if (rc) 4469 if (rc)
4415 return rc; 4470 return rc;
@@ -4420,6 +4475,7 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4420 goto err_out; 4475 goto err_out;
4421 } 4476 }
4422 4477
4478 /* FIXME: Should use platform specific mappers for legacy port ranges */
4423 if (legacy_mode) { 4479 if (legacy_mode) {
4424 if (!request_region(0x1f0, 8, "libata")) { 4480 if (!request_region(0x1f0, 8, "libata")) {
4425 struct resource *conflict, res; 4481 struct resource *conflict, res;
@@ -4464,10 +4520,17 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4464 goto err_out_regions; 4520 goto err_out_regions;
4465 4521
4466 if (legacy_mode) { 4522 if (legacy_mode) {
4467 probe_ent = ata_pci_init_legacy_mode(pdev, port, &probe_ent2); 4523 if (legacy_mode & (1 << 0))
4468 } else 4524 probe_ent = ata_pci_init_legacy_port(pdev, port, 0);
4469 probe_ent = ata_pci_init_native_mode(pdev, port); 4525 if (legacy_mode & (1 << 1))
4470 if (!probe_ent) { 4526 probe_ent2 = ata_pci_init_legacy_port(pdev, port, 1);
4527 } else {
4528 if (n_ports == 2)
4529 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
4530 else
4531 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
4532 }
4533 if (!probe_ent && !probe_ent2) {
4471 rc = -ENOMEM; 4534 rc = -ENOMEM;
4472 goto err_out_regions; 4535 goto err_out_regions;
4473 } 4536 }
@@ -4579,6 +4642,27 @@ static void __exit ata_exit(void)
4579module_init(ata_init); 4642module_init(ata_init);
4580module_exit(ata_exit); 4643module_exit(ata_exit);
4581 4644
4645static unsigned long ratelimit_time;
4646static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
4647
4648int ata_ratelimit(void)
4649{
4650 int rc;
4651 unsigned long flags;
4652
4653 spin_lock_irqsave(&ata_ratelimit_lock, flags);
4654
4655 if (time_after(jiffies, ratelimit_time)) {
4656 rc = 1;
4657 ratelimit_time = jiffies + (HZ/5);
4658 } else
4659 rc = 0;
4660
4661 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
4662
4663 return rc;
4664}
4665
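[Editor's note] ata_ratelimit() grants at most one event per HZ/5 jiffies (200 ms at the usual tick rate), serialized by its own spinlock so it is safe from interrupt context. A hypothetical call site sketching the intended use:

	/* Throttle a potentially noisy interrupt-path warning. */
	if (ata_ratelimit())
		printk(KERN_WARNING "ata%u: unexpected interrupt status\n",
		       ap->id);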
4582/* 4666/*
4583 * libata is essentially a library of internal helper functions for 4667 * libata is essentially a library of internal helper functions for
4584 * low-level ATA host controller drivers. As such, the API/ABI is 4668 * low-level ATA host controller drivers. As such, the API/ABI is
@@ -4620,6 +4704,7 @@ EXPORT_SYMBOL_GPL(sata_phy_reset);
4620EXPORT_SYMBOL_GPL(__sata_phy_reset); 4704EXPORT_SYMBOL_GPL(__sata_phy_reset);
4621EXPORT_SYMBOL_GPL(ata_bus_reset); 4705EXPORT_SYMBOL_GPL(ata_bus_reset);
4622EXPORT_SYMBOL_GPL(ata_port_disable); 4706EXPORT_SYMBOL_GPL(ata_port_disable);
4707EXPORT_SYMBOL_GPL(ata_ratelimit);
4623EXPORT_SYMBOL_GPL(ata_scsi_ioctl); 4708EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
4624EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 4709EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4625EXPORT_SYMBOL_GPL(ata_scsi_error); 4710EXPORT_SYMBOL_GPL(ata_scsi_error);
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 104fd9a63e73..c64169ca7ff0 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -49,6 +49,14 @@ static struct ata_device *
49ata_scsi_find_dev(struct ata_port *ap, struct scsi_device *scsidev); 49ata_scsi_find_dev(struct ata_port *ap, struct scsi_device *scsidev);
50 50
51 51
52static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
53 void (*done)(struct scsi_cmnd *))
54{
55 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
56 /* "Invalid field in cbd" */
57 done(cmd);
58}
59
52/** 60/**
53 * ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd. 61 * ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
54 * @sdev: SCSI device for which BIOS geometry is to be determined 62 * @sdev: SCSI device for which BIOS geometry is to be determined
@@ -182,7 +190,6 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat)
182{ 190{
183 struct scsi_cmnd *cmd = qc->scsicmd; 191 struct scsi_cmnd *cmd = qc->scsicmd;
184 u8 err = 0; 192 u8 err = 0;
185 unsigned char *sb = cmd->sense_buffer;
186 /* Based on the 3ware driver translation table */ 193 /* Based on the 3ware driver translation table */
187 static unsigned char sense_table[][4] = { 194 static unsigned char sense_table[][4] = {
188 /* BBD|ECC|ID|MAR */ 195 /* BBD|ECC|ID|MAR */
@@ -225,8 +232,6 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat)
225 }; 232 };
226 int i = 0; 233 int i = 0;
227 234
228 cmd->result = SAM_STAT_CHECK_CONDITION;
229
230 /* 235 /*
231 * Is this an error we can process/parse 236 * Is this an error we can process/parse
232 */ 237 */
@@ -281,11 +286,9 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat)
281 /* Look for best matches first */ 286 /* Look for best matches first */
282 if((sense_table[i][0] & err) == sense_table[i][0]) 287 if((sense_table[i][0] & err) == sense_table[i][0])
283 { 288 {
284 sb[0] = 0x70; 289 ata_scsi_set_sense(cmd, sense_table[i][1] /* sk */,
285 sb[2] = sense_table[i][1]; 290 sense_table[i][2] /* asc */,
286 sb[7] = 0x0a; 291 sense_table[i][3] /* ascq */ );
287 sb[12] = sense_table[i][2];
288 sb[13] = sense_table[i][3];
289 return; 292 return;
290 } 293 }
291 i++; 294 i++;
@@ -300,11 +303,9 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat)
300 { 303 {
301 if(stat_table[i][0] & drv_stat) 304 if(stat_table[i][0] & drv_stat)
302 { 305 {
303 sb[0] = 0x70; 306 ata_scsi_set_sense(cmd, stat_table[i][1] /* sk */,
304 sb[2] = stat_table[i][1]; 307 stat_table[i][2] /* asc */,
305 sb[7] = 0x0a; 308 stat_table[i][3] /* ascq */ );
306 sb[12] = stat_table[i][2];
307 sb[13] = stat_table[i][3];
308 return; 309 return;
309 } 310 }
310 i++; 311 i++;
@@ -313,15 +314,12 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat)
313 printk(KERN_ERR "ata%u: called with no error (%02X)!\n", qc->ap->id, drv_stat); 314 printk(KERN_ERR "ata%u: called with no error (%02X)!\n", qc->ap->id, drv_stat);
314 /* additional-sense-code[-qualifier] */ 315 /* additional-sense-code[-qualifier] */
315 316
316 sb[0] = 0x70;
317 sb[2] = MEDIUM_ERROR;
318 sb[7] = 0x0A;
319 if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 317 if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
320 sb[12] = 0x11; /* "unrecovered read error" */ 318 ata_scsi_set_sense(cmd, MEDIUM_ERROR, 0x11, 0x4);
321 sb[13] = 0x04; 319 /* "unrecovered read error" */
322 } else { 320 } else {
323 sb[12] = 0x0C; /* "write error - */ 321 ata_scsi_set_sense(cmd, MEDIUM_ERROR, 0xc, 0x2);
324 sb[13] = 0x02; /* auto-reallocation failed" */ 322 /* "write error - auto-reallocation failed" */
325 } 323 }
326} 324}
327 325
@@ -430,15 +428,26 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
430 ; /* ignore IMMED bit, violates sat-r05 */ 428 ; /* ignore IMMED bit, violates sat-r05 */
431 } 429 }
432 if (scsicmd[4] & 0x2) 430 if (scsicmd[4] & 0x2)
433 return 1; /* LOEJ bit set not supported */ 431 goto invalid_fld; /* LOEJ bit set not supported */
434 if (((scsicmd[4] >> 4) & 0xf) != 0) 432 if (((scsicmd[4] >> 4) & 0xf) != 0)
435 return 1; /* power conditions not supported */ 433 goto invalid_fld; /* power conditions not supported */
436 if (scsicmd[4] & 0x1) { 434 if (scsicmd[4] & 0x1) {
437 tf->nsect = 1; /* 1 sector, lba=0 */ 435 tf->nsect = 1; /* 1 sector, lba=0 */
438 tf->lbah = 0x0; 436
439 tf->lbam = 0x0; 437 if (qc->dev->flags & ATA_DFLAG_LBA) {
440 tf->lbal = 0x0; 438 qc->tf.flags |= ATA_TFLAG_LBA;
441 tf->device |= ATA_LBA; 439
440 tf->lbah = 0x0;
441 tf->lbam = 0x0;
442 tf->lbal = 0x0;
443 tf->device |= ATA_LBA;
444 } else {
445 /* CHS */
446 tf->lbal = 0x1; /* sect */
447 tf->lbam = 0x0; /* cyl low */
448 tf->lbah = 0x0; /* cyl high */
449 }
450
442 tf->command = ATA_CMD_VERIFY; /* READ VERIFY */ 451 tf->command = ATA_CMD_VERIFY; /* READ VERIFY */
443 } else { 452 } else {
444 tf->nsect = 0; /* time period value (0 implies now) */ 453 tf->nsect = 0; /* time period value (0 implies now) */
@@ -453,6 +462,11 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
453 */ 462 */
454 463
455 return 0; 464 return 0;
465
466invalid_fld:
467 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
468 /* "Invalid field in cbd" */
469 return 1;
456} 470}
457 471
458 472
@@ -488,6 +502,99 @@ static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
488} 502}
489 503
490/** 504/**
505 * scsi_6_lba_len - Get LBA and transfer length
506 * @scsicmd: SCSI command to translate
507 *
508 * Calculate LBA and transfer length for 6-byte commands.
509 *
510 * RETURNS:
511 * @plba: the LBA
512 * @plen: the transfer length
513 */
514
515static void scsi_6_lba_len(u8 *scsicmd, u64 *plba, u32 *plen)
516{
517 u64 lba = 0;
518 u32 len = 0;
519
520 VPRINTK("six-byte command\n");
521
522 lba |= ((u64)scsicmd[2]) << 8;
523 lba |= ((u64)scsicmd[3]);
524
525 len |= ((u32)scsicmd[4]);
526
527 *plba = lba;
528 *plen = len;
529}
530
531/**
532 * scsi_10_lba_len - Get LBA and transfer length
533 * @scsicmd: SCSI command to translate
534 *
535 * Calculate LBA and transfer length for 10-byte commands.
536 *
537 * RETURNS:
538 * @plba: the LBA
539 * @plen: the transfer length
540 */
541
542static void scsi_10_lba_len(u8 *scsicmd, u64 *plba, u32 *plen)
543{
544 u64 lba = 0;
545 u32 len = 0;
546
547 VPRINTK("ten-byte command\n");
548
549 lba |= ((u64)scsicmd[2]) << 24;
550 lba |= ((u64)scsicmd[3]) << 16;
551 lba |= ((u64)scsicmd[4]) << 8;
552 lba |= ((u64)scsicmd[5]);
553
554 len |= ((u32)scsicmd[7]) << 8;
555 len |= ((u32)scsicmd[8]);
556
557 *plba = lba;
558 *plen = len;
559}
560
561/**
562 * scsi_16_lba_len - Get LBA and transfer length
563 * @scsicmd: SCSI command to translate
564 *
565 * Calculate LBA and transfer length for 16-byte commands.
566 *
567 * RETURNS:
568 * @plba: the LBA
569 * @plen: the transfer length
570 */
571
572static void scsi_16_lba_len(u8 *scsicmd, u64 *plba, u32 *plen)
573{
574 u64 lba = 0;
575 u32 len = 0;
576
577 VPRINTK("sixteen-byte command\n");
578
579 lba |= ((u64)scsicmd[2]) << 56;
580 lba |= ((u64)scsicmd[3]) << 48;
581 lba |= ((u64)scsicmd[4]) << 40;
582 lba |= ((u64)scsicmd[5]) << 32;
583 lba |= ((u64)scsicmd[6]) << 24;
584 lba |= ((u64)scsicmd[7]) << 16;
585 lba |= ((u64)scsicmd[8]) << 8;
586 lba |= ((u64)scsicmd[9]);
587
588 len |= ((u32)scsicmd[10]) << 24;
589 len |= ((u32)scsicmd[11]) << 16;
590 len |= ((u32)scsicmd[12]) << 8;
591 len |= ((u32)scsicmd[13]);
592
593 *plba = lba;
594 *plen = len;
595}
596
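[Editor's note] As a worked check of the three extractors above (values chosen for illustration): a READ(10) CDB of 28 00 00 00 13 88 00 00 08 00 decodes to LBA 0x1388 (5000) and transfer length 8.

	/* Illustration only: decode a sample READ(10) CDB. */
	u8 cdb[10] = { 0x28, 0, 0x00, 0x00, 0x13, 0x88, 0, 0x00, 0x08, 0 };
	u64 lba;
	u32 len;

	scsi_10_lba_len(cdb, &lba, &len);	/* lba == 5000, len == 8 */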
597/**
491 * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one 598 * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one
492 * @qc: Storage for translated ATA taskfile 599 * @qc: Storage for translated ATA taskfile
493 * @scsicmd: SCSI command to translate 600 * @scsicmd: SCSI command to translate
@@ -504,79 +611,102 @@ static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
504static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, u8 *scsicmd) 611static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
505{ 612{
506 struct ata_taskfile *tf = &qc->tf; 613 struct ata_taskfile *tf = &qc->tf;
614 struct ata_device *dev = qc->dev;
615 unsigned int lba = tf->flags & ATA_TFLAG_LBA;
507 unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48; 616 unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48;
508 u64 dev_sectors = qc->dev->n_sectors; 617 u64 dev_sectors = qc->dev->n_sectors;
509 u64 sect = 0; 618 u64 block;
510 u32 n_sect = 0; 619 u32 n_block;
511 620
512 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 621 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
513 tf->protocol = ATA_PROT_NODATA; 622 tf->protocol = ATA_PROT_NODATA;
514 tf->device |= ATA_LBA;
515
516 if (scsicmd[0] == VERIFY) {
517 sect |= ((u64)scsicmd[2]) << 24;
518 sect |= ((u64)scsicmd[3]) << 16;
519 sect |= ((u64)scsicmd[4]) << 8;
520 sect |= ((u64)scsicmd[5]);
521
522 n_sect |= ((u32)scsicmd[7]) << 8;
523 n_sect |= ((u32)scsicmd[8]);
524 }
525
526 else if (scsicmd[0] == VERIFY_16) {
527 sect |= ((u64)scsicmd[2]) << 56;
528 sect |= ((u64)scsicmd[3]) << 48;
529 sect |= ((u64)scsicmd[4]) << 40;
530 sect |= ((u64)scsicmd[5]) << 32;
531 sect |= ((u64)scsicmd[6]) << 24;
532 sect |= ((u64)scsicmd[7]) << 16;
533 sect |= ((u64)scsicmd[8]) << 8;
534 sect |= ((u64)scsicmd[9]);
535
536 n_sect |= ((u32)scsicmd[10]) << 24;
537 n_sect |= ((u32)scsicmd[11]) << 16;
538 n_sect |= ((u32)scsicmd[12]) << 8;
539 n_sect |= ((u32)scsicmd[13]);
540 }
541 623
624 if (scsicmd[0] == VERIFY)
625 scsi_10_lba_len(scsicmd, &block, &n_block);
626 else if (scsicmd[0] == VERIFY_16)
627 scsi_16_lba_len(scsicmd, &block, &n_block);
542 else 628 else
543 return 1; 629 goto invalid_fld;
544 630
545 if (!n_sect) 631 if (!n_block)
546 return 1; 632 goto nothing_to_do;
547 if (sect >= dev_sectors) 633 if (block >= dev_sectors)
548 return 1; 634 goto out_of_range;
549 if ((sect + n_sect) > dev_sectors) 635 if ((block + n_block) > dev_sectors)
550 return 1; 636 goto out_of_range;
551 if (lba48) { 637 if (lba48) {
552 if (n_sect > (64 * 1024)) 638 if (n_block > (64 * 1024))
553 return 1; 639 goto invalid_fld;
554 } else { 640 } else {
555 if (n_sect > 256) 641 if (n_block > 256)
556 return 1; 642 goto invalid_fld;
557 } 643 }
558 644
559 if (lba48) { 645 if (lba) {
560 tf->command = ATA_CMD_VERIFY_EXT; 646 if (lba48) {
647 tf->command = ATA_CMD_VERIFY_EXT;
648
649 tf->hob_nsect = (n_block >> 8) & 0xff;
650
651 tf->hob_lbah = (block >> 40) & 0xff;
652 tf->hob_lbam = (block >> 32) & 0xff;
653 tf->hob_lbal = (block >> 24) & 0xff;
654 } else {
655 tf->command = ATA_CMD_VERIFY;
656
657 tf->device |= (block >> 24) & 0xf;
658 }
561 659
562 tf->hob_nsect = (n_sect >> 8) & 0xff; 660 tf->nsect = n_block & 0xff;
563 661
564 tf->hob_lbah = (sect >> 40) & 0xff; 662 tf->lbah = (block >> 16) & 0xff;
565 tf->hob_lbam = (sect >> 32) & 0xff; 663 tf->lbam = (block >> 8) & 0xff;
566 tf->hob_lbal = (sect >> 24) & 0xff; 664 tf->lbal = block & 0xff;
665
666 tf->device |= ATA_LBA;
567 } else { 667 } else {
668 /* CHS */
669 u32 sect, head, cyl, track;
670
671 /* Convert LBA to CHS */
672 track = (u32)block / dev->sectors;
673 cyl = track / dev->heads;
674 head = track % dev->heads;
675 sect = (u32)block % dev->sectors + 1;
676
677 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
678 (u32)block, track, cyl, head, sect);
679
680 /* Check whether the converted CHS can fit.
681 Cylinder: 0-65535
682 Head: 0-15
 683 Sector: 1-255 */
684 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
685 goto out_of_range;
686
568 tf->command = ATA_CMD_VERIFY; 687 tf->command = ATA_CMD_VERIFY;
569 688 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
570 tf->device |= (sect >> 24) & 0xf; 689 tf->lbal = sect;
690 tf->lbam = cyl;
691 tf->lbah = cyl >> 8;
692 tf->device |= head;
571 } 693 }
572 694
573 tf->nsect = n_sect & 0xff; 695 return 0;
574 696
575 tf->lbah = (sect >> 16) & 0xff; 697invalid_fld:
576 tf->lbam = (sect >> 8) & 0xff; 698 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
577 tf->lbal = sect & 0xff; 699 /* "Invalid field in cdb" */
700 return 1;
578 701
579 return 0; 702out_of_range:
703 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x21, 0x0);
704 /* "Logical Block Address out of range" */
705 return 1;
706
707nothing_to_do:
708 qc->scsicmd->result = SAM_STAT_GOOD;
709 return 1;
580} 710}
581 711
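[Editor's note] The LBA-to-CHS conversion above deserves a worked example (illustrative geometry): with 16 heads and 63 sectors per track, block 5000 gives track = 5000 / 63 = 79, cyl = 79 / 16 = 4, head = 79 % 16 = 15, and sect = 5000 % 63 + 1 = 24, all inside the cylinder 0-65535, head 0-15, sector 1-255 limits checked above. The same arithmetic in isolation:

	/* Illustration only: the conversion used by the xlat routines. */
	static int lba_to_chs(u32 block, u32 heads, u32 sectors,
			      u32 *cyl, u32 *head, u32 *sect)
	{
		u32 track = block / sectors;

		*cyl  = track / heads;
		*head = track % heads;
		*sect = block % sectors + 1;

		/* non-zero if the geometry cannot address this block */
		return (*cyl >> 16) || (*head >> 4) || (*sect >> 8) || !*sect;
	}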
582/** 712/**
@@ -602,11 +732,14 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
602static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd) 732static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
603{ 733{
604 struct ata_taskfile *tf = &qc->tf; 734 struct ata_taskfile *tf = &qc->tf;
735 struct ata_device *dev = qc->dev;
736 unsigned int lba = tf->flags & ATA_TFLAG_LBA;
605 unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48; 737 unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48;
738 u64 block;
739 u32 n_block;
606 740
607 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 741 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
608 tf->protocol = qc->dev->xfer_protocol; 742 tf->protocol = qc->dev->xfer_protocol;
609 tf->device |= ATA_LBA;
610 743
611 if (scsicmd[0] == READ_10 || scsicmd[0] == READ_6 || 744 if (scsicmd[0] == READ_10 || scsicmd[0] == READ_6 ||
612 scsicmd[0] == READ_16) { 745 scsicmd[0] == READ_16) {
@@ -616,89 +749,115 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
616 tf->flags |= ATA_TFLAG_WRITE; 749 tf->flags |= ATA_TFLAG_WRITE;
617 } 750 }
618 751
619 if (scsicmd[0] == READ_10 || scsicmd[0] == WRITE_10) { 752 /* Calculate the SCSI LBA and transfer length. */
620 if (lba48) { 753 switch (scsicmd[0]) {
621 tf->hob_nsect = scsicmd[7]; 754 case READ_10:
622 tf->hob_lbal = scsicmd[2]; 755 case WRITE_10:
623 756 scsi_10_lba_len(scsicmd, &block, &n_block);
624 qc->nsect = ((unsigned int)scsicmd[7] << 8) | 757 break;
625 scsicmd[8]; 758 case READ_6:
626 } else { 759 case WRITE_6:
627 /* if we don't support LBA48 addressing, the request 760 scsi_6_lba_len(scsicmd, &block, &n_block);
628 * -may- be too large. */
629 if ((scsicmd[2] & 0xf0) || scsicmd[7])
630 return 1;
631
632 /* stores LBA27:24 in lower 4 bits of device reg */
633 tf->device |= scsicmd[2];
634 761
635 qc->nsect = scsicmd[8]; 762 /* for 6-byte r/w commands, transfer length 0
636 } 763 * means 256 blocks of data, not 0 block.
764 */
765 if (!n_block)
766 n_block = 256;
767 break;
768 case READ_16:
769 case WRITE_16:
770 scsi_16_lba_len(scsicmd, &block, &n_block);
771 break;
772 default:
773 DPRINTK("no-byte command\n");
774 goto invalid_fld;
775 }
637 776
638 tf->nsect = scsicmd[8]; 777 /* Check and compose ATA command */
639 tf->lbal = scsicmd[5]; 778 if (!n_block)
640 tf->lbam = scsicmd[4]; 779 /* For 10-byte and 16-byte SCSI R/W commands, transfer
641 tf->lbah = scsicmd[3]; 780 * length 0 means transfer 0 block of data.
781 * However, for ATA R/W commands, sector count 0 means
782 * 256 or 65536 sectors, not 0 sectors as in SCSI.
783 */
784 goto nothing_to_do;
642 785
643 VPRINTK("ten-byte command\n"); 786 if (lba) {
644 if (qc->nsect == 0) /* we don't support length==0 cmds */ 787 if (lba48) {
645 return 1; 788 /* The request -may- be too large for LBA48. */
646 return 0; 789 if ((block >> 48) || (n_block > 65536))
647 } 790 goto out_of_range;
648 791
649 if (scsicmd[0] == READ_6 || scsicmd[0] == WRITE_6) { 792 tf->hob_nsect = (n_block >> 8) & 0xff;
650 qc->nsect = tf->nsect = scsicmd[4];
651 if (!qc->nsect) {
652 qc->nsect = 256;
653 if (lba48)
654 tf->hob_nsect = 1;
655 }
656 793
657 tf->lbal = scsicmd[3]; 794 tf->hob_lbah = (block >> 40) & 0xff;
658 tf->lbam = scsicmd[2]; 795 tf->hob_lbam = (block >> 32) & 0xff;
659 tf->lbah = scsicmd[1] & 0x1f; /* mask out reserved bits */ 796 tf->hob_lbal = (block >> 24) & 0xff;
797 } else {
798 /* LBA28 */
660 799
661 VPRINTK("six-byte command\n"); 800 /* The request -may- be too large for LBA28. */
662 return 0; 801 if ((block >> 28) || (n_block > 256))
663 } 802 goto out_of_range;
664 803
665 if (scsicmd[0] == READ_16 || scsicmd[0] == WRITE_16) { 804 tf->device |= (block >> 24) & 0xf;
666 /* rule out impossible LBAs and sector counts */ 805 }
667 if (scsicmd[2] || scsicmd[3] || scsicmd[10] || scsicmd[11])
668 return 1;
669 806
670 if (lba48) { 807 qc->nsect = n_block;
671 tf->hob_nsect = scsicmd[12]; 808 tf->nsect = n_block & 0xff;
672 tf->hob_lbal = scsicmd[6];
673 tf->hob_lbam = scsicmd[5];
674 tf->hob_lbah = scsicmd[4];
675 809
676 qc->nsect = ((unsigned int)scsicmd[12] << 8) | 810 tf->lbah = (block >> 16) & 0xff;
677 scsicmd[13]; 811 tf->lbam = (block >> 8) & 0xff;
678 } else { 812 tf->lbal = block & 0xff;
679 /* once again, filter out impossible non-zero values */
680 if (scsicmd[4] || scsicmd[5] || scsicmd[12] ||
681 (scsicmd[6] & 0xf0))
682 return 1;
683 813
684 /* stores LBA27:24 in lower 4 bits of device reg */ 814 tf->device |= ATA_LBA;
685 tf->device |= scsicmd[6]; 815 } else {
816 /* CHS */
817 u32 sect, head, cyl, track;
818
819 /* The request -may- be too large for CHS addressing. */
820 if ((block >> 28) || (n_block > 256))
821 goto out_of_range;
822
823 /* Convert LBA to CHS */
824 track = (u32)block / dev->sectors;
825 cyl = track / dev->heads;
826 head = track % dev->heads;
827 sect = (u32)block % dev->sectors + 1;
828
829 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
830 (u32)block, track, cyl, head, sect);
831
832 /* Check whether the converted CHS can fit.
833 Cylinder: 0-65535
834 Head: 0-15
 835 Sector: 1-255 */
836 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
837 goto out_of_range;
838
839 qc->nsect = n_block;
840 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
841 tf->lbal = sect;
842 tf->lbam = cyl;
843 tf->lbah = cyl >> 8;
844 tf->device |= head;
845 }
686 846
687 qc->nsect = scsicmd[13]; 847 return 0;
688 }
689 848
690 tf->nsect = scsicmd[13]; 849invalid_fld:
691 tf->lbal = scsicmd[9]; 850 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
692 tf->lbam = scsicmd[8]; 851 /* "Invalid field in cbd" */
693 tf->lbah = scsicmd[7]; 852 return 1;
694 853
695 VPRINTK("sixteen-byte command\n"); 854out_of_range:
696 if (qc->nsect == 0) /* we don't support length==0 cmds */ 855 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x21, 0x0);
697 return 1; 856 /* "Logical Block Address out of range" */
698 return 0; 857 return 1;
699 }
700 858
701 DPRINTK("no-byte command\n"); 859nothing_to_do:
860 qc->scsicmd->result = SAM_STAT_GOOD;
702 return 1; 861 return 1;
703} 862}
704 863
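[Editor's note] The nothing_to_do paths above exist because SCSI and ATA disagree about a zero count: READ(6)/WRITE(6) with a zero length byte mean 256 blocks (hence n_block forced to 256), a zero length in the 10- and 16-byte forms means zero blocks, and an ATA sector count of zero means 256 (LBA28/CHS) or 65536 (LBA48). A compact restatement of the rules ata_scsi_rw_xlat() applies:

	/* Zero-length semantics handled above:
	 *   SCSI READ/WRITE(6),     length 0 -> 256 blocks
	 *   SCSI READ/WRITE(10/16), length 0 -> 0 blocks -> nothing_to_do
	 *   ATA sector count 0 -> 256 (LBA28/CHS) or 65536 (LBA48)
	 */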
@@ -731,6 +890,12 @@ static int ata_scsi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
731 * This function sets up an ata_queued_cmd structure for the 890 * This function sets up an ata_queued_cmd structure for the
732 * SCSI command, and sends that ata_queued_cmd to the hardware. 891 * SCSI command, and sends that ata_queued_cmd to the hardware.
733 * 892 *
 893 * The xlat_func argument (actor) returns 0 when it is ready to execute
 894 * the ATA command, or 1 to finish translation early. If 1 is returned
895 * then cmd->result (and possibly cmd->sense_buffer) are assumed
896 * to be set reflecting an error condition or clean (early)
897 * termination.
898 *
734 * LOCKING: 899 * LOCKING:
735 * spin_lock_irqsave(host_set lock) 900 * spin_lock_irqsave(host_set lock)
736 */ 901 */
@@ -747,7 +912,7 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
747 912
748 qc = ata_scsi_qc_new(ap, dev, cmd, done); 913 qc = ata_scsi_qc_new(ap, dev, cmd, done);
749 if (!qc) 914 if (!qc)
750 return; 915 goto err_mem;
751 916
752 /* data is present; dma-map it */ 917 /* data is present; dma-map it */
753 if (cmd->sc_data_direction == DMA_FROM_DEVICE || 918 if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
@@ -755,7 +920,7 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
755 if (unlikely(cmd->request_bufflen < 1)) { 920 if (unlikely(cmd->request_bufflen < 1)) {
756 printk(KERN_WARNING "ata%u(%u): WARNING: zero len r/w req\n", 921 printk(KERN_WARNING "ata%u(%u): WARNING: zero len r/w req\n",
757 ap->id, dev->devno); 922 ap->id, dev->devno);
758 goto err_out; 923 goto err_did;
759 } 924 }
760 925
761 if (cmd->use_sg) 926 if (cmd->use_sg)
@@ -770,19 +935,28 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
770 qc->complete_fn = ata_scsi_qc_complete; 935 qc->complete_fn = ata_scsi_qc_complete;
771 936
772 if (xlat_func(qc, scsicmd)) 937 if (xlat_func(qc, scsicmd))
773 goto err_out; 938 goto early_finish;
774 939
775 /* select device, send command to hardware */ 940 /* select device, send command to hardware */
776 if (ata_qc_issue(qc)) 941 if (ata_qc_issue(qc))
777 goto err_out; 942 goto err_did;
778 943
779 VPRINTK("EXIT\n"); 944 VPRINTK("EXIT\n");
780 return; 945 return;
781 946
782err_out: 947early_finish:
948 ata_qc_free(qc);
949 done(cmd);
950 DPRINTK("EXIT - early finish (good or error)\n");
951 return;
952
953err_did:
783 ata_qc_free(qc); 954 ata_qc_free(qc);
784 ata_bad_cdb(cmd, done); 955err_mem:
785 DPRINTK("EXIT - badcmd\n"); 956 cmd->result = (DID_ERROR << 16);
957 done(cmd);
958 DPRINTK("EXIT - internal\n");
959 return;
786} 960}
787 961
788/** 962/**
@@ -849,7 +1023,8 @@ static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf)
849 * Mapping the response buffer, calling the command's handler, 1023 * Mapping the response buffer, calling the command's handler,
850 * and handling the handler's return value. This return value 1024 * and handling the handler's return value. This return value
851 * indicates whether the handler wishes the SCSI command to be 1025 * indicates whether the handler wishes the SCSI command to be
852 * completed successfully, or not. 1026 * completed successfully (0), or not (in which case cmd->result
1027 * and sense buffer are assumed to be set).
853 * 1028 *
854 * LOCKING: 1029 * LOCKING:
855 * spin_lock_irqsave(host_set lock) 1030 * spin_lock_irqsave(host_set lock)
@@ -868,12 +1043,9 @@ void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
868 rc = actor(args, rbuf, buflen); 1043 rc = actor(args, rbuf, buflen);
869 ata_scsi_rbuf_put(cmd, rbuf); 1044 ata_scsi_rbuf_put(cmd, rbuf);
870 1045
871 if (rc) 1046 if (rc == 0)
872 ata_bad_cdb(cmd, args->done);
873 else {
874 cmd->result = SAM_STAT_GOOD; 1047 cmd->result = SAM_STAT_GOOD;
875 args->done(cmd); 1048 args->done(cmd);
876 }
877} 1049}
878 1050
879/** 1051/**
@@ -1179,8 +1351,16 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
1179 * in the same manner) 1351 * in the same manner)
1180 */ 1352 */
1181 page_control = scsicmd[2] >> 6; 1353 page_control = scsicmd[2] >> 6;
1182 if ((page_control != 0) && (page_control != 3)) 1354 switch (page_control) {
1183 return 1; 1355 case 0: /* current */
1356 break; /* supported */
1357 case 3: /* saved */
1358 goto saving_not_supp;
1359 case 1: /* changeable */
1360 case 2: /* defaults */
1361 default:
1362 goto invalid_fld;
1363 }
1184 1364
1185 if (six_byte) 1365 if (six_byte)
1186 output_len = 4; 1366 output_len = 4;
@@ -1211,7 +1391,7 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
1211 break; 1391 break;
1212 1392
1213 default: /* invalid page code */ 1393 default: /* invalid page code */
1214 return 1; 1394 goto invalid_fld;
1215 } 1395 }
1216 1396
1217 if (six_byte) { 1397 if (six_byte) {
@@ -1224,6 +1404,16 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
1224 } 1404 }
1225 1405
1226 return 0; 1406 return 0;
1407
1408invalid_fld:
1409 ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x24, 0x0);
1410 /* "Invalid field in cbd" */
1411 return 1;
1412
1413saving_not_supp:
1414 ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
1415 /* "Saving parameters not supported" */
1416 return 1;
1227} 1417}
1228 1418
1229/** 1419/**
@@ -1246,10 +1436,20 @@ unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
1246 1436
1247 VPRINTK("ENTER\n"); 1437 VPRINTK("ENTER\n");
1248 1438
1249 if (ata_id_has_lba48(args->id)) 1439 if (ata_id_has_lba(args->id)) {
1250 n_sectors = ata_id_u64(args->id, 100); 1440 if (ata_id_has_lba48(args->id))
1251 else 1441 n_sectors = ata_id_u64(args->id, 100);
1252 n_sectors = ata_id_u32(args->id, 60); 1442 else
1443 n_sectors = ata_id_u32(args->id, 60);
1444 } else {
1445 /* CHS default translation */
1446 n_sectors = args->id[1] * args->id[3] * args->id[6];
1447
1448 if (ata_id_current_chs_valid(args->id))
1449 /* CHS current translation */
1450 n_sectors = ata_id_u32(args->id, 57);
1451 }
1452
1253 n_sectors--; /* ATA TotalUserSectors - 1 */ 1453 n_sectors--; /* ATA TotalUserSectors - 1 */
1254 1454
1255 if (args->cmd->cmnd[0] == READ_CAPACITY) { 1455 if (args->cmd->cmnd[0] == READ_CAPACITY) {
@@ -1313,6 +1513,34 @@ unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
1313} 1513}
1314 1514
1315/** 1515/**
1516 * ata_scsi_set_sense - Set SCSI sense data and status
1517 * @cmd: SCSI request to be handled
1518 * @sk: SCSI-defined sense key
1519 * @asc: SCSI-defined additional sense code
1520 * @ascq: SCSI-defined additional sense code qualifier
1521 *
1522 * Helper function that builds a valid fixed format, current
1523 * response code and the given sense key (sk), additional sense
1524 * code (asc) and additional sense code qualifier (ascq) with
1525 * a SCSI command status of %SAM_STAT_CHECK_CONDITION and
1526 * DRIVER_SENSE set in the upper bits of scsi_cmnd::result .
1527 *
1528 * LOCKING:
1529 * Not required
1530 */
1531
1532void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
1533{
1534 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
1535
1536 cmd->sense_buffer[0] = 0x70; /* fixed format, current */
1537 cmd->sense_buffer[2] = sk;
1538 cmd->sense_buffer[7] = 18 - 8; /* additional sense length */
1539 cmd->sense_buffer[12] = asc;
1540 cmd->sense_buffer[13] = ascq;
1541}
1542
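[Editor's note] For the "invalid field" case used throughout this patch, the helper above produces, byte for byte:

	/* Illustration only: resulting fixed-format sense data. */
	ata_scsi_set_sense(cmd, ILLEGAL_REQUEST /* 0x05 */, 0x24, 0x0);
	/* sense_buffer[0..13]: 70 00 05 00 00 00 00 0a 00 00 00 00 24 00
	 * cmd->result: (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION */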
1543/**
1316 * ata_scsi_badcmd - End a SCSI request with an error 1544 * ata_scsi_badcmd - End a SCSI request with an error
1317 * @cmd: SCSI request to be handled 1545 * @cmd: SCSI request to be handled
1318 * @done: SCSI command completion function 1546 * @done: SCSI command completion function
@@ -1330,30 +1558,84 @@ unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
1330void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq) 1558void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq)
1331{ 1559{
1332 DPRINTK("ENTER\n"); 1560 DPRINTK("ENTER\n");
1333 cmd->result = SAM_STAT_CHECK_CONDITION; 1561 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, asc, ascq);
1334
1335 cmd->sense_buffer[0] = 0x70;
1336 cmd->sense_buffer[2] = ILLEGAL_REQUEST;
1337 cmd->sense_buffer[7] = 14 - 8; /* addnl. sense len. FIXME: correct? */
1338 cmd->sense_buffer[12] = asc;
1339 cmd->sense_buffer[13] = ascq;
1340 1562
1341 done(cmd); 1563 done(cmd);
1342} 1564}
1343 1565
1566void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
1567 struct scsi_cmnd *cmd)
1568{
1569 DECLARE_COMPLETION(wait);
1570 struct ata_queued_cmd *qc;
1571 unsigned long flags;
1572 int rc;
1573
1574 DPRINTK("ATAPI request sense\n");
1575
1576 qc = ata_qc_new_init(ap, dev);
1577 BUG_ON(qc == NULL);
1578
1579 /* FIXME: is this needed? */
1580 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
1581
1582 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
1583 qc->dma_dir = DMA_FROM_DEVICE;
1584
1585 memset(&qc->cdb, 0, ap->cdb_len);
1586 qc->cdb[0] = REQUEST_SENSE;
1587 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
1588
1589 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1590 qc->tf.command = ATA_CMD_PACKET;
1591
1592 qc->tf.protocol = ATA_PROT_ATAPI;
1593 qc->tf.lbam = (8 * 1024) & 0xff;
1594 qc->tf.lbah = (8 * 1024) >> 8;
1595 qc->nbytes = SCSI_SENSE_BUFFERSIZE;
1596
1597 qc->waiting = &wait;
1598 qc->complete_fn = ata_qc_complete_noop;
1599
1600 spin_lock_irqsave(&ap->host_set->lock, flags);
1601 rc = ata_qc_issue(qc);
1602 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1603
1604 if (rc)
1605 ata_port_disable(ap);
1606 else
1607 wait_for_completion(&wait);
1608
1609 DPRINTK("EXIT\n");
1610}
1611
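[Editor's note] One detail worth calling out in the relocated function above: for ATAPI, the taskfile LBA mid/high registers carry the byte count limit, here 8 * 1024 = 0x2000, so the registers end up as:

	qc->tf.lbam = (8 * 1024) & 0xff;	/* 0x2000 & 0xff = 0x00 */
	qc->tf.lbah = (8 * 1024) >> 8;		/* 0x2000 >> 8   = 0x20 */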
1344static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) 1612static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
1345{ 1613{
1346 struct scsi_cmnd *cmd = qc->scsicmd; 1614 struct scsi_cmnd *cmd = qc->scsicmd;
1347 1615
1348 if (unlikely(drv_stat & (ATA_ERR | ATA_BUSY | ATA_DRQ))) { 1616 VPRINTK("ENTER, drv_stat == 0x%x\n", drv_stat);
1617
1618 if (unlikely(drv_stat & (ATA_BUSY | ATA_DRQ)))
1619 ata_to_sense_error(qc, drv_stat);
1620
1621 else if (unlikely(drv_stat & ATA_ERR)) {
1349 DPRINTK("request check condition\n"); 1622 DPRINTK("request check condition\n");
1350 1623
1624 /* FIXME: command completion with check condition
1625 * but no sense causes the error handler to run,
1626 * which then issues REQUEST SENSE, fills in the sense
1627 * buffer, and completes the command (for the second
1628 * time). We need to issue REQUEST SENSE some other
1629 * way, to avoid completing the command twice.
1630 */
1351 cmd->result = SAM_STAT_CHECK_CONDITION; 1631 cmd->result = SAM_STAT_CHECK_CONDITION;
1352 1632
1353 qc->scsidone(cmd); 1633 qc->scsidone(cmd);
1354 1634
1355 return 1; 1635 return 1;
1356 } else { 1636 }
1637
1638 else {
1357 u8 *scsicmd = cmd->cmnd; 1639 u8 *scsicmd = cmd->cmnd;
1358 1640
1359 if (scsicmd[0] == INQUIRY) { 1641 if (scsicmd[0] == INQUIRY) {
@@ -1361,15 +1643,30 @@ static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
1361 unsigned int buflen; 1643 unsigned int buflen;
1362 1644
1363 buflen = ata_scsi_rbuf_get(cmd, &buf); 1645 buflen = ata_scsi_rbuf_get(cmd, &buf);
1364 buf[2] = 0x5; 1646
1365 buf[3] = (buf[3] & 0xf0) | 2; 1647 /* ATAPI devices typically report zero for their SCSI version,
1648 * and sometimes deviate from the spec WRT response data
1649 * format. If SCSI version is reported as zero like normal,
1650 * then we make the following fixups: 1) Fake MMC-5 version,
1651 * to indicate to the Linux scsi midlayer this is a modern
1652 * device. 2) Ensure response data format / ATAPI information
1653 * are always correct.
1654 */
1655 /* FIXME: do we ever override EVPD pages and the like, with
1656 * this code?
1657 */
1658 if (buf[2] == 0) {
1659 buf[2] = 0x5;
1660 buf[3] = 0x32;
1661 }
1662
1366 ata_scsi_rbuf_put(cmd, buf); 1663 ata_scsi_rbuf_put(cmd, buf);
1367 } 1664 }
1665
1368 cmd->result = SAM_STAT_GOOD; 1666 cmd->result = SAM_STAT_GOOD;
1369 } 1667 }
1370 1668
1371 qc->scsidone(cmd); 1669 qc->scsidone(cmd);
1372
1373 return 0; 1670 return 0;
1374} 1671}
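The INQUIRY fixup above rewrites two bytes of the standard response: byte 2 is the VERSION field, and byte 3 carries flag bits plus the RESPONSE DATA FORMAT in its low nibble, so 0x32 sets NormACA, HiSup, and response data format 2. A standalone restatement of the same patch-up, assuming a caller-supplied response buffer:

    /* Same fixup on a raw INQUIRY response; offsets per SPC */
    static void example_fixup_atapi_inquiry(unsigned char *buf)
    {
    	if (buf[2] == 0) {	/* device reported no SCSI version */
    		buf[2] = 0x5;	/* fake an MMC-5 era version */
    		buf[3] = 0x32;	/* NormACA | HiSup | response format 2 */
    	}
    }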
1375/** 1672/**
@@ -1630,7 +1927,7 @@ void ata_scsi_simulate(u16 *id,
1630 1927
1631 case INQUIRY: 1928 case INQUIRY:
1632 if (scsicmd[1] & 2) /* is CmdDt set? */ 1929 if (scsicmd[1] & 2) /* is CmdDt set? */
1633 ata_bad_cdb(cmd, done); 1930 ata_scsi_invalid_field(cmd, done);
1634 else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */ 1931 else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */
1635 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std); 1932 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
1636 else if (scsicmd[2] == 0x00) 1933 else if (scsicmd[2] == 0x00)
@@ -1640,7 +1937,7 @@ void ata_scsi_simulate(u16 *id,
1640 else if (scsicmd[2] == 0x83) 1937 else if (scsicmd[2] == 0x83)
1641 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83); 1938 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
1642 else 1939 else
1643 ata_bad_cdb(cmd, done); 1940 ata_scsi_invalid_field(cmd, done);
1644 break; 1941 break;
1645 1942
1646 case MODE_SENSE: 1943 case MODE_SENSE:
@@ -1650,7 +1947,7 @@ void ata_scsi_simulate(u16 *id,
1650 1947
1651 case MODE_SELECT: /* unconditionally return */ 1948 case MODE_SELECT: /* unconditionally return */
1652 case MODE_SELECT_10: /* bad-field-in-cdb */ 1949 case MODE_SELECT_10: /* bad-field-in-cdb */
1653 ata_bad_cdb(cmd, done); 1950 ata_scsi_invalid_field(cmd, done);
1654 break; 1951 break;
1655 1952
1656 case READ_CAPACITY: 1953 case READ_CAPACITY:
@@ -1661,7 +1958,7 @@ void ata_scsi_simulate(u16 *id,
1661 if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16) 1958 if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
1662 ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); 1959 ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
1663 else 1960 else
1664 ata_bad_cdb(cmd, done); 1961 ata_scsi_invalid_field(cmd, done);
1665 break; 1962 break;
1666 1963
1667 case REPORT_LUNS: 1964 case REPORT_LUNS:
@@ -1673,8 +1970,26 @@ void ata_scsi_simulate(u16 *id,
1673 1970
1674 /* all other commands */ 1971 /* all other commands */
1675 default: 1972 default:
1676 ata_bad_scsiop(cmd, done); 1973 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0);
1974 /* "Invalid command operation code" */
1975 done(cmd);
1677 break; 1976 break;
1678 } 1977 }
1679} 1978}
1680 1979
1980void ata_scsi_scan_host(struct ata_port *ap)
1981{
1982 struct ata_device *dev;
1983 unsigned int i;
1984
1985 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1986 return;
1987
1988 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1989 dev = &ap->device[i];
1990
1991 if (ata_dev_present(dev))
1992 scsi_scan_target(&ap->host->shost_gendev, 0, i, 0, 0);
1993 }
1994}
1995
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index d608b3a0f6fe..a18f2ac1d4a1 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -39,6 +39,7 @@ struct ata_scsi_args {
39 39
40/* libata-core.c */ 40/* libata-core.c */
41extern int atapi_enabled; 41extern int atapi_enabled;
42extern int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat);
42extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, 43extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
43 struct ata_device *dev); 44 struct ata_device *dev);
44extern void ata_qc_free(struct ata_queued_cmd *qc); 45extern void ata_qc_free(struct ata_queued_cmd *qc);
@@ -51,6 +52,9 @@ extern void swap_buf_le16(u16 *buf, unsigned int buf_words);
51 52
52 53
53/* libata-scsi.c */ 54/* libata-scsi.c */
55extern void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
56 struct scsi_cmnd *cmd);
57extern void ata_scsi_scan_host(struct ata_port *ap);
54extern void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat); 58extern void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat);
55extern int ata_scsi_error(struct Scsi_Host *host); 59extern int ata_scsi_error(struct Scsi_Host *host);
56extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, 60extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
@@ -76,18 +80,10 @@ extern unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
76extern void ata_scsi_badcmd(struct scsi_cmnd *cmd, 80extern void ata_scsi_badcmd(struct scsi_cmnd *cmd,
77 void (*done)(struct scsi_cmnd *), 81 void (*done)(struct scsi_cmnd *),
78 u8 asc, u8 ascq); 82 u8 asc, u8 ascq);
83extern void ata_scsi_set_sense(struct scsi_cmnd *cmd,
84 u8 sk, u8 asc, u8 ascq);
79extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args, 85extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
80 unsigned int (*actor) (struct ata_scsi_args *args, 86 unsigned int (*actor) (struct ata_scsi_args *args,
81 u8 *rbuf, unsigned int buflen)); 87 u8 *rbuf, unsigned int buflen));
82 88
83static inline void ata_bad_scsiop(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
84{
85 ata_scsi_badcmd(cmd, done, 0x20, 0x00);
86}
87
88static inline void ata_bad_cdb(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
89{
90 ata_scsi_badcmd(cmd, done, 0x24, 0x00);
91}
92
93#endif /* __LIBATA_H__ */ 89#endif /* __LIBATA_H__ */
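The deleted ata_bad_cdb() wrapper supplied ASC/ASCQ 0x24/0x00, "Invalid field in CDB"; the ata_scsi_invalid_field() calls that replace it in libata-scsi.c presumably route the same triple through the new ata_scsi_set_sense() helper. A guess at its shape, for orientation only (the actual body is elsewhere in this patch):

    /* Sketch, not the verbatim patch: ILLEGAL REQUEST, ASC/ASCQ 0x24/0x00 */
    static void example_invalid_field(struct scsi_cmnd *cmd,
    				      void (*done)(struct scsi_cmnd *))
    {
    	ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
    	done(cmd);
    }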
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 1b3148e842af..c3f637395734 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -34,6 +34,7 @@
34#include <linux/delay.h> 34#include <linux/delay.h>
35#include <linux/uio.h> 35#include <linux/uio.h>
36#include <asm/uaccess.h> 36#include <asm/uaccess.h>
37#include <linux/fs.h>
37#include <linux/compat.h> 38#include <linux/compat.h>
38 39
39#include <scsi/scsi.h> 40#include <scsi/scsi.h>
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index ea76fe44585e..d457f5673476 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -35,7 +35,7 @@
35#include <asm/io.h> 35#include <asm/io.h>
36 36
37#define DRV_NAME "sata_mv" 37#define DRV_NAME "sata_mv"
38#define DRV_VERSION "0.12" 38#define DRV_VERSION "0.24"
39 39
40enum { 40enum {
41 /* BAR's are enumerated in terms of pci_resource_start() terms */ 41 /* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -55,31 +55,61 @@ enum {
55 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */ 55 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
56 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ, 56 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
57 57
58 MV_Q_CT = 32, 58 MV_USE_Q_DEPTH = ATA_DEF_QUEUE,
59 MV_CRQB_SZ = 32,
60 MV_CRPB_SZ = 8,
61 59
62 MV_DMA_BOUNDARY = 0xffffffffU, 60 MV_MAX_Q_DEPTH = 32,
63 SATAHC_MASK = (~(MV_SATAHC_REG_SZ - 1)), 61 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
62
63 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
64 * CRPB needs alignment on a 256B boundary. Size == 256B
65 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
66 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
67 */
68 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
69 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
70 MV_MAX_SG_CT = 176,
71 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
72 MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
73
74 /* Our DMA boundary is determined by an ePRD being unable to handle
75 * anything larger than 64KB
76 */
77 MV_DMA_BOUNDARY = 0xffffU,
64 78
65 MV_PORTS_PER_HC = 4, 79 MV_PORTS_PER_HC = 4,
66 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */ 80 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
67 MV_PORT_HC_SHIFT = 2, 81 MV_PORT_HC_SHIFT = 2,
68 /* == (port % MV_PORTS_PER_HC) to determine port from 0-7 port */ 82 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
69 MV_PORT_MASK = 3, 83 MV_PORT_MASK = 3,
70 84
71 /* Host Flags */ 85 /* Host Flags */
72 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ 86 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
73 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ 87 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
74 MV_FLAG_BDMA = (1 << 28), /* Basic DMA */ 88 MV_FLAG_GLBL_SFT_RST = (1 << 28), /* Global Soft Reset support */
89 MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
90 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO),
91 MV_6XXX_FLAGS = (MV_FLAG_IRQ_COALESCE |
92 MV_FLAG_GLBL_SFT_RST),
75 93
76 chip_504x = 0, 94 chip_504x = 0,
77 chip_508x = 1, 95 chip_508x = 1,
78 chip_604x = 2, 96 chip_604x = 2,
79 chip_608x = 3, 97 chip_608x = 3,
80 98
99 CRQB_FLAG_READ = (1 << 0),
100 CRQB_TAG_SHIFT = 1,
101 CRQB_CMD_ADDR_SHIFT = 8,
102 CRQB_CMD_CS = (0x2 << 11),
103 CRQB_CMD_LAST = (1 << 15),
104
105 CRPB_FLAG_STATUS_SHIFT = 8,
106
107 EPRD_FLAG_END_OF_TBL = (1 << 31),
108
81 /* PCI interface registers */ 109 /* PCI interface registers */
82 110
111 PCI_COMMAND_OFS = 0xc00,
112
83 PCI_MAIN_CMD_STS_OFS = 0xd30, 113 PCI_MAIN_CMD_STS_OFS = 0xd30,
84 STOP_PCI_MASTER = (1 << 2), 114 STOP_PCI_MASTER = (1 << 2),
85 PCI_MASTER_EMPTY = (1 << 3), 115 PCI_MASTER_EMPTY = (1 << 3),
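The sizes in the alignment comment above add up to exactly one page: 32 CRQBs of 32 bytes make 1KB, 32 CRPBs of 8 bytes make 256B, and 176 ePRD entries of 16 bytes make 2816B, so MV_PORT_PRIV_DMA_SZ is 1024 + 256 + 2816 = 4096 bytes. A compile-time restatement of that arithmetic (standalone sketch, names hypothetical):

    #define EX_CRQB_Q_SZ	(32 * 32)	/* 1024B, needs 1KB alignment  */
    #define EX_CRPB_Q_SZ	(8 * 32)	/* 256B, needs 256B alignment  */
    #define EX_SG_TBL_SZ	(16 * 176)	/* 2816B, 16B-aligned entries  */

    /* fails to compile unless the per-port DMA chunk is exactly 4KB */
    typedef char ex_dma_chunk_is_4k[(EX_CRQB_Q_SZ + EX_CRPB_Q_SZ +
    				     EX_SG_TBL_SZ) == 4096 ? 1 : -1];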
@@ -111,20 +141,13 @@ enum {
111 HC_CFG_OFS = 0, 141 HC_CFG_OFS = 0,
112 142
113 HC_IRQ_CAUSE_OFS = 0x14, 143 HC_IRQ_CAUSE_OFS = 0x14,
114 CRBP_DMA_DONE = (1 << 0), /* shift by port # */ 144 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
115 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */ 145 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
116 DEV_IRQ = (1 << 8), /* shift by port # */ 146 DEV_IRQ = (1 << 8), /* shift by port # */
117 147
118 /* Shadow block registers */ 148 /* Shadow block registers */
119 SHD_PIO_DATA_OFS = 0x100, 149 SHD_BLK_OFS = 0x100,
120 SHD_FEA_ERR_OFS = 0x104, 150 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
121 SHD_SECT_CNT_OFS = 0x108,
122 SHD_LBA_L_OFS = 0x10C,
123 SHD_LBA_M_OFS = 0x110,
124 SHD_LBA_H_OFS = 0x114,
125 SHD_DEV_HD_OFS = 0x118,
126 SHD_CMD_STA_OFS = 0x11C,
127 SHD_CTL_AST_OFS = 0x120,
128 151
129 /* SATA registers */ 152 /* SATA registers */
130 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */ 153 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
@@ -132,6 +155,11 @@ enum {
132 155
133 /* Port registers */ 156 /* Port registers */
134 EDMA_CFG_OFS = 0, 157 EDMA_CFG_OFS = 0,
158 EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */
159 EDMA_CFG_NCQ = (1 << 5),
160 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
161 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
162 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
135 163
136 EDMA_ERR_IRQ_CAUSE_OFS = 0x8, 164 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
137 EDMA_ERR_IRQ_MASK_OFS = 0xc, 165 EDMA_ERR_IRQ_MASK_OFS = 0xc,
@@ -161,33 +189,85 @@ enum {
161 EDMA_ERR_LNK_DATA_TX | 189 EDMA_ERR_LNK_DATA_TX |
162 EDMA_ERR_TRANS_PROTO), 190 EDMA_ERR_TRANS_PROTO),
163 191
192 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
193 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
194 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
195
196 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
197 EDMA_REQ_Q_PTR_SHIFT = 5,
198
199 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
200 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
201 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
202 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
203 EDMA_RSP_Q_PTR_SHIFT = 3,
204
164 EDMA_CMD_OFS = 0x28, 205 EDMA_CMD_OFS = 0x28,
165 EDMA_EN = (1 << 0), 206 EDMA_EN = (1 << 0),
166 EDMA_DS = (1 << 1), 207 EDMA_DS = (1 << 1),
167 ATA_RST = (1 << 2), 208 ATA_RST = (1 << 2),
168 209
169 /* BDMA is 6xxx part only */ 210 /* Host private flags (hp_flags) */
170 BDMA_CMD_OFS = 0x224, 211 MV_HP_FLAG_MSI = (1 << 0),
171 BDMA_START = (1 << 0),
172 212
173 MV_UNDEF = 0, 213 /* Port private flags (pp_flags) */
214 MV_PP_FLAG_EDMA_EN = (1 << 0),
215 MV_PP_FLAG_EDMA_DS_ACT = (1 << 1),
174}; 216};
175 217
176struct mv_port_priv { 218/* Command ReQuest Block: 32B */
219struct mv_crqb {
220 u32 sg_addr;
221 u32 sg_addr_hi;
222 u16 ctrl_flags;
223 u16 ata_cmd[11];
224};
177 225
226/* Command ResPonse Block: 8B */
227struct mv_crpb {
228 u16 id;
229 u16 flags;
230 u32 tmstmp;
178}; 231};
179 232
180struct mv_host_priv { 233/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
234struct mv_sg {
235 u32 addr;
236 u32 flags_size;
237 u32 addr_hi;
238 u32 reserved;
239};
181 240
241struct mv_port_priv {
242 struct mv_crqb *crqb;
243 dma_addr_t crqb_dma;
244 struct mv_crpb *crpb;
245 dma_addr_t crpb_dma;
246 struct mv_sg *sg_tbl;
247 dma_addr_t sg_tbl_dma;
248
249 unsigned req_producer; /* cp of req_in_ptr */
250 unsigned rsp_consumer; /* cp of rsp_out_ptr */
251 u32 pp_flags;
252};
253
254struct mv_host_priv {
255 u32 hp_flags;
182}; 256};
183 257
184static void mv_irq_clear(struct ata_port *ap); 258static void mv_irq_clear(struct ata_port *ap);
185static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in); 259static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
186static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); 260static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
261static u8 mv_check_err(struct ata_port *ap);
187static void mv_phy_reset(struct ata_port *ap); 262static void mv_phy_reset(struct ata_port *ap);
188static int mv_master_reset(void __iomem *mmio_base); 263static void mv_host_stop(struct ata_host_set *host_set);
264static int mv_port_start(struct ata_port *ap);
265static void mv_port_stop(struct ata_port *ap);
266static void mv_qc_prep(struct ata_queued_cmd *qc);
267static int mv_qc_issue(struct ata_queued_cmd *qc);
189static irqreturn_t mv_interrupt(int irq, void *dev_instance, 268static irqreturn_t mv_interrupt(int irq, void *dev_instance,
190 struct pt_regs *regs); 269 struct pt_regs *regs);
270static void mv_eng_timeout(struct ata_port *ap);
191static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 271static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
192 272
193static Scsi_Host_Template mv_sht = { 273static Scsi_Host_Template mv_sht = {
@@ -196,13 +276,13 @@ static Scsi_Host_Template mv_sht = {
196 .ioctl = ata_scsi_ioctl, 276 .ioctl = ata_scsi_ioctl,
197 .queuecommand = ata_scsi_queuecmd, 277 .queuecommand = ata_scsi_queuecmd,
198 .eh_strategy_handler = ata_scsi_error, 278 .eh_strategy_handler = ata_scsi_error,
199 .can_queue = ATA_DEF_QUEUE, 279 .can_queue = MV_USE_Q_DEPTH,
200 .this_id = ATA_SHT_THIS_ID, 280 .this_id = ATA_SHT_THIS_ID,
201 .sg_tablesize = MV_UNDEF, 281 .sg_tablesize = MV_MAX_SG_CT,
202 .max_sectors = ATA_MAX_SECTORS, 282 .max_sectors = ATA_MAX_SECTORS,
203 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 283 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
204 .emulated = ATA_SHT_EMULATED, 284 .emulated = ATA_SHT_EMULATED,
205 .use_clustering = MV_UNDEF, 285 .use_clustering = ATA_SHT_USE_CLUSTERING,
206 .proc_name = DRV_NAME, 286 .proc_name = DRV_NAME,
207 .dma_boundary = MV_DMA_BOUNDARY, 287 .dma_boundary = MV_DMA_BOUNDARY,
208 .slave_configure = ata_scsi_slave_config, 288 .slave_configure = ata_scsi_slave_config,
@@ -216,15 +296,16 @@ static struct ata_port_operations mv_ops = {
216 .tf_load = ata_tf_load, 296 .tf_load = ata_tf_load,
217 .tf_read = ata_tf_read, 297 .tf_read = ata_tf_read,
218 .check_status = ata_check_status, 298 .check_status = ata_check_status,
299 .check_err = mv_check_err,
219 .exec_command = ata_exec_command, 300 .exec_command = ata_exec_command,
220 .dev_select = ata_std_dev_select, 301 .dev_select = ata_std_dev_select,
221 302
222 .phy_reset = mv_phy_reset, 303 .phy_reset = mv_phy_reset,
223 304
224 .qc_prep = ata_qc_prep, 305 .qc_prep = mv_qc_prep,
225 .qc_issue = ata_qc_issue_prot, 306 .qc_issue = mv_qc_issue,
226 307
227 .eng_timeout = ata_eng_timeout, 308 .eng_timeout = mv_eng_timeout,
228 309
229 .irq_handler = mv_interrupt, 310 .irq_handler = mv_interrupt,
230 .irq_clear = mv_irq_clear, 311 .irq_clear = mv_irq_clear,
@@ -232,46 +313,39 @@ static struct ata_port_operations mv_ops = {
232 .scr_read = mv_scr_read, 313 .scr_read = mv_scr_read,
233 .scr_write = mv_scr_write, 314 .scr_write = mv_scr_write,
234 315
235 .port_start = ata_port_start, 316 .port_start = mv_port_start,
236 .port_stop = ata_port_stop, 317 .port_stop = mv_port_stop,
237 .host_stop = ata_host_stop, 318 .host_stop = mv_host_stop,
238}; 319};
239 320
240static struct ata_port_info mv_port_info[] = { 321static struct ata_port_info mv_port_info[] = {
241 { /* chip_504x */ 322 { /* chip_504x */
242 .sht = &mv_sht, 323 .sht = &mv_sht,
243 .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 324 .host_flags = MV_COMMON_FLAGS,
244 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO), 325 .pio_mask = 0x1f, /* pio0-4 */
245 .pio_mask = 0x1f, /* pio4-0 */ 326 .udma_mask = 0, /* 0x7f (udma0-6 disabled for now) */
246 .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */
247 .port_ops = &mv_ops, 327 .port_ops = &mv_ops,
248 }, 328 },
249 { /* chip_508x */ 329 { /* chip_508x */
250 .sht = &mv_sht, 330 .sht = &mv_sht,
251 .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 331 .host_flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
252 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | 332 .pio_mask = 0x1f, /* pio0-4 */
253 MV_FLAG_DUAL_HC), 333 .udma_mask = 0, /* 0x7f (udma0-6 disabled for now) */
254 .pio_mask = 0x1f, /* pio4-0 */
255 .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */
256 .port_ops = &mv_ops, 334 .port_ops = &mv_ops,
257 }, 335 },
258 { /* chip_604x */ 336 { /* chip_604x */
259 .sht = &mv_sht, 337 .sht = &mv_sht,
260 .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 338 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
261 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | 339 .pio_mask = 0x1f, /* pio0-4 */
262 MV_FLAG_IRQ_COALESCE | MV_FLAG_BDMA), 340 .udma_mask = 0x7f, /* udma0-6 */
263 .pio_mask = 0x1f, /* pio4-0 */
264 .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */
265 .port_ops = &mv_ops, 341 .port_ops = &mv_ops,
266 }, 342 },
267 { /* chip_608x */ 343 { /* chip_608x */
268 .sht = &mv_sht, 344 .sht = &mv_sht,
269 .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 345 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
270 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | 346 MV_FLAG_DUAL_HC),
271 MV_FLAG_IRQ_COALESCE | MV_FLAG_DUAL_HC | 347 .pio_mask = 0x1f, /* pio0-4 */
272 MV_FLAG_BDMA), 348 .udma_mask = 0x7f, /* udma0-6 */
273 .pio_mask = 0x1f, /* pio4-0 */
274 .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */
275 .port_ops = &mv_ops, 349 .port_ops = &mv_ops,
276 }, 350 },
277}; 351};
@@ -306,12 +380,6 @@ static inline void writelfl(unsigned long data, void __iomem *addr)
306 (void) readl(addr); /* flush to avoid PCI posted write */ 380 (void) readl(addr); /* flush to avoid PCI posted write */
307} 381}
308 382
309static inline void __iomem *mv_port_addr_to_hc_base(void __iomem *port_mmio)
310{
311 return ((void __iomem *)((unsigned long)port_mmio &
312 (unsigned long)SATAHC_MASK));
313}
314
315static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc) 383static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
316{ 384{
317 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ)); 385 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
@@ -329,24 +397,150 @@ static inline void __iomem *mv_ap_base(struct ata_port *ap)
329 return mv_port_base(ap->host_set->mmio_base, ap->port_no); 397 return mv_port_base(ap->host_set->mmio_base, ap->port_no);
330} 398}
331 399
332static inline int mv_get_hc_count(unsigned long flags) 400static inline int mv_get_hc_count(unsigned long hp_flags)
333{ 401{
334 return ((flags & MV_FLAG_DUAL_HC) ? 2 : 1); 402 return ((hp_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
335} 403}
336 404
337static inline int mv_is_edma_active(struct ata_port *ap) 405static void mv_irq_clear(struct ata_port *ap)
406{
407}
408
409/**
410 * mv_start_dma - Enable eDMA engine
411 * @base: port base address
412 * @pp: port private data
413 *
414 * Verify the local cache of the eDMA state is accurate with an
415 * assert.
416 *
417 * LOCKING:
418 * Inherited from caller.
419 */
420static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
421{
422 if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
423 writelfl(EDMA_EN, base + EDMA_CMD_OFS);
424 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
425 }
426 assert(EDMA_EN & readl(base + EDMA_CMD_OFS));
427}
428
429/**
430 * mv_stop_dma - Disable eDMA engine
431 * @ap: ATA channel to manipulate
432 *
433 * Verify the local cache of the eDMA state is accurate with an
434 * assert.
435 *
436 * LOCKING:
437 * Inherited from caller.
438 */
439static void mv_stop_dma(struct ata_port *ap)
338{ 440{
339 void __iomem *port_mmio = mv_ap_base(ap); 441 void __iomem *port_mmio = mv_ap_base(ap);
340 return (EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)); 442 struct mv_port_priv *pp = ap->private_data;
443 u32 reg;
444 int i;
445
446 if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
447 /* Disable EDMA if active. The disable bit auto clears.
448 */
449 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
450 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
451 } else {
452 assert(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
453 }
454
455 /* now properly wait for the eDMA to stop */
456 for (i = 1000; i > 0; i--) {
457 reg = readl(port_mmio + EDMA_CMD_OFS);
458 if (!(EDMA_EN & reg)) {
459 break;
460 }
461 udelay(100);
462 }
463
464 if (EDMA_EN & reg) {
465 printk(KERN_ERR "ata%u: Unable to stop eDMA\n", ap->id);
466 /* FIXME: Consider doing a reset here to recover */
467 }
341} 468}
342 469
343static inline int mv_port_bdma_capable(struct ata_port *ap) 470#ifdef ATA_DEBUG
471static void mv_dump_mem(void __iomem *start, unsigned bytes)
344{ 472{
345 return (ap->flags & MV_FLAG_BDMA); 473 int b, w;
474 for (b = 0; b < bytes; ) {
475 DPRINTK("%p: ", start + b);
476 for (w = 0; b < bytes && w < 4; w++) {
477 printk("%08x ",readl(start + b));
478 b += sizeof(u32);
479 }
480 printk("\n");
481 }
346} 482}
483#endif
347 484
348static void mv_irq_clear(struct ata_port *ap) 485static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
486{
487#ifdef ATA_DEBUG
488 int b, w;
489 u32 dw;
490 for (b = 0; b < bytes; ) {
491 DPRINTK("%02x: ", b);
492 for (w = 0; b < bytes && w < 4; w++) {
493 (void) pci_read_config_dword(pdev,b,&dw);
494 printk("%08x ",dw);
495 b += sizeof(u32);
496 }
497 printk("\n");
498 }
499#endif
500}
501static void mv_dump_all_regs(void __iomem *mmio_base, int port,
502 struct pci_dev *pdev)
349{ 503{
504#ifdef ATA_DEBUG
505 void __iomem *hc_base = mv_hc_base(mmio_base,
506 port >> MV_PORT_HC_SHIFT);
507 void __iomem *port_base;
508 int start_port, num_ports, p, start_hc, num_hcs, hc;
509
510 if (0 > port) {
511 start_hc = start_port = 0;
 512 		num_ports = 8; /* should be benign for 4-port devices */
513 num_hcs = 2;
514 } else {
515 start_hc = port >> MV_PORT_HC_SHIFT;
516 start_port = port;
517 num_ports = num_hcs = 1;
518 }
519 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
520 num_ports > 1 ? num_ports - 1 : start_port);
521
522 if (NULL != pdev) {
523 DPRINTK("PCI config space regs:\n");
524 mv_dump_pci_cfg(pdev, 0x68);
525 }
526 DPRINTK("PCI regs:\n");
527 mv_dump_mem(mmio_base+0xc00, 0x3c);
528 mv_dump_mem(mmio_base+0xd00, 0x34);
529 mv_dump_mem(mmio_base+0xf00, 0x4);
530 mv_dump_mem(mmio_base+0x1d00, 0x6c);
531 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
532 hc_base = mv_hc_base(mmio_base, port >> MV_PORT_HC_SHIFT);
533 DPRINTK("HC regs (HC %i):\n", hc);
534 mv_dump_mem(hc_base, 0x1c);
535 }
536 for (p = start_port; p < start_port + num_ports; p++) {
537 port_base = mv_port_base(mmio_base, p);
538 DPRINTK("EDMA regs (port %i):\n",p);
539 mv_dump_mem(port_base, 0x54);
540 DPRINTK("SATA regs (port %i):\n",p);
541 mv_dump_mem(port_base+0x300, 0x60);
542 }
543#endif
350} 544}
351 545
352static unsigned int mv_scr_offset(unsigned int sc_reg_in) 546static unsigned int mv_scr_offset(unsigned int sc_reg_in)
@@ -389,30 +583,37 @@ static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
389 } 583 }
390} 584}
391 585
392static int mv_master_reset(void __iomem *mmio_base) 586/**
587 * mv_global_soft_reset - Perform the 6xxx global soft reset
588 * @mmio_base: base address of the HBA
589 *
590 * This routine only applies to 6xxx parts.
591 *
592 * LOCKING:
593 * Inherited from caller.
594 */
595static int mv_global_soft_reset(void __iomem *mmio_base)
393{ 596{
394 void __iomem *reg = mmio_base + PCI_MAIN_CMD_STS_OFS; 597 void __iomem *reg = mmio_base + PCI_MAIN_CMD_STS_OFS;
395 int i, rc = 0; 598 int i, rc = 0;
396 u32 t; 599 u32 t;
397 600
398 VPRINTK("ENTER\n");
399
400 /* Following procedure defined in PCI "main command and status 601 /* Following procedure defined in PCI "main command and status
401 * register" table. 602 * register" table.
402 */ 603 */
403 t = readl(reg); 604 t = readl(reg);
404 writel(t | STOP_PCI_MASTER, reg); 605 writel(t | STOP_PCI_MASTER, reg);
405 606
406 for (i = 0; i < 100; i++) { 607 for (i = 0; i < 1000; i++) {
407 msleep(10); 608 udelay(1);
408 t = readl(reg); 609 t = readl(reg);
409 if (PCI_MASTER_EMPTY & t) { 610 if (PCI_MASTER_EMPTY & t) {
410 break; 611 break;
411 } 612 }
412 } 613 }
413 if (!(PCI_MASTER_EMPTY & t)) { 614 if (!(PCI_MASTER_EMPTY & t)) {
414 printk(KERN_ERR DRV_NAME "PCI master won't flush\n"); 615 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
415 rc = 1; /* broken HW? */ 616 rc = 1;
416 goto done; 617 goto done;
417 } 618 }
418 619
@@ -425,39 +626,398 @@ static int mv_master_reset(void __iomem *mmio_base)
425 } while (!(GLOB_SFT_RST & t) && (i-- > 0)); 626 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
426 627
427 if (!(GLOB_SFT_RST & t)) { 628 if (!(GLOB_SFT_RST & t)) {
428 printk(KERN_ERR DRV_NAME "can't set global reset\n"); 629 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
429 rc = 1; /* broken HW? */ 630 rc = 1;
430 goto done; 631 goto done;
431 } 632 }
432 633
433 /* clear reset */ 634 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
434 i = 5; 635 i = 5;
435 do { 636 do {
436 writel(t & ~GLOB_SFT_RST, reg); 637 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
437 t = readl(reg); 638 t = readl(reg);
438 udelay(1); 639 udelay(1);
439 } while ((GLOB_SFT_RST & t) && (i-- > 0)); 640 } while ((GLOB_SFT_RST & t) && (i-- > 0));
440 641
441 if (GLOB_SFT_RST & t) { 642 if (GLOB_SFT_RST & t) {
442 printk(KERN_ERR DRV_NAME "can't clear global reset\n"); 643 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
443 rc = 1; /* broken HW? */ 644 rc = 1;
444 } 645 }
445 646done:
446 done:
447 VPRINTK("EXIT, rc = %i\n", rc);
448 return rc; 647 return rc;
449} 648}
450 649
451static void mv_err_intr(struct ata_port *ap) 650/**
651 * mv_host_stop - Host specific cleanup/stop routine.
652 * @host_set: host data structure
653 *
654 * Disable ints, cleanup host memory, call general purpose
655 * host_stop.
656 *
657 * LOCKING:
658 * Inherited from caller.
659 */
660static void mv_host_stop(struct ata_host_set *host_set)
452{ 661{
453 void __iomem *port_mmio; 662 struct mv_host_priv *hpriv = host_set->private_data;
454 u32 edma_err_cause, serr = 0; 663 struct pci_dev *pdev = to_pci_dev(host_set->dev);
664
665 if (hpriv->hp_flags & MV_HP_FLAG_MSI) {
666 pci_disable_msi(pdev);
667 } else {
668 pci_intx(pdev, 0);
669 }
670 kfree(hpriv);
671 ata_host_stop(host_set);
672}
673
674/**
675 * mv_port_start - Port specific init/start routine.
676 * @ap: ATA channel to manipulate
677 *
678 * Allocate and point to DMA memory, init port private memory,
679 * zero indices.
680 *
681 * LOCKING:
682 * Inherited from caller.
683 */
684static int mv_port_start(struct ata_port *ap)
685{
686 struct device *dev = ap->host_set->dev;
687 struct mv_port_priv *pp;
688 void __iomem *port_mmio = mv_ap_base(ap);
689 void *mem;
690 dma_addr_t mem_dma;
691
692 pp = kmalloc(sizeof(*pp), GFP_KERNEL);
693 if (!pp) {
694 return -ENOMEM;
695 }
696 memset(pp, 0, sizeof(*pp));
697
698 mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
699 GFP_KERNEL);
700 if (!mem) {
701 kfree(pp);
702 return -ENOMEM;
703 }
704 memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
705
706 /* First item in chunk of DMA memory:
707 * 32-slot command request table (CRQB), 32 bytes each in size
708 */
709 pp->crqb = mem;
710 pp->crqb_dma = mem_dma;
711 mem += MV_CRQB_Q_SZ;
712 mem_dma += MV_CRQB_Q_SZ;
713
714 /* Second item:
715 * 32-slot command response table (CRPB), 8 bytes each in size
716 */
717 pp->crpb = mem;
718 pp->crpb_dma = mem_dma;
719 mem += MV_CRPB_Q_SZ;
720 mem_dma += MV_CRPB_Q_SZ;
721
722 /* Third item:
723 * Table of scatter-gather descriptors (ePRD), 16 bytes each
724 */
725 pp->sg_tbl = mem;
726 pp->sg_tbl_dma = mem_dma;
727
728 writelfl(EDMA_CFG_Q_DEPTH | EDMA_CFG_RD_BRST_EXT |
729 EDMA_CFG_WR_BUFF_LEN, port_mmio + EDMA_CFG_OFS);
730
731 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
732 writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
733 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
734
735 writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
736 writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
737
738 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
739 writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
740 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
741
742 pp->req_producer = pp->rsp_consumer = 0;
743
744 /* Don't turn on EDMA here...do it before DMA commands only. Else
745 * we'll be unable to send non-data, PIO, etc due to restricted access
746 * to shadow regs.
747 */
748 ap->private_data = pp;
749 return 0;
750}
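The (x >> 16) >> 16 construct above is the portable way to pick up the high 32 bits of a dma_addr_t: a direct >> 32 is undefined behavior in C when the type is only 32 bits wide, while two 16-bit shifts are always defined and simply yield zero on 32-bit configurations. A standalone demonstration (hypothetical typedef):

    #include <stdio.h>

    typedef unsigned long ex_dma_addr_t;	/* may be 32 or 64 bits */

    int main(void)
    {
    	ex_dma_addr_t dma = 0x12345678UL;

    	/* "dma >> 32" would be undefined if ex_dma_addr_t were 32 bits */
    	printf("high 32 bits: 0x%lx\n",
    	       (unsigned long)((dma >> 16) >> 16));	/* prints 0x0 */
    	return 0;
    }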
751
752/**
753 * mv_port_stop - Port specific cleanup/stop routine.
754 * @ap: ATA channel to manipulate
755 *
756 * Stop DMA, cleanup port memory.
757 *
758 * LOCKING:
759 * This routine uses the host_set lock to protect the DMA stop.
760 */
761static void mv_port_stop(struct ata_port *ap)
762{
763 struct device *dev = ap->host_set->dev;
764 struct mv_port_priv *pp = ap->private_data;
765 unsigned long flags;
766
767 spin_lock_irqsave(&ap->host_set->lock, flags);
768 mv_stop_dma(ap);
769 spin_unlock_irqrestore(&ap->host_set->lock, flags);
770
771 ap->private_data = NULL;
 772 	dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crqb, pp->crqb_dma);
773 kfree(pp);
774}
775
776/**
777 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
778 * @qc: queued command whose SG list to source from
779 *
780 * Populate the SG list and mark the last entry.
781 *
782 * LOCKING:
783 * Inherited from caller.
784 */
785static void mv_fill_sg(struct ata_queued_cmd *qc)
786{
787 struct mv_port_priv *pp = qc->ap->private_data;
788 unsigned int i;
789
790 for (i = 0; i < qc->n_elem; i++) {
791 u32 sg_len;
792 dma_addr_t addr;
793
794 addr = sg_dma_address(&qc->sg[i]);
795 sg_len = sg_dma_len(&qc->sg[i]);
796
797 pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
798 pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
799 assert(0 == (sg_len & ~MV_DMA_BOUNDARY));
800 pp->sg_tbl[i].flags_size = cpu_to_le32(sg_len);
801 }
802 if (0 < qc->n_elem) {
803 pp->sg_tbl[qc->n_elem - 1].flags_size |= EPRD_FLAG_END_OF_TBL;
804 }
805}
806
807static inline unsigned mv_inc_q_index(unsigned *index)
808{
809 *index = (*index + 1) & MV_MAX_Q_DEPTH_MASK;
810 return *index;
811}
812
813static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last)
814{
815 *cmdw = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
816 (last ? CRQB_CMD_LAST : 0);
817}
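Two helpers above do the ring bookkeeping and the command-word packing. The increment wraps through the depth mask, so after slot 31 the index returns to 0; each packed word carries an 8-bit register value, the register address at bit 8, a fixed CS field at bits 12:11, and optionally the last-word flag at bit 15. Packing READ DMA (0xc8) into the command/status register as the final word gives 0x97c8, taking its address as 7, which matches the old SHD_CMD_STA_OFS spacing (0x11c = 0x100 + 4 * 7). A standalone check:

    #include <assert.h>

    #define EX_CMD_ADDR_SHIFT	8
    #define EX_CMD_CS		(0x2 << 11)
    #define EX_CMD_LAST		(1 << 15)

    int main(void)
    {
    	unsigned idx = 31;

    	idx = (idx + 1) & (32 - 1);	/* ring wraparound: 31 -> 0 */
    	assert(idx == 0);

    	/* READ DMA (0xc8) to register 7, flagged as the last word */
    	unsigned short cmdw = 0xc8 | (7 << EX_CMD_ADDR_SHIFT) |
    			      EX_CMD_CS | EX_CMD_LAST;
    	assert(cmdw == 0x97c8);
    	return 0;
    }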
455 818
456 /* bug here b/c we got an err int on a port we don't know about, 819/**
457 * so there's no way to clear it 820 * mv_qc_prep - Host specific command preparation.
821 * @qc: queued command to prepare
822 *
823 * This routine simply redirects to the general purpose routine
824 * if command is not DMA. Else, it handles prep of the CRQB
825 * (command request block), does some sanity checking, and calls
826 * the SG load routine.
827 *
828 * LOCKING:
829 * Inherited from caller.
830 */
831static void mv_qc_prep(struct ata_queued_cmd *qc)
832{
833 struct ata_port *ap = qc->ap;
834 struct mv_port_priv *pp = ap->private_data;
835 u16 *cw;
836 struct ata_taskfile *tf;
837 u16 flags = 0;
838
839 if (ATA_PROT_DMA != qc->tf.protocol) {
840 return;
841 }
842
843 /* the req producer index should be the same as we remember it */
844 assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
845 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
846 pp->req_producer);
847
848 /* Fill in command request block
458 */ 849 */
459 BUG_ON(NULL == ap); 850 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
460 port_mmio = mv_ap_base(ap); 851 flags |= CRQB_FLAG_READ;
852 }
853 assert(MV_MAX_Q_DEPTH > qc->tag);
854 flags |= qc->tag << CRQB_TAG_SHIFT;
855
856 pp->crqb[pp->req_producer].sg_addr =
857 cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
858 pp->crqb[pp->req_producer].sg_addr_hi =
859 cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
860 pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags);
861
862 cw = &pp->crqb[pp->req_producer].ata_cmd[0];
863 tf = &qc->tf;
864
 865 	/* Sadly, the CRQB cannot accommodate all registers--there are
 866 	 * only 11 command words...so we must pick and choose required
867 * registers based on the command. So, we drop feature and
868 * hob_feature for [RW] DMA commands, but they are needed for
869 * NCQ. NCQ will drop hob_nsect.
870 */
871 switch (tf->command) {
872 case ATA_CMD_READ:
873 case ATA_CMD_READ_EXT:
874 case ATA_CMD_WRITE:
875 case ATA_CMD_WRITE_EXT:
876 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
877 break;
878#ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */
879 case ATA_CMD_FPDMA_READ:
880 case ATA_CMD_FPDMA_WRITE:
881 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
882 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
883 break;
884#endif /* FIXME: remove this line when NCQ added */
885 default:
886 /* The only other commands EDMA supports in non-queued and
887 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
888 * of which are defined/used by Linux. If we get here, this
889 * driver needs work.
890 *
891 * FIXME: modify libata to give qc_prep a return value and
892 * return error here.
893 */
894 BUG_ON(tf->command);
895 break;
896 }
897 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
898 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
899 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
900 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
901 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
902 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
903 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
904 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
905 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
906
907 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) {
908 return;
909 }
910 mv_fill_sg(qc);
911}
912
913/**
914 * mv_qc_issue - Initiate a command to the host
915 * @qc: queued command to start
916 *
917 * This routine simply redirects to the general purpose routine
918 * if command is not DMA. Else, it sanity checks our local
919 * caches of the request producer/consumer indices then enables
920 * DMA and bumps the request producer index.
921 *
922 * LOCKING:
923 * Inherited from caller.
924 */
925static int mv_qc_issue(struct ata_queued_cmd *qc)
926{
927 void __iomem *port_mmio = mv_ap_base(qc->ap);
928 struct mv_port_priv *pp = qc->ap->private_data;
929 u32 in_ptr;
930
931 if (ATA_PROT_DMA != qc->tf.protocol) {
932 /* We're about to send a non-EDMA capable command to the
933 * port. Turn off EDMA so there won't be problems accessing
 934 		 * the shadow block and other registers.
935 */
936 mv_stop_dma(qc->ap);
937 return ata_qc_issue_prot(qc);
938 }
939
940 in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
941
942 /* the req producer index should be the same as we remember it */
943 assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
944 pp->req_producer);
945 /* until we do queuing, the queue should be empty at this point */
946 assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
947 ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
948 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
949
950 mv_inc_q_index(&pp->req_producer); /* now incr producer index */
951
952 mv_start_dma(port_mmio, pp);
953
954 /* and write the request in pointer to kick the EDMA to life */
955 in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
956 in_ptr |= pp->req_producer << EDMA_REQ_Q_PTR_SHIFT;
957 writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
958
959 return 0;
960}
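The pointer registers written above encode both the queue base and the running index in one 32-bit value: EDMA_REQ_Q_BASE_LO_MASK keeps the base address bits (bit 10 and up for the request queue), and the 5-bit producer index is shifted into bits 9:5 via EDMA_REQ_Q_PTR_SHIFT. A standalone check of that encoding, with a made-up base address:

    #include <assert.h>

    #define EX_Q_BASE_LO_MASK	0xfffffc00U
    #define EX_Q_PTR_SHIFT	5

    int main(void)
    {
    	unsigned int in_ptr = 0x12345c60U;	/* register as read back */
    	unsigned int producer = 3;		/* our cached index */

    	in_ptr &= EX_Q_BASE_LO_MASK;		/* keep base, drop old index */
    	in_ptr |= producer << EX_Q_PTR_SHIFT;	/* splice in new index */
    	assert((in_ptr & 0x3e0) == (3u << 5));	/* index lives in bits 9:5 */
    	assert((in_ptr & EX_Q_BASE_LO_MASK) == 0x12345c00U);
    	return 0;
    }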
961
962/**
963 * mv_get_crpb_status - get status from most recently completed cmd
964 * @ap: ATA channel to manipulate
965 *
966 * This routine is for use when the port is in DMA mode, when it
967 * will be using the CRPB (command response block) method of
968 * returning command completion information. We assert indices
969 * are good, grab status, and bump the response consumer index to
970 * prove that we're up to date.
971 *
972 * LOCKING:
973 * Inherited from caller.
974 */
975static u8 mv_get_crpb_status(struct ata_port *ap)
976{
977 void __iomem *port_mmio = mv_ap_base(ap);
978 struct mv_port_priv *pp = ap->private_data;
979 u32 out_ptr;
980
981 out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
982
983 /* the response consumer index should be the same as we remember it */
984 assert(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
985 pp->rsp_consumer);
986
987 /* increment our consumer index... */
988 pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);
989
990 /* and, until we do NCQ, there should only be 1 CRPB waiting */
991 assert(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
992 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
993 pp->rsp_consumer);
994
995 /* write out our inc'd consumer index so EDMA knows we're caught up */
996 out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
997 out_ptr |= pp->rsp_consumer << EDMA_RSP_Q_PTR_SHIFT;
998 writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
999
1000 /* Return ATA status register for completed CRPB */
1001 return (pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT);
1002}
1003
1004/**
1005 * mv_err_intr - Handle error interrupts on the port
1006 * @ap: ATA channel to manipulate
1007 *
1008 * In most cases, just clear the interrupt and move on. However,
1009 * some cases require an eDMA reset, which is done right before
1010 * the COMRESET in mv_phy_reset(). The SERR case requires a
1011 * clear of pending errors in the SATA SERROR register. Finally,
1012 * if the port disabled DMA, update our cached copy to match.
1013 *
1014 * LOCKING:
1015 * Inherited from caller.
1016 */
1017static void mv_err_intr(struct ata_port *ap)
1018{
1019 void __iomem *port_mmio = mv_ap_base(ap);
1020 u32 edma_err_cause, serr = 0;
461 1021
462 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1022 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
463 1023
@@ -465,8 +1025,12 @@ static void mv_err_intr(struct ata_port *ap)
465 serr = scr_read(ap, SCR_ERROR); 1025 serr = scr_read(ap, SCR_ERROR);
466 scr_write_flush(ap, SCR_ERROR, serr); 1026 scr_write_flush(ap, SCR_ERROR, serr);
467 } 1027 }
468 DPRINTK("port %u error; EDMA err cause: 0x%08x SERR: 0x%08x\n", 1028 if (EDMA_ERR_SELF_DIS & edma_err_cause) {
469 ap->port_no, edma_err_cause, serr); 1029 struct mv_port_priv *pp = ap->private_data;
1030 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1031 }
 1032 	DPRINTK("ata%u: port error; EDMA err cause: 0x%08x "
1033 "SERR: 0x%08x\n", ap->id, edma_err_cause, serr);
470 1034
471 /* Clear EDMA now that SERR cleanup done */ 1035 /* Clear EDMA now that SERR cleanup done */
472 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1036 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
@@ -477,7 +1041,21 @@ static void mv_err_intr(struct ata_port *ap)
477 } 1041 }
478} 1042}
479 1043
480/* Handle any outstanding interrupts in a single SATAHC 1044/**
1045 * mv_host_intr - Handle all interrupts on the given host controller
1046 * @host_set: host specific structure
1047 * @relevant: port error bits relevant to this host controller
1048 * @hc: which host controller we're to look at
1049 *
1050 * Read then write clear the HC interrupt status then walk each
1051 * port connected to the HC and see if it needs servicing. Port
1052 * success ints are reported in the HC interrupt status reg, the
1053 * port error ints are reported in the higher level main
1054 * interrupt status register and thus are passed in via the
1055 * 'relevant' argument.
1056 *
1057 * LOCKING:
1058 * Inherited from caller.
481 */ 1059 */
482static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, 1060static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
483 unsigned int hc) 1061 unsigned int hc)
@@ -487,8 +1065,8 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
487 struct ata_port *ap; 1065 struct ata_port *ap;
488 struct ata_queued_cmd *qc; 1066 struct ata_queued_cmd *qc;
489 u32 hc_irq_cause; 1067 u32 hc_irq_cause;
490 int shift, port, port0, hard_port; 1068 int shift, port, port0, hard_port, handled;
491 u8 ata_status; 1069 u8 ata_status = 0;
492 1070
493 if (hc == 0) { 1071 if (hc == 0) {
494 port0 = 0; 1072 port0 = 0;
@@ -499,7 +1077,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
499 /* we'll need the HC success int register in most cases */ 1077 /* we'll need the HC success int register in most cases */
500 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS); 1078 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
501 if (hc_irq_cause) { 1079 if (hc_irq_cause) {
502 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS); 1080 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
503 } 1081 }
504 1082
505 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n", 1083 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
@@ -508,35 +1086,38 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
508 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) { 1086 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
509 ap = host_set->ports[port]; 1087 ap = host_set->ports[port];
510 hard_port = port & MV_PORT_MASK; /* range 0-3 */ 1088 hard_port = port & MV_PORT_MASK; /* range 0-3 */
511 ata_status = 0xffU; 1089 handled = 0; /* ensure ata_status is set if handled++ */
512 1090
513 if (((CRBP_DMA_DONE | DEV_IRQ) << hard_port) & hc_irq_cause) { 1091 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
514 BUG_ON(NULL == ap); 1092 /* new CRPB on the queue; just one at a time until NCQ
515 /* rcv'd new resp, basic DMA complete, or ATA IRQ */ 1093 */
516 /* This is needed to clear the ATA INTRQ. 1094 ata_status = mv_get_crpb_status(ap);
517 * FIXME: don't read the status reg in EDMA mode! 1095 handled++;
1096 } else if ((DEV_IRQ << hard_port) & hc_irq_cause) {
1097 /* received ATA IRQ; read the status reg to clear INTRQ
518 */ 1098 */
519 ata_status = readb((void __iomem *) 1099 ata_status = readb((void __iomem *)
520 ap->ioaddr.status_addr); 1100 ap->ioaddr.status_addr);
1101 handled++;
521 } 1102 }
522 1103
523 shift = port * 2; 1104 shift = port << 1; /* (port * 2) */
524 if (port >= MV_PORTS_PER_HC) { 1105 if (port >= MV_PORTS_PER_HC) {
525 shift++; /* skip bit 8 in the HC Main IRQ reg */ 1106 shift++; /* skip bit 8 in the HC Main IRQ reg */
526 } 1107 }
527 if ((PORT0_ERR << shift) & relevant) { 1108 if ((PORT0_ERR << shift) & relevant) {
528 mv_err_intr(ap); 1109 mv_err_intr(ap);
529 /* FIXME: smart to OR in ATA_ERR? */ 1110 /* OR in ATA_ERR to ensure libata knows we took one */
530 ata_status = readb((void __iomem *) 1111 ata_status = readb((void __iomem *)
531 ap->ioaddr.status_addr) | ATA_ERR; 1112 ap->ioaddr.status_addr) | ATA_ERR;
1113 handled++;
532 } 1114 }
533 1115
534 if (ap) { 1116 if (handled && ap) {
535 qc = ata_qc_from_tag(ap, ap->active_tag); 1117 qc = ata_qc_from_tag(ap, ap->active_tag);
536 if (NULL != qc) { 1118 if (NULL != qc) {
537 VPRINTK("port %u IRQ found for qc, " 1119 VPRINTK("port %u IRQ found for qc, "
538 "ata_status 0x%x\n", port,ata_status); 1120 "ata_status 0x%x\n", port,ata_status);
539 BUG_ON(0xffU == ata_status);
540 /* mark qc status appropriately */ 1121 /* mark qc status appropriately */
541 ata_qc_complete(qc, ata_status); 1122 ata_qc_complete(qc, ata_status);
542 } 1123 }
@@ -545,17 +1126,30 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
545 VPRINTK("EXIT\n"); 1126 VPRINTK("EXIT\n");
546} 1127}
547 1128
1129/**
 1130 * mv_interrupt - Main interrupt event handler
1131 * @irq: unused
1132 * @dev_instance: private data; in this case the host structure
1133 * @regs: unused
1134 *
 1135 * Read the read-only register to determine if any host
 1136 * controllers have pending interrupts. If so, call the lower-level
 1137 * routine to handle them. Also check for PCI errors, which are
 1138 * reported only here.
1139 *
1140 * LOCKING:
1141 * This routine holds the host_set lock while processing pending
1142 * interrupts.
1143 */
548static irqreturn_t mv_interrupt(int irq, void *dev_instance, 1144static irqreturn_t mv_interrupt(int irq, void *dev_instance,
549 struct pt_regs *regs) 1145 struct pt_regs *regs)
550{ 1146{
551 struct ata_host_set *host_set = dev_instance; 1147 struct ata_host_set *host_set = dev_instance;
552 unsigned int hc, handled = 0, n_hcs; 1148 unsigned int hc, handled = 0, n_hcs;
553 void __iomem *mmio; 1149 void __iomem *mmio = host_set->mmio_base;
554 u32 irq_stat; 1150 u32 irq_stat;
555 1151
556 mmio = host_set->mmio_base;
557 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS); 1152 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
558 n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
559 1153
560 /* check the cases where we either have nothing pending or have read 1154 /* check the cases where we either have nothing pending or have read
561 * a bogus register value which can indicate HW removal or PCI fault 1155 * a bogus register value which can indicate HW removal or PCI fault
@@ -564,64 +1158,105 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance,
564 return IRQ_NONE; 1158 return IRQ_NONE;
565 } 1159 }
566 1160
1161 n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
567 spin_lock(&host_set->lock); 1162 spin_lock(&host_set->lock);
568 1163
569 for (hc = 0; hc < n_hcs; hc++) { 1164 for (hc = 0; hc < n_hcs; hc++) {
570 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT)); 1165 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
571 if (relevant) { 1166 if (relevant) {
572 mv_host_intr(host_set, relevant, hc); 1167 mv_host_intr(host_set, relevant, hc);
573 handled = 1; 1168 handled++;
574 } 1169 }
575 } 1170 }
576 if (PCI_ERR & irq_stat) { 1171 if (PCI_ERR & irq_stat) {
577 /* FIXME: these are all masked by default, but still need 1172 printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
578 * to recover from them properly. 1173 readl(mmio + PCI_IRQ_CAUSE_OFS));
579 */
580 }
581 1174
1175 DPRINTK("All regs @ PCI error\n");
1176 mv_dump_all_regs(mmio, -1, to_pci_dev(host_set->dev));
1177
1178 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1179 handled++;
1180 }
582 spin_unlock(&host_set->lock); 1181 spin_unlock(&host_set->lock);
583 1182
584 return IRQ_RETVAL(handled); 1183 return IRQ_RETVAL(handled);
585} 1184}
586 1185
1186/**
1187 * mv_check_err - Return the error shadow register to caller.
1188 * @ap: ATA channel to manipulate
1189 *
1190 * Marvell requires DMA to be stopped before accessing shadow
1191 * registers. So we do that, then return the needed register.
1192 *
1193 * LOCKING:
1194 * Inherited from caller. FIXME: protect mv_stop_dma with lock?
1195 */
1196static u8 mv_check_err(struct ata_port *ap)
1197{
1198 mv_stop_dma(ap); /* can't read shadow regs if DMA on */
1199 return readb((void __iomem *) ap->ioaddr.error_addr);
1200}
1201
1202/**
1203 * mv_phy_reset - Perform eDMA reset followed by COMRESET
1204 * @ap: ATA channel to manipulate
1205 *
1206 * Part of this is taken from __sata_phy_reset and modified to
1207 * not sleep since this routine gets called from interrupt level.
1208 *
1209 * LOCKING:
 1210 * Inherited from caller. This is coded to be safe to call at
1211 * interrupt level, i.e. it does not sleep.
1212 */
587static void mv_phy_reset(struct ata_port *ap) 1213static void mv_phy_reset(struct ata_port *ap)
588{ 1214{
589 void __iomem *port_mmio = mv_ap_base(ap); 1215 void __iomem *port_mmio = mv_ap_base(ap);
590 struct ata_taskfile tf; 1216 struct ata_taskfile tf;
591 struct ata_device *dev = &ap->device[0]; 1217 struct ata_device *dev = &ap->device[0];
592 u32 edma = 0, bdma; 1218 unsigned long timeout;
593 1219
594 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio); 1220 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
595 1221
596 edma = readl(port_mmio + EDMA_CMD_OFS); 1222 mv_stop_dma(ap);
597 if (EDMA_EN & edma) {
598 /* disable EDMA if active */
599 edma &= ~EDMA_EN;
600 writelfl(edma | EDMA_DS, port_mmio + EDMA_CMD_OFS);
601 udelay(1);
602 } else if (mv_port_bdma_capable(ap) &&
603 (bdma = readl(port_mmio + BDMA_CMD_OFS)) & BDMA_START) {
604 /* disable BDMA if active */
605 writelfl(bdma & ~BDMA_START, port_mmio + BDMA_CMD_OFS);
606 }
607 1223
608 writelfl(edma | ATA_RST, port_mmio + EDMA_CMD_OFS); 1224 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
609 udelay(25); /* allow reset propagation */ 1225 udelay(25); /* allow reset propagation */
610 1226
611 /* Spec never mentions clearing the bit. Marvell's driver does 1227 /* Spec never mentions clearing the bit. Marvell's driver does
612 * clear the bit, however. 1228 * clear the bit, however.
613 */ 1229 */
614 writelfl(edma & ~ATA_RST, port_mmio + EDMA_CMD_OFS); 1230 writelfl(0, port_mmio + EDMA_CMD_OFS);
615 1231
616 VPRINTK("Done. Now calling __sata_phy_reset()\n"); 1232 VPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
1233 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1234 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
617 1235
618 /* proceed to init communications via the scr_control reg */ 1236 /* proceed to init communications via the scr_control reg */
619 __sata_phy_reset(ap); 1237 scr_write_flush(ap, SCR_CONTROL, 0x301);
1238 mdelay(1);
1239 scr_write_flush(ap, SCR_CONTROL, 0x300);
1240 timeout = jiffies + (HZ * 1);
1241 do {
1242 mdelay(10);
1243 if ((scr_read(ap, SCR_STATUS) & 0xf) != 1)
1244 break;
1245 } while (time_before(jiffies, timeout));
620 1246
621 if (ap->flags & ATA_FLAG_PORT_DISABLED) { 1247 VPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
622 VPRINTK("Port disabled pre-sig. Exiting.\n"); 1248 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1249 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
1250
1251 if (sata_dev_present(ap)) {
1252 ata_port_probe(ap);
1253 } else {
1254 printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
1255 ap->id, scr_read(ap, SCR_STATUS));
1256 ata_port_disable(ap);
623 return; 1257 return;
624 } 1258 }
1259 ap->cbl = ATA_CBL_SATA;
625 1260
626 tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr); 1261 tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
627 tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr); 1262 tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
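The SControl writes in the reset sequence above follow the usual SATA phy-wake recipe. In SControl, DET occupies bits 3:0 and IPM bits 11:8, so 0x301 requests a COMRESET (DET = 1) with the partial/slumber power states disabled (IPM = 3), and 0x300 releases the reset while keeping IPM = 3; the polling loop then watches the SStatus DET field until it leaves the transient value 1 (presence detected, no communication yet). Decoding the two magic numbers (standalone demo):

    #include <assert.h>

    #define EX_SCTL_DET(v)	((v) & 0xf)		/* detection control  */
    #define EX_SCTL_IPM(v)	(((v) >> 8) & 0xf)	/* power-mgmt control */

    int main(void)
    {
    	assert(EX_SCTL_DET(0x301) == 1 && EX_SCTL_IPM(0x301) == 3);
    	assert(EX_SCTL_DET(0x300) == 0 && EX_SCTL_IPM(0x300) == 3);
    	return 0;
    }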
@@ -636,37 +1271,118 @@ static void mv_phy_reset(struct ata_port *ap)
636 VPRINTK("EXIT\n"); 1271 VPRINTK("EXIT\n");
637} 1272}
638 1273
639static void mv_port_init(struct ata_ioports *port, unsigned long base) 1274/**
1275 * mv_eng_timeout - Routine called by libata when SCSI times out I/O
1276 * @ap: ATA channel to manipulate
1277 *
1278 * Intent is to clear all pending error conditions, reset the
1279 * chip/bus, fail the command, and move on.
1280 *
1281 * LOCKING:
1282 * This routine holds the host_set lock while failing the command.
1283 */
1284static void mv_eng_timeout(struct ata_port *ap)
1285{
1286 struct ata_queued_cmd *qc;
1287 unsigned long flags;
1288
1289 printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id);
1290 DPRINTK("All regs @ start of eng_timeout\n");
1291 mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no,
1292 to_pci_dev(ap->host_set->dev));
1293
1294 qc = ata_qc_from_tag(ap, ap->active_tag);
 1295 	if (qc) printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
1296 ap->host_set->mmio_base, ap, qc, qc->scsicmd,
1297 &qc->scsicmd->cmnd);
1298
1299 mv_err_intr(ap);
1300 mv_phy_reset(ap);
1301
1302 if (!qc) {
1303 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
1304 ap->id);
1305 } else {
1306 /* hack alert! We cannot use the supplied completion
1307 * function from inside the ->eh_strategy_handler() thread.
1308 * libata is the only user of ->eh_strategy_handler() in
1309 * any kernel, so the default scsi_done() assumes it is
1310 * not being called from the SCSI EH.
1311 */
1312 spin_lock_irqsave(&ap->host_set->lock, flags);
1313 qc->scsidone = scsi_finish_command;
1314 ata_qc_complete(qc, ATA_ERR);
1315 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1316 }
1317}
1318
1319/**
1320 * mv_port_init - Perform some early initialization on a single port.
1321 * @port: libata data structure storing shadow register addresses
1322 * @port_mmio: base address of the port
1323 *
1324 * Initialize shadow register mmio addresses, clear outstanding
1325 * interrupts on the port, and unmask interrupts for the future
1326 * start of the port.
1327 *
1328 * LOCKING:
1329 * Inherited from caller.
1330 */
1331static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
640{ 1332{
641 /* PIO related setup */ 1333 unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS;
642 port->data_addr = base + SHD_PIO_DATA_OFS; 1334 unsigned serr_ofs;
643 port->error_addr = port->feature_addr = base + SHD_FEA_ERR_OFS; 1335
644 port->nsect_addr = base + SHD_SECT_CNT_OFS; 1336 /* PIO related setup
645 port->lbal_addr = base + SHD_LBA_L_OFS; 1337 */
646 port->lbam_addr = base + SHD_LBA_M_OFS; 1338 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
647 port->lbah_addr = base + SHD_LBA_H_OFS; 1339 port->error_addr =
648 port->device_addr = base + SHD_DEV_HD_OFS; 1340 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
649 port->status_addr = port->command_addr = base + SHD_CMD_STA_OFS; 1341 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
650 port->altstatus_addr = port->ctl_addr = base + SHD_CTL_AST_OFS; 1342 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
651 /* unused */ 1343 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
1344 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
1345 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
1346 port->status_addr =
1347 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
1348 /* special case: control/altstatus doesn't have ATA_REG_ address */
1349 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
1350
1351 /* unused: */
652 port->cmd_addr = port->bmdma_addr = port->scr_addr = 0; 1352 port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;
653 1353
1354 /* Clear any currently outstanding port interrupt conditions */
1355 serr_ofs = mv_scr_offset(SCR_ERROR);
1356 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
1357 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1358
654 /* unmask all EDMA error interrupts */ 1359 /* unmask all EDMA error interrupts */
655 writel(~0, (void __iomem *)base + EDMA_ERR_IRQ_MASK_OFS); 1360 writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
656 1361
657 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", 1362 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
658 readl((void __iomem *)base + EDMA_CFG_OFS), 1363 readl(port_mmio + EDMA_CFG_OFS),
659 readl((void __iomem *)base + EDMA_ERR_IRQ_CAUSE_OFS), 1364 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
660 readl((void __iomem *)base + EDMA_ERR_IRQ_MASK_OFS)); 1365 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
661} 1366}
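The computed shadow addresses reproduce the per-register constants this patch deletes: with the block at offset 0x100 and one 32-bit slot per taskfile register, ATA_REG_NSECT (2) lands at 0x100 + 4 * 2 = 0x108, the old SHD_SECT_CNT_OFS, and ATA_REG_STATUS (7) lands at 0x11c, the old SHD_CMD_STA_OFS. A compile-time cross-check (standalone sketch, names hypothetical):

    #define EX_SHD_BLK_OFS	0x100

    /* each check fails to compile if the arithmetic drifts */
    typedef char ex_nsect_ok[(EX_SHD_BLK_OFS + 4 * 2) == 0x108 ? 1 : -1];
    typedef char ex_status_ok[(EX_SHD_BLK_OFS + 4 * 7) == 0x11c ? 1 : -1];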
662 1367
1368/**
1369 * mv_host_init - Perform some early initialization of the host.
1370 * @probe_ent: early data struct representing the host
1371 *
1372 * If possible, do an early global reset of the host. Then do
1373 * our port init and clear/unmask all/relevant host interrupts.
1374 *
1375 * LOCKING:
1376 * Inherited from caller.
1377 */
663static int mv_host_init(struct ata_probe_ent *probe_ent) 1378static int mv_host_init(struct ata_probe_ent *probe_ent)
664{ 1379{
665 int rc = 0, n_hc, port, hc; 1380 int rc = 0, n_hc, port, hc;
666 void __iomem *mmio = probe_ent->mmio_base; 1381 void __iomem *mmio = probe_ent->mmio_base;
667 void __iomem *port_mmio; 1382 void __iomem *port_mmio;
668 1383
669 if (mv_master_reset(probe_ent->mmio_base)) { 1384 if ((MV_FLAG_GLBL_SFT_RST & probe_ent->host_flags) &&
1385 mv_global_soft_reset(probe_ent->mmio_base)) {
670 rc = 1; 1386 rc = 1;
671 goto done; 1387 goto done;
672 } 1388 }
@@ -676,17 +1392,27 @@ static int mv_host_init(struct ata_probe_ent *probe_ent)
676 1392
677 for (port = 0; port < probe_ent->n_ports; port++) { 1393 for (port = 0; port < probe_ent->n_ports; port++) {
678 port_mmio = mv_port_base(mmio, port); 1394 port_mmio = mv_port_base(mmio, port);
679 mv_port_init(&probe_ent->port[port], (unsigned long)port_mmio); 1395 mv_port_init(&probe_ent->port[port], port_mmio);
680 } 1396 }
681 1397
682 for (hc = 0; hc < n_hc; hc++) { 1398 for (hc = 0; hc < n_hc; hc++) {
683 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause=0x%08x\n", hc, 1399 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
684 readl(mv_hc_base(mmio, hc) + HC_CFG_OFS), 1400
685 readl(mv_hc_base(mmio, hc) + HC_IRQ_CAUSE_OFS)); 1401 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
1402 "(before clear)=0x%08x\n", hc,
1403 readl(hc_mmio + HC_CFG_OFS),
1404 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
1405
1406 /* Clear any currently outstanding hc interrupt conditions */
1407 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
686 } 1408 }
687 1409
688 writel(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS); 1410 /* Clear any currently outstanding host interrupt conditions */
689 writel(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS); 1411 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1412
1413 /* and unmask interrupt generation for host regs */
1414 writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
1415 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
690 1416
691 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x " 1417 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
692 "PCI int cause/mask=0x%08x/0x%08x\n", 1418 "PCI int cause/mask=0x%08x/0x%08x\n",
@@ -694,11 +1420,53 @@ static int mv_host_init(struct ata_probe_ent *probe_ent)
694 readl(mmio + HC_MAIN_IRQ_MASK_OFS), 1420 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
695 readl(mmio + PCI_IRQ_CAUSE_OFS), 1421 readl(mmio + PCI_IRQ_CAUSE_OFS),
696 readl(mmio + PCI_IRQ_MASK_OFS)); 1422 readl(mmio + PCI_IRQ_MASK_OFS));
697 1423done:
698 done:
699 return rc; 1424 return rc;
700} 1425}
701 1426
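
Two idioms recur through mv_host_init: latched interrupt causes are cleared before the corresponding masks are opened, and every such write goes through writelfl(), the driver's write-then-read-back helper that flushes posted MMIO writes. A userspace model of both, as a sketch; the helper is reconstructed here from its usage, and plain variables stand in for the patch's real registers:

    #include <stdint.h>

    /* model of the driver's writelfl(): write, then read back so the
     * posted write is flushed before execution continues */
    static inline void writelfl(uint32_t data, volatile uint32_t *addr)
    {
            *addr = data;
            (void)*addr;
    }

    /* clear-then-unmask: drop stale causes first so unmasking cannot
     * immediately fire an interrupt for a condition that predates init */
    static void clear_then_unmask(volatile uint32_t *cause,
                                  volatile uint32_t *mask, uint32_t enabled)
    {
            writelfl(0, cause);
            writelfl(enabled, mask);
    }

    int main(void)
    {
            uint32_t cause = 0xdeadbeefu, mask = 0;  /* fake latched state */
            clear_then_unmask(&cause, &mask, ~0u);
            return (cause == 0 && mask == ~0u) ? 0 : 1;
    }
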
1427/**
1428 * mv_print_info - Dump key info to kernel log for perusal.
1429 * @probe_ent: early data struct representing the host
1430 *
1431 * FIXME: complete this.
1432 *
1433 * LOCKING:
1434 * Inherited from caller.
1435 */
1436static void mv_print_info(struct ata_probe_ent *probe_ent)
1437{
1438 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1439 struct mv_host_priv *hpriv = probe_ent->private_data;
1440 u8 rev_id, scc;
1441 const char *scc_s;
1442
1443 /* Use this to determine the HW stepping of the chip so we know
 1444 * what errata to work around
1445 */
1446 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
1447
1448 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
1449 if (scc == 0)
1450 scc_s = "SCSI";
1451 else if (scc == 0x01)
1452 scc_s = "RAID";
1453 else
1454 scc_s = "unknown";
1455
1456 printk(KERN_INFO DRV_NAME
1457 "(%s) %u slots %u ports %s mode IRQ via %s\n",
1458 pci_name(pdev), (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports,
1459 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
1460}
1461
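
mv_print_info keys its mode string off a single byte read at PCI_CLASS_DEVICE; the 0/SCSI and 0x01/RAID mapping is the driver's own convention for these Marvell parts, not the full PCI subclass table. A trivial model of that decode, mirroring the patch's mapping and nothing more:

    #include <stdint.h>

    /* mirrors the scc -> string mapping in mv_print_info */
    static const char *mv_scc_name(uint8_t scc)
    {
            switch (scc) {
            case 0x00: return "SCSI";
            case 0x01: return "RAID";
            default:   return "unknown";
            }
    }
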
1462/**
1463 * mv_init_one - handle a positive probe of a Marvell host
1464 * @pdev: PCI device found
1465 * @ent: PCI device ID entry for the matched host
1466 *
1467 * LOCKING:
1468 * Inherited from caller.
1469 */
702static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 1470static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
703{ 1471{
704 static int printed_version = 0; 1472 static int printed_version = 0;
@@ -706,16 +1474,12 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
706 struct mv_host_priv *hpriv; 1474 struct mv_host_priv *hpriv;
707 unsigned int board_idx = (unsigned int)ent->driver_data; 1475 unsigned int board_idx = (unsigned int)ent->driver_data;
708 void __iomem *mmio_base; 1476 void __iomem *mmio_base;
709 int pci_dev_busy = 0; 1477 int pci_dev_busy = 0, rc;
710 int rc;
711 1478
712 if (!printed_version++) { 1479 if (!printed_version++) {
713 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); 1480 printk(KERN_INFO DRV_NAME " version " DRV_VERSION "\n");
714 } 1481 }
715 1482
716 VPRINTK("ENTER for PCI Bus:Slot.Func=%u:%u.%u\n", pdev->bus->number,
717 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
718
719 rc = pci_enable_device(pdev); 1483 rc = pci_enable_device(pdev);
720 if (rc) { 1484 if (rc) {
721 return rc; 1485 return rc;
@@ -727,8 +1491,6 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
727 goto err_out; 1491 goto err_out;
728 } 1492 }
729 1493
730 pci_intx(pdev, 1);
731
732 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); 1494 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
733 if (probe_ent == NULL) { 1495 if (probe_ent == NULL) {
734 rc = -ENOMEM; 1496 rc = -ENOMEM;
@@ -739,8 +1501,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
739 probe_ent->dev = pci_dev_to_dev(pdev); 1501 probe_ent->dev = pci_dev_to_dev(pdev);
740 INIT_LIST_HEAD(&probe_ent->node); 1502 INIT_LIST_HEAD(&probe_ent->node);
741 1503
742 mmio_base = ioremap_nocache(pci_resource_start(pdev, MV_PRIMARY_BAR), 1504 mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0);
743 pci_resource_len(pdev, MV_PRIMARY_BAR));
744 if (mmio_base == NULL) { 1505 if (mmio_base == NULL) {
745 rc = -ENOMEM; 1506 rc = -ENOMEM;
746 goto err_out_free_ent; 1507 goto err_out_free_ent;
@@ -769,37 +1530,40 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
769 if (rc) { 1530 if (rc) {
770 goto err_out_hpriv; 1531 goto err_out_hpriv;
771 } 1532 }
772/* mv_print_info(probe_ent); */
773 1533
774 { 1534 /* Enable interrupts */
775 int b, w; 1535 if (pci_enable_msi(pdev) == 0) {
776 u32 dw[4]; /* hold a line of 16b */ 1536 hpriv->hp_flags |= MV_HP_FLAG_MSI;
777 VPRINTK("PCI config space:\n"); 1537 } else {
778 for (b = 0; b < 0x40; ) { 1538 pci_intx(pdev, 1);
779 for (w = 0; w < 4; w++) {
780 (void) pci_read_config_dword(pdev,b,&dw[w]);
781 b += sizeof(*dw);
782 }
783 VPRINTK("%08x %08x %08x %08x\n",
784 dw[0],dw[1],dw[2],dw[3]);
785 }
786 } 1539 }
787 1540
788 /* FIXME: check ata_device_add return value */ 1541 mv_dump_pci_cfg(pdev, 0x68);
789 ata_device_add(probe_ent); 1542 mv_print_info(probe_ent);
790 kfree(probe_ent); 1543
1544 if (ata_device_add(probe_ent) == 0) {
1545 rc = -ENODEV; /* No devices discovered */
1546 goto err_out_dev_add;
1547 }
791 1548
1549 kfree(probe_ent);
792 return 0; 1550 return 0;
793 1551
794 err_out_hpriv: 1552err_out_dev_add:
1553 if (MV_HP_FLAG_MSI & hpriv->hp_flags) {
1554 pci_disable_msi(pdev);
1555 } else {
1556 pci_intx(pdev, 0);
1557 }
1558err_out_hpriv:
795 kfree(hpriv); 1559 kfree(hpriv);
796 err_out_iounmap: 1560err_out_iounmap:
797 iounmap(mmio_base); 1561 pci_iounmap(pdev, mmio_base);
798 err_out_free_ent: 1562err_out_free_ent:
799 kfree(probe_ent); 1563 kfree(probe_ent);
800 err_out_regions: 1564err_out_regions:
801 pci_release_regions(pdev); 1565 pci_release_regions(pdev);
802 err_out: 1566err_out:
803 if (!pci_dev_busy) { 1567 if (!pci_dev_busy) {
804 pci_disable_device(pdev); 1568 pci_disable_device(pdev);
805 } 1569 }
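
The probe rework pairs each acquisition with a symmetric release: MSI is tried first with legacy INTx as the fallback, the chosen mode is recorded in hp_flags, and the new err_out_dev_add label undoes exactly the mode that was enabled. A condensed, self-contained model of that pattern; the stub functions stand in for pci_enable_msi()/pci_disable_msi()/pci_intx(), and the flag mirrors MV_HP_FLAG_MSI:

    #include <stdbool.h>

    struct host_priv { unsigned int hp_flags; };
    #define HP_FLAG_MSI 0x1u

    static bool try_enable_msi(void) { return false; }  /* stub: pretend no MSI */
    static void set_intx(bool on)    { (void)on; }      /* stub */
    static void disable_msi(void)    { }                /* stub */

    /* prefer MSI, fall back to INTx, and remember the choice */
    static void irq_setup(struct host_priv *hpriv)
    {
            if (try_enable_msi())
                    hpriv->hp_flags |= HP_FLAG_MSI;
            else
                    set_intx(true);
    }

    /* on a later failure, tear down only what irq_setup() enabled */
    static void irq_teardown(struct host_priv *hpriv)
    {
            if (hpriv->hp_flags & HP_FLAG_MSI)
                    disable_msi();
            else
                    set_intx(false);
    }

    int main(void)
    {
            struct host_priv hpriv = { 0 };
            irq_setup(&hpriv);
            irq_teardown(&hpriv);
            return 0;
    }
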
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index c05653c7779d..9fa2535dd937 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -29,6 +29,8 @@
29 * NV-specific details such as register offsets, SATA phy location, 29 * NV-specific details such as register offsets, SATA phy location,
30 * hotplug info, etc. 30 * hotplug info, etc.
31 * 31 *
32 * 0.09
33 * - Fixed bug introduced by 0.08's MCP51 and MCP55 support.
32 * 34 *
33 * 0.08 35 * 0.08
34 * - Added support for MCP51 and MCP55. 36 * - Added support for MCP51 and MCP55.
@@ -132,9 +134,7 @@ enum nv_host_type
132 GENERIC, 134 GENERIC,
133 NFORCE2, 135 NFORCE2,
134 NFORCE3, 136 NFORCE3,
135 CK804, 137 CK804
136 MCP51,
137 MCP55
138}; 138};
139 139
140static struct pci_device_id nv_pci_tbl[] = { 140static struct pci_device_id nv_pci_tbl[] = {
@@ -153,13 +153,13 @@ static struct pci_device_id nv_pci_tbl[] = {
153 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2, 153 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2,
154 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 }, 154 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
155 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA, 155 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA,
156 PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP51 }, 156 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
157 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2, 157 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2,
158 PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP51 }, 158 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
159 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA, 159 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA,
160 PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 }, 160 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
161 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2, 161 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2,
162 PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 }, 162 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
163 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, 163 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
164 PCI_ANY_ID, PCI_ANY_ID, 164 PCI_ANY_ID, PCI_ANY_ID,
165 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC }, 165 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
@@ -405,7 +405,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
405 rc = -ENOMEM; 405 rc = -ENOMEM;
406 406
407 ppi = &nv_port_info; 407 ppi = &nv_port_info;
408 probe_ent = ata_pci_init_native_mode(pdev, &ppi); 408 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
409 if (!probe_ent) 409 if (!probe_ent)
410 goto err_out_regions; 410 goto err_out_regions;
411 411
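
With MCP51/MCP55 folded into GENERIC, every NVIDIA SATA part the table does not name explicitly is caught by the final row, which matches on class code instead of device ID: the 0xffff00 mask compares base class and subclass and ignores the programming-interface byte. A small model of that match rule, using the kernel's vendor and class constants:

    #include <stdbool.h>
    #include <stdint.h>

    #define PCI_VENDOR_ID_NVIDIA  0x10de
    #define PCI_CLASS_STORAGE_IDE 0x0101  /* base class 0x01, subclass 0x01 */

    /* a device hits the catch-all row when the vendor matches and the
     * masked 24-bit class code equals PCI_CLASS_STORAGE_IDE << 8 */
    static bool matches_catch_all(uint16_t vendor, uint32_t class_code)
    {
            return vendor == PCI_VENDOR_ID_NVIDIA &&
                   (class_code & 0xffff00u) == ((uint32_t)PCI_CLASS_STORAGE_IDE << 8);
    }
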
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index 538ad727bd2e..def7e0d9dacb 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -438,11 +438,11 @@ static inline unsigned int pdc_host_intr( struct ata_port *ap,
438 break; 438 break;
439 439
440 default: 440 default:
441 ap->stats.idle_irq++; 441 ap->stats.idle_irq++;
442 break; 442 break;
443 } 443 }
444 444
445 return handled; 445 return handled;
446} 446}
447 447
448static void pdc_irq_clear(struct ata_port *ap) 448static void pdc_irq_clear(struct ata_port *ap)
diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c
index b227e51d12f4..0761a3234fcf 100644
--- a/drivers/scsi/sata_sis.c
+++ b/drivers/scsi/sata_sis.c
@@ -263,7 +263,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
263 goto err_out_regions; 263 goto err_out_regions;
264 264
265 ppi = &sis_port_info; 265 ppi = &sis_port_info;
266 probe_ent = ata_pci_init_native_mode(pdev, &ppi); 266 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
267 if (!probe_ent) { 267 if (!probe_ent) {
268 rc = -ENOMEM; 268 rc = -ENOMEM;
269 goto err_out_regions; 269 goto err_out_regions;
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
index 4c9fb8b71be1..9c06f2abe7f7 100644
--- a/drivers/scsi/sata_uli.c
+++ b/drivers/scsi/sata_uli.c
@@ -202,7 +202,7 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
202 goto err_out_regions; 202 goto err_out_regions;
203 203
204 ppi = &uli_port_info; 204 ppi = &uli_port_info;
205 probe_ent = ata_pci_init_native_mode(pdev, &ppi); 205 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
206 if (!probe_ent) { 206 if (!probe_ent) {
207 rc = -ENOMEM; 207 rc = -ENOMEM;
208 goto err_out_regions; 208 goto err_out_regions;
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index 128b996b07b7..565872479b9a 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -212,7 +212,7 @@ static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev)
212 struct ata_probe_ent *probe_ent; 212 struct ata_probe_ent *probe_ent;
213 struct ata_port_info *ppi = &svia_port_info; 213 struct ata_port_info *ppi = &svia_port_info;
214 214
215 probe_ent = ata_pci_init_native_mode(pdev, &ppi); 215 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
216 if (!probe_ent) 216 if (!probe_ent)
217 return NULL; 217 return NULL;
218 218
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c
index 5959e6755a81..656c0e8d160e 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/serial/sunsu.c
@@ -518,11 +518,7 @@ static void sunsu_change_mouse_baud(struct uart_sunsu_port *up)
518 518
519 quot = up->port.uartclk / (16 * new_baud); 519 quot = up->port.uartclk / (16 * new_baud);
520 520
521 spin_unlock(&up->port.lock);
522
523 sunsu_change_speed(&up->port, up->cflag, 0, quot); 521 sunsu_change_speed(&up->port, up->cflag, 0, quot);
524
525 spin_lock(&up->port.lock);
526} 522}
527 523
528static void receive_kbd_ms_chars(struct uart_sunsu_port *up, struct pt_regs *regs, int is_break) 524static void receive_kbd_ms_chars(struct uart_sunsu_port *up, struct pt_regs *regs, int is_break)
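
The sunsu change deletes an unlock/relock pair rather than moving it. The hedged reading is that the pair could not be correct in either direction: if the caller does not hold port.lock, the unlock corrupts lock state, and if sunsu_change_speed takes the lock itself, holding it across the call would deadlock. A schematic of the anti-pattern; pthread is used purely for illustration, this is not the sunsu code:

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* helper that does its own locking, like a set-speed routine might */
    static void change_speed(void)
    {
            pthread_mutex_lock(&lock);
            /* ... recompute divisor, program the UART ... */
            pthread_mutex_unlock(&lock);
    }

    static void change_mouse_baud(void)
    {
            /* old shape: unlock here, call, relock -- suspect whether or
             * not the lock was actually held on entry */
            change_speed();  /* fixed shape: just call the helper */
    }

    int main(void)
    {
            change_mouse_baud();
            return 0;
    }
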