author	Jeff Garzik <jgarzik@pobox.com>	2005-10-09 11:16:14 -0400
committer	Jeff Garzik <jgarzik@pobox.com>	2005-10-09 11:16:14 -0400
commit	c4052da6f0c01a0b059d125d72bb934d0980b798 (patch)
tree	ad50a17e4d14b8f6f1773158d956d424575d1712 /drivers
parent	cedc9a478d8c6265879dc3839ef3d4849a709184 (diff)
parent	3d3467f0fdf61a421361c00cf84fcf0f1a6dc1e8 (diff)
Merge branch 'upstream'
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/atm/fore200e.c                    |   10
-rw-r--r--	drivers/char/drm/drm_stub.c               |    2
-rw-r--r--	drivers/connector/connector.c             |    3
-rw-r--r--	drivers/infiniband/hw/mthca/mthca_main.c  |   45
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_main.c |    4
-rw-r--r--	drivers/mfd/ucb1x00-core.c                |    2
-rw-r--r--	drivers/mfd/ucb1x00.h                     |    2
-rw-r--r--	drivers/net/Kconfig                       |    8
-rw-r--r--	drivers/net/bonding/bond_main.c           |  285
-rw-r--r--	drivers/net/bonding/bonding.h             |    4
-rw-r--r--	drivers/net/ibm_emac/ibm_emac_core.c      |   31
-rw-r--r--	drivers/net/ns83820.c                     |    2
-rw-r--r--	drivers/net/pcmcia/smc91c92_cs.c          |    2
-rw-r--r--	drivers/net/skge.c                        |   24
-rw-r--r--	drivers/net/starfire.c                    |   46
-rw-r--r--	drivers/net/sungem.h                      |    3
-rw-r--r--	drivers/net/tokenring/ibmtr.c             |    5
-rw-r--r--	drivers/net/tulip/21142.c                 |    2
-rw-r--r--	drivers/net/wireless/orinoco.c            |   14
-rw-r--r--	drivers/s390/net/qeth.h                   |    2
-rw-r--r--	drivers/s390/net/qeth_main.c              |   37
-rw-r--r--	drivers/scsi/Kconfig                      |    8
-rw-r--r--	drivers/scsi/Makefile                     |    1
-rw-r--r--	drivers/scsi/ahci.c                       |   31
-rw-r--r--	drivers/scsi/libata-core.c                |  433
-rw-r--r--	drivers/scsi/libata-scsi.c                |  689
-rw-r--r--	drivers/scsi/libata.h                     |   16
-rw-r--r--	drivers/scsi/megaraid/megaraid_sas.c      |    1
-rw-r--r--	drivers/scsi/sata_mv.c                    | 1142
-rw-r--r--	drivers/scsi/sata_nv.c                    |   16
-rw-r--r--	drivers/scsi/sata_promise.c               |    6
-rw-r--r--	drivers/scsi/sata_sil24.c                 |  875
-rw-r--r--	drivers/scsi/sata_sis.c                   |    2
-rw-r--r--	drivers/scsi/sata_uli.c                   |    2
-rw-r--r--	drivers/scsi/sata_via.c                   |    2
-rw-r--r--	drivers/serial/sunsu.c                    |    4
36 files changed, 2856 insertions(+), 905 deletions(-)
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 2bf723a7b6e6..6f1a83c9d9e0 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -178,14 +178,12 @@ fore200e_irq_itoa(int irq)
 
 
 static void*
-fore200e_kmalloc(int size, int flags)
+fore200e_kmalloc(int size, unsigned int __nocast flags)
 {
-    void* chunk = kmalloc(size, flags);
+    void *chunk = kzalloc(size, flags);
 
-    if (chunk)
-	memset(chunk, 0x00, size);
-    else
+    if (!chunk)
 	printk(FORE200E "kmalloc() failed, requested size = %d, flags = 0x%x\n", size, flags);
 
     return chunk;
 }
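
Note: the hunk above folds the open-coded kmalloc()+memset() pair into kzalloc(), which allocates and zeroes in one call. A minimal caller-side sketch of the idiom (struct name hypothetical, not from this patch):

	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return -ENOMEM;	/* on success, *f is already zeroed */
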
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c
index 95a976c96eb8..70458cb061c6 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/char/drm/drm_stub.c
@@ -47,7 +47,7 @@ MODULE_PARM_DESC(cards_limit, "Maximum number of graphics cards");
 MODULE_PARM_DESC(debug, "Enable debug output");
 
 module_param_named(cards_limit, drm_cards_limit, int, 0444);
-module_param_named(debug, drm_debug, int, 0666);
+module_param_named(debug, drm_debug, int, 0600);
 
 drm_head_t **drm_heads;
 struct drm_sysfs_class *drm_class;
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index bb0b3a8de14b..1422285d537c 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -69,7 +69,8 @@ int cn_already_initialized = 0;
  * a new message.
  *
  */
-int cn_netlink_send(struct cn_msg *msg, u32 __group, int gfp_mask)
+int cn_netlink_send(struct cn_msg *msg, u32 __group,
+		    unsigned int __nocast gfp_mask)
 {
 	struct cn_callback_entry *__cbq;
 	unsigned int size;
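
Note: this is one of several hunks in this merge that retype allocation flags from plain int to unsigned int __nocast. The annotation is a sparse checking aid with no runtime effect; a hedged sketch of the pattern (function name hypothetical):

	/* sparse warns if a caller passes a bare integer where the
	 * __nocast-annotated gfp mask is expected; at runtime it is a no-op. */
	static void *example_alloc(size_t size, unsigned int __nocast gfp_mask)
	{
		return kmalloc(size, gfp_mask);
	}
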
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index ffbcd40418d5..23a3f56c7899 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -503,6 +503,25 @@ err_free_aux:
 	return err;
 }
 
+static void mthca_free_icms(struct mthca_dev *mdev)
+{
+	u8 status;
+
+	mthca_free_icm_table(mdev, mdev->mcg_table.table);
+	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
+		mthca_free_icm_table(mdev, mdev->srq_table.table);
+	mthca_free_icm_table(mdev, mdev->cq_table.table);
+	mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
+	mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
+	mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
+	mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
+	mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
+	mthca_unmap_eq_icm(mdev);
+
+	mthca_UNMAP_ICM_AUX(mdev, &status);
+	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
+}
+
 static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
 {
 	struct mthca_dev_lim dev_lim;
@@ -580,18 +599,7 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
 	return 0;
 
 err_free_icm:
-	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
-		mthca_free_icm_table(mdev, mdev->srq_table.table);
-	mthca_free_icm_table(mdev, mdev->cq_table.table);
-	mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
-	mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
-	mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
-	mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
-	mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
-	mthca_unmap_eq_icm(mdev);
-
-	mthca_UNMAP_ICM_AUX(mdev, &status);
-	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
+	mthca_free_icms(mdev);
 
 err_stop_fw:
 	mthca_UNMAP_FA(mdev, &status);
@@ -611,18 +619,7 @@ static void mthca_close_hca(struct mthca_dev *mdev)
 	mthca_CLOSE_HCA(mdev, 0, &status);
 
 	if (mthca_is_memfree(mdev)) {
-		if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
-			mthca_free_icm_table(mdev, mdev->srq_table.table);
-		mthca_free_icm_table(mdev, mdev->cq_table.table);
-		mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
-		mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
-		mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
-		mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
-		mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
-		mthca_unmap_eq_icm(mdev);
-
-		mthca_UNMAP_ICM_AUX(mdev, &status);
-		mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
+		mthca_free_icms(mdev);
 
 		mthca_UNMAP_FA(mdev, &status);
 		mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
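
Note: the two hunks above replace identical ICM teardown sequences in the error path and in mthca_close_hca() with the single mthca_free_icms() helper, so the copies cannot drift apart. A generic sketch of the refactor (all names hypothetical):

	struct thing { void *a, *b; };

	static void thing_free_all(struct thing *t)
	{
		kfree(t->b);	/* kfree(NULL) is a safe no-op */
		kfree(t->a);
	}

	static int thing_init(struct thing *t)
	{
		t->a = kmalloc(64, GFP_KERNEL);
		t->b = kmalloc(64, GFP_KERNEL);
		if (!t->a || !t->b) {
			thing_free_all(t);	/* same helper as the shutdown path */
			return -ENOMEM;
		}
		return 0;
	}
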
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 704f48e0b6a7..6c5bf07489f4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -474,7 +474,7 @@ err:
 	spin_unlock(&priv->lock);
 }
 
-static void path_lookup(struct sk_buff *skb, struct net_device *dev)
+static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
 
@@ -569,7 +569,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	if (skb->dst && skb->dst->neighbour) {
 		if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
-			path_lookup(skb, dev);
+			ipoib_path_lookup(skb, dev);
 			goto out;
 		}
 
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index 10f6ce1bc0ab..612564ac6f7b 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -642,8 +642,6 @@ static void __exit ucb1x00_exit(void)
 module_init(ucb1x00_init);
 module_exit(ucb1x00_exit);
 
-EXPORT_SYMBOL(ucb1x00_class);
-
 EXPORT_SYMBOL(ucb1x00_io_set_dir);
 EXPORT_SYMBOL(ucb1x00_io_write);
 EXPORT_SYMBOL(ucb1x00_io_read);
diff --git a/drivers/mfd/ucb1x00.h b/drivers/mfd/ucb1x00.h
index 6b632644f59a..9c9a647d8b7b 100644
--- a/drivers/mfd/ucb1x00.h
+++ b/drivers/mfd/ucb1x00.h
@@ -106,8 +106,6 @@ struct ucb1x00_irq {
 	void (*fn)(int, void *);
 };
 
-extern struct class ucb1x00_class;
-
 struct ucb1x00 {
 	spinlock_t	lock;
 	struct mcp	*mcp;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 2a908c4690a7..c748b0e16419 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1655,7 +1655,7 @@ config LAN_SAA9730
 
 config NET_POCKET
 	bool "Pocket and portable adapters"
-	depends on NET_ETHERNET && ISA
+	depends on NET_ETHERNET && PARPORT
 	---help---
 	  Cute little network (Ethernet) devices which attach to the parallel
 	  port ("pocket adapters"), commonly used with laptops. If you have
@@ -1679,7 +1679,7 @@ config NET_POCKET
 
 config ATP
 	tristate "AT-LAN-TEC/RealTek pocket adapter support"
-	depends on NET_POCKET && ISA && X86
+	depends on NET_POCKET && PARPORT && X86
 	select CRC32
 	---help---
 	  This is a network (Ethernet) device which attaches to your parallel
@@ -1694,7 +1694,7 @@ config ATP
 
 config DE600
 	tristate "D-Link DE600 pocket adapter support"
-	depends on NET_POCKET && ISA
+	depends on NET_POCKET && PARPORT
 	---help---
 	  This is a network (Ethernet) device which attaches to your parallel
 	  port. Read <file:Documentation/networking/DLINK.txt> as well as the
@@ -1709,7 +1709,7 @@ config DE600
 
 config DE620
 	tristate "D-Link DE620 pocket adapter support"
-	depends on NET_POCKET && ISA
+	depends on NET_POCKET && PARPORT
 	---help---
 	  This is a network (Ethernet) device which attaches to your parallel
 	  port. Read <file:Documentation/networking/DLINK.txt> as well as the
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index bf81cd45e4d4..f0a5b772a386 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -487,6 +487,8 @@
  *	* Added xmit_hash_policy_layer34()
  *	- Modified by Jay Vosburgh <fubar@us.ibm.com> to also support mode 4.
  *	  Set version to 2.6.3.
+ * 2005/09/26 - Jay Vosburgh <fubar@us.ibm.com>
+ *	- Removed backwards compatibility for old ifenslaves.  Version 2.6.4.
  */
 
 //#define BONDING_DEBUG 1
@@ -595,14 +597,7 @@ static int arp_ip_count = 0;
 static int bond_mode	= BOND_MODE_ROUNDROBIN;
 static int xmit_hashtype= BOND_XMIT_POLICY_LAYER2;
 static int lacp_fast	= 0;
-static int app_abi_ver	= 0;
-static int orig_app_abi_ver = -1; /* This is used to save the first ABI version
-				   * we receive from the application. Once set,
-				   * it won't be changed, and the module will
-				   * refuse to enslave/release interfaces if the
-				   * command comes from an application using
-				   * another ABI version.
-				   */
+
 struct bond_parm_tbl {
 	char *modename;
 	int mode;
@@ -1294,12 +1289,13 @@ static void bond_mc_list_destroy(struct bonding *bond)
 /*
  * Copy all the Multicast addresses from src to the bonding device dst
  */
-static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond, int gpf_flag)
+static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond,
+			     unsigned int __nocast gfp_flag)
 {
 	struct dev_mc_list *dmi, *new_dmi;
 
 	for (dmi = mc_list; dmi; dmi = dmi->next) {
-		new_dmi = kmalloc(sizeof(struct dev_mc_list), gpf_flag);
+		new_dmi = kmalloc(sizeof(struct dev_mc_list), gfp_flag);
 
 		if (!new_dmi) {
 			/* FIXME: Potential memory leak !!! */
@@ -1702,51 +1698,29 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
 	}
 	}
 
-	if (app_abi_ver >= 1) {
-		/* The application is using an ABI, which requires the
-		 * slave interface to be closed.
-		 */
-		if ((slave_dev->flags & IFF_UP)) {
-			printk(KERN_ERR DRV_NAME
-			       ": Error: %s is up\n",
-			       slave_dev->name);
-			res = -EPERM;
-			goto err_undo_flags;
-		}
-
-		if (slave_dev->set_mac_address == NULL) {
-			printk(KERN_ERR DRV_NAME
-			       ": Error: The slave device you specified does "
-			       "not support setting the MAC address.\n");
-			printk(KERN_ERR
-			       "Your kernel likely does not support slave "
-			       "devices.\n");
-
-			res = -EOPNOTSUPP;
-			goto err_undo_flags;
-		}
-	} else {
-		/* The application is not using an ABI, which requires the
-		 * slave interface to be open.
-		 */
-		if (!(slave_dev->flags & IFF_UP)) {
-			printk(KERN_ERR DRV_NAME
-			       ": Error: %s is not running\n",
-			       slave_dev->name);
-			res = -EINVAL;
-			goto err_undo_flags;
-		}
-
-		if ((bond->params.mode == BOND_MODE_8023AD) ||
-		    (bond->params.mode == BOND_MODE_TLB) ||
-		    (bond->params.mode == BOND_MODE_ALB)) {
-			printk(KERN_ERR DRV_NAME
-			       ": Error: to use %s mode, you must upgrade "
-			       "ifenslave.\n",
-			       bond_mode_name(bond->params.mode));
-			res = -EOPNOTSUPP;
-			goto err_undo_flags;
-		}
-	}
+	/*
+	 * Old ifenslave binaries are no longer supported.  These can
+	 * be identified with moderate accuracy by the state of the slave:
+	 * the current ifenslave will set the interface down prior to
+	 * enslaving it; the old ifenslave will not.
+	 */
+	if ((slave_dev->flags & IFF_UP)) {
+		printk(KERN_ERR DRV_NAME ": %s is up. "
+		       "This may be due to an out of date ifenslave.\n",
+		       slave_dev->name);
+		res = -EPERM;
+		goto err_undo_flags;
+	}
+
+	if (slave_dev->set_mac_address == NULL) {
+		printk(KERN_ERR DRV_NAME
+		       ": Error: The slave device you specified does "
+		       "not support setting the MAC address.\n");
+		printk(KERN_ERR
+		       "Your kernel likely does not support slave devices.\n");
+
+		res = -EOPNOTSUPP;
+		goto err_undo_flags;
+	}
 
 	new_slave = kmalloc(sizeof(struct slave), GFP_KERNEL);
@@ -1762,41 +1736,36 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
 	 */
 	new_slave->original_flags = slave_dev->flags;
 
-	if (app_abi_ver >= 1) {
-		/* save slave's original ("permanent") mac address for
-		 * modes that needs it, and for restoring it upon release,
-		 * and then set it to the master's address
-		 */
-		memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN);
-
-		/* set slave to master's mac address
-		 * The application already set the master's
-		 * mac address to that of the first slave
-		 */
-		memcpy(addr.sa_data, bond_dev->dev_addr, bond_dev->addr_len);
-		addr.sa_family = slave_dev->type;
-		res = dev_set_mac_address(slave_dev, &addr);
-		if (res) {
-			dprintk("Error %d calling set_mac_address\n", res);
-			goto err_free;
-		}
-
-		/* open the slave since the application closed it */
-		res = dev_open(slave_dev);
-		if (res) {
-			dprintk("Opening slave %s failed\n", slave_dev->name);
-			goto err_restore_mac;
-		}
-	}
+	/*
+	 * Save slave's original ("permanent") mac address for modes
+	 * that need it, and for restoring it upon release, and then
+	 * set it to the master's address
+	 */
+	memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN);
+
+	/*
+	 * Set slave to master's mac address.  The application already
+	 * set the master's mac address to that of the first slave
+	 */
+	memcpy(addr.sa_data, bond_dev->dev_addr, bond_dev->addr_len);
+	addr.sa_family = slave_dev->type;
+	res = dev_set_mac_address(slave_dev, &addr);
+	if (res) {
+		dprintk("Error %d calling set_mac_address\n", res);
+		goto err_free;
+	}
+
+	/* open the slave since the application closed it */
+	res = dev_open(slave_dev);
+	if (res) {
+		dprintk("Opening slave %s failed\n", slave_dev->name);
+		goto err_restore_mac;
+	}
 
 	res = netdev_set_master(slave_dev, bond_dev);
 	if (res) {
 		dprintk("Error %d calling netdev_set_master\n", res);
-		if (app_abi_ver < 1) {
-			goto err_free;
-		} else {
-			goto err_close;
-		}
+		goto err_close;
 	}
 
 	new_slave->dev = slave_dev;
@@ -1997,39 +1966,6 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
 
 	write_unlock_bh(&bond->lock);
 
-	if (app_abi_ver < 1) {
-		/*
-		 * !!! This is to support old versions of ifenslave.
-		 * We can remove this in 2.5 because our ifenslave takes
-		 * care of this for us.
-		 * We check to see if the master has a mac address yet.
-		 * If not, we'll give it the mac address of our slave device.
-		 */
-		int ndx = 0;
-
-		for (ndx = 0; ndx < bond_dev->addr_len; ndx++) {
-			dprintk("Checking ndx=%d of bond_dev->dev_addr\n",
-				ndx);
-			if (bond_dev->dev_addr[ndx] != 0) {
-				dprintk("Found non-zero byte at ndx=%d\n",
-					ndx);
-				break;
-			}
-		}
-
-		if (ndx == bond_dev->addr_len) {
-			/*
-			 * We got all the way through the address and it was
-			 * all 0's.
-			 */
-			dprintk("%s doesn't have a MAC address yet. \n",
-				bond_dev->name);
-			dprintk("Going to give assign it from %s.\n",
-				slave_dev->name);
-			bond_sethwaddr(bond_dev, slave_dev);
-		}
-	}
-
 	printk(KERN_INFO DRV_NAME
 	       ": %s: enslaving %s as a%s interface with a%s link.\n",
 	       bond_dev->name, slave_dev->name,
@@ -2227,12 +2163,10 @@ static int bond_release(struct net_device *bond_dev, struct net_device *slave_de
 	/* close slave before restoring its mac address */
 	dev_close(slave_dev);
 
-	if (app_abi_ver >= 1) {
-		/* restore original ("permanent") mac address */
-		memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
-		addr.sa_family = slave_dev->type;
-		dev_set_mac_address(slave_dev, &addr);
-	}
+	/* restore original ("permanent") mac address */
+	memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
+	addr.sa_family = slave_dev->type;
+	dev_set_mac_address(slave_dev, &addr);
 
 	/* restore the original state of the
 	 * IFF_NOARP flag that might have been
@@ -2320,12 +2254,10 @@ static int bond_release_all(struct net_device *bond_dev)
 		/* close slave before restoring its mac address */
 		dev_close(slave_dev);
 
-		if (app_abi_ver >= 1) {
-			/* restore original ("permanent") mac address*/
-			memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
-			addr.sa_family = slave_dev->type;
-			dev_set_mac_address(slave_dev, &addr);
-		}
+		/* restore original ("permanent") mac address*/
+		memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
+		addr.sa_family = slave_dev->type;
+		dev_set_mac_address(slave_dev, &addr);
 
 		/* restore the original state of the IFF_NOARP flag that might have
 		 * been set by bond_set_slave_inactive_flags()
@@ -2423,57 +2355,6 @@ static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_devi
 	return res;
 }
 
-static int bond_ethtool_ioctl(struct net_device *bond_dev, struct ifreq *ifr)
-{
-	struct ethtool_drvinfo info;
-	void __user *addr = ifr->ifr_data;
-	uint32_t cmd;
-
-	if (get_user(cmd, (uint32_t __user *)addr)) {
-		return -EFAULT;
-	}
-
-	switch (cmd) {
-	case ETHTOOL_GDRVINFO:
-		if (copy_from_user(&info, addr, sizeof(info))) {
-			return -EFAULT;
-		}
-
-		if (strcmp(info.driver, "ifenslave") == 0) {
-			int new_abi_ver;
-			char *endptr;
-
-			new_abi_ver = simple_strtoul(info.fw_version,
-						     &endptr, 0);
-			if (*endptr) {
-				printk(KERN_ERR DRV_NAME
-				       ": Error: got invalid ABI "
-				       "version from application\n");
-
-				return -EINVAL;
-			}
-
-			if (orig_app_abi_ver == -1) {
-				orig_app_abi_ver = new_abi_ver;
-			}
-
-			app_abi_ver = new_abi_ver;
-		}
-
-		strncpy(info.driver, DRV_NAME, 32);
-		strncpy(info.version, DRV_VERSION, 32);
-		snprintf(info.fw_version, 32, "%d", BOND_ABI_VERSION);
-
-		if (copy_to_user(addr, &info, sizeof(info))) {
-			return -EFAULT;
-		}
-
-		return 0;
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
 static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
 {
 	struct bonding *bond = bond_dev->priv;
@@ -3442,16 +3323,11 @@ static void bond_info_show_slave(struct seq_file *seq, const struct slave *slave
 	seq_printf(seq, "Link Failure Count: %d\n",
 		   slave->link_failure_count);
 
-	if (app_abi_ver >= 1) {
-		seq_printf(seq,
-			   "Permanent HW addr: %02x:%02x:%02x:%02x:%02x:%02x\n",
-			   slave->perm_hwaddr[0],
-			   slave->perm_hwaddr[1],
-			   slave->perm_hwaddr[2],
-			   slave->perm_hwaddr[3],
-			   slave->perm_hwaddr[4],
-			   slave->perm_hwaddr[5]);
-	}
+	seq_printf(seq,
+		   "Permanent HW addr: %02x:%02x:%02x:%02x:%02x:%02x\n",
+		   slave->perm_hwaddr[0], slave->perm_hwaddr[1],
+		   slave->perm_hwaddr[2], slave->perm_hwaddr[3],
+		   slave->perm_hwaddr[4], slave->perm_hwaddr[5]);
 
 	if (bond->params.mode == BOND_MODE_8023AD) {
 		const struct aggregator *agg
@@ -4010,15 +3886,12 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
 	struct ifslave k_sinfo;
 	struct ifslave __user *u_sinfo = NULL;
 	struct mii_ioctl_data *mii = NULL;
-	int prev_abi_ver = orig_app_abi_ver;
 	int res = 0;
 
 	dprintk("bond_ioctl: master=%s, cmd=%d\n",
 		bond_dev->name, cmd);
 
 	switch (cmd) {
-	case SIOCETHTOOL:
-		return bond_ethtool_ioctl(bond_dev, ifr);
 	case SIOCGMIIPHY:
 		mii = if_mii(ifr);
 		if (!mii) {
@@ -4090,21 +3963,6 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
 		return -EPERM;
 	}
 
-	if (orig_app_abi_ver == -1) {
-		/* no orig_app_abi_ver was provided yet, so we'll use the
-		 * current one from now on, even if it's 0
-		 */
-		orig_app_abi_ver = app_abi_ver;
-
-	} else if (orig_app_abi_ver != app_abi_ver) {
-		printk(KERN_ERR DRV_NAME
-		       ": Error: already using ifenslave ABI version %d; to "
-		       "upgrade ifenslave to version %d, you must first "
-		       "reload bonding.\n",
-		       orig_app_abi_ver, app_abi_ver);
-		return -EINVAL;
-	}
-
 	slave_dev = dev_get_by_name(ifr->ifr_slave);
 
 	dprintk("slave_dev=%p: \n", slave_dev);
@@ -4137,14 +3995,6 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
 		dev_put(slave_dev);
 	}
 
-	if (res < 0) {
-		/* The ioctl failed, so there's no point in changing the
-		 * orig_app_abi_ver. We'll restore it's value just in case
-		 * we've changed it earlier in this function.
-		 */
-		orig_app_abi_ver = prev_abi_ver;
-	}
-
 	return res;
 }
 
@@ -4578,9 +4428,18 @@ static inline void bond_set_mode_ops(struct bonding *bond, int mode)
 	}
 }
 
+static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
+				     struct ethtool_drvinfo *drvinfo)
+{
+	strncpy(drvinfo->driver, DRV_NAME, 32);
+	strncpy(drvinfo->version, DRV_VERSION, 32);
+	snprintf(drvinfo->fw_version, 32, "%d", BOND_ABI_VERSION);
+}
+
 static struct ethtool_ops bond_ethtool_ops = {
 	.get_tx_csum		= ethtool_op_get_tx_csum,
 	.get_sg			= ethtool_op_get_sg,
+	.get_drvinfo		= bond_ethtool_get_drvinfo,
 };
 
 /*
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 388196980862..bbf9da8af624 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -40,8 +40,8 @@
 #include "bond_3ad.h"
 #include "bond_alb.h"
 
-#define DRV_VERSION	"2.6.3"
-#define DRV_RELDATE	"June 8, 2005"
+#define DRV_VERSION	"2.6.4"
+#define DRV_RELDATE	"September 26, 2005"
 #define DRV_NAME	"bonding"
 #define DRV_DESCRIPTION	"Ethernet Channel Bonding Driver"
 
diff --git a/drivers/net/ibm_emac/ibm_emac_core.c b/drivers/net/ibm_emac/ibm_emac_core.c
index 0de3bb906174..14e9b6315f20 100644
--- a/drivers/net/ibm_emac/ibm_emac_core.c
+++ b/drivers/net/ibm_emac/ibm_emac_core.c
@@ -1875,6 +1875,9 @@ static int emac_init_device(struct ocp_device *ocpdev, struct ibm_ocp_mal *mal)
 		rc = -ENODEV;
 		goto bail;
 	}
+
+	/* Disable any PHY features not supported by the platform */
+	ep->phy_mii.def->features &= ~emacdata->phy_feat_exc;
 
 	/* Setup initial PHY config & startup aneg */
 	if (ep->phy_mii.def->ops->init)
@@ -1882,6 +1885,34 @@ static int emac_init_device(struct ocp_device *ocpdev, struct ibm_ocp_mal *mal)
 	netif_carrier_off(ndev);
 	if (ep->phy_mii.def->features & SUPPORTED_Autoneg)
 		ep->want_autoneg = 1;
+	else {
+		ep->want_autoneg = 0;
+
+		/* Select highest supported speed/duplex */
+		if (ep->phy_mii.def->features & SUPPORTED_1000baseT_Full) {
+			ep->phy_mii.speed = SPEED_1000;
+			ep->phy_mii.duplex = DUPLEX_FULL;
+		} else if (ep->phy_mii.def->features &
+			   SUPPORTED_1000baseT_Half) {
+			ep->phy_mii.speed = SPEED_1000;
+			ep->phy_mii.duplex = DUPLEX_HALF;
+		} else if (ep->phy_mii.def->features &
+			   SUPPORTED_100baseT_Full) {
+			ep->phy_mii.speed = SPEED_100;
+			ep->phy_mii.duplex = DUPLEX_FULL;
+		} else if (ep->phy_mii.def->features &
+			   SUPPORTED_100baseT_Half) {
+			ep->phy_mii.speed = SPEED_100;
+			ep->phy_mii.duplex = DUPLEX_HALF;
+		} else if (ep->phy_mii.def->features &
+			   SUPPORTED_10baseT_Full) {
+			ep->phy_mii.speed = SPEED_10;
+			ep->phy_mii.duplex = DUPLEX_FULL;
+		} else {
+			ep->phy_mii.speed = SPEED_10;
+			ep->phy_mii.duplex = DUPLEX_HALF;
+		}
+	}
 	emac_start_link(ep, NULL);
 
 	/* read the MAC Address */
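
Note: the fallback above walks the PHY feature bits from fastest to slowest and stops at the first supported mode. An equivalent table-driven form (illustrative only, not what the driver uses):

	static const struct { u32 feature; int speed, duplex; } phy_fallback[] = {
		{ SUPPORTED_1000baseT_Full, SPEED_1000, DUPLEX_FULL },
		{ SUPPORTED_1000baseT_Half, SPEED_1000, DUPLEX_HALF },
		{ SUPPORTED_100baseT_Full,  SPEED_100,  DUPLEX_FULL },
		{ SUPPORTED_100baseT_Half,  SPEED_100,  DUPLEX_HALF },
		{ SUPPORTED_10baseT_Full,   SPEED_10,   DUPLEX_FULL },
		{ 0 /* default */,          SPEED_10,   DUPLEX_HALF },
	};
	int i;

	/* stop at the first supported entry; the last entry always matches */
	for (i = 0; i < ARRAY_SIZE(phy_fallback) - 1; i++)
		if (ep->phy_mii.def->features & phy_fallback[i].feature)
			break;
	ep->phy_mii.speed  = phy_fallback[i].speed;
	ep->phy_mii.duplex = phy_fallback[i].duplex;
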
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index e64df4d0800b..83334db2921c 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -584,7 +584,7 @@ static inline int ns83820_add_rx_skb(struct ns83820 *dev, struct sk_buff *skb)
584 return 0; 584 return 0;
585} 585}
586 586
587static inline int rx_refill(struct net_device *ndev, int gfp) 587static inline int rx_refill(struct net_device *ndev, unsigned int __nocast gfp)
588{ 588{
589 struct ns83820 *dev = PRIV(ndev); 589 struct ns83820 *dev = PRIV(ndev);
590 unsigned i; 590 unsigned i;
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index d652e1eddb45..c7cca842e5ee 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -1832,7 +1832,7 @@ static void fill_multicast_tbl(int count, struct dev_mc_list *addrs,
 {
 	struct dev_mc_list *mc_addr;
 
-	for (mc_addr = addrs;  mc_addr && --count > 0;  mc_addr = mc_addr->next) {
+	for (mc_addr = addrs;  mc_addr && count-- > 0;  mc_addr = mc_addr->next) {
 		u_int position = ether_crc(6, mc_addr->dmi_addr);
 #ifndef final_version		/* Verify multicast address. */
 		if ((mc_addr->dmi_addr[0] & 1) == 0)
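
Note: the one-character fix above is an off-by-one repair. With count entries, the pre-decrement form "--count > 0" visits only count - 1 of them, while the post-decrement form "count-- > 0" visits all count. Illustrative sketch:

	int n = 3, visited = 0;
	while (--n > 0)
		visited++;	/* visited == 2: last entry skipped */

	n = 3; visited = 0;
	while (n-- > 0)
		visited++;	/* visited == 3: all entries seen */
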
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index fd398da4993b..c2e6484ef138 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -2837,21 +2837,29 @@ static void skge_netpoll(struct net_device *dev)
 static int skge_set_mac_address(struct net_device *dev, void *p)
 {
 	struct skge_port *skge = netdev_priv(dev);
-	struct sockaddr *addr = p;
-	int err = 0;
+	struct skge_hw *hw = skge->hw;
+	unsigned port = skge->port;
+	const struct sockaddr *addr = p;
 
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 
-	skge_down(dev);
+	spin_lock_bh(&hw->phy_lock);
 	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
-	memcpy_toio(skge->hw->regs + B2_MAC_1 + skge->port*8,
+	memcpy_toio(hw->regs + B2_MAC_1 + port*8,
 		    dev->dev_addr, ETH_ALEN);
-	memcpy_toio(skge->hw->regs + B2_MAC_2 + skge->port*8,
+	memcpy_toio(hw->regs + B2_MAC_2 + port*8,
 		    dev->dev_addr, ETH_ALEN);
-	if (dev->flags & IFF_UP)
-		err = skge_up(dev);
-	return err;
+
+	if (hw->chip_id == CHIP_ID_GENESIS)
+		xm_outaddr(hw, port, XM_SA, dev->dev_addr);
+	else {
+		gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
+		gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
+	}
+	spin_unlock_bh(&hw->phy_lock);
+
+	return 0;
 }
 
 static const struct {
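
Note: instead of bouncing the link with skge_down()/skge_up(), the new code rewrites the station address registers in place while holding phy_lock. A hedged sketch of the pattern (write_mac_regs() is a hypothetical helper, not part of the driver):

	spin_lock_bh(&hw->phy_lock);
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	write_mac_regs(hw, port, dev->dev_addr);	/* update all hw copies */
	spin_unlock_bh(&hw->phy_lock);
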
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 88b89dc95c77..efdb179ecc8c 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -133,14 +133,18 @@
 	- finally added firmware (GPL'ed by Adaptec)
 	- removed compatibility code for 2.2.x
 
+	LK1.4.2.1 (Ion Badulescu)
+	- fixed 32/64 bit issues on i386 + CONFIG_HIGHMEM
+	- added 32-bit padding to outgoing skb's, removed previous workaround
+
 TODO:	- fix forced speed/duplexing code (broken a long time ago, when
 	somebody converted the driver to use the generic MII code)
 	- fix VLAN support
 */
 
 #define DRV_NAME	"starfire"
-#define DRV_VERSION	"1.03+LK1.4.2"
-#define DRV_RELDATE	"January 19, 2005"
+#define DRV_VERSION	"1.03+LK1.4.2.1"
+#define DRV_RELDATE	"October 3, 2005"
 
 #include <linux/config.h>
 #include <linux/version.h>
@@ -165,6 +169,14 @@ TODO: - fix forced speed/duplexing code (broken a long time ago, when
  * of length 1. If and when this is fixed, the #define below can be removed.
  */
 #define HAS_BROKEN_FIRMWARE
+
+/*
+ * If using the broken firmware, data must be padded to the next 32-bit boundary.
+ */
+#ifdef HAS_BROKEN_FIRMWARE
+#define PADDING_MASK	3
+#endif
+
 /*
  * Define this if using the driver with the zero-copy patch
  */
@@ -257,9 +269,10 @@ static int full_duplex[MAX_UNITS] = {0, };
  * This SUCKS.
  * We need a much better method to determine if dma_addr_t is 64-bit.
  */
-#if (defined(__i386__) && defined(CONFIG_HIGHMEM) && (LINUX_VERSION_CODE > 0x20500 || defined(CONFIG_HIGHMEM64G))) || defined(__x86_64__) || defined (__ia64__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR))
+#if (defined(__i386__) && defined(CONFIG_HIGHMEM64G)) || defined(__x86_64__) || defined (__ia64__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR))
 /* 64-bit dma_addr_t */
 #define ADDR_64BITS	/* This chip uses 64 bit addresses. */
+#define netdrv_addr_t	u64
 #define cpu_to_dma(x)	cpu_to_le64(x)
 #define dma_to_cpu(x)	le64_to_cpu(x)
 #define RX_DESC_Q_ADDR_SIZE	RxDescQAddr64bit
@@ -268,6 +281,7 @@ static int full_duplex[MAX_UNITS] = {0, };
 #define TX_COMPL_Q_ADDR_SIZE	TxComplQAddr64bit
 #define RX_DESC_ADDR_SIZE	RxDescAddr64bit
 #else  /* 32-bit dma_addr_t */
+#define netdrv_addr_t	u32
 #define cpu_to_dma(x)	cpu_to_le32(x)
 #define dma_to_cpu(x)	le32_to_cpu(x)
 #define RX_DESC_Q_ADDR_SIZE	RxDescQAddr32bit
@@ -1333,21 +1347,10 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
 	}
 
 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
-	{
-		int has_bad_length = 0;
-
-		if (skb_first_frag_len(skb) == 1)
-			has_bad_length = 1;
-		else {
-			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-				if (skb_shinfo(skb)->frags[i].size == 1) {
-					has_bad_length = 1;
-					break;
-				}
-		}
-
-		if (has_bad_length)
-			skb_checksum_help(skb, 0);
+	if (skb->ip_summed == CHECKSUM_HW) {
+		skb = skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK);
+		if (skb == NULL)
+			return NETDEV_TX_OK;
 	}
 #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
 
@@ -2127,13 +2130,12 @@ static int __init starfire_init (void)
 #endif
 #endif
 
-#ifndef ADDR_64BITS
 	/* we can do this test only at run-time... sigh */
-	if (sizeof(dma_addr_t) == sizeof(u64)) {
-		printk("This driver has not been ported to this 64-bit architecture yet\n");
+	if (sizeof(dma_addr_t) != sizeof(netdrv_addr_t)) {
+		printk("This driver has dma_addr_t issues, please send email to maintainer\n");
 		return -ENODEV;
 	}
-#endif /* not ADDR_64BITS */
+
 	return pci_module_init (&starfire_driver);
 }
 
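
Note: with PADDING_MASK == 3, the expression (skb->len + PADDING_MASK) & ~PADDING_MASK rounds the length up to the next 32-bit boundary, and skb_padto() frees the skb itself on failure. Worked example: a 61-byte packet pads to (61 + 3) & ~3 = 64 bytes:

	skb = skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK);
	if (skb == NULL)
		return NETDEV_TX_OK;	/* skb already freed by skb_padto() */
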
diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h
index ff8ae5f79970..16edbb1a4a7a 100644
--- a/drivers/net/sungem.h
+++ b/drivers/net/sungem.h
@@ -1035,7 +1035,8 @@ struct gem {
 
 #define ALIGNED_RX_SKB_ADDR(addr) \
         ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))
-static __inline__ struct sk_buff *gem_alloc_skb(int size, int gfp_flags)
+static __inline__ struct sk_buff *gem_alloc_skb(int size,
+						unsigned int __nocast gfp_flags)
 {
 	struct sk_buff *skb = alloc_skb(size + 64, gfp_flags);
 
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index e7b001017b9a..32057e65808b 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -531,7 +531,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
 		if (!time_after(jiffies, timeout)) continue;
 		DPRINTK( "Hardware timeout during initialization.\n");
 		iounmap(t_mmio);
-		kfree(ti);
 		return -ENODEV;
 	}
 	ti->sram_phys =
@@ -645,7 +644,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
 		DPRINTK("Unknown shared ram paging info %01X\n",
 			ti->shared_ram_paging);
 		iounmap(t_mmio);
-		kfree(ti);
 		return -ENODEV;
 		break;
 	} /*end switch shared_ram_paging */
@@ -675,7 +673,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
 			"driver limit (%05x), adapter not started.\n",
 			chk_base, ibmtr_mem_base + IBMTR_SHARED_RAM_SIZE);
 		iounmap(t_mmio);
-		kfree(ti);
 		return -ENODEV;
 	} else { /* seems cool, record what we have figured out */
 		ti->sram_base = new_base >> 12;
@@ -690,7 +687,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
 		DPRINTK("Could not grab irq %d.  Halting Token Ring driver.\n",
 			irq);
 		iounmap(t_mmio);
-		kfree(ti);
 		return -ENODEV;
 	}
 	/*?? Now, allocate some of the PIO PORTs for this driver.. */
@@ -699,7 +695,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
 		DPRINTK("Could not grab PIO range. Halting driver.\n");
 		free_irq(dev->irq, dev);
 		iounmap(t_mmio);
-		kfree(ti);
 		return -EBUSY;
 	}
 
diff --git a/drivers/net/tulip/21142.c b/drivers/net/tulip/21142.c
index 5db694c4eb02..683f14b01c06 100644
--- a/drivers/net/tulip/21142.c
+++ b/drivers/net/tulip/21142.c
@@ -172,7 +172,7 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
172 int i; 172 int i;
173 for (i = 0; i < tp->mtable->leafcount; i++) 173 for (i = 0; i < tp->mtable->leafcount; i++)
174 if (tp->mtable->mleaf[i].media == dev->if_port) { 174 if (tp->mtable->mleaf[i].media == dev->if_port) {
175 int startup = ! ((tp->chip_id == DC21143 && tp->revision == 65)); 175 int startup = ! ((tp->chip_id == DC21143 && (tp->revision == 48 || tp->revision == 65)));
176 tp->cur_index = i; 176 tp->cur_index = i;
177 tulip_select_media(dev, startup); 177 tulip_select_media(dev, startup);
178 setup_done = 1; 178 setup_done = 1;
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index 6deb7cc810cc..cf3daaa1b369 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -503,9 +503,14 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
503 return 0; 503 return 0;
504 } 504 }
505 505
506 /* Length of the packet body */ 506 /* Check packet length, pad short packets, round up odd length */
507 /* FIXME: what if the skb is smaller than this? */ 507 len = max_t(int, ALIGN(skb->len, 2), ETH_ZLEN);
508 len = max_t(int,skb->len - ETH_HLEN, ETH_ZLEN - ETH_HLEN); 508 if (skb->len < len) {
509 skb = skb_padto(skb, len);
510 if (skb == NULL)
511 goto fail;
512 }
513 len -= ETH_HLEN;
509 514
510 eh = (struct ethhdr *)skb->data; 515 eh = (struct ethhdr *)skb->data;
511 516
@@ -557,8 +562,7 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
557 p = skb->data; 562 p = skb->data;
558 } 563 }
559 564
560 /* Round up for odd length packets */ 565 err = hermes_bap_pwrite(hw, USER_BAP, p, data_len,
561 err = hermes_bap_pwrite(hw, USER_BAP, p, ALIGN(data_len, 2),
562 txfid, data_off); 566 txfid, data_off);
563 if (err) { 567 if (err) {
564 printk(KERN_ERR "%s: Error %d writing packet to BAP\n", 568 printk(KERN_ERR "%s: Error %d writing packet to BAP\n",
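
Note: the new length logic pads undersized frames instead of lying about their length. Worked example: a 57-byte frame gives ALIGN(57, 2) = 58, then max(58, ETH_ZLEN = 60) = 60, so the skb is padded to 60 bytes and the body length becomes 60 - ETH_HLEN = 46.
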
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h
index 2ad4797ce024..9963479ba89f 100644
--- a/drivers/s390/net/qeth.h
+++ b/drivers/s390/net/qeth.h
@@ -686,6 +686,7 @@ struct qeth_seqno {
686 __u32 pdu_hdr; 686 __u32 pdu_hdr;
687 __u32 pdu_hdr_ack; 687 __u32 pdu_hdr_ack;
688 __u16 ipa; 688 __u16 ipa;
689 __u32 pkt_seqno;
689}; 690};
690 691
691struct qeth_reply { 692struct qeth_reply {
@@ -848,6 +849,7 @@ qeth_realloc_headroom(struct qeth_card *card, struct sk_buff **skb, int size)
848 "on interface %s", QETH_CARD_IFNAME(card)); 849 "on interface %s", QETH_CARD_IFNAME(card));
849 return -ENOMEM; 850 return -ENOMEM;
850 } 851 }
852 kfree_skb(*skb);
851 *skb = new_skb; 853 *skb = new_skb;
852 } 854 }
853 return 0; 855 return 0;
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 71de834ece1a..bd28e2438d7f 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -511,7 +511,7 @@ static int
 __qeth_set_offline(struct ccwgroup_device *cgdev, int recovery_mode)
 {
 	struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
-	int rc = 0;
+	int rc = 0, rc2 = 0, rc3 = 0;
 	enum qeth_card_states recover_flag;
 
 	QETH_DBF_TEXT(setup, 3, "setoffl");
@@ -523,11 +523,13 @@ __qeth_set_offline(struct ccwgroup_device *cgdev, int recovery_mode)
 			   CARD_BUS_ID(card));
 		return -ERESTARTSYS;
 	}
-	if ((rc = ccw_device_set_offline(CARD_DDEV(card))) ||
-	    (rc = ccw_device_set_offline(CARD_WDEV(card))) ||
-	    (rc = ccw_device_set_offline(CARD_RDEV(card)))) {
+	rc  = ccw_device_set_offline(CARD_DDEV(card));
+	rc2 = ccw_device_set_offline(CARD_WDEV(card));
+	rc3 = ccw_device_set_offline(CARD_RDEV(card));
+	if (!rc)
+		rc = (rc2) ? rc2 : rc3;
+	if (rc)
 		QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
-	}
 	if (recover_flag == CARD_STATE_UP)
 		card->state = CARD_STATE_RECOVER;
 	qeth_notify_processes();
@@ -1046,6 +1048,7 @@ qeth_setup_card(struct qeth_card *card)
 	spin_lock_init(&card->vlanlock);
 	card->vlangrp = NULL;
 #endif
+	spin_lock_init(&card->lock);
 	spin_lock_init(&card->ip_lock);
 	spin_lock_init(&card->thread_mask_lock);
 	card->thread_start_mask = 0;
@@ -1626,16 +1629,6 @@ qeth_cmd_timeout(unsigned long data)
 	spin_unlock_irqrestore(&reply->card->lock, flags);
 }
 
-static void
-qeth_reset_ip_addresses(struct qeth_card *card)
-{
-	QETH_DBF_TEXT(trace, 2, "rstipadd");
-
-	qeth_clear_ip_list(card, 0, 1);
-	/* this function will also schedule the SET_IP_THREAD */
-	qeth_set_multicast_list(card->dev);
-}
-
 static struct qeth_ipa_cmd *
 qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
 {
@@ -1664,9 +1657,8 @@ qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
 				   "IP address reset.\n",
 				   QETH_CARD_IFNAME(card),
 				   card->info.chpid);
-			card->lan_online = 1;
 			netif_carrier_on(card->dev);
-			qeth_reset_ip_addresses(card);
+			qeth_schedule_recovery(card);
 			return NULL;
 		case IPA_CMD_REGISTER_LOCAL_ADDR:
 			QETH_DBF_TEXT(trace,3, "irla");
@@ -2387,6 +2379,7 @@ qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
 		skb_pull(skb, VLAN_HLEN);
 	}
 #endif
+	*((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
 	return vlan_id;
 }
 
@@ -3014,7 +3007,7 @@ qeth_alloc_buffer_pool(struct qeth_card *card)
 			return -ENOMEM;
 		}
 		for(j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j){
-			ptr = (void *) __get_free_page(GFP_KERNEL);
+			ptr = (void *) __get_free_page(GFP_KERNEL|GFP_DMA);
 			if (!ptr) {
 				while (j > 0)
 					free_page((unsigned long)
@@ -3058,7 +3051,8 @@ qeth_alloc_qdio_buffers(struct qeth_card *card)
 	if (card->qdio.state == QETH_QDIO_ALLOCATED)
 		return 0;
 
-	card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q), GFP_KERNEL);
+	card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q),
+				  GFP_KERNEL|GFP_DMA);
 	if (!card->qdio.in_q)
 		return - ENOMEM;
 	QETH_DBF_TEXT(setup, 2, "inq");
@@ -3083,7 +3077,7 @@ qeth_alloc_qdio_buffers(struct qeth_card *card)
 	}
 	for (i = 0; i < card->qdio.no_out_queues; ++i){
 		card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q),
-					       GFP_KERNEL);
+					       GFP_KERNEL|GFP_DMA);
 		if (!card->qdio.out_qs[i]){
 			while (i > 0)
 				kfree(card->qdio.out_qs[--i]);
@@ -6470,6 +6464,9 @@ qeth_query_ipassists_cb(struct qeth_card *card, struct qeth_reply *reply,
 	if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
 		card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
 		card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
+		/* Disable IPV6 support hard coded for Hipersockets */
+		if(card->info.type == QETH_CARD_TYPE_IQD)
+			card->options.ipa4.supported_funcs &= ~IPA_IPV6;
 	} else {
 #ifdef CONFIG_QETH_IPV6
 		card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
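
Note: the GFP_DMA additions above are s390-specific: qdio control structures handed directly to the device must, as far as this merge indicates, come from 31-bit addressable storage, which ZONE_DMA guarantees on that architecture. Hedged sketch of the allocation pattern:

	/* buffer the hardware will address directly: force ZONE_DMA */
	buf = kmalloc(sizeof(*buf), GFP_KERNEL | GFP_DMA);
	if (!buf)
		return -ENOMEM;
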
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 20019b82b4a8..be96cb78e3b5 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -521,6 +521,14 @@ config SCSI_SATA_SIL
 
 	  If unsure, say N.
 
+config SCSI_SATA_SIL24
+	tristate "Silicon Image 3124/3132 SATA support"
+	depends on SCSI_SATA && PCI && EXPERIMENTAL
+	help
+	  This option enables support for Silicon Image 3124/3132 Serial ATA.
+
+	  If unsure, say N.
+
 config SCSI_SATA_SIS
 	tristate "SiS 964/180 SATA support"
 	depends on SCSI_SATA && PCI && EXPERIMENTAL
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 48529d180ca8..e2e3d8671930 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
 obj-$(CONFIG_SCSI_SATA_PROMISE)	+= libata.o sata_promise.o
 obj-$(CONFIG_SCSI_SATA_QSTOR)	+= libata.o sata_qstor.o
 obj-$(CONFIG_SCSI_SATA_SIL)	+= libata.o sata_sil.o
+obj-$(CONFIG_SCSI_SATA_SIL24)	+= libata.o sata_sil24.o
 obj-$(CONFIG_SCSI_SATA_VIA)	+= libata.o sata_via.o
 obj-$(CONFIG_SCSI_SATA_VITESSE)	+= libata.o sata_vsc.o
 obj-$(CONFIG_SCSI_SATA_SIS)	+= libata.o sata_sis.o
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index 6e4bb36f8d7c..f0d8f89b5d40 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -680,17 +680,36 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
680 680
681 for (i = 0; i < host_set->n_ports; i++) { 681 for (i = 0; i < host_set->n_ports; i++) {
682 struct ata_port *ap; 682 struct ata_port *ap;
683 u32 tmp;
684 683
685 VPRINTK("port %u\n", i); 684 if (!(irq_stat & (1 << i)))
685 continue;
686
686 ap = host_set->ports[i]; 687 ap = host_set->ports[i];
687 tmp = irq_stat & (1 << i); 688 if (ap) {
688 if (tmp && ap) {
689 struct ata_queued_cmd *qc; 689 struct ata_queued_cmd *qc;
690 qc = ata_qc_from_tag(ap, ap->active_tag); 690 qc = ata_qc_from_tag(ap, ap->active_tag);
691 if (ahci_host_intr(ap, qc)) 691 if (!ahci_host_intr(ap, qc))
692 irq_ack |= (1 << i); 692 if (ata_ratelimit()) {
693 struct pci_dev *pdev =
694 to_pci_dev(ap->host_set->dev);
695 printk(KERN_WARNING
696 "ahci(%s): unhandled interrupt on port %u\n",
697 pci_name(pdev), i);
698 }
699
700 VPRINTK("port %u\n", i);
701 } else {
702 VPRINTK("port %u (no irq)\n", i);
703 if (ata_ratelimit()) {
704 struct pci_dev *pdev =
705 to_pci_dev(ap->host_set->dev);
706 printk(KERN_WARNING
707 "ahci(%s): interrupt on disabled port %u\n",
708 pci_name(pdev), i);
709 }
693 } 710 }
711
712 irq_ack |= (1 << i);
694 } 713 }
695 714
696 if (irq_ack) { 715 if (irq_ack) {
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 943b44c3c16f..9aa93087d495 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -48,6 +48,7 @@
 #include <linux/completion.h>
 #include <linux/suspend.h>
 #include <linux/workqueue.h>
+#include <linux/jiffies.h>
 #include <scsi/scsi.h>
 #include "scsi.h"
 #include "scsi_priv.h"
@@ -62,6 +63,7 @@
 static unsigned int ata_busy_sleep (struct ata_port *ap,
 				    unsigned long tmout_pat,
 				    unsigned long tmout);
+static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev);
 static void ata_set_mode(struct ata_port *ap);
 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
 static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift);
@@ -69,7 +71,6 @@ static int fgb(u32 bitmap);
 static int ata_choose_xfer_mode(struct ata_port *ap,
 				u8 *xfer_mode_out,
 				unsigned int *xfer_shift_out);
-static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat);
 static void __ata_qc_complete(struct ata_queued_cmd *qc);
 
 static unsigned int ata_unique_id = 1;
@@ -1131,7 +1132,7 @@ static inline void ata_dump_id(struct ata_device *dev)
 static void ata_dev_identify(struct ata_port *ap, unsigned int device)
 {
 	struct ata_device *dev = &ap->device[device];
-	unsigned int i;
+	unsigned int major_version;
 	u16 tmp;
 	unsigned long xfer_modes;
 	u8 status;
@@ -1229,9 +1230,9 @@ retry:
 	 * common ATA, ATAPI feature tests
 	 */
 
-	/* we require LBA and DMA support (bits 8 & 9 of word 49) */
-	if (!ata_id_has_dma(dev->id) || !ata_id_has_lba(dev->id)) {
-		printk(KERN_DEBUG "ata%u: no dma/lba\n", ap->id);
+	/* we require DMA support (bits 8 of word 49) */
+	if (!ata_id_has_dma(dev->id)) {
+		printk(KERN_DEBUG "ata%u: no dma\n", ap->id);
 		goto err_out_nosup;
 	}
 
@@ -1251,32 +1252,69 @@ retry:
1251 if (!ata_id_is_ata(dev->id)) /* sanity check */ 1252 if (!ata_id_is_ata(dev->id)) /* sanity check */
1252 goto err_out_nosup; 1253 goto err_out_nosup;
1253 1254
1255 /* get major version */
1254 tmp = dev->id[ATA_ID_MAJOR_VER]; 1256 tmp = dev->id[ATA_ID_MAJOR_VER];
1255 for (i = 14; i >= 1; i--) 1257 for (major_version = 14; major_version >= 1; major_version--)
1256 if (tmp & (1 << i)) 1258 if (tmp & (1 << major_version))
1257 break; 1259 break;
1258 1260
1259 /* we require at least ATA-3 */ 1261 /*
1260 if (i < 3) { 1262 * The exact sequence expected by certain pre-ATA4 drives is:
1261 printk(KERN_DEBUG "ata%u: no ATA-3\n", ap->id); 1263 * SRST RESET
1262 goto err_out_nosup; 1264 * IDENTIFY
1263 } 1265 * INITIALIZE DEVICE PARAMETERS
1266 * anything else..
1267 * Some drives were very specific about that exact sequence.
1268 */
1269 if (major_version < 4 || (!ata_id_has_lba(dev->id)))
1270 ata_dev_init_params(ap, dev);
1271
1272 if (ata_id_has_lba(dev->id)) {
1273 dev->flags |= ATA_DFLAG_LBA;
1274
1275 if (ata_id_has_lba48(dev->id)) {
1276 dev->flags |= ATA_DFLAG_LBA48;
1277 dev->n_sectors = ata_id_u64(dev->id, 100);
1278 } else {
1279 dev->n_sectors = ata_id_u32(dev->id, 60);
1280 }
1281
1282 /* print device info to dmesg */
1283 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n",
1284 ap->id, device,
1285 major_version,
1286 ata_mode_string(xfer_modes),
1287 (unsigned long long)dev->n_sectors,
1288 dev->flags & ATA_DFLAG_LBA48 ? " LBA48" : " LBA");
1289 } else {
1290 /* CHS */
1291
1292 /* Default translation */
1293 dev->cylinders = dev->id[1];
1294 dev->heads = dev->id[3];
1295 dev->sectors = dev->id[6];
1296 dev->n_sectors = dev->cylinders * dev->heads * dev->sectors;
1297
1298 if (ata_id_current_chs_valid(dev->id)) {
1299 /* Current CHS translation is valid. */
1300 dev->cylinders = dev->id[54];
1301 dev->heads = dev->id[55];
1302 dev->sectors = dev->id[56];
1303
1304 dev->n_sectors = ata_id_u32(dev->id, 57);
1305 }
1306
1307 /* print device info to dmesg */
1308 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors: CHS %d/%d/%d\n",
1309 ap->id, device,
1310 major_version,
1311 ata_mode_string(xfer_modes),
1312 (unsigned long long)dev->n_sectors,
1313 (int)dev->cylinders, (int)dev->heads, (int)dev->sectors);
1264 1314
1265 if (ata_id_has_lba48(dev->id)) {
1266 dev->flags |= ATA_DFLAG_LBA48;
1267 dev->n_sectors = ata_id_u64(dev->id, 100);
1268 } else {
1269 dev->n_sectors = ata_id_u32(dev->id, 60);
1270 } 1315 }
1271 1316
1272 ap->host->max_cmd_len = 16; 1317 ap->host->max_cmd_len = 16;
1273
1274 /* print device info to dmesg */
1275 printk(KERN_INFO "ata%u: dev %u ATA, max %s, %Lu sectors:%s\n",
1276 ap->id, device,
1277 ata_mode_string(xfer_modes),
1278 (unsigned long long)dev->n_sectors,
1279 dev->flags & ATA_DFLAG_LBA48 ? " lba48" : "");
1280 } 1318 }
1281 1319
1282 /* ATAPI-specific feature tests */ 1320 /* ATAPI-specific feature tests */
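
ata_dev_identify() above reads the LBA28 capacity from IDENTIFY words 60-61 with ata_id_u32() and the LBA48 capacity from words 100-103 with ata_id_u64(). A minimal sketch of that word packing; the id_u32/id_u64 helpers below are illustrative stand-ins assumed to match the libata macros:

#include <stdio.h>
#include <stdint.h>

/* IDENTIFY data is 256 little-endian 16-bit words; multi-word
 * fields are stored low word first.
 */
static uint32_t id_u32(const uint16_t *id, int word)
{
        return ((uint32_t)id[word + 1] << 16) | id[word];
}

static uint64_t id_u64(const uint16_t *id, int word)
{
        return ((uint64_t)id_u32(id, word + 2) << 32) | id_u32(id, word);
}

int main(void)
{
        uint16_t id[256] = { 0 };

        id[60] = 0x5678;                /* words 60-61: LBA28 capacity */
        id[61] = 0x0012;
        id[100] = 0x0001;               /* words 100-103: LBA48 capacity */

        printf("LBA28 sectors: %u\n", id_u32(id, 60));   /* 1201784 */
        printf("LBA48 sectors: %llu\n",
               (unsigned long long)id_u64(id, 100));     /* 1 */
        return 0;
}
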
@@ -2144,6 +2182,54 @@ static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
2144} 2182}
2145 2183
2146/** 2184/**
2185 * ata_dev_init_params - Issue INIT DEV PARAMS command
2186 * @ap: Port associated with device @dev
2187 * @dev: Device to which command will be sent
2188 *
2189 * LOCKING:
2190 */
2191
2192static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev)
2193{
2194 DECLARE_COMPLETION(wait);
2195 struct ata_queued_cmd *qc;
2196 int rc;
2197 unsigned long flags;
2198 u16 sectors = dev->id[6];
2199 u16 heads = dev->id[3];
2200
2201 /* Number of sectors per track 1-255. Number of heads 1-16 */
2202 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2203 return;
2204
2205 /* set up init dev params taskfile */
2206 DPRINTK("init dev params\n");
2207
2208 qc = ata_qc_new_init(ap, dev);
2209 BUG_ON(qc == NULL);
2210
2211 qc->tf.command = ATA_CMD_INIT_DEV_PARAMS;
2212 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2213 qc->tf.protocol = ATA_PROT_NODATA;
2214 qc->tf.nsect = sectors;
2215 qc->tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2216
2217 qc->waiting = &wait;
2218 qc->complete_fn = ata_qc_complete_noop;
2219
2220 spin_lock_irqsave(&ap->host_set->lock, flags);
2221 rc = ata_qc_issue(qc);
2222 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2223
2224 if (rc)
2225 ata_port_disable(ap);
2226 else
2227 wait_for_completion(&wait);
2228
2229 DPRINTK("EXIT\n");
2230}
2231
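
ata_dev_init_params() above validates the IDENTIFY geometry and then encodes it the way INIT DEVICE PARAMETERS expects: sectors-per-track in the sector count register and (heads - 1) in the low nibble of the device register. A standalone sketch of just that check and encoding (encode_init_dev_params is an invented helper name):

#include <stdio.h>
#include <stdint.h>

static int encode_init_dev_params(uint16_t sectors, uint16_t heads,
                                  uint8_t *nsect, uint8_t *device)
{
        /* Number of sectors per track: 1-255.  Number of heads: 1-16. */
        if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
                return -1;              /* bogus IDENTIFY geometry, skip */

        *nsect = (uint8_t)sectors;      /* sector count register */
        *device = (heads - 1) & 0x0f;   /* max head = number of heads - 1 */
        return 0;
}

int main(void)
{
        uint8_t nsect, device;

        if (encode_init_dev_params(63, 16, &nsect, &device) == 0)
                printf("nsect=%u device low nibble=0x%x\n", nsect, device);
        if (encode_init_dev_params(0, 17, &nsect, &device) < 0)
                printf("rejected invalid geometry\n");
        return 0;
}
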
2232/**
2147 * ata_sg_clean - Unmap DMA memory associated with command 2233 * ata_sg_clean - Unmap DMA memory associated with command
2148 * @qc: Command containing DMA memory to be released 2234 * @qc: Command containing DMA memory to be released
2149 * 2235 *
@@ -2507,20 +2593,20 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
2507static unsigned long ata_pio_poll(struct ata_port *ap) 2593static unsigned long ata_pio_poll(struct ata_port *ap)
2508{ 2594{
2509 u8 status; 2595 u8 status;
2510 unsigned int poll_state = PIO_ST_UNKNOWN; 2596 unsigned int poll_state = HSM_ST_UNKNOWN;
2511 unsigned int reg_state = PIO_ST_UNKNOWN; 2597 unsigned int reg_state = HSM_ST_UNKNOWN;
2512 const unsigned int tmout_state = PIO_ST_TMOUT; 2598 const unsigned int tmout_state = HSM_ST_TMOUT;
2513 2599
2514 switch (ap->pio_task_state) { 2600 switch (ap->hsm_task_state) {
2515 case PIO_ST: 2601 case HSM_ST:
2516 case PIO_ST_POLL: 2602 case HSM_ST_POLL:
2517 poll_state = PIO_ST_POLL; 2603 poll_state = HSM_ST_POLL;
2518 reg_state = PIO_ST; 2604 reg_state = HSM_ST;
2519 break; 2605 break;
2520 case PIO_ST_LAST: 2606 case HSM_ST_LAST:
2521 case PIO_ST_LAST_POLL: 2607 case HSM_ST_LAST_POLL:
2522 poll_state = PIO_ST_LAST_POLL; 2608 poll_state = HSM_ST_LAST_POLL;
2523 reg_state = PIO_ST_LAST; 2609 reg_state = HSM_ST_LAST;
2524 break; 2610 break;
2525 default: 2611 default:
2526 BUG(); 2612 BUG();
@@ -2530,14 +2616,14 @@ static unsigned long ata_pio_poll(struct ata_port *ap)
2530 status = ata_chk_status(ap); 2616 status = ata_chk_status(ap);
2531 if (status & ATA_BUSY) { 2617 if (status & ATA_BUSY) {
2532 if (time_after(jiffies, ap->pio_task_timeout)) { 2618 if (time_after(jiffies, ap->pio_task_timeout)) {
2533 ap->pio_task_state = tmout_state; 2619 ap->hsm_task_state = tmout_state;
2534 return 0; 2620 return 0;
2535 } 2621 }
2536 ap->pio_task_state = poll_state; 2622 ap->hsm_task_state = poll_state;
2537 return ATA_SHORT_PAUSE; 2623 return ATA_SHORT_PAUSE;
2538 } 2624 }
2539 2625
2540 ap->pio_task_state = reg_state; 2626 ap->hsm_task_state = reg_state;
2541 return 0; 2627 return 0;
2542} 2628}
2543 2629
@@ -2562,14 +2648,14 @@ static int ata_pio_complete (struct ata_port *ap)
2562 * we enter, BSY will be cleared in a chk-status or two. If not, 2648 * we enter, BSY will be cleared in a chk-status or two. If not,
2563 * the drive is probably seeking or something. Snooze for a couple 2649 * the drive is probably seeking or something. Snooze for a couple
2564 * msecs, then chk-status again. If still busy, fall back to 2650 * msecs, then chk-status again. If still busy, fall back to
2565 * PIO_ST_POLL state. 2651 * HSM_ST_POLL state.
2566 */ 2652 */
2567 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); 2653 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2568 if (drv_stat & (ATA_BUSY | ATA_DRQ)) { 2654 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2569 msleep(2); 2655 msleep(2);
2570 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); 2656 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2571 if (drv_stat & (ATA_BUSY | ATA_DRQ)) { 2657 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2572 ap->pio_task_state = PIO_ST_LAST_POLL; 2658 ap->hsm_task_state = HSM_ST_LAST_POLL;
2573 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO; 2659 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2574 return 0; 2660 return 0;
2575 } 2661 }
@@ -2577,14 +2663,14 @@ static int ata_pio_complete (struct ata_port *ap)
2577 2663
2578 drv_stat = ata_wait_idle(ap); 2664 drv_stat = ata_wait_idle(ap);
2579 if (!ata_ok(drv_stat)) { 2665 if (!ata_ok(drv_stat)) {
2580 ap->pio_task_state = PIO_ST_ERR; 2666 ap->hsm_task_state = HSM_ST_ERR;
2581 return 0; 2667 return 0;
2582 } 2668 }
2583 2669
2584 qc = ata_qc_from_tag(ap, ap->active_tag); 2670 qc = ata_qc_from_tag(ap, ap->active_tag);
2585 assert(qc != NULL); 2671 assert(qc != NULL);
2586 2672
2587 ap->pio_task_state = PIO_ST_IDLE; 2673 ap->hsm_task_state = HSM_ST_IDLE;
2588 2674
2589 ata_poll_qc_complete(qc, drv_stat); 2675 ata_poll_qc_complete(qc, drv_stat);
2590 2676
@@ -2744,7 +2830,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
2744 unsigned char *buf; 2830 unsigned char *buf;
2745 2831
2746 if (qc->cursect == (qc->nsect - 1)) 2832 if (qc->cursect == (qc->nsect - 1))
2747 ap->pio_task_state = PIO_ST_LAST; 2833 ap->hsm_task_state = HSM_ST_LAST;
2748 2834
2749 page = sg[qc->cursg].page; 2835 page = sg[qc->cursg].page;
2750 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE; 2836 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
@@ -2794,7 +2880,7 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
2794 unsigned int offset, count; 2880 unsigned int offset, count;
2795 2881
2796 if (qc->curbytes + bytes >= qc->nbytes) 2882 if (qc->curbytes + bytes >= qc->nbytes)
2797 ap->pio_task_state = PIO_ST_LAST; 2883 ap->hsm_task_state = HSM_ST_LAST;
2798 2884
2799next_sg: 2885next_sg:
2800 if (unlikely(qc->cursg >= qc->n_elem)) { 2886 if (unlikely(qc->cursg >= qc->n_elem)) {
@@ -2816,7 +2902,7 @@ next_sg:
2816 for (i = 0; i < words; i++) 2902 for (i = 0; i < words; i++)
2817 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write); 2903 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
2818 2904
2819 ap->pio_task_state = PIO_ST_LAST; 2905 ap->hsm_task_state = HSM_ST_LAST;
2820 return; 2906 return;
2821 } 2907 }
2822 2908
@@ -2897,7 +2983,7 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
2897err_out: 2983err_out:
2898 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n", 2984 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
2899 ap->id, dev->devno); 2985 ap->id, dev->devno);
2900 ap->pio_task_state = PIO_ST_ERR; 2986 ap->hsm_task_state = HSM_ST_ERR;
2901} 2987}
2902 2988
2903/** 2989/**
@@ -2919,14 +3005,14 @@ static void ata_pio_block(struct ata_port *ap)
2919 * a chk-status or two. If not, the drive is probably seeking 3005 * a chk-status or two. If not, the drive is probably seeking
2920 * or something. Snooze for a couple msecs, then 3006 * or something. Snooze for a couple msecs, then
2921 * chk-status again. If still busy, fall back to 3007 * chk-status again. If still busy, fall back to
2922 * PIO_ST_POLL state. 3008 * HSM_ST_POLL state.
2923 */ 3009 */
2924 status = ata_busy_wait(ap, ATA_BUSY, 5); 3010 status = ata_busy_wait(ap, ATA_BUSY, 5);
2925 if (status & ATA_BUSY) { 3011 if (status & ATA_BUSY) {
2926 msleep(2); 3012 msleep(2);
2927 status = ata_busy_wait(ap, ATA_BUSY, 10); 3013 status = ata_busy_wait(ap, ATA_BUSY, 10);
2928 if (status & ATA_BUSY) { 3014 if (status & ATA_BUSY) {
2929 ap->pio_task_state = PIO_ST_POLL; 3015 ap->hsm_task_state = HSM_ST_POLL;
2930 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO; 3016 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2931 return; 3017 return;
2932 } 3018 }
@@ -2938,7 +3024,7 @@ static void ata_pio_block(struct ata_port *ap)
2938 if (is_atapi_taskfile(&qc->tf)) { 3024 if (is_atapi_taskfile(&qc->tf)) {
2939 /* no more data to transfer or unsupported ATAPI command */ 3025 /* no more data to transfer or unsupported ATAPI command */
2940 if ((status & ATA_DRQ) == 0) { 3026 if ((status & ATA_DRQ) == 0) {
2941 ap->pio_task_state = PIO_ST_LAST; 3027 ap->hsm_task_state = HSM_ST_LAST;
2942 return; 3028 return;
2943 } 3029 }
2944 3030
@@ -2946,7 +3032,7 @@ static void ata_pio_block(struct ata_port *ap)
2946 } else { 3032 } else {
2947 /* handle BSY=0, DRQ=0 as error */ 3033 /* handle BSY=0, DRQ=0 as error */
2948 if ((status & ATA_DRQ) == 0) { 3034 if ((status & ATA_DRQ) == 0) {
2949 ap->pio_task_state = PIO_ST_ERR; 3035 ap->hsm_task_state = HSM_ST_ERR;
2950 return; 3036 return;
2951 } 3037 }
2952 3038
@@ -2966,7 +3052,7 @@ static void ata_pio_error(struct ata_port *ap)
2966 printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n", 3052 printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n",
2967 ap->id, drv_stat); 3053 ap->id, drv_stat);
2968 3054
2969 ap->pio_task_state = PIO_ST_IDLE; 3055 ap->hsm_task_state = HSM_ST_IDLE;
2970 3056
2971 ata_poll_qc_complete(qc, drv_stat | ATA_ERR); 3057 ata_poll_qc_complete(qc, drv_stat | ATA_ERR);
2972} 3058}
@@ -2981,25 +3067,25 @@ fsm_start:
2981 timeout = 0; 3067 timeout = 0;
2982 qc_completed = 0; 3068 qc_completed = 0;
2983 3069
2984 switch (ap->pio_task_state) { 3070 switch (ap->hsm_task_state) {
2985 case PIO_ST_IDLE: 3071 case HSM_ST_IDLE:
2986 return; 3072 return;
2987 3073
2988 case PIO_ST: 3074 case HSM_ST:
2989 ata_pio_block(ap); 3075 ata_pio_block(ap);
2990 break; 3076 break;
2991 3077
2992 case PIO_ST_LAST: 3078 case HSM_ST_LAST:
2993 qc_completed = ata_pio_complete(ap); 3079 qc_completed = ata_pio_complete(ap);
2994 break; 3080 break;
2995 3081
2996 case PIO_ST_POLL: 3082 case HSM_ST_POLL:
2997 case PIO_ST_LAST_POLL: 3083 case HSM_ST_LAST_POLL:
2998 timeout = ata_pio_poll(ap); 3084 timeout = ata_pio_poll(ap);
2999 break; 3085 break;
3000 3086
3001 case PIO_ST_TMOUT: 3087 case HSM_ST_TMOUT:
3002 case PIO_ST_ERR: 3088 case HSM_ST_ERR:
3003 ata_pio_error(ap); 3089 ata_pio_error(ap);
3004 return; 3090 return;
3005 } 3091 }
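
This hunk is a pure rename of the polled PIO states from PIO_ST_* to HSM_ST_* (host state machine); the transitions themselves are untouched. A toy userspace model of the same dispatch shape, heavily simplified, with the poll, timeout and error arms elided:

#include <stdio.h>

enum hsm_state {
        HSM_ST_IDLE, HSM_ST, HSM_ST_LAST,
        HSM_ST_POLL, HSM_ST_LAST_POLL, HSM_ST_TMOUT, HSM_ST_ERR,
};

int main(void)
{
        enum hsm_state st = HSM_ST;     /* as set when the command is issued */
        int sectors_left = 2;

        while (st != HSM_ST_IDLE) {
                switch (st) {
                case HSM_ST:            /* transfer one data block */
                        printf("xfer block, %d left\n", --sectors_left);
                        if (sectors_left == 0)
                                st = HSM_ST_LAST;
                        break;
                case HSM_ST_LAST:       /* status phase, complete command */
                        printf("command complete\n");
                        st = HSM_ST_IDLE;
                        break;
                default:                /* poll/timeout/error arms elided */
                        st = HSM_ST_IDLE;
                        break;
                }
        }
        return 0;
}
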
@@ -3010,52 +3096,6 @@ fsm_start:
3010 goto fsm_start; 3096 goto fsm_start;
3011} 3097}
3012 3098
3013static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
3014 struct scsi_cmnd *cmd)
3015{
3016 DECLARE_COMPLETION(wait);
3017 struct ata_queued_cmd *qc;
3018 unsigned long flags;
3019 int rc;
3020
3021 DPRINTK("ATAPI request sense\n");
3022
3023 qc = ata_qc_new_init(ap, dev);
3024 BUG_ON(qc == NULL);
3025
3026 /* FIXME: is this needed? */
3027 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
3028
3029 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
3030 qc->dma_dir = DMA_FROM_DEVICE;
3031
3032 memset(&qc->cdb, 0, ap->cdb_len);
3033 qc->cdb[0] = REQUEST_SENSE;
3034 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
3035
3036 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3037 qc->tf.command = ATA_CMD_PACKET;
3038
3039 qc->tf.protocol = ATA_PROT_ATAPI;
3040 qc->tf.lbam = (8 * 1024) & 0xff;
3041 qc->tf.lbah = (8 * 1024) >> 8;
3042 qc->nbytes = SCSI_SENSE_BUFFERSIZE;
3043
3044 qc->waiting = &wait;
3045 qc->complete_fn = ata_qc_complete_noop;
3046
3047 spin_lock_irqsave(&ap->host_set->lock, flags);
3048 rc = ata_qc_issue(qc);
3049 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3050
3051 if (rc)
3052 ata_port_disable(ap);
3053 else
3054 wait_for_completion(&wait);
3055
3056 DPRINTK("EXIT\n");
3057}
3058
3059/** 3099/**
3060 * ata_qc_timeout - Handle timeout of queued command 3100 * ata_qc_timeout - Handle timeout of queued command
3061 * @qc: Command that timed out 3101 * @qc: Command that timed out
@@ -3173,14 +3213,14 @@ void ata_eng_timeout(struct ata_port *ap)
3173 DPRINTK("ENTER\n"); 3213 DPRINTK("ENTER\n");
3174 3214
3175 qc = ata_qc_from_tag(ap, ap->active_tag); 3215 qc = ata_qc_from_tag(ap, ap->active_tag);
3176 if (!qc) { 3216 if (qc)
3217 ata_qc_timeout(qc);
3218 else {
3177 printk(KERN_ERR "ata%u: BUG: timeout without command\n", 3219 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
3178 ap->id); 3220 ap->id);
3179 goto out; 3221 goto out;
3180 } 3222 }
3181 3223
3182 ata_qc_timeout(qc);
3183
3184out: 3224out:
3185 DPRINTK("EXIT\n"); 3225 DPRINTK("EXIT\n");
3186} 3226}
@@ -3238,14 +3278,18 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3238 3278
3239 ata_tf_init(ap, &qc->tf, dev->devno); 3279 ata_tf_init(ap, &qc->tf, dev->devno);
3240 3280
3241 if (dev->flags & ATA_DFLAG_LBA48) 3281 if (dev->flags & ATA_DFLAG_LBA) {
3242 qc->tf.flags |= ATA_TFLAG_LBA48; 3282 qc->tf.flags |= ATA_TFLAG_LBA;
3283
3284 if (dev->flags & ATA_DFLAG_LBA48)
3285 qc->tf.flags |= ATA_TFLAG_LBA48;
3286 }
3243 } 3287 }
3244 3288
3245 return qc; 3289 return qc;
3246} 3290}
3247 3291
3248static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat) 3292int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat)
3249{ 3293{
3250 return 0; 3294 return 0;
3251} 3295}
@@ -3442,7 +3486,7 @@ int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3442 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */ 3486 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
3443 ata_qc_set_polling(qc); 3487 ata_qc_set_polling(qc);
3444 ata_tf_to_host_nolock(ap, &qc->tf); 3488 ata_tf_to_host_nolock(ap, &qc->tf);
3445 ap->pio_task_state = PIO_ST; 3489 ap->hsm_task_state = HSM_ST;
3446 queue_work(ata_wq, &ap->pio_task); 3490 queue_work(ata_wq, &ap->pio_task);
3447 break; 3491 break;
3448 3492
@@ -3668,7 +3712,7 @@ u8 ata_bmdma_status(struct ata_port *ap)
3668 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; 3712 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3669 host_stat = readb(mmio + ATA_DMA_STATUS); 3713 host_stat = readb(mmio + ATA_DMA_STATUS);
3670 } else 3714 } else
3671 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); 3715 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
3672 return host_stat; 3716 return host_stat;
3673} 3717}
3674 3718
@@ -3888,7 +3932,7 @@ static void atapi_packet_task(void *_data)
3888 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1); 3932 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
3889 3933
3890 /* PIO commands are handled by polling */ 3934 /* PIO commands are handled by polling */
3891 ap->pio_task_state = PIO_ST; 3935 ap->hsm_task_state = HSM_ST;
3892 queue_work(ata_wq, &ap->pio_task); 3936 queue_work(ata_wq, &ap->pio_task);
3893 } 3937 }
3894 3938
@@ -4202,7 +4246,7 @@ int ata_device_add(struct ata_probe_ent *ent)
4202 for (i = 0; i < count; i++) { 4246 for (i = 0; i < count; i++) {
4203 struct ata_port *ap = host_set->ports[i]; 4247 struct ata_port *ap = host_set->ports[i];
4204 4248
4205 scsi_scan_host(ap->host); 4249 ata_scsi_scan_host(ap);
4206 } 4250 }
4207 4251
4208 dev_set_drvdata(dev, host_set); 4252 dev_set_drvdata(dev, host_set);
@@ -4362,85 +4406,87 @@ void ata_pci_host_stop (struct ata_host_set *host_set)
4362 * ata_pci_init_native_mode - Initialize native-mode driver 4406 * ata_pci_init_native_mode - Initialize native-mode driver
4363 * @pdev: pci device to be initialized 4407 * @pdev: pci device to be initialized
4364 * @port: array[2] of pointers to port info structures. 4408 * @port: array[2] of pointers to port info structures.
4409 * @ports: bitmap of ports present
4365 * 4410 *
4366 * Utility function which allocates and initializes an 4411 * Utility function which allocates and initializes an
4367 * ata_probe_ent structure for a standard dual-port 4412 * ata_probe_ent structure for a standard dual-port
4368 * PIO-based IDE controller. The returned ata_probe_ent 4413 * PIO-based IDE controller. The returned ata_probe_ent
4369 * structure can be passed to ata_device_add(). The returned 4414 * structure can be passed to ata_device_add(). The returned
4370 * ata_probe_ent structure should then be freed with kfree(). 4415 * ata_probe_ent structure should then be freed with kfree().
4416 *
4417 * The caller need only pass the address of the primary port, the
4418 * secondary will be deduced automatically. If the device has non
4419 * standard secondary port mappings this function can be called twice,
4420 * once for each interface.
4371 */ 4421 */
4372 4422
4373struct ata_probe_ent * 4423struct ata_probe_ent *
4374ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port) 4424ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
4375{ 4425{
4376 struct ata_probe_ent *probe_ent = 4426 struct ata_probe_ent *probe_ent =
4377 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]); 4427 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
4428 int p = 0;
4429
4378 if (!probe_ent) 4430 if (!probe_ent)
4379 return NULL; 4431 return NULL;
4380 4432
4381 probe_ent->n_ports = 2;
4382 probe_ent->irq = pdev->irq; 4433 probe_ent->irq = pdev->irq;
4383 probe_ent->irq_flags = SA_SHIRQ; 4434 probe_ent->irq_flags = SA_SHIRQ;
4384 4435
4385 probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0); 4436 if (ports & ATA_PORT_PRIMARY) {
4386 probe_ent->port[0].altstatus_addr = 4437 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
4387 probe_ent->port[0].ctl_addr = 4438 probe_ent->port[p].altstatus_addr =
4388 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS; 4439 probe_ent->port[p].ctl_addr =
4389 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4); 4440 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
4390 4441 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4);
4391 probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2); 4442 ata_std_ports(&probe_ent->port[p]);
4392 probe_ent->port[1].altstatus_addr = 4443 p++;
4393 probe_ent->port[1].ctl_addr = 4444 }
4394 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
4395 probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8;
4396 4445
4397 ata_std_ports(&probe_ent->port[0]); 4446 if (ports & ATA_PORT_SECONDARY) {
4398 ata_std_ports(&probe_ent->port[1]); 4447 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
4448 probe_ent->port[p].altstatus_addr =
4449 probe_ent->port[p].ctl_addr =
4450 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
4451 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8;
4452 ata_std_ports(&probe_ent->port[p]);
4453 p++;
4454 }
4399 4455
4456 probe_ent->n_ports = p;
4400 return probe_ent; 4457 return probe_ent;
4401} 4458}
4402 4459
4403static struct ata_probe_ent * 4460static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, struct ata_port_info **port, int port_num)
4404ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port,
4405 struct ata_probe_ent **ppe2)
4406{ 4461{
4407 struct ata_probe_ent *probe_ent, *probe_ent2; 4462 struct ata_probe_ent *probe_ent;
4408 4463
4409 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]); 4464 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
4410 if (!probe_ent) 4465 if (!probe_ent)
4411 return NULL; 4466 return NULL;
4412 probe_ent2 = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[1]);
4413 if (!probe_ent2) {
4414 kfree(probe_ent);
4415 return NULL;
4416 }
4417 4467
4418 probe_ent->n_ports = 1; 4468
4419 probe_ent->irq = 14;
4420
4421 probe_ent->hard_port_no = 0;
4422 probe_ent->legacy_mode = 1; 4469 probe_ent->legacy_mode = 1;
4423 4470 probe_ent->n_ports = 1;
4424 probe_ent2->n_ports = 1; 4471 probe_ent->hard_port_no = port_num;
4425 probe_ent2->irq = 15; 4472
4426 4473 switch(port_num)
4427 probe_ent2->hard_port_no = 1; 4474 {
4428 probe_ent2->legacy_mode = 1; 4475 case 0:
4429 4476 probe_ent->irq = 14;
4430 probe_ent->port[0].cmd_addr = 0x1f0; 4477 probe_ent->port[0].cmd_addr = 0x1f0;
4431 probe_ent->port[0].altstatus_addr = 4478 probe_ent->port[0].altstatus_addr =
4432 probe_ent->port[0].ctl_addr = 0x3f6; 4479 probe_ent->port[0].ctl_addr = 0x3f6;
4433 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4); 4480 break;
4434 4481 case 1:
4435 probe_ent2->port[0].cmd_addr = 0x170; 4482 probe_ent->irq = 15;
4436 probe_ent2->port[0].altstatus_addr = 4483 probe_ent->port[0].cmd_addr = 0x170;
4437 probe_ent2->port[0].ctl_addr = 0x376; 4484 probe_ent->port[0].altstatus_addr =
4438 probe_ent2->port[0].bmdma_addr = pci_resource_start(pdev, 4)+8; 4485 probe_ent->port[0].ctl_addr = 0x376;
4439 4486 break;
4487 }
4488 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4) + 8 * port_num;
4440 ata_std_ports(&probe_ent->port[0]); 4489 ata_std_ports(&probe_ent->port[0]);
4441 ata_std_ports(&probe_ent2->port[0]);
4442
4443 *ppe2 = probe_ent2;
4444 return probe_ent; 4490 return probe_ent;
4445} 4491}
4446 4492
@@ -4469,7 +4515,7 @@ ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port,
4469int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, 4515int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4470 unsigned int n_ports) 4516 unsigned int n_ports)
4471{ 4517{
4472 struct ata_probe_ent *probe_ent, *probe_ent2 = NULL; 4518 struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
4473 struct ata_port_info *port[2]; 4519 struct ata_port_info *port[2];
4474 u8 tmp8, mask; 4520 u8 tmp8, mask;
4475 unsigned int legacy_mode = 0; 4521 unsigned int legacy_mode = 0;
@@ -4486,7 +4532,7 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4486 4532
4487 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0 4533 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
4488 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) { 4534 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
4489 /* TODO: support transitioning to native mode? */ 4535 /* TODO: What if one channel is in native mode ... */
4490 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8); 4536 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
4491 mask = (1 << 2) | (1 << 0); 4537 mask = (1 << 2) | (1 << 0);
4492 if ((tmp8 & mask) != mask) 4538 if ((tmp8 & mask) != mask)
@@ -4494,11 +4540,20 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4494 } 4540 }
4495 4541
4496 /* FIXME... */ 4542 /* FIXME... */
4497 if ((!legacy_mode) && (n_ports > 1)) { 4543 if ((!legacy_mode) && (n_ports > 2)) {
4498 printk(KERN_ERR "ata: BUG: native mode, n_ports > 1\n"); 4544 printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
4499 return -EINVAL; 4545 n_ports = 2;
4546 /* For now */
4500 } 4547 }
4501 4548
4549 /* FIXME: Really, for ATA it isn't safe, because the device may be
4550 multi-purpose and we want to leave it alone if it was already
4551 enabled. Secondly, for shared use, as Arjan says, we want refcounting.
4552
4553 Checking dev->is_enabled is insufficient, as it is not set at
4554 boot for the primary video, which is BIOS-enabled.
4555 */
4556
4502 rc = pci_enable_device(pdev); 4557 rc = pci_enable_device(pdev);
4503 if (rc) 4558 if (rc)
4504 return rc; 4559 return rc;
@@ -4509,6 +4564,7 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4509 goto err_out; 4564 goto err_out;
4510 } 4565 }
4511 4566
4567 /* FIXME: Should use platform specific mappers for legacy port ranges */
4512 if (legacy_mode) { 4568 if (legacy_mode) {
4513 if (!request_region(0x1f0, 8, "libata")) { 4569 if (!request_region(0x1f0, 8, "libata")) {
4514 struct resource *conflict, res; 4570 struct resource *conflict, res;
@@ -4553,10 +4609,17 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4553 goto err_out_regions; 4609 goto err_out_regions;
4554 4610
4555 if (legacy_mode) { 4611 if (legacy_mode) {
4556 probe_ent = ata_pci_init_legacy_mode(pdev, port, &probe_ent2); 4612 if (legacy_mode & (1 << 0))
4557 } else 4613 probe_ent = ata_pci_init_legacy_port(pdev, port, 0);
4558 probe_ent = ata_pci_init_native_mode(pdev, port); 4614 if (legacy_mode & (1 << 1))
4559 if (!probe_ent) { 4615 probe_ent2 = ata_pci_init_legacy_port(pdev, port, 1);
4616 } else {
4617 if (n_ports == 2)
4618 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
4619 else
4620 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
4621 }
4622 if (!probe_ent && !probe_ent2) {
4560 rc = -ENOMEM; 4623 rc = -ENOMEM;
4561 goto err_out_regions; 4624 goto err_out_regions;
4562 } 4625 }
@@ -4668,6 +4731,27 @@ static void __exit ata_exit(void)
4668module_init(ata_init); 4731module_init(ata_init);
4669module_exit(ata_exit); 4732module_exit(ata_exit);
4670 4733
4734static unsigned long ratelimit_time;
4735static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
4736
4737int ata_ratelimit(void)
4738{
4739 int rc;
4740 unsigned long flags;
4741
4742 spin_lock_irqsave(&ata_ratelimit_lock, flags);
4743
4744 if (time_after(jiffies, ratelimit_time)) {
4745 rc = 1;
4746 ratelimit_time = jiffies + (HZ/5);
4747 } else
4748 rc = 0;
4749
4750 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
4751
4752 return rc;
4753}
4754
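
ata_ratelimit() above allows one message per HZ/5 jiffies (200 ms) under a spinlock. A single-threaded userspace sketch of the same time-window test, using CLOCK_MONOTONIC instead of jiffies and omitting the locking (my_ratelimit is an invented stand-in):

#include <stdio.h>
#include <time.h>

static double ratelimit_deadline;

static int my_ratelimit(double interval_sec)
{
        struct timespec ts;
        double now;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        now = ts.tv_sec + ts.tv_nsec / 1e9;

        if (now > ratelimit_deadline) {
                ratelimit_deadline = now + interval_sec;
                return 1;               /* caller may print */
        }
        return 0;                       /* suppressed */
}

int main(void)
{
        int i;

        for (i = 0; i < 5; i++)
                if (my_ratelimit(0.2))  /* 200 ms, like HZ/5 jiffies */
                        printf("message %d allowed\n", i);
        return 0;
}
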
4671/* 4755/*
4672 * libata is essentially a library of internal helper functions for 4756 * libata is essentially a library of internal helper functions for
4673 * low-level ATA host controller drivers. As such, the API/ABI is 4757 * low-level ATA host controller drivers. As such, the API/ABI is
@@ -4709,6 +4793,7 @@ EXPORT_SYMBOL_GPL(sata_phy_reset);
4709EXPORT_SYMBOL_GPL(__sata_phy_reset); 4793EXPORT_SYMBOL_GPL(__sata_phy_reset);
4710EXPORT_SYMBOL_GPL(ata_bus_reset); 4794EXPORT_SYMBOL_GPL(ata_bus_reset);
4711EXPORT_SYMBOL_GPL(ata_port_disable); 4795EXPORT_SYMBOL_GPL(ata_port_disable);
4796EXPORT_SYMBOL_GPL(ata_ratelimit);
4712EXPORT_SYMBOL_GPL(ata_scsi_ioctl); 4797EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
4713EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 4798EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4714EXPORT_SYMBOL_GPL(ata_scsi_error); 4799EXPORT_SYMBOL_GPL(ata_scsi_error);
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index ee3f1050fb5f..4cf43de4060e 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -49,6 +49,14 @@ static struct ata_device *
49ata_scsi_find_dev(struct ata_port *ap, struct scsi_device *scsidev); 49ata_scsi_find_dev(struct ata_port *ap, struct scsi_device *scsidev);
50 50
51 51
52static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
53 void (*done)(struct scsi_cmnd *))
54{
55 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
56 /* "Invalid field in cbd" */
57 done(cmd);
58}
59
52/** 60/**
53 * ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd. 61 * ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
54 * @sdev: SCSI device for which BIOS geometry is to be determined 62 * @sdev: SCSI device for which BIOS geometry is to be determined
@@ -182,7 +190,6 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat)
182{ 190{
183 struct scsi_cmnd *cmd = qc->scsicmd; 191 struct scsi_cmnd *cmd = qc->scsicmd;
184 u8 err = 0; 192 u8 err = 0;
185 unsigned char *sb = cmd->sense_buffer;
186 /* Based on the 3ware driver translation table */ 193 /* Based on the 3ware driver translation table */
187 static unsigned char sense_table[][4] = { 194 static unsigned char sense_table[][4] = {
188 /* BBD|ECC|ID|MAR */ 195 /* BBD|ECC|ID|MAR */
@@ -225,8 +232,6 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat)
225 }; 232 };
226 int i = 0; 233 int i = 0;
227 234
228 cmd->result = SAM_STAT_CHECK_CONDITION;
229
230 /* 235 /*
231 * Is this an error we can process/parse 236 * Is this an error we can process/parse
232 */ 237 */
@@ -281,11 +286,9 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat)
281 /* Look for best matches first */ 286 /* Look for best matches first */
282 if((sense_table[i][0] & err) == sense_table[i][0]) 287 if((sense_table[i][0] & err) == sense_table[i][0])
283 { 288 {
284 sb[0] = 0x70; 289 ata_scsi_set_sense(cmd, sense_table[i][1] /* sk */,
285 sb[2] = sense_table[i][1]; 290 sense_table[i][2] /* asc */,
286 sb[7] = 0x0a; 291 sense_table[i][3] /* ascq */ );
287 sb[12] = sense_table[i][2];
288 sb[13] = sense_table[i][3];
289 return; 292 return;
290 } 293 }
291 i++; 294 i++;
@@ -300,11 +303,9 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat)
300 { 303 {
301 if(stat_table[i][0] & drv_stat) 304 if(stat_table[i][0] & drv_stat)
302 { 305 {
303 sb[0] = 0x70; 306 ata_scsi_set_sense(cmd, stat_table[i][1] /* sk */,
304 sb[2] = stat_table[i][1]; 307 stat_table[i][2] /* asc */,
305 sb[7] = 0x0a; 308 stat_table[i][3] /* ascq */ );
306 sb[12] = stat_table[i][2];
307 sb[13] = stat_table[i][3];
308 return; 309 return;
309 } 310 }
310 i++; 311 i++;
@@ -313,15 +314,12 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat)
313 printk(KERN_ERR "ata%u: called with no error (%02X)!\n", qc->ap->id, drv_stat); 314 printk(KERN_ERR "ata%u: called with no error (%02X)!\n", qc->ap->id, drv_stat);
314 /* additional-sense-code[-qualifier] */ 315 /* additional-sense-code[-qualifier] */
315 316
316 sb[0] = 0x70;
317 sb[2] = MEDIUM_ERROR;
318 sb[7] = 0x0A;
319 if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 317 if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
320 sb[12] = 0x11; /* "unrecovered read error" */ 318 ata_scsi_set_sense(cmd, MEDIUM_ERROR, 0x11, 0x4);
321 sb[13] = 0x04; 319 /* "unrecovered read error" */
322 } else { 320 } else {
323 sb[12] = 0x0C; /* "write error - */ 321 ata_scsi_set_sense(cmd, MEDIUM_ERROR, 0xc, 0x2);
324 sb[13] = 0x02; /* auto-reallocation failed" */ 322 /* "write error - auto-reallocation failed" */
325 } 323 }
326} 324}
327 325
@@ -440,15 +438,26 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
440 ; /* ignore IMMED bit, violates sat-r05 */ 438 ; /* ignore IMMED bit, violates sat-r05 */
441 } 439 }
442 if (scsicmd[4] & 0x2) 440 if (scsicmd[4] & 0x2)
443 return 1; /* LOEJ bit set not supported */ 441 goto invalid_fld; /* LOEJ bit set not supported */
444 if (((scsicmd[4] >> 4) & 0xf) != 0) 442 if (((scsicmd[4] >> 4) & 0xf) != 0)
445 return 1; /* power conditions not supported */ 443 goto invalid_fld; /* power conditions not supported */
446 if (scsicmd[4] & 0x1) { 444 if (scsicmd[4] & 0x1) {
447 tf->nsect = 1; /* 1 sector, lba=0 */ 445 tf->nsect = 1; /* 1 sector, lba=0 */
448 tf->lbah = 0x0; 446
449 tf->lbam = 0x0; 447 if (qc->dev->flags & ATA_DFLAG_LBA) {
450 tf->lbal = 0x0; 448 qc->tf.flags |= ATA_TFLAG_LBA;
451 tf->device |= ATA_LBA; 449
450 tf->lbah = 0x0;
451 tf->lbam = 0x0;
452 tf->lbal = 0x0;
453 tf->device |= ATA_LBA;
454 } else {
455 /* CHS */
456 tf->lbal = 0x1; /* sect */
457 tf->lbam = 0x0; /* cyl low */
458 tf->lbah = 0x0; /* cyl high */
459 }
460
452 tf->command = ATA_CMD_VERIFY; /* READ VERIFY */ 461 tf->command = ATA_CMD_VERIFY; /* READ VERIFY */
453 } else { 462 } else {
454 tf->nsect = 0; /* time period value (0 implies now) */ 463 tf->nsect = 0; /* time period value (0 implies now) */
@@ -463,6 +472,11 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
463 */ 472 */
464 473
465 return 0; 474 return 0;
475
476invalid_fld:
477 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
478 /* "Invalid field in cbd" */
479 return 1;
466} 480}
467 481
468 482
@@ -498,6 +512,99 @@ static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
498} 512}
499 513
500/** 514/**
515 * scsi_6_lba_len - Get LBA and transfer length
516 * @scsicmd: SCSI command to translate
517 *
518 * Calculate LBA and transfer length for 6-byte commands.
519 *
520 * RETURNS:
521 * @plba: the LBA
522 * @plen: the transfer length
523 */
524
525static void scsi_6_lba_len(u8 *scsicmd, u64 *plba, u32 *plen)
526{
527 u64 lba = 0;
528 u32 len = 0;
529
530 VPRINTK("six-byte command\n");
531
532 lba |= ((u64)scsicmd[2]) << 8;
533 lba |= ((u64)scsicmd[3]);
534
535 len |= ((u32)scsicmd[4]);
536
537 *plba = lba;
538 *plen = len;
539}
540
541/**
542 * scsi_10_lba_len - Get LBA and transfer length
543 * @scsicmd: SCSI command to translate
544 *
545 * Calculate LBA and transfer length for 10-byte commands.
546 *
547 * RETURNS:
548 * @plba: the LBA
549 * @plen: the transfer length
550 */
551
552static void scsi_10_lba_len(u8 *scsicmd, u64 *plba, u32 *plen)
553{
554 u64 lba = 0;
555 u32 len = 0;
556
557 VPRINTK("ten-byte command\n");
558
559 lba |= ((u64)scsicmd[2]) << 24;
560 lba |= ((u64)scsicmd[3]) << 16;
561 lba |= ((u64)scsicmd[4]) << 8;
562 lba |= ((u64)scsicmd[5]);
563
564 len |= ((u32)scsicmd[7]) << 8;
565 len |= ((u32)scsicmd[8]);
566
567 *plba = lba;
568 *plen = len;
569}
570
571/**
572 * scsi_16_lba_len - Get LBA and transfer length
573 * @scsicmd: SCSI command to translate
574 *
575 * Calculate LBA and transfer length for 16-byte commands.
576 *
577 * RETURNS:
578 * @plba: the LBA
579 * @plen: the transfer length
580 */
581
582static void scsi_16_lba_len(u8 *scsicmd, u64 *plba, u32 *plen)
583{
584 u64 lba = 0;
585 u32 len = 0;
586
587 VPRINTK("sixteen-byte command\n");
588
589 lba |= ((u64)scsicmd[2]) << 56;
590 lba |= ((u64)scsicmd[3]) << 48;
591 lba |= ((u64)scsicmd[4]) << 40;
592 lba |= ((u64)scsicmd[5]) << 32;
593 lba |= ((u64)scsicmd[6]) << 24;
594 lba |= ((u64)scsicmd[7]) << 16;
595 lba |= ((u64)scsicmd[8]) << 8;
596 lba |= ((u64)scsicmd[9]);
597
598 len |= ((u32)scsicmd[10]) << 24;
599 len |= ((u32)scsicmd[11]) << 16;
600 len |= ((u32)scsicmd[12]) << 8;
601 len |= ((u32)scsicmd[13]);
602
603 *plba = lba;
604 *plen = len;
605}
606
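
All three helpers above decode big-endian CDB fields; READ(10), for example, carries the LBA in bytes 2-5 and the transfer length in bytes 7-8. A quick standalone check of that byte shuffling (mirroring what scsi_10_lba_len() does, not calling it):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* READ(10) for LBA 0x123456, 8 blocks. */
        uint8_t cdb[10] = { 0x28, 0, 0x00, 0x12, 0x34, 0x56, 0, 0x00, 0x08, 0 };

        uint64_t lba = ((uint64_t)cdb[2] << 24) | ((uint64_t)cdb[3] << 16) |
                       ((uint64_t)cdb[4] << 8)  |  (uint64_t)cdb[5];
        uint32_t len = ((uint32_t)cdb[7] << 8) | cdb[8];

        printf("lba=%llu len=%u\n", (unsigned long long)lba, len);
        /* prints: lba=1193046 len=8 */
        return 0;
}
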
607/**
501 * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one 608 * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one
502 * @qc: Storage for translated ATA taskfile 609 * @qc: Storage for translated ATA taskfile
503 * @scsicmd: SCSI command to translate 610 * @scsicmd: SCSI command to translate
@@ -514,79 +621,102 @@ static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
514static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, u8 *scsicmd) 621static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
515{ 622{
516 struct ata_taskfile *tf = &qc->tf; 623 struct ata_taskfile *tf = &qc->tf;
624 struct ata_device *dev = qc->dev;
625 unsigned int lba = tf->flags & ATA_TFLAG_LBA;
517 unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48; 626 unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48;
518 u64 dev_sectors = qc->dev->n_sectors; 627 u64 dev_sectors = qc->dev->n_sectors;
519 u64 sect = 0; 628 u64 block;
520 u32 n_sect = 0; 629 u32 n_block;
521 630
522 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 631 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
523 tf->protocol = ATA_PROT_NODATA; 632 tf->protocol = ATA_PROT_NODATA;
524 tf->device |= ATA_LBA;
525
526 if (scsicmd[0] == VERIFY) {
527 sect |= ((u64)scsicmd[2]) << 24;
528 sect |= ((u64)scsicmd[3]) << 16;
529 sect |= ((u64)scsicmd[4]) << 8;
530 sect |= ((u64)scsicmd[5]);
531
532 n_sect |= ((u32)scsicmd[7]) << 8;
533 n_sect |= ((u32)scsicmd[8]);
534 }
535
536 else if (scsicmd[0] == VERIFY_16) {
537 sect |= ((u64)scsicmd[2]) << 56;
538 sect |= ((u64)scsicmd[3]) << 48;
539 sect |= ((u64)scsicmd[4]) << 40;
540 sect |= ((u64)scsicmd[5]) << 32;
541 sect |= ((u64)scsicmd[6]) << 24;
542 sect |= ((u64)scsicmd[7]) << 16;
543 sect |= ((u64)scsicmd[8]) << 8;
544 sect |= ((u64)scsicmd[9]);
545
546 n_sect |= ((u32)scsicmd[10]) << 24;
547 n_sect |= ((u32)scsicmd[11]) << 16;
548 n_sect |= ((u32)scsicmd[12]) << 8;
549 n_sect |= ((u32)scsicmd[13]);
550 }
551 633
634 if (scsicmd[0] == VERIFY)
635 scsi_10_lba_len(scsicmd, &block, &n_block);
636 else if (scsicmd[0] == VERIFY_16)
637 scsi_16_lba_len(scsicmd, &block, &n_block);
552 else 638 else
553 return 1; 639 goto invalid_fld;
554 640
555 if (!n_sect) 641 if (!n_block)
556 return 1; 642 goto nothing_to_do;
557 if (sect >= dev_sectors) 643 if (block >= dev_sectors)
558 return 1; 644 goto out_of_range;
559 if ((sect + n_sect) > dev_sectors) 645 if ((block + n_block) > dev_sectors)
560 return 1; 646 goto out_of_range;
561 if (lba48) { 647 if (lba48) {
562 if (n_sect > (64 * 1024)) 648 if (n_block > (64 * 1024))
563 return 1; 649 goto invalid_fld;
564 } else { 650 } else {
565 if (n_sect > 256) 651 if (n_block > 256)
566 return 1; 652 goto invalid_fld;
567 } 653 }
568 654
569 if (lba48) { 655 if (lba) {
570 tf->command = ATA_CMD_VERIFY_EXT; 656 if (lba48) {
657 tf->command = ATA_CMD_VERIFY_EXT;
658
659 tf->hob_nsect = (n_block >> 8) & 0xff;
660
661 tf->hob_lbah = (block >> 40) & 0xff;
662 tf->hob_lbam = (block >> 32) & 0xff;
663 tf->hob_lbal = (block >> 24) & 0xff;
664 } else {
665 tf->command = ATA_CMD_VERIFY;
666
667 tf->device |= (block >> 24) & 0xf;
668 }
571 669
572 tf->hob_nsect = (n_sect >> 8) & 0xff; 670 tf->nsect = n_block & 0xff;
573 671
574 tf->hob_lbah = (sect >> 40) & 0xff; 672 tf->lbah = (block >> 16) & 0xff;
575 tf->hob_lbam = (sect >> 32) & 0xff; 673 tf->lbam = (block >> 8) & 0xff;
576 tf->hob_lbal = (sect >> 24) & 0xff; 674 tf->lbal = block & 0xff;
675
676 tf->device |= ATA_LBA;
577 } else { 677 } else {
678 /* CHS */
679 u32 sect, head, cyl, track;
680
681 /* Convert LBA to CHS */
682 track = (u32)block / dev->sectors;
683 cyl = track / dev->heads;
684 head = track % dev->heads;
685 sect = (u32)block % dev->sectors + 1;
686
687 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
688 (u32)block, track, cyl, head, sect);
689
690 /* Check whether the converted CHS can fit.
691 Cylinder: 0-65535
692 Head: 0-15
693 Sector: 1-255 */
694 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
695 goto out_of_range;
696
578 tf->command = ATA_CMD_VERIFY; 697 tf->command = ATA_CMD_VERIFY;
579 698 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
580 tf->device |= (sect >> 24) & 0xf; 699 tf->lbal = sect;
700 tf->lbam = cyl;
701 tf->lbah = cyl >> 8;
702 tf->device |= head;
581 } 703 }
582 704
583 tf->nsect = n_sect & 0xff; 705 return 0;
584 706
585 tf->lbah = (sect >> 16) & 0xff; 707invalid_fld:
586 tf->lbam = (sect >> 8) & 0xff; 708 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
587 tf->lbal = sect & 0xff; 709 /* "Invalid field in CDB" */
710 return 1;
588 711
589 return 0; 712out_of_range:
713 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x21, 0x0);
714 /* "Logical Block Address out of range" */
715 return 1;
716
717nothing_to_do:
718 qc->scsicmd->result = SAM_STAT_GOOD;
719 return 1;
590} 720}
591 721
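
For devices without LBA, the translation above converts the linear block number to CHS with integer division and then range-checks the result. The same arithmetic in isolation, with the driver's bounds check (the geometry values here are arbitrary samples):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Sample geometry; real values come from IDENTIFY data. */
        uint32_t block = 12345, sectors = 63, heads = 16;

        uint32_t track = block / sectors;       /* whole tracks before block */
        uint32_t cyl   = track / heads;
        uint32_t head  = track % heads;
        uint32_t sect  = block % sectors + 1;   /* sectors are 1-based */

        /* Range check as in the driver: cyl 0-65535, head 0-15, sect 1-255. */
        if ((cyl >> 16) || (head >> 4) || (sect >> 8) || !sect)
                printf("out of CHS range\n");
        else
                printf("LBA %u -> C/H/S %u/%u/%u\n", block, cyl, head, sect);
        return 0;
}
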
592/** 722/**
@@ -612,11 +742,14 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
612static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd) 742static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
613{ 743{
614 struct ata_taskfile *tf = &qc->tf; 744 struct ata_taskfile *tf = &qc->tf;
745 struct ata_device *dev = qc->dev;
746 unsigned int lba = tf->flags & ATA_TFLAG_LBA;
615 unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48; 747 unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48;
748 u64 block;
749 u32 n_block;
616 750
617 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 751 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
618 tf->protocol = qc->dev->xfer_protocol; 752 tf->protocol = qc->dev->xfer_protocol;
619 tf->device |= ATA_LBA;
620 753
621 if (scsicmd[0] == READ_10 || scsicmd[0] == READ_6 || 754 if (scsicmd[0] == READ_10 || scsicmd[0] == READ_6 ||
622 scsicmd[0] == READ_16) { 755 scsicmd[0] == READ_16) {
@@ -626,89 +759,115 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
626 tf->flags |= ATA_TFLAG_WRITE; 759 tf->flags |= ATA_TFLAG_WRITE;
627 } 760 }
628 761
629 if (scsicmd[0] == READ_10 || scsicmd[0] == WRITE_10) { 762 /* Calculate the SCSI LBA and transfer length. */
630 if (lba48) { 763 switch (scsicmd[0]) {
631 tf->hob_nsect = scsicmd[7]; 764 case READ_10:
632 tf->hob_lbal = scsicmd[2]; 765 case WRITE_10:
633 766 scsi_10_lba_len(scsicmd, &block, &n_block);
634 qc->nsect = ((unsigned int)scsicmd[7] << 8) | 767 break;
635 scsicmd[8]; 768 case READ_6:
636 } else { 769 case WRITE_6:
637 /* if we don't support LBA48 addressing, the request 770 scsi_6_lba_len(scsicmd, &block, &n_block);
638 * -may- be too large. */
639 if ((scsicmd[2] & 0xf0) || scsicmd[7])
640 return 1;
641
642 /* stores LBA27:24 in lower 4 bits of device reg */
643 tf->device |= scsicmd[2];
644 771
645 qc->nsect = scsicmd[8]; 772 /* for 6-byte r/w commands, transfer length 0
646 } 773 * means 256 blocks of data, not 0 blocks.
774 */
775 if (!n_block)
776 n_block = 256;
777 break;
778 case READ_16:
779 case WRITE_16:
780 scsi_16_lba_len(scsicmd, &block, &n_block);
781 break;
782 default:
783 DPRINTK("no-byte command\n");
784 goto invalid_fld;
785 }
647 786
648 tf->nsect = scsicmd[8]; 787 /* Check and compose ATA command */
649 tf->lbal = scsicmd[5]; 788 if (!n_block)
650 tf->lbam = scsicmd[4]; 789 /* For 10-byte and 16-byte SCSI R/W commands, transfer
651 tf->lbah = scsicmd[3]; 790 * length 0 means transfer 0 blocks of data.
791 * However, for ATA R/W commands, sector count 0 means
792 * 256 or 65536 sectors, not 0 sectors as in SCSI.
793 */
794 goto nothing_to_do;
652 795
653 VPRINTK("ten-byte command\n"); 796 if (lba) {
654 if (qc->nsect == 0) /* we don't support length==0 cmds */ 797 if (lba48) {
655 return 1; 798 /* The request -may- be too large for LBA48. */
656 return 0; 799 if ((block >> 48) || (n_block > 65536))
657 } 800 goto out_of_range;
658 801
659 if (scsicmd[0] == READ_6 || scsicmd[0] == WRITE_6) { 802 tf->hob_nsect = (n_block >> 8) & 0xff;
660 qc->nsect = tf->nsect = scsicmd[4];
661 if (!qc->nsect) {
662 qc->nsect = 256;
663 if (lba48)
664 tf->hob_nsect = 1;
665 }
666 803
667 tf->lbal = scsicmd[3]; 804 tf->hob_lbah = (block >> 40) & 0xff;
668 tf->lbam = scsicmd[2]; 805 tf->hob_lbam = (block >> 32) & 0xff;
669 tf->lbah = scsicmd[1] & 0x1f; /* mask out reserved bits */ 806 tf->hob_lbal = (block >> 24) & 0xff;
807 } else {
808 /* LBA28 */
670 809
671 VPRINTK("six-byte command\n"); 810 /* The request -may- be too large for LBA28. */
672 return 0; 811 if ((block >> 28) || (n_block > 256))
673 } 812 goto out_of_range;
674 813
675 if (scsicmd[0] == READ_16 || scsicmd[0] == WRITE_16) { 814 tf->device |= (block >> 24) & 0xf;
676 /* rule out impossible LBAs and sector counts */ 815 }
677 if (scsicmd[2] || scsicmd[3] || scsicmd[10] || scsicmd[11])
678 return 1;
679 816
680 if (lba48) { 817 qc->nsect = n_block;
681 tf->hob_nsect = scsicmd[12]; 818 tf->nsect = n_block & 0xff;
682 tf->hob_lbal = scsicmd[6];
683 tf->hob_lbam = scsicmd[5];
684 tf->hob_lbah = scsicmd[4];
685 819
686 qc->nsect = ((unsigned int)scsicmd[12] << 8) | 820 tf->lbah = (block >> 16) & 0xff;
687 scsicmd[13]; 821 tf->lbam = (block >> 8) & 0xff;
688 } else { 822 tf->lbal = block & 0xff;
689 /* once again, filter out impossible non-zero values */
690 if (scsicmd[4] || scsicmd[5] || scsicmd[12] ||
691 (scsicmd[6] & 0xf0))
692 return 1;
693 823
694 /* stores LBA27:24 in lower 4 bits of device reg */ 824 tf->device |= ATA_LBA;
695 tf->device |= scsicmd[6]; 825 } else {
826 /* CHS */
827 u32 sect, head, cyl, track;
828
829 /* The request -may- be too large for CHS addressing. */
830 if ((block >> 28) || (n_block > 256))
831 goto out_of_range;
832
833 /* Convert LBA to CHS */
834 track = (u32)block / dev->sectors;
835 cyl = track / dev->heads;
836 head = track % dev->heads;
837 sect = (u32)block % dev->sectors + 1;
838
839 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
840 (u32)block, track, cyl, head, sect);
841
842 /* Check whether the converted CHS can fit.
843 Cylinder: 0-65535
844 Head: 0-15
845 Sector: 1-255*/
846 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
847 goto out_of_range;
848
849 qc->nsect = n_block;
850 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
851 tf->lbal = sect;
852 tf->lbam = cyl;
853 tf->lbah = cyl >> 8;
854 tf->device |= head;
855 }
696 856
697 qc->nsect = scsicmd[13]; 857 return 0;
698 }
699 858
700 tf->nsect = scsicmd[13]; 859invalid_fld:
701 tf->lbal = scsicmd[9]; 860 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
702 tf->lbam = scsicmd[8]; 861 /* "Invalid field in CDB" */
703 tf->lbah = scsicmd[7]; 862 return 1;
704 863
705 VPRINTK("sixteen-byte command\n"); 864out_of_range:
706 if (qc->nsect == 0) /* we don't support length==0 cmds */ 865 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x21, 0x0);
707 return 1; 866 /* "Logical Block Address out of range" */
708 return 0; 867 return 1;
709 }
710 868
711 DPRINTK("no-byte command\n"); 869nothing_to_do:
870 qc->scsicmd->result = SAM_STAT_GOOD;
712 return 1; 871 return 1;
713} 872}
714 873
@@ -741,6 +900,12 @@ static int ata_scsi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
741 * This function sets up an ata_queued_cmd structure for the 900 * This function sets up an ata_queued_cmd structure for the
742 * SCSI command, and sends that ata_queued_cmd to the hardware. 901 * SCSI command, and sends that ata_queued_cmd to the hardware.
743 * 902 *
903 * The xlat_func argument (actor) returns 0 if ready to execute the
904 * ATA command, else 1 to finish translation. If 1 is returned
905 * then cmd->result (and possibly cmd->sense_buffer) are assumed
906 * to be set reflecting an error condition or clean (early)
907 * termination.
908 *
744 * LOCKING: 909 * LOCKING:
745 * spin_lock_irqsave(host_set lock) 910 * spin_lock_irqsave(host_set lock)
746 */ 911 */
@@ -757,7 +922,7 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
757 922
758 qc = ata_scsi_qc_new(ap, dev, cmd, done); 923 qc = ata_scsi_qc_new(ap, dev, cmd, done);
759 if (!qc) 924 if (!qc)
760 return; 925 goto err_mem;
761 926
762 /* data is present; dma-map it */ 927 /* data is present; dma-map it */
763 if (cmd->sc_data_direction == DMA_FROM_DEVICE || 928 if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
@@ -765,7 +930,7 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
765 if (unlikely(cmd->request_bufflen < 1)) { 930 if (unlikely(cmd->request_bufflen < 1)) {
766 printk(KERN_WARNING "ata%u(%u): WARNING: zero len r/w req\n", 931 printk(KERN_WARNING "ata%u(%u): WARNING: zero len r/w req\n",
767 ap->id, dev->devno); 932 ap->id, dev->devno);
768 goto err_out; 933 goto err_did;
769 } 934 }
770 935
771 if (cmd->use_sg) 936 if (cmd->use_sg)
@@ -780,19 +945,28 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
780 qc->complete_fn = ata_scsi_qc_complete; 945 qc->complete_fn = ata_scsi_qc_complete;
781 946
782 if (xlat_func(qc, scsicmd)) 947 if (xlat_func(qc, scsicmd))
783 goto err_out; 948 goto early_finish;
784 949
785 /* select device, send command to hardware */ 950 /* select device, send command to hardware */
786 if (ata_qc_issue(qc)) 951 if (ata_qc_issue(qc))
787 goto err_out; 952 goto err_did;
788 953
789 VPRINTK("EXIT\n"); 954 VPRINTK("EXIT\n");
790 return; 955 return;
791 956
792err_out: 957early_finish:
958 ata_qc_free(qc);
959 done(cmd);
960 DPRINTK("EXIT - early finish (good or error)\n");
961 return;
962
963err_did:
793 ata_qc_free(qc); 964 ata_qc_free(qc);
794 ata_bad_cdb(cmd, done); 965err_mem:
795 DPRINTK("EXIT - badcmd\n"); 966 cmd->result = (DID_ERROR << 16);
967 done(cmd);
968 DPRINTK("EXIT - internal\n");
969 return;
796} 970}
797 971
798/** 972/**
@@ -859,7 +1033,8 @@ static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf)
859 * Mapping the response buffer, calling the command's handler, 1033 * Mapping the response buffer, calling the command's handler,
860 * and handling the handler's return value. This return value 1034 * and handling the handler's return value. This return value
861 * indicates whether the handler wishes the SCSI command to be 1035 * indicates whether the handler wishes the SCSI command to be
862 * completed successfully, or not. 1036 * completed successfully (0), or not (in which case cmd->result
1037 * and sense buffer are assumed to be set).
863 * 1038 *
864 * LOCKING: 1039 * LOCKING:
865 * spin_lock_irqsave(host_set lock) 1040 * spin_lock_irqsave(host_set lock)
@@ -878,12 +1053,9 @@ void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
878 rc = actor(args, rbuf, buflen); 1053 rc = actor(args, rbuf, buflen);
879 ata_scsi_rbuf_put(cmd, rbuf); 1054 ata_scsi_rbuf_put(cmd, rbuf);
880 1055
881 if (rc) 1056 if (rc == 0)
882 ata_bad_cdb(cmd, args->done);
883 else {
884 cmd->result = SAM_STAT_GOOD; 1057 cmd->result = SAM_STAT_GOOD;
885 args->done(cmd); 1058 args->done(cmd);
886 }
887} 1059}
888 1060
889/** 1061/**
@@ -1189,8 +1361,16 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
1189 * in the same manner) 1361 * in the same manner)
1190 */ 1362 */
1191 page_control = scsicmd[2] >> 6; 1363 page_control = scsicmd[2] >> 6;
1192 if ((page_control != 0) && (page_control != 3)) 1364 switch (page_control) {
1193 return 1; 1365 case 0: /* current */
1366 break; /* supported */
1367 case 3: /* saved */
1368 goto saving_not_supp;
1369 case 1: /* changeable */
1370 case 2: /* defaults */
1371 default:
1372 goto invalid_fld;
1373 }
1194 1374
1195 if (six_byte) 1375 if (six_byte)
1196 output_len = 4; 1376 output_len = 4;
@@ -1221,7 +1401,7 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
1221 break; 1401 break;
1222 1402
1223 default: /* invalid page code */ 1403 default: /* invalid page code */
1224 return 1; 1404 goto invalid_fld;
1225 } 1405 }
1226 1406
1227 if (six_byte) { 1407 if (six_byte) {
@@ -1234,6 +1414,16 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
1234 } 1414 }
1235 1415
1236 return 0; 1416 return 0;
1417
1418invalid_fld:
1419 ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x24, 0x0);
1420 /* "Invalid field in cbd" */
1421 return 1;
1422
1423saving_not_supp:
1424 ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
1425 /* "Saving parameters not supported" */
1426 return 1;
1237} 1427}
1238 1428
1239/** 1429/**
@@ -1256,10 +1446,20 @@ unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
1256 1446
1257 VPRINTK("ENTER\n"); 1447 VPRINTK("ENTER\n");
1258 1448
1259 if (ata_id_has_lba48(args->id)) 1449 if (ata_id_has_lba(args->id)) {
1260 n_sectors = ata_id_u64(args->id, 100); 1450 if (ata_id_has_lba48(args->id))
1261 else 1451 n_sectors = ata_id_u64(args->id, 100);
1262 n_sectors = ata_id_u32(args->id, 60); 1452 else
1453 n_sectors = ata_id_u32(args->id, 60);
1454 } else {
1455 /* CHS default translation */
1456 n_sectors = args->id[1] * args->id[3] * args->id[6];
1457
1458 if (ata_id_current_chs_valid(args->id))
1459 /* CHS current translation */
1460 n_sectors = ata_id_u32(args->id, 57);
1461 }
1462
1263 n_sectors--; /* ATA TotalUserSectors - 1 */ 1463 n_sectors--; /* ATA TotalUserSectors - 1 */
1264 1464
1265 if (args->cmd->cmnd[0] == READ_CAPACITY) { 1465 if (args->cmd->cmnd[0] == READ_CAPACITY) {
@@ -1323,6 +1523,34 @@ unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
1323} 1523}
1324 1524
1325/** 1525/**
1526 * ata_scsi_set_sense - Set SCSI sense data and status
1527 * @cmd: SCSI request to be handled
1528 * @sk: SCSI-defined sense key
1529 * @asc: SCSI-defined additional sense code
1530 * @ascq: SCSI-defined additional sense code qualifier
1531 *
1532 * Helper function that builds a valid fixed format, current
1533 * response code and the given sense key (sk), additional sense
1534 * code (asc) and additional sense code qualifier (ascq) with
1535 * a SCSI command status of %SAM_STAT_CHECK_CONDITION and
1536 * DRIVER_SENSE set in the upper bits of scsi_cmnd::result .
1537 *
1538 * LOCKING:
1539 * Not required
1540 */
1541
1542void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
1543{
1544 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
1545
1546 cmd->sense_buffer[0] = 0x70; /* fixed format, current */
1547 cmd->sense_buffer[2] = sk;
1548 cmd->sense_buffer[7] = 18 - 8; /* additional sense length */
1549 cmd->sense_buffer[12] = asc;
1550 cmd->sense_buffer[13] = ascq;
1551}
1552
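
ata_scsi_set_sense() above builds fixed-format sense data directly in the 18-byte buffer. A standalone sketch of the resulting layout for ILLEGAL REQUEST with "invalid field in CDB" (0x05/0x24/0x00 are the standard SCSI sense key, ASC and ASCQ values):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
        uint8_t sb[18];
        int i;

        memset(sb, 0, sizeof(sb));
        sb[0]  = 0x70;          /* fixed format, current error */
        sb[2]  = 0x05;          /* sense key: ILLEGAL REQUEST */
        sb[7]  = 18 - 8;        /* additional sense length */
        sb[12] = 0x24;          /* ASC: invalid field in CDB */
        sb[13] = 0x00;          /* ASCQ */

        for (i = 0; i < 18; i++)
                printf("%02x ", sb[i]);
        printf("\n");
        return 0;
}
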
1553/**
1326 * ata_scsi_badcmd - End a SCSI request with an error 1554 * ata_scsi_badcmd - End a SCSI request with an error
1327 * @cmd: SCSI request to be handled 1555 * @cmd: SCSI request to be handled
1328 * @done: SCSI command completion function 1556 * @done: SCSI command completion function
@@ -1340,30 +1568,84 @@ unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
1340void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq) 1568void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq)
1341{ 1569{
1342 DPRINTK("ENTER\n"); 1570 DPRINTK("ENTER\n");
1343 cmd->result = SAM_STAT_CHECK_CONDITION; 1571 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, asc, ascq);
1344
1345 cmd->sense_buffer[0] = 0x70;
1346 cmd->sense_buffer[2] = ILLEGAL_REQUEST;
1347 cmd->sense_buffer[7] = 14 - 8; /* addnl. sense len. FIXME: correct? */
1348 cmd->sense_buffer[12] = asc;
1349 cmd->sense_buffer[13] = ascq;
1350 1572
1351 done(cmd); 1573 done(cmd);
1352} 1574}
1353 1575
1576void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
1577 struct scsi_cmnd *cmd)
1578{
1579 DECLARE_COMPLETION(wait);
1580 struct ata_queued_cmd *qc;
1581 unsigned long flags;
1582 int rc;
1583
1584 DPRINTK("ATAPI request sense\n");
1585
1586 qc = ata_qc_new_init(ap, dev);
1587 BUG_ON(qc == NULL);
1588
1589 /* FIXME: is this needed? */
1590 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
1591
1592 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
1593 qc->dma_dir = DMA_FROM_DEVICE;
1594
1595 memset(&qc->cdb, 0, ap->cdb_len);
1596 qc->cdb[0] = REQUEST_SENSE;
1597 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
1598
1599 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1600 qc->tf.command = ATA_CMD_PACKET;
1601
1602 qc->tf.protocol = ATA_PROT_ATAPI;
1603 qc->tf.lbam = (8 * 1024) & 0xff;
1604 qc->tf.lbah = (8 * 1024) >> 8;
1605 qc->nbytes = SCSI_SENSE_BUFFERSIZE;
1606
1607 qc->waiting = &wait;
1608 qc->complete_fn = ata_qc_complete_noop;
1609
1610 spin_lock_irqsave(&ap->host_set->lock, flags);
1611 rc = ata_qc_issue(qc);
1612 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1613
1614 if (rc)
1615 ata_port_disable(ap);
1616 else
1617 wait_for_completion(&wait);
1618
1619 DPRINTK("EXIT\n");
1620}
1621
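The CDB that atapi_request_sense() sends is the standard 6-byte REQUEST SENSE packet: opcode 0x03 in byte 0, allocation length in byte 4, all other bytes zero; the taskfile's lbam/lbah pair additionally advertises an 8 KB per-DRQ byte-count limit (8*1024 split across the two bytes). A small sketch building the same packet; REQUEST_SENSE is the standard SCSI opcode, and SCSI_SENSE_BUFFERSIZE is 96 in kernels of this era:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define REQUEST_SENSE          0x03
    #define SCSI_SENSE_BUFFERSIZE  96    /* Linux sense buffer size */

    static void build_request_sense_cdb(uint8_t *cdb, size_t cdb_len)
    {
        memset(cdb, 0, cdb_len);
        cdb[0] = REQUEST_SENSE;          /* opcode */
        cdb[4] = SCSI_SENSE_BUFFERSIZE;  /* allocation length */
    }

    int main(void)
    {
        uint8_t cdb[12];                 /* ATAPI CDBs are 12 or 16 bytes */

        build_request_sense_cdb(cdb, sizeof(cdb));
        printf("opcode=0x%02x alloc len=%u\n", cdb[0], cdb[4]);
        return 0;
    }
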
1354static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) 1622static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
1355{ 1623{
1356 struct scsi_cmnd *cmd = qc->scsicmd; 1624 struct scsi_cmnd *cmd = qc->scsicmd;
1357 1625
1358 if (unlikely(drv_stat & (ATA_ERR | ATA_BUSY | ATA_DRQ))) { 1626 VPRINTK("ENTER, drv_stat == 0x%x\n", drv_stat);
1627
1628 if (unlikely(drv_stat & (ATA_BUSY | ATA_DRQ)))
1629 ata_to_sense_error(qc, drv_stat);
1630
1631 else if (unlikely(drv_stat & ATA_ERR)) {
1359 DPRINTK("request check condition\n"); 1632 DPRINTK("request check condition\n");
1360 1633
1634 /* FIXME: command completion with check condition
1635 * but no sense causes the error handler to run,
1636 * which then issues REQUEST SENSE, fills in the sense
1637 * buffer, and completes the command (for the second
1638 * time). We need to issue REQUEST SENSE some other
1639 * way, to avoid completing the command twice.
1640 */
1361 cmd->result = SAM_STAT_CHECK_CONDITION; 1641 cmd->result = SAM_STAT_CHECK_CONDITION;
1362 1642
1363 qc->scsidone(cmd); 1643 qc->scsidone(cmd);
1364 1644
1365 return 1; 1645 return 1;
1366 } else { 1646 }
1647
1648 else {
1367 u8 *scsicmd = cmd->cmnd; 1649 u8 *scsicmd = cmd->cmnd;
1368 1650
1369 if (scsicmd[0] == INQUIRY) { 1651 if (scsicmd[0] == INQUIRY) {
@@ -1371,15 +1653,30 @@ static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
1371 unsigned int buflen; 1653 unsigned int buflen;
1372 1654
1373 buflen = ata_scsi_rbuf_get(cmd, &buf); 1655 buflen = ata_scsi_rbuf_get(cmd, &buf);
1374 buf[2] = 0x5; 1656
1375 buf[3] = (buf[3] & 0xf0) | 2; 1657 /* ATAPI devices typically report zero for their SCSI version,
1658 * and sometimes deviate from the spec WRT response data
 1659 * format. If the SCSI version is reported as zero, as is typical,
1660 * then we make the following fixups: 1) Fake MMC-5 version,
1661 * to indicate to the Linux scsi midlayer this is a modern
1662 * device. 2) Ensure response data format / ATAPI information
1663 * are always correct.
1664 */
1665 /* FIXME: do we ever override EVPD pages and the like, with
1666 * this code?
1667 */
1668 if (buf[2] == 0) {
1669 buf[2] = 0x5;
1670 buf[3] = 0x32;
1671 }
1672
1376 ata_scsi_rbuf_put(cmd, buf); 1673 ata_scsi_rbuf_put(cmd, buf);
1377 } 1674 }
1675
1378 cmd->result = SAM_STAT_GOOD; 1676 cmd->result = SAM_STAT_GOOD;
1379 } 1677 }
1380 1678
1381 qc->scsidone(cmd); 1679 qc->scsidone(cmd);
1382
1383 return 0; 1680 return 0;
1384} 1681}
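
The INQUIRY fixup above touches two standard INQUIRY bytes: byte 2 (version) is forced to 0x05 to fake an MMC-5-era device, and byte 3 to 0x32, i.e. response data format 2 plus flag bits in the upper nibble. A sketch of the same conditional fixup applied to a raw INQUIRY buffer (the sample peripheral type byte is just illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Same fixup as atapi_qc_complete(): only touch devices that
     * report SCSI version 0, as most ATAPI devices do.
     */
    static void fixup_atapi_inquiry(uint8_t *buf)
    {
        if (buf[2] == 0) {
            buf[2] = 0x05;  /* fake an MMC-5-era version */
            buf[3] = 0x32;  /* response data format 2 + flag bits */
        }
    }

    int main(void)
    {
        uint8_t inq[36] = { 0x05 /* peripheral type: CD/DVD */ };

        fixup_atapi_inquiry(inq);
        printf("version=%#x rdf byte=%#x\n", inq[2], inq[3]);
        return 0;
    }
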
1385/** 1682/**
@@ -1640,7 +1937,7 @@ void ata_scsi_simulate(u16 *id,
1640 1937
1641 case INQUIRY: 1938 case INQUIRY:
1642 if (scsicmd[1] & 2) /* is CmdDt set? */ 1939 if (scsicmd[1] & 2) /* is CmdDt set? */
1643 ata_bad_cdb(cmd, done); 1940 ata_scsi_invalid_field(cmd, done);
1644 else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */ 1941 else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */
1645 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std); 1942 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
1646 else if (scsicmd[2] == 0x00) 1943 else if (scsicmd[2] == 0x00)
@@ -1650,7 +1947,7 @@ void ata_scsi_simulate(u16 *id,
1650 else if (scsicmd[2] == 0x83) 1947 else if (scsicmd[2] == 0x83)
1651 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83); 1948 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
1652 else 1949 else
1653 ata_bad_cdb(cmd, done); 1950 ata_scsi_invalid_field(cmd, done);
1654 break; 1951 break;
1655 1952
1656 case MODE_SENSE: 1953 case MODE_SENSE:
@@ -1660,7 +1957,7 @@ void ata_scsi_simulate(u16 *id,
1660 1957
1661 case MODE_SELECT: /* unconditionally return */ 1958 case MODE_SELECT: /* unconditionally return */
1662 case MODE_SELECT_10: /* bad-field-in-cdb */ 1959 case MODE_SELECT_10: /* bad-field-in-cdb */
1663 ata_bad_cdb(cmd, done); 1960 ata_scsi_invalid_field(cmd, done);
1664 break; 1961 break;
1665 1962
1666 case READ_CAPACITY: 1963 case READ_CAPACITY:
@@ -1671,7 +1968,7 @@ void ata_scsi_simulate(u16 *id,
1671 if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16) 1968 if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
1672 ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); 1969 ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
1673 else 1970 else
1674 ata_bad_cdb(cmd, done); 1971 ata_scsi_invalid_field(cmd, done);
1675 break; 1972 break;
1676 1973
1677 case REPORT_LUNS: 1974 case REPORT_LUNS:
@@ -1683,8 +1980,26 @@ void ata_scsi_simulate(u16 *id,
1683 1980
1684 /* all other commands */ 1981 /* all other commands */
1685 default: 1982 default:
1686 ata_bad_scsiop(cmd, done); 1983 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0);
1984 /* "Invalid command operation code" */
1985 done(cmd);
1687 break; 1986 break;
1688 } 1987 }
1689} 1988}
1690 1989
1990void ata_scsi_scan_host(struct ata_port *ap)
1991{
1992 struct ata_device *dev;
1993 unsigned int i;
1994
1995 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1996 return;
1997
1998 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1999 dev = &ap->device[i];
2000
2001 if (ata_dev_present(dev))
2002 scsi_scan_target(&ap->host->shost_gendev, 0, i, 0, 0);
2003 }
2004}
2005
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index d608b3a0f6fe..a18f2ac1d4a1 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -39,6 +39,7 @@ struct ata_scsi_args {
39 39
40/* libata-core.c */ 40/* libata-core.c */
41extern int atapi_enabled; 41extern int atapi_enabled;
42extern int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat);
42extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, 43extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
43 struct ata_device *dev); 44 struct ata_device *dev);
44extern void ata_qc_free(struct ata_queued_cmd *qc); 45extern void ata_qc_free(struct ata_queued_cmd *qc);
@@ -51,6 +52,9 @@ extern void swap_buf_le16(u16 *buf, unsigned int buf_words);
51 52
52 53
53/* libata-scsi.c */ 54/* libata-scsi.c */
55extern void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
56 struct scsi_cmnd *cmd);
57extern void ata_scsi_scan_host(struct ata_port *ap);
54extern void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat); 58extern void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat);
55extern int ata_scsi_error(struct Scsi_Host *host); 59extern int ata_scsi_error(struct Scsi_Host *host);
56extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, 60extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
@@ -76,18 +80,10 @@ extern unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
76extern void ata_scsi_badcmd(struct scsi_cmnd *cmd, 80extern void ata_scsi_badcmd(struct scsi_cmnd *cmd,
77 void (*done)(struct scsi_cmnd *), 81 void (*done)(struct scsi_cmnd *),
78 u8 asc, u8 ascq); 82 u8 asc, u8 ascq);
83extern void ata_scsi_set_sense(struct scsi_cmnd *cmd,
84 u8 sk, u8 asc, u8 ascq);
79extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args, 85extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
80 unsigned int (*actor) (struct ata_scsi_args *args, 86 unsigned int (*actor) (struct ata_scsi_args *args,
81 u8 *rbuf, unsigned int buflen)); 87 u8 *rbuf, unsigned int buflen));
82 88
83static inline void ata_bad_scsiop(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
84{
85 ata_scsi_badcmd(cmd, done, 0x20, 0x00);
86}
87
88static inline void ata_bad_cdb(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
89{
90 ata_scsi_badcmd(cmd, done, 0x24, 0x00);
91}
92
93#endif /* __LIBATA_H__ */ 89#endif /* __LIBATA_H__ */
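
The removed ata_bad_cdb()/ata_bad_scsiop() inlines mapped to ILLEGAL REQUEST with ASC/ASCQ 0x24/0x00 ("invalid field in CDB") and 0x20/0x00 ("invalid command operation code") respectively. Their replacement in the simulator, ata_scsi_invalid_field(), is not shown in these hunks; a plausible definition, assuming it simply wraps the new ata_scsi_set_sense() with the 0x24/0x00 pair and completes the command:

    /* Hypothetical sketch only -- the actual definition lives in
     * libata-scsi.c and is not part of this hunk.
     */
    static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
                                       void (*done)(struct scsi_cmnd *))
    {
        ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
        /* "Invalid field in CDB" */
        done(cmd);
    }
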
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 1b3148e842af..c3f637395734 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -34,6 +34,7 @@
34#include <linux/delay.h> 34#include <linux/delay.h>
35#include <linux/uio.h> 35#include <linux/uio.h>
36#include <asm/uaccess.h> 36#include <asm/uaccess.h>
37#include <linux/fs.h>
37#include <linux/compat.h> 38#include <linux/compat.h>
38 39
39#include <scsi/scsi.h> 40#include <scsi/scsi.h>
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index ea76fe44585e..d457f5673476 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -35,7 +35,7 @@
35#include <asm/io.h> 35#include <asm/io.h>
36 36
37#define DRV_NAME "sata_mv" 37#define DRV_NAME "sata_mv"
38#define DRV_VERSION "0.12" 38#define DRV_VERSION "0.24"
39 39
40enum { 40enum {
41 /* BAR's are enumerated in terms of pci_resource_start() terms */ 41 /* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -55,31 +55,61 @@ enum {
55 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */ 55 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
56 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ, 56 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
57 57
58 MV_Q_CT = 32, 58 MV_USE_Q_DEPTH = ATA_DEF_QUEUE,
59 MV_CRQB_SZ = 32,
60 MV_CRPB_SZ = 8,
61 59
62 MV_DMA_BOUNDARY = 0xffffffffU, 60 MV_MAX_Q_DEPTH = 32,
63 SATAHC_MASK = (~(MV_SATAHC_REG_SZ - 1)), 61 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
62
63 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
64 * CRPB needs alignment on a 256B boundary. Size == 256B
65 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
66 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
67 */
68 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
69 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
70 MV_MAX_SG_CT = 176,
71 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
72 MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
73
74 /* Our DMA boundary is determined by an ePRD being unable to handle
75 * anything larger than 64KB
76 */
77 MV_DMA_BOUNDARY = 0xffffU,
64 78
65 MV_PORTS_PER_HC = 4, 79 MV_PORTS_PER_HC = 4,
66 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */ 80 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
67 MV_PORT_HC_SHIFT = 2, 81 MV_PORT_HC_SHIFT = 2,
68 /* == (port % MV_PORTS_PER_HC) to determine port from 0-7 port */ 82 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
69 MV_PORT_MASK = 3, 83 MV_PORT_MASK = 3,
70 84
71 /* Host Flags */ 85 /* Host Flags */
72 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ 86 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
73 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ 87 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
74 MV_FLAG_BDMA = (1 << 28), /* Basic DMA */ 88 MV_FLAG_GLBL_SFT_RST = (1 << 28), /* Global Soft Reset support */
89 MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
90 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO),
91 MV_6XXX_FLAGS = (MV_FLAG_IRQ_COALESCE |
92 MV_FLAG_GLBL_SFT_RST),
75 93
76 chip_504x = 0, 94 chip_504x = 0,
77 chip_508x = 1, 95 chip_508x = 1,
78 chip_604x = 2, 96 chip_604x = 2,
79 chip_608x = 3, 97 chip_608x = 3,
80 98
99 CRQB_FLAG_READ = (1 << 0),
100 CRQB_TAG_SHIFT = 1,
101 CRQB_CMD_ADDR_SHIFT = 8,
102 CRQB_CMD_CS = (0x2 << 11),
103 CRQB_CMD_LAST = (1 << 15),
104
105 CRPB_FLAG_STATUS_SHIFT = 8,
106
107 EPRD_FLAG_END_OF_TBL = (1 << 31),
108
81 /* PCI interface registers */ 109 /* PCI interface registers */
82 110
111 PCI_COMMAND_OFS = 0xc00,
112
83 PCI_MAIN_CMD_STS_OFS = 0xd30, 113 PCI_MAIN_CMD_STS_OFS = 0xd30,
84 STOP_PCI_MASTER = (1 << 2), 114 STOP_PCI_MASTER = (1 << 2),
85 PCI_MASTER_EMPTY = (1 << 3), 115 PCI_MASTER_EMPTY = (1 << 3),
@@ -111,20 +141,13 @@ enum {
111 HC_CFG_OFS = 0, 141 HC_CFG_OFS = 0,
112 142
113 HC_IRQ_CAUSE_OFS = 0x14, 143 HC_IRQ_CAUSE_OFS = 0x14,
114 CRBP_DMA_DONE = (1 << 0), /* shift by port # */ 144 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
115 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */ 145 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
116 DEV_IRQ = (1 << 8), /* shift by port # */ 146 DEV_IRQ = (1 << 8), /* shift by port # */
117 147
118 /* Shadow block registers */ 148 /* Shadow block registers */
119 SHD_PIO_DATA_OFS = 0x100, 149 SHD_BLK_OFS = 0x100,
120 SHD_FEA_ERR_OFS = 0x104, 150 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
121 SHD_SECT_CNT_OFS = 0x108,
122 SHD_LBA_L_OFS = 0x10C,
123 SHD_LBA_M_OFS = 0x110,
124 SHD_LBA_H_OFS = 0x114,
125 SHD_DEV_HD_OFS = 0x118,
126 SHD_CMD_STA_OFS = 0x11C,
127 SHD_CTL_AST_OFS = 0x120,
128 151
129 /* SATA registers */ 152 /* SATA registers */
130 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */ 153 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
@@ -132,6 +155,11 @@ enum {
132 155
133 /* Port registers */ 156 /* Port registers */
134 EDMA_CFG_OFS = 0, 157 EDMA_CFG_OFS = 0,
158 EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */
159 EDMA_CFG_NCQ = (1 << 5),
160 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
161 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
162 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
135 163
136 EDMA_ERR_IRQ_CAUSE_OFS = 0x8, 164 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
137 EDMA_ERR_IRQ_MASK_OFS = 0xc, 165 EDMA_ERR_IRQ_MASK_OFS = 0xc,
@@ -161,33 +189,85 @@ enum {
161 EDMA_ERR_LNK_DATA_TX | 189 EDMA_ERR_LNK_DATA_TX |
162 EDMA_ERR_TRANS_PROTO), 190 EDMA_ERR_TRANS_PROTO),
163 191
192 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
193 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
194 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
195
196 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
197 EDMA_REQ_Q_PTR_SHIFT = 5,
198
199 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
200 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
201 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
202 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
203 EDMA_RSP_Q_PTR_SHIFT = 3,
204
164 EDMA_CMD_OFS = 0x28, 205 EDMA_CMD_OFS = 0x28,
165 EDMA_EN = (1 << 0), 206 EDMA_EN = (1 << 0),
166 EDMA_DS = (1 << 1), 207 EDMA_DS = (1 << 1),
167 ATA_RST = (1 << 2), 208 ATA_RST = (1 << 2),
168 209
169 /* BDMA is 6xxx part only */ 210 /* Host private flags (hp_flags) */
170 BDMA_CMD_OFS = 0x224, 211 MV_HP_FLAG_MSI = (1 << 0),
171 BDMA_START = (1 << 0),
172 212
173 MV_UNDEF = 0, 213 /* Port private flags (pp_flags) */
214 MV_PP_FLAG_EDMA_EN = (1 << 0),
215 MV_PP_FLAG_EDMA_DS_ACT = (1 << 1),
174}; 216};
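
The sizing comment in the enum is easy to verify: 32 CRQBs at 32 B each is 1 KB, 32 CRPBs at 8 B each is 256 B, and 176 ePRDs at 16 B each is 2816 B, so one port's private DMA region is exactly 4 KB. A quick check of that arithmetic using the constants from the patch:

    #include <stdio.h>

    enum {
        MV_MAX_Q_DEPTH      = 32,
        MV_CRQB_Q_SZ        = 32 * MV_MAX_Q_DEPTH,  /* 1024 */
        MV_CRPB_Q_SZ        = 8 * MV_MAX_Q_DEPTH,   /*  256 */
        MV_MAX_SG_CT        = 176,
        MV_SG_TBL_SZ        = 16 * MV_MAX_SG_CT,    /* 2816 */
        MV_PORT_PRIV_DMA_SZ = MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ,
    };

    int main(void)
    {
        /* 1024 + 256 + 2816 == 4096, one page on most systems */
        printf("per-port DMA region: %d bytes\n", MV_PORT_PRIV_DMA_SZ);
        return 0;
    }
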
175 217
176struct mv_port_priv { 218/* Command ReQuest Block: 32B */
219struct mv_crqb {
220 u32 sg_addr;
221 u32 sg_addr_hi;
222 u16 ctrl_flags;
223 u16 ata_cmd[11];
224};
177 225
226/* Command ResPonse Block: 8B */
227struct mv_crpb {
228 u16 id;
229 u16 flags;
230 u32 tmstmp;
178}; 231};
179 232
180struct mv_host_priv { 233/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
234struct mv_sg {
235 u32 addr;
236 u32 flags_size;
237 u32 addr_hi;
238 u32 reserved;
239};
181 240
241struct mv_port_priv {
242 struct mv_crqb *crqb;
243 dma_addr_t crqb_dma;
244 struct mv_crpb *crpb;
245 dma_addr_t crpb_dma;
246 struct mv_sg *sg_tbl;
247 dma_addr_t sg_tbl_dma;
248
249 unsigned req_producer; /* cp of req_in_ptr */
250 unsigned rsp_consumer; /* cp of rsp_out_ptr */
251 u32 pp_flags;
252};
253
254struct mv_host_priv {
255 u32 hp_flags;
182}; 256};
183 257
184static void mv_irq_clear(struct ata_port *ap); 258static void mv_irq_clear(struct ata_port *ap);
185static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in); 259static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
186static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); 260static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
261static u8 mv_check_err(struct ata_port *ap);
187static void mv_phy_reset(struct ata_port *ap); 262static void mv_phy_reset(struct ata_port *ap);
188static int mv_master_reset(void __iomem *mmio_base); 263static void mv_host_stop(struct ata_host_set *host_set);
264static int mv_port_start(struct ata_port *ap);
265static void mv_port_stop(struct ata_port *ap);
266static void mv_qc_prep(struct ata_queued_cmd *qc);
267static int mv_qc_issue(struct ata_queued_cmd *qc);
189static irqreturn_t mv_interrupt(int irq, void *dev_instance, 268static irqreturn_t mv_interrupt(int irq, void *dev_instance,
190 struct pt_regs *regs); 269 struct pt_regs *regs);
270static void mv_eng_timeout(struct ata_port *ap);
191static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 271static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
192 272
193static Scsi_Host_Template mv_sht = { 273static Scsi_Host_Template mv_sht = {
@@ -196,13 +276,13 @@ static Scsi_Host_Template mv_sht = {
196 .ioctl = ata_scsi_ioctl, 276 .ioctl = ata_scsi_ioctl,
197 .queuecommand = ata_scsi_queuecmd, 277 .queuecommand = ata_scsi_queuecmd,
198 .eh_strategy_handler = ata_scsi_error, 278 .eh_strategy_handler = ata_scsi_error,
199 .can_queue = ATA_DEF_QUEUE, 279 .can_queue = MV_USE_Q_DEPTH,
200 .this_id = ATA_SHT_THIS_ID, 280 .this_id = ATA_SHT_THIS_ID,
201 .sg_tablesize = MV_UNDEF, 281 .sg_tablesize = MV_MAX_SG_CT,
202 .max_sectors = ATA_MAX_SECTORS, 282 .max_sectors = ATA_MAX_SECTORS,
203 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 283 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
204 .emulated = ATA_SHT_EMULATED, 284 .emulated = ATA_SHT_EMULATED,
205 .use_clustering = MV_UNDEF, 285 .use_clustering = ATA_SHT_USE_CLUSTERING,
206 .proc_name = DRV_NAME, 286 .proc_name = DRV_NAME,
207 .dma_boundary = MV_DMA_BOUNDARY, 287 .dma_boundary = MV_DMA_BOUNDARY,
208 .slave_configure = ata_scsi_slave_config, 288 .slave_configure = ata_scsi_slave_config,
@@ -216,15 +296,16 @@ static struct ata_port_operations mv_ops = {
216 .tf_load = ata_tf_load, 296 .tf_load = ata_tf_load,
217 .tf_read = ata_tf_read, 297 .tf_read = ata_tf_read,
218 .check_status = ata_check_status, 298 .check_status = ata_check_status,
299 .check_err = mv_check_err,
219 .exec_command = ata_exec_command, 300 .exec_command = ata_exec_command,
220 .dev_select = ata_std_dev_select, 301 .dev_select = ata_std_dev_select,
221 302
222 .phy_reset = mv_phy_reset, 303 .phy_reset = mv_phy_reset,
223 304
224 .qc_prep = ata_qc_prep, 305 .qc_prep = mv_qc_prep,
225 .qc_issue = ata_qc_issue_prot, 306 .qc_issue = mv_qc_issue,
226 307
227 .eng_timeout = ata_eng_timeout, 308 .eng_timeout = mv_eng_timeout,
228 309
229 .irq_handler = mv_interrupt, 310 .irq_handler = mv_interrupt,
230 .irq_clear = mv_irq_clear, 311 .irq_clear = mv_irq_clear,
@@ -232,46 +313,39 @@ static struct ata_port_operations mv_ops = {
232 .scr_read = mv_scr_read, 313 .scr_read = mv_scr_read,
233 .scr_write = mv_scr_write, 314 .scr_write = mv_scr_write,
234 315
235 .port_start = ata_port_start, 316 .port_start = mv_port_start,
236 .port_stop = ata_port_stop, 317 .port_stop = mv_port_stop,
237 .host_stop = ata_host_stop, 318 .host_stop = mv_host_stop,
238}; 319};
239 320
240static struct ata_port_info mv_port_info[] = { 321static struct ata_port_info mv_port_info[] = {
241 { /* chip_504x */ 322 { /* chip_504x */
242 .sht = &mv_sht, 323 .sht = &mv_sht,
243 .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 324 .host_flags = MV_COMMON_FLAGS,
244 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO), 325 .pio_mask = 0x1f, /* pio0-4 */
245 .pio_mask = 0x1f, /* pio4-0 */ 326 .udma_mask = 0, /* 0x7f (udma0-6 disabled for now) */
246 .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */
247 .port_ops = &mv_ops, 327 .port_ops = &mv_ops,
248 }, 328 },
249 { /* chip_508x */ 329 { /* chip_508x */
250 .sht = &mv_sht, 330 .sht = &mv_sht,
251 .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 331 .host_flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
252 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | 332 .pio_mask = 0x1f, /* pio0-4 */
253 MV_FLAG_DUAL_HC), 333 .udma_mask = 0, /* 0x7f (udma0-6 disabled for now) */
254 .pio_mask = 0x1f, /* pio4-0 */
255 .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */
256 .port_ops = &mv_ops, 334 .port_ops = &mv_ops,
257 }, 335 },
258 { /* chip_604x */ 336 { /* chip_604x */
259 .sht = &mv_sht, 337 .sht = &mv_sht,
260 .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 338 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
261 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | 339 .pio_mask = 0x1f, /* pio0-4 */
262 MV_FLAG_IRQ_COALESCE | MV_FLAG_BDMA), 340 .udma_mask = 0x7f, /* udma0-6 */
263 .pio_mask = 0x1f, /* pio4-0 */
264 .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */
265 .port_ops = &mv_ops, 341 .port_ops = &mv_ops,
266 }, 342 },
267 { /* chip_608x */ 343 { /* chip_608x */
268 .sht = &mv_sht, 344 .sht = &mv_sht,
269 .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 345 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
270 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | 346 MV_FLAG_DUAL_HC),
271 MV_FLAG_IRQ_COALESCE | MV_FLAG_DUAL_HC | 347 .pio_mask = 0x1f, /* pio0-4 */
272 MV_FLAG_BDMA), 348 .udma_mask = 0x7f, /* udma0-6 */
273 .pio_mask = 0x1f, /* pio4-0 */
274 .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */
275 .port_ops = &mv_ops, 349 .port_ops = &mv_ops,
276 }, 350 },
277}; 351};
@@ -306,12 +380,6 @@ static inline void writelfl(unsigned long data, void __iomem *addr)
306 (void) readl(addr); /* flush to avoid PCI posted write */ 380 (void) readl(addr); /* flush to avoid PCI posted write */
307} 381}
308 382
309static inline void __iomem *mv_port_addr_to_hc_base(void __iomem *port_mmio)
310{
311 return ((void __iomem *)((unsigned long)port_mmio &
312 (unsigned long)SATAHC_MASK));
313}
314
315static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc) 383static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
316{ 384{
317 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ)); 385 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
@@ -329,24 +397,150 @@ static inline void __iomem *mv_ap_base(struct ata_port *ap)
329 return mv_port_base(ap->host_set->mmio_base, ap->port_no); 397 return mv_port_base(ap->host_set->mmio_base, ap->port_no);
330} 398}
331 399
332static inline int mv_get_hc_count(unsigned long flags) 400static inline int mv_get_hc_count(unsigned long hp_flags)
333{ 401{
334 return ((flags & MV_FLAG_DUAL_HC) ? 2 : 1); 402 return ((hp_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
335} 403}
336 404
337static inline int mv_is_edma_active(struct ata_port *ap) 405static void mv_irq_clear(struct ata_port *ap)
406{
407}
408
409/**
410 * mv_start_dma - Enable eDMA engine
411 * @base: port base address
412 * @pp: port private data
413 *
414 * Verify the local cache of the eDMA state is accurate with an
415 * assert.
416 *
417 * LOCKING:
418 * Inherited from caller.
419 */
420static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
421{
422 if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
423 writelfl(EDMA_EN, base + EDMA_CMD_OFS);
424 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
425 }
426 assert(EDMA_EN & readl(base + EDMA_CMD_OFS));
427}
428
429/**
430 * mv_stop_dma - Disable eDMA engine
431 * @ap: ATA channel to manipulate
432 *
433 * Verify the local cache of the eDMA state is accurate with an
434 * assert.
435 *
436 * LOCKING:
437 * Inherited from caller.
438 */
439static void mv_stop_dma(struct ata_port *ap)
338{ 440{
339 void __iomem *port_mmio = mv_ap_base(ap); 441 void __iomem *port_mmio = mv_ap_base(ap);
340 return (EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)); 442 struct mv_port_priv *pp = ap->private_data;
443 u32 reg;
444 int i;
445
446 if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
447 /* Disable EDMA if active. The disable bit auto clears.
448 */
449 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
450 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
451 } else {
452 assert(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
453 }
454
455 /* now properly wait for the eDMA to stop */
456 for (i = 1000; i > 0; i--) {
457 reg = readl(port_mmio + EDMA_CMD_OFS);
458 if (!(EDMA_EN & reg)) {
459 break;
460 }
461 udelay(100);
462 }
463
464 if (EDMA_EN & reg) {
465 printk(KERN_ERR "ata%u: Unable to stop eDMA\n", ap->id);
466 /* FIXME: Consider doing a reset here to recover */
467 }
341} 468}
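
The disable path above polls the EDMA command register for up to 1000 iterations of udelay(100), roughly 100 ms total, before declaring the engine stuck. The same bounded-poll shape in plain C, with usleep standing in for udelay and a stub condition for demonstration:

    #include <stdio.h>
    #include <unistd.h>

    /* Poll a flag-returning function until it clears or ~100 ms elapse. */
    static int wait_for_clear(int (*still_set)(void))
    {
        for (int i = 1000; i > 0; i--) {
            if (!still_set())
                return 0;       /* engine stopped */
            usleep(100);        /* kernel code uses udelay(100) */
        }
        return -1;              /* timed out: 1000 * 100 us = 100 ms */
    }

    static int fake_busy(void) { static int n = 5; return n-- > 0; }

    int main(void)
    {
        printf("rc = %d\n", wait_for_clear(fake_busy));
        return 0;
    }
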
342 469
343static inline int mv_port_bdma_capable(struct ata_port *ap) 470#ifdef ATA_DEBUG
471static void mv_dump_mem(void __iomem *start, unsigned bytes)
344{ 472{
345 return (ap->flags & MV_FLAG_BDMA); 473 int b, w;
474 for (b = 0; b < bytes; ) {
475 DPRINTK("%p: ", start + b);
476 for (w = 0; b < bytes && w < 4; w++) {
477 printk("%08x ",readl(start + b));
478 b += sizeof(u32);
479 }
480 printk("\n");
481 }
346} 482}
483#endif
347 484
348static void mv_irq_clear(struct ata_port *ap) 485static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
486{
487#ifdef ATA_DEBUG
488 int b, w;
489 u32 dw;
490 for (b = 0; b < bytes; ) {
491 DPRINTK("%02x: ", b);
492 for (w = 0; b < bytes && w < 4; w++) {
493 (void) pci_read_config_dword(pdev,b,&dw);
494 printk("%08x ",dw);
495 b += sizeof(u32);
496 }
497 printk("\n");
498 }
499#endif
500}
501static void mv_dump_all_regs(void __iomem *mmio_base, int port,
502 struct pci_dev *pdev)
349{ 503{
504#ifdef ATA_DEBUG
505 void __iomem *hc_base = mv_hc_base(mmio_base,
506 port >> MV_PORT_HC_SHIFT);
507 void __iomem *port_base;
508 int start_port, num_ports, p, start_hc, num_hcs, hc;
509
510 if (0 > port) {
511 start_hc = start_port = 0;
 512 num_ports = 8; /* should be benign for 4-port devices */
513 num_hcs = 2;
514 } else {
515 start_hc = port >> MV_PORT_HC_SHIFT;
516 start_port = port;
517 num_ports = num_hcs = 1;
518 }
519 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
520 num_ports > 1 ? num_ports - 1 : start_port);
521
522 if (NULL != pdev) {
523 DPRINTK("PCI config space regs:\n");
524 mv_dump_pci_cfg(pdev, 0x68);
525 }
526 DPRINTK("PCI regs:\n");
527 mv_dump_mem(mmio_base+0xc00, 0x3c);
528 mv_dump_mem(mmio_base+0xd00, 0x34);
529 mv_dump_mem(mmio_base+0xf00, 0x4);
530 mv_dump_mem(mmio_base+0x1d00, 0x6c);
531 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
532 hc_base = mv_hc_base(mmio_base, port >> MV_PORT_HC_SHIFT);
533 DPRINTK("HC regs (HC %i):\n", hc);
534 mv_dump_mem(hc_base, 0x1c);
535 }
536 for (p = start_port; p < start_port + num_ports; p++) {
537 port_base = mv_port_base(mmio_base, p);
538 DPRINTK("EDMA regs (port %i):\n",p);
539 mv_dump_mem(port_base, 0x54);
540 DPRINTK("SATA regs (port %i):\n",p);
541 mv_dump_mem(port_base+0x300, 0x60);
542 }
543#endif
350} 544}
351 545
352static unsigned int mv_scr_offset(unsigned int sc_reg_in) 546static unsigned int mv_scr_offset(unsigned int sc_reg_in)
@@ -389,30 +583,37 @@ static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
389 } 583 }
390} 584}
391 585
392static int mv_master_reset(void __iomem *mmio_base) 586/**
587 * mv_global_soft_reset - Perform the 6xxx global soft reset
588 * @mmio_base: base address of the HBA
589 *
590 * This routine only applies to 6xxx parts.
591 *
592 * LOCKING:
593 * Inherited from caller.
594 */
595static int mv_global_soft_reset(void __iomem *mmio_base)
393{ 596{
394 void __iomem *reg = mmio_base + PCI_MAIN_CMD_STS_OFS; 597 void __iomem *reg = mmio_base + PCI_MAIN_CMD_STS_OFS;
395 int i, rc = 0; 598 int i, rc = 0;
396 u32 t; 599 u32 t;
397 600
398 VPRINTK("ENTER\n");
399
400 /* Following procedure defined in PCI "main command and status 601 /* Following procedure defined in PCI "main command and status
401 * register" table. 602 * register" table.
402 */ 603 */
403 t = readl(reg); 604 t = readl(reg);
404 writel(t | STOP_PCI_MASTER, reg); 605 writel(t | STOP_PCI_MASTER, reg);
405 606
406 for (i = 0; i < 100; i++) { 607 for (i = 0; i < 1000; i++) {
407 msleep(10); 608 udelay(1);
408 t = readl(reg); 609 t = readl(reg);
409 if (PCI_MASTER_EMPTY & t) { 610 if (PCI_MASTER_EMPTY & t) {
410 break; 611 break;
411 } 612 }
412 } 613 }
413 if (!(PCI_MASTER_EMPTY & t)) { 614 if (!(PCI_MASTER_EMPTY & t)) {
414 printk(KERN_ERR DRV_NAME "PCI master won't flush\n"); 615 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
415 rc = 1; /* broken HW? */ 616 rc = 1;
416 goto done; 617 goto done;
417 } 618 }
418 619
@@ -425,39 +626,398 @@ static int mv_master_reset(void __iomem *mmio_base)
425 } while (!(GLOB_SFT_RST & t) && (i-- > 0)); 626 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
426 627
427 if (!(GLOB_SFT_RST & t)) { 628 if (!(GLOB_SFT_RST & t)) {
428 printk(KERN_ERR DRV_NAME "can't set global reset\n"); 629 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
429 rc = 1; /* broken HW? */ 630 rc = 1;
430 goto done; 631 goto done;
431 } 632 }
432 633
433 /* clear reset */ 634 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
434 i = 5; 635 i = 5;
435 do { 636 do {
436 writel(t & ~GLOB_SFT_RST, reg); 637 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
437 t = readl(reg); 638 t = readl(reg);
438 udelay(1); 639 udelay(1);
439 } while ((GLOB_SFT_RST & t) && (i-- > 0)); 640 } while ((GLOB_SFT_RST & t) && (i-- > 0));
440 641
441 if (GLOB_SFT_RST & t) { 642 if (GLOB_SFT_RST & t) {
442 printk(KERN_ERR DRV_NAME "can't clear global reset\n"); 643 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
443 rc = 1; /* broken HW? */ 644 rc = 1;
444 } 645 }
445 646done:
446 done:
447 VPRINTK("EXIT, rc = %i\n", rc);
448 return rc; 647 return rc;
449} 648}
450 649
451static void mv_err_intr(struct ata_port *ap) 650/**
651 * mv_host_stop - Host specific cleanup/stop routine.
652 * @host_set: host data structure
653 *
654 * Disable ints, cleanup host memory, call general purpose
655 * host_stop.
656 *
657 * LOCKING:
658 * Inherited from caller.
659 */
660static void mv_host_stop(struct ata_host_set *host_set)
452{ 661{
453 void __iomem *port_mmio; 662 struct mv_host_priv *hpriv = host_set->private_data;
454 u32 edma_err_cause, serr = 0; 663 struct pci_dev *pdev = to_pci_dev(host_set->dev);
664
665 if (hpriv->hp_flags & MV_HP_FLAG_MSI) {
666 pci_disable_msi(pdev);
667 } else {
668 pci_intx(pdev, 0);
669 }
670 kfree(hpriv);
671 ata_host_stop(host_set);
672}
673
674/**
675 * mv_port_start - Port specific init/start routine.
676 * @ap: ATA channel to manipulate
677 *
678 * Allocate and point to DMA memory, init port private memory,
679 * zero indices.
680 *
681 * LOCKING:
682 * Inherited from caller.
683 */
684static int mv_port_start(struct ata_port *ap)
685{
686 struct device *dev = ap->host_set->dev;
687 struct mv_port_priv *pp;
688 void __iomem *port_mmio = mv_ap_base(ap);
689 void *mem;
690 dma_addr_t mem_dma;
691
692 pp = kmalloc(sizeof(*pp), GFP_KERNEL);
693 if (!pp) {
694 return -ENOMEM;
695 }
696 memset(pp, 0, sizeof(*pp));
697
698 mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
699 GFP_KERNEL);
700 if (!mem) {
701 kfree(pp);
702 return -ENOMEM;
703 }
704 memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
705
706 /* First item in chunk of DMA memory:
707 * 32-slot command request table (CRQB), 32 bytes each in size
708 */
709 pp->crqb = mem;
710 pp->crqb_dma = mem_dma;
711 mem += MV_CRQB_Q_SZ;
712 mem_dma += MV_CRQB_Q_SZ;
713
714 /* Second item:
715 * 32-slot command response table (CRPB), 8 bytes each in size
716 */
717 pp->crpb = mem;
718 pp->crpb_dma = mem_dma;
719 mem += MV_CRPB_Q_SZ;
720 mem_dma += MV_CRPB_Q_SZ;
721
722 /* Third item:
723 * Table of scatter-gather descriptors (ePRD), 16 bytes each
724 */
725 pp->sg_tbl = mem;
726 pp->sg_tbl_dma = mem_dma;
727
728 writelfl(EDMA_CFG_Q_DEPTH | EDMA_CFG_RD_BRST_EXT |
729 EDMA_CFG_WR_BUFF_LEN, port_mmio + EDMA_CFG_OFS);
730
731 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
732 writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
733 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
734
735 writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
736 writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
737
738 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
739 writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
740 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
741
742 pp->req_producer = pp->rsp_consumer = 0;
743
744 /* Don't turn on EDMA here...do it before DMA commands only. Else
745 * we'll be unable to send non-data, PIO, etc due to restricted access
746 * to shadow regs.
747 */
748 ap->private_data = pp;
749 return 0;
750}
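
mv_port_start() carves three tables out of a single coherent allocation, advancing the CPU pointer and the bus address in lockstep so each table keeps a matching DMA handle. A user-space analogue of the carving, where malloc stands in for dma_alloc_coherent and the bus address is faked so only the offsets matter:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define CRQB_Q_SZ  1024
    #define CRPB_Q_SZ   256
    #define SG_TBL_SZ  2816

    int main(void)
    {
        size_t total = CRQB_Q_SZ + CRPB_Q_SZ + SG_TBL_SZ;
        uint8_t *mem = calloc(1, total);
        uint64_t mem_dma = 0x10000000ULL;   /* pretend bus address */

        if (!mem)
            return 1;

        uint8_t *crqb = mem;                /* first: request queue */
        uint64_t crqb_dma = mem_dma;
        mem += CRQB_Q_SZ;  mem_dma += CRQB_Q_SZ;

        uint8_t *crpb = mem;                /* second: response queue */
        uint64_t crpb_dma = mem_dma;
        mem += CRPB_Q_SZ;  mem_dma += CRPB_Q_SZ;

        uint8_t *sg_tbl = mem;              /* third: ePRD table */
        uint64_t sg_dma = mem_dma;

        printf("crqb @%p/0x%llx crpb @%p/0x%llx sg @%p/0x%llx\n",
               (void *)crqb, (unsigned long long)crqb_dma,
               (void *)crpb, (unsigned long long)crpb_dma,
               (void *)sg_tbl, (unsigned long long)sg_dma);
        free(crqb);                         /* crqb is the region base */
        return 0;
    }
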
751
752/**
753 * mv_port_stop - Port specific cleanup/stop routine.
754 * @ap: ATA channel to manipulate
755 *
756 * Stop DMA, cleanup port memory.
757 *
758 * LOCKING:
759 * This routine uses the host_set lock to protect the DMA stop.
760 */
761static void mv_port_stop(struct ata_port *ap)
762{
763 struct device *dev = ap->host_set->dev;
764 struct mv_port_priv *pp = ap->private_data;
765 unsigned long flags;
766
767 spin_lock_irqsave(&ap->host_set->lock, flags);
768 mv_stop_dma(ap);
769 spin_unlock_irqrestore(&ap->host_set->lock, flags);
770
771 ap->private_data = NULL;
772 dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
773 kfree(pp);
774}
775
776/**
777 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
778 * @qc: queued command whose SG list to source from
779 *
780 * Populate the SG list and mark the last entry.
781 *
782 * LOCKING:
783 * Inherited from caller.
784 */
785static void mv_fill_sg(struct ata_queued_cmd *qc)
786{
787 struct mv_port_priv *pp = qc->ap->private_data;
788 unsigned int i;
789
790 for (i = 0; i < qc->n_elem; i++) {
791 u32 sg_len;
792 dma_addr_t addr;
793
794 addr = sg_dma_address(&qc->sg[i]);
795 sg_len = sg_dma_len(&qc->sg[i]);
796
797 pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
798 pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
799 assert(0 == (sg_len & ~MV_DMA_BOUNDARY));
800 pp->sg_tbl[i].flags_size = cpu_to_le32(sg_len);
801 }
802 if (0 < qc->n_elem) {
803 pp->sg_tbl[qc->n_elem - 1].flags_size |= EPRD_FLAG_END_OF_TBL;
804 }
805}
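
Each ePRD entry splits the 64-bit bus address across two 32-bit words, stores the segment length in flags_size, and the final entry gets EPRD_FLAG_END_OF_TBL so the EDMA engine knows where the table ends. A sketch of the fill loop over plain (addr, len) pairs; the kernel's cpu_to_le32 byte-swapping is omitted here, and the sample addresses are arbitrary:

    #include <stdint.h>
    #include <stdio.h>

    #define EPRD_FLAG_END_OF_TBL  (1u << 31)

    struct mv_sg {
        uint32_t addr;        /* bus address, low 32 bits */
        uint32_t flags_size;  /* length + flags */
        uint32_t addr_hi;     /* bus address, high 32 bits */
        uint32_t reserved;
    };

    static void fill_sg(struct mv_sg *tbl, const uint64_t *addrs,
                        const uint32_t *lens, unsigned n)
    {
        for (unsigned i = 0; i < n; i++) {
            tbl[i].addr       = (uint32_t)addrs[i];
            tbl[i].addr_hi    = (uint32_t)(addrs[i] >> 32);
            tbl[i].flags_size = lens[i];  /* each within 64 KB boundary */
        }
        if (n)
            tbl[n - 1].flags_size |= EPRD_FLAG_END_OF_TBL;
    }

    int main(void)
    {
        struct mv_sg tbl[2];
        uint64_t addrs[] = { 0x100000, 0x200000 };
        uint32_t lens[]  = { 0x1000, 0x2000 };

        fill_sg(tbl, addrs, lens, 2);
        printf("last flags_size = 0x%08x\n", tbl[1].flags_size);
        return 0;
    }
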
806
807static inline unsigned mv_inc_q_index(unsigned *index)
808{
809 *index = (*index + 1) & MV_MAX_Q_DEPTH_MASK;
810 return *index;
811}
812
813static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last)
814{
815 *cmdw = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
816 (last ? CRQB_CMD_LAST : 0);
817}
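
Two small invariants worth spelling out: the ring index wraps with a power-of-two mask (31), so incrementing never needs a modulo, and each packed CRQB command word carries the register data in the low byte, the shadow-register address starting at bit 8, the fixed control/select field at bits 11-12, and the "last" flag in bit 15. A quick sketch of both, using the constants from the enum above (0xc8 is READ DMA and register 7 is the command register, shown only as an example):

    #include <stdint.h>
    #include <stdio.h>

    #define MV_MAX_Q_DEPTH       32
    #define MV_MAX_Q_DEPTH_MASK  (MV_MAX_Q_DEPTH - 1)

    #define CRQB_CMD_ADDR_SHIFT  8
    #define CRQB_CMD_CS          (0x2 << 11)
    #define CRQB_CMD_LAST        (1 << 15)

    static unsigned inc_q_index(unsigned *index)
    {
        *index = (*index + 1) & MV_MAX_Q_DEPTH_MASK;  /* 31 -> 0 wrap */
        return *index;
    }

    static uint16_t pack_cmd(uint8_t data, uint8_t addr, unsigned last)
    {
        return data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
               (last ? CRQB_CMD_LAST : 0);
    }

    int main(void)
    {
        unsigned idx = 31;

        printf("wrap: 31 -> %u\n", inc_q_index(&idx));
        /* 0xc8 (READ DMA) to register 7, last word -> 0x97c8 */
        printf("cmd word = 0x%04x\n", pack_cmd(0xc8, 7, 1));
        return 0;
    }
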
455 818
456 /* bug here b/c we got an err int on a port we don't know about, 819/**
457 * so there's no way to clear it 820 * mv_qc_prep - Host specific command preparation.
821 * @qc: queued command to prepare
822 *
823 * This routine simply redirects to the general purpose routine
824 * if command is not DMA. Else, it handles prep of the CRQB
825 * (command request block), does some sanity checking, and calls
826 * the SG load routine.
827 *
828 * LOCKING:
829 * Inherited from caller.
830 */
831static void mv_qc_prep(struct ata_queued_cmd *qc)
832{
833 struct ata_port *ap = qc->ap;
834 struct mv_port_priv *pp = ap->private_data;
835 u16 *cw;
836 struct ata_taskfile *tf;
837 u16 flags = 0;
838
839 if (ATA_PROT_DMA != qc->tf.protocol) {
840 return;
841 }
842
843 /* the req producer index should be the same as we remember it */
844 assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
845 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
846 pp->req_producer);
847
848 /* Fill in command request block
458 */ 849 */
459 BUG_ON(NULL == ap); 850 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
460 port_mmio = mv_ap_base(ap); 851 flags |= CRQB_FLAG_READ;
852 }
853 assert(MV_MAX_Q_DEPTH > qc->tag);
854 flags |= qc->tag << CRQB_TAG_SHIFT;
855
856 pp->crqb[pp->req_producer].sg_addr =
857 cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
858 pp->crqb[pp->req_producer].sg_addr_hi =
859 cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
860 pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags);
861
862 cw = &pp->crqb[pp->req_producer].ata_cmd[0];
863 tf = &qc->tf;
864
 865 /* Sadly, the CRQB cannot accommodate all registers--there are
866 * only 11 bytes...so we must pick and choose required
867 * registers based on the command. So, we drop feature and
868 * hob_feature for [RW] DMA commands, but they are needed for
869 * NCQ. NCQ will drop hob_nsect.
870 */
871 switch (tf->command) {
872 case ATA_CMD_READ:
873 case ATA_CMD_READ_EXT:
874 case ATA_CMD_WRITE:
875 case ATA_CMD_WRITE_EXT:
876 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
877 break;
878#ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */
879 case ATA_CMD_FPDMA_READ:
880 case ATA_CMD_FPDMA_WRITE:
881 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
882 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
883 break;
884#endif /* FIXME: remove this line when NCQ added */
885 default:
886 /* The only other commands EDMA supports in non-queued and
887 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
888 * of which are defined/used by Linux. If we get here, this
889 * driver needs work.
890 *
891 * FIXME: modify libata to give qc_prep a return value and
892 * return error here.
893 */
894 BUG_ON(tf->command);
895 break;
896 }
897 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
898 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
899 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
900 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
901 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
902 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
903 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
904 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
905 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
906
907 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) {
908 return;
909 }
910 mv_fill_sg(qc);
911}
912
913/**
914 * mv_qc_issue - Initiate a command to the host
915 * @qc: queued command to start
916 *
917 * This routine simply redirects to the general purpose routine
918 * if command is not DMA. Else, it sanity checks our local
919 * caches of the request producer/consumer indices then enables
920 * DMA and bumps the request producer index.
921 *
922 * LOCKING:
923 * Inherited from caller.
924 */
925static int mv_qc_issue(struct ata_queued_cmd *qc)
926{
927 void __iomem *port_mmio = mv_ap_base(qc->ap);
928 struct mv_port_priv *pp = qc->ap->private_data;
929 u32 in_ptr;
930
931 if (ATA_PROT_DMA != qc->tf.protocol) {
932 /* We're about to send a non-EDMA capable command to the
933 * port. Turn off EDMA so there won't be problems accessing
934 * shadow block, etc registers.
935 */
936 mv_stop_dma(qc->ap);
937 return ata_qc_issue_prot(qc);
938 }
939
940 in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
941
942 /* the req producer index should be the same as we remember it */
943 assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
944 pp->req_producer);
945 /* until we do queuing, the queue should be empty at this point */
946 assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
947 ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
948 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
949
950 mv_inc_q_index(&pp->req_producer); /* now incr producer index */
951
952 mv_start_dma(port_mmio, pp);
953
954 /* and write the request in pointer to kick the EDMA to life */
955 in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
956 in_ptr |= pp->req_producer << EDMA_REQ_Q_PTR_SHIFT;
957 writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
958
959 return 0;
960}
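
The final register write in mv_qc_issue() packs the new producer index into bits [9:5] of the request-queue in-pointer register while preserving the queue base address in the upper bits. Worked out for a producer index of 3 (the readback value is made up for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define EDMA_REQ_Q_BASE_LO_MASK  0xfffffc00u
    #define EDMA_REQ_Q_PTR_SHIFT     5

    int main(void)
    {
        uint32_t in_ptr = 0x1ff00040;   /* pretend register readback */
        unsigned producer = 3;

        in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;           /* keep base bits */
        in_ptr |= producer << EDMA_REQ_Q_PTR_SHIFT;  /* 3 << 5 == 0x60 */

        printf("in_ptr = 0x%08x\n", in_ptr);         /* 0x1ff00060 */
        return 0;
    }
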
961
962/**
963 * mv_get_crpb_status - get status from most recently completed cmd
964 * @ap: ATA channel to manipulate
965 *
966 * This routine is for use when the port is in DMA mode, when it
967 * will be using the CRPB (command response block) method of
968 * returning command completion information. We assert indices
969 * are good, grab status, and bump the response consumer index to
970 * prove that we're up to date.
971 *
972 * LOCKING:
973 * Inherited from caller.
974 */
975static u8 mv_get_crpb_status(struct ata_port *ap)
976{
977 void __iomem *port_mmio = mv_ap_base(ap);
978 struct mv_port_priv *pp = ap->private_data;
979 u32 out_ptr;
980
981 out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
982
983 /* the response consumer index should be the same as we remember it */
984 assert(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
985 pp->rsp_consumer);
986
987 /* increment our consumer index... */
988 pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);
989
990 /* and, until we do NCQ, there should only be 1 CRPB waiting */
991 assert(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
992 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
993 pp->rsp_consumer);
994
995 /* write out our inc'd consumer index so EDMA knows we're caught up */
996 out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
997 out_ptr |= pp->rsp_consumer << EDMA_RSP_Q_PTR_SHIFT;
998 writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
999
1000 /* Return ATA status register for completed CRPB */
1001 return (pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT);
1002}
1003
1004/**
1005 * mv_err_intr - Handle error interrupts on the port
1006 * @ap: ATA channel to manipulate
1007 *
1008 * In most cases, just clear the interrupt and move on. However,
1009 * some cases require an eDMA reset, which is done right before
1010 * the COMRESET in mv_phy_reset(). The SERR case requires a
1011 * clear of pending errors in the SATA SERROR register. Finally,
1012 * if the port disabled DMA, update our cached copy to match.
1013 *
1014 * LOCKING:
1015 * Inherited from caller.
1016 */
1017static void mv_err_intr(struct ata_port *ap)
1018{
1019 void __iomem *port_mmio = mv_ap_base(ap);
1020 u32 edma_err_cause, serr = 0;
461 1021
462 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1022 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
463 1023
@@ -465,8 +1025,12 @@ static void mv_err_intr(struct ata_port *ap)
465 serr = scr_read(ap, SCR_ERROR); 1025 serr = scr_read(ap, SCR_ERROR);
466 scr_write_flush(ap, SCR_ERROR, serr); 1026 scr_write_flush(ap, SCR_ERROR, serr);
467 } 1027 }
468 DPRINTK("port %u error; EDMA err cause: 0x%08x SERR: 0x%08x\n", 1028 if (EDMA_ERR_SELF_DIS & edma_err_cause) {
469 ap->port_no, edma_err_cause, serr); 1029 struct mv_port_priv *pp = ap->private_data;
1030 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1031 }
1032 DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
1033 "SERR: 0x%08x\n", ap->id, edma_err_cause, serr);
470 1034
471 /* Clear EDMA now that SERR cleanup done */ 1035 /* Clear EDMA now that SERR cleanup done */
472 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1036 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
@@ -477,7 +1041,21 @@ static void mv_err_intr(struct ata_port *ap)
477 } 1041 }
478} 1042}
479 1043
480/* Handle any outstanding interrupts in a single SATAHC 1044/**
1045 * mv_host_intr - Handle all interrupts on the given host controller
1046 * @host_set: host specific structure
1047 * @relevant: port error bits relevant to this host controller
1048 * @hc: which host controller we're to look at
1049 *
1050 * Read then write clear the HC interrupt status then walk each
1051 * port connected to the HC and see if it needs servicing. Port
1052 * success ints are reported in the HC interrupt status reg, the
1053 * port error ints are reported in the higher level main
1054 * interrupt status register and thus are passed in via the
1055 * 'relevant' argument.
1056 *
1057 * LOCKING:
1058 * Inherited from caller.
481 */ 1059 */
482static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, 1060static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
483 unsigned int hc) 1061 unsigned int hc)
@@ -487,8 +1065,8 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
487 struct ata_port *ap; 1065 struct ata_port *ap;
488 struct ata_queued_cmd *qc; 1066 struct ata_queued_cmd *qc;
489 u32 hc_irq_cause; 1067 u32 hc_irq_cause;
490 int shift, port, port0, hard_port; 1068 int shift, port, port0, hard_port, handled;
491 u8 ata_status; 1069 u8 ata_status = 0;
492 1070
493 if (hc == 0) { 1071 if (hc == 0) {
494 port0 = 0; 1072 port0 = 0;
@@ -499,7 +1077,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
499 /* we'll need the HC success int register in most cases */ 1077 /* we'll need the HC success int register in most cases */
500 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS); 1078 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
501 if (hc_irq_cause) { 1079 if (hc_irq_cause) {
502 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS); 1080 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
503 } 1081 }
504 1082
505 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n", 1083 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
@@ -508,35 +1086,38 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
508 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) { 1086 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
509 ap = host_set->ports[port]; 1087 ap = host_set->ports[port];
510 hard_port = port & MV_PORT_MASK; /* range 0-3 */ 1088 hard_port = port & MV_PORT_MASK; /* range 0-3 */
511 ata_status = 0xffU; 1089 handled = 0; /* ensure ata_status is set if handled++ */
512 1090
513 if (((CRBP_DMA_DONE | DEV_IRQ) << hard_port) & hc_irq_cause) { 1091 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
514 BUG_ON(NULL == ap); 1092 /* new CRPB on the queue; just one at a time until NCQ
515 /* rcv'd new resp, basic DMA complete, or ATA IRQ */ 1093 */
516 /* This is needed to clear the ATA INTRQ. 1094 ata_status = mv_get_crpb_status(ap);
517 * FIXME: don't read the status reg in EDMA mode! 1095 handled++;
1096 } else if ((DEV_IRQ << hard_port) & hc_irq_cause) {
1097 /* received ATA IRQ; read the status reg to clear INTRQ
518 */ 1098 */
519 ata_status = readb((void __iomem *) 1099 ata_status = readb((void __iomem *)
520 ap->ioaddr.status_addr); 1100 ap->ioaddr.status_addr);
1101 handled++;
521 } 1102 }
522 1103
523 shift = port * 2; 1104 shift = port << 1; /* (port * 2) */
524 if (port >= MV_PORTS_PER_HC) { 1105 if (port >= MV_PORTS_PER_HC) {
525 shift++; /* skip bit 8 in the HC Main IRQ reg */ 1106 shift++; /* skip bit 8 in the HC Main IRQ reg */
526 } 1107 }
527 if ((PORT0_ERR << shift) & relevant) { 1108 if ((PORT0_ERR << shift) & relevant) {
528 mv_err_intr(ap); 1109 mv_err_intr(ap);
529 /* FIXME: smart to OR in ATA_ERR? */ 1110 /* OR in ATA_ERR to ensure libata knows we took one */
530 ata_status = readb((void __iomem *) 1111 ata_status = readb((void __iomem *)
531 ap->ioaddr.status_addr) | ATA_ERR; 1112 ap->ioaddr.status_addr) | ATA_ERR;
1113 handled++;
532 } 1114 }
533 1115
534 if (ap) { 1116 if (handled && ap) {
535 qc = ata_qc_from_tag(ap, ap->active_tag); 1117 qc = ata_qc_from_tag(ap, ap->active_tag);
536 if (NULL != qc) { 1118 if (NULL != qc) {
537 VPRINTK("port %u IRQ found for qc, " 1119 VPRINTK("port %u IRQ found for qc, "
538 "ata_status 0x%x\n", port,ata_status); 1120 "ata_status 0x%x\n", port,ata_status);
539 BUG_ON(0xffU == ata_status);
540 /* mark qc status appropriately */ 1121 /* mark qc status appropriately */
541 ata_qc_complete(qc, ata_status); 1122 ata_qc_complete(qc, ata_status);
542 } 1123 }
@@ -545,17 +1126,30 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
545 VPRINTK("EXIT\n"); 1126 VPRINTK("EXIT\n");
546} 1127}
547 1128
1129/**
1130 * mv_interrupt -
1131 * @irq: unused
1132 * @dev_instance: private data; in this case the host structure
1133 * @regs: unused
1134 *
 1135 * Read the read-only register to determine if any host
1136 * controllers have pending interrupts. If so, call lower level
1137 * routine to handle. Also check for PCI errors which are only
1138 * reported here.
1139 *
1140 * LOCKING:
1141 * This routine holds the host_set lock while processing pending
1142 * interrupts.
1143 */
548static irqreturn_t mv_interrupt(int irq, void *dev_instance, 1144static irqreturn_t mv_interrupt(int irq, void *dev_instance,
549 struct pt_regs *regs) 1145 struct pt_regs *regs)
550{ 1146{
551 struct ata_host_set *host_set = dev_instance; 1147 struct ata_host_set *host_set = dev_instance;
552 unsigned int hc, handled = 0, n_hcs; 1148 unsigned int hc, handled = 0, n_hcs;
553 void __iomem *mmio; 1149 void __iomem *mmio = host_set->mmio_base;
554 u32 irq_stat; 1150 u32 irq_stat;
555 1151
556 mmio = host_set->mmio_base;
557 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS); 1152 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
558 n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
559 1153
560 /* check the cases where we either have nothing pending or have read 1154 /* check the cases where we either have nothing pending or have read
561 * a bogus register value which can indicate HW removal or PCI fault 1155 * a bogus register value which can indicate HW removal or PCI fault
@@ -564,64 +1158,105 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance,
564 return IRQ_NONE; 1158 return IRQ_NONE;
565 } 1159 }
566 1160
1161 n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
567 spin_lock(&host_set->lock); 1162 spin_lock(&host_set->lock);
568 1163
569 for (hc = 0; hc < n_hcs; hc++) { 1164 for (hc = 0; hc < n_hcs; hc++) {
570 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT)); 1165 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
571 if (relevant) { 1166 if (relevant) {
572 mv_host_intr(host_set, relevant, hc); 1167 mv_host_intr(host_set, relevant, hc);
573 handled = 1; 1168 handled++;
574 } 1169 }
575 } 1170 }
576 if (PCI_ERR & irq_stat) { 1171 if (PCI_ERR & irq_stat) {
577 /* FIXME: these are all masked by default, but still need 1172 printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
578 * to recover from them properly. 1173 readl(mmio + PCI_IRQ_CAUSE_OFS));
579 */
580 }
581 1174
1175 DPRINTK("All regs @ PCI error\n");
1176 mv_dump_all_regs(mmio, -1, to_pci_dev(host_set->dev));
1177
1178 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1179 handled++;
1180 }
582 spin_unlock(&host_set->lock); 1181 spin_unlock(&host_set->lock);
583 1182
584 return IRQ_RETVAL(handled); 1183 return IRQ_RETVAL(handled);
585} 1184}
586 1185
1186/**
1187 * mv_check_err - Return the error shadow register to caller.
1188 * @ap: ATA channel to manipulate
1189 *
1190 * Marvell requires DMA to be stopped before accessing shadow
1191 * registers. So we do that, then return the needed register.
1192 *
1193 * LOCKING:
1194 * Inherited from caller. FIXME: protect mv_stop_dma with lock?
1195 */
1196static u8 mv_check_err(struct ata_port *ap)
1197{
1198 mv_stop_dma(ap); /* can't read shadow regs if DMA on */
1199 return readb((void __iomem *) ap->ioaddr.error_addr);
1200}
1201
1202/**
1203 * mv_phy_reset - Perform eDMA reset followed by COMRESET
1204 * @ap: ATA channel to manipulate
1205 *
1206 * Part of this is taken from __sata_phy_reset and modified to
1207 * not sleep since this routine gets called from interrupt level.
1208 *
1209 * LOCKING:
 1210 * Inherited from caller. This is coded to be safe to call at
1211 * interrupt level, i.e. it does not sleep.
1212 */
587static void mv_phy_reset(struct ata_port *ap) 1213static void mv_phy_reset(struct ata_port *ap)
588{ 1214{
589 void __iomem *port_mmio = mv_ap_base(ap); 1215 void __iomem *port_mmio = mv_ap_base(ap);
590 struct ata_taskfile tf; 1216 struct ata_taskfile tf;
591 struct ata_device *dev = &ap->device[0]; 1217 struct ata_device *dev = &ap->device[0];
592 u32 edma = 0, bdma; 1218 unsigned long timeout;
593 1219
594 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio); 1220 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
595 1221
596 edma = readl(port_mmio + EDMA_CMD_OFS); 1222 mv_stop_dma(ap);
597 if (EDMA_EN & edma) {
598 /* disable EDMA if active */
599 edma &= ~EDMA_EN;
600 writelfl(edma | EDMA_DS, port_mmio + EDMA_CMD_OFS);
601 udelay(1);
602 } else if (mv_port_bdma_capable(ap) &&
603 (bdma = readl(port_mmio + BDMA_CMD_OFS)) & BDMA_START) {
604 /* disable BDMA if active */
605 writelfl(bdma & ~BDMA_START, port_mmio + BDMA_CMD_OFS);
606 }
607 1223
608 writelfl(edma | ATA_RST, port_mmio + EDMA_CMD_OFS); 1224 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
609 udelay(25); /* allow reset propagation */ 1225 udelay(25); /* allow reset propagation */
610 1226
611 /* Spec never mentions clearing the bit. Marvell's driver does 1227 /* Spec never mentions clearing the bit. Marvell's driver does
612 * clear the bit, however. 1228 * clear the bit, however.
613 */ 1229 */
614 writelfl(edma & ~ATA_RST, port_mmio + EDMA_CMD_OFS); 1230 writelfl(0, port_mmio + EDMA_CMD_OFS);
615 1231
616 VPRINTK("Done. Now calling __sata_phy_reset()\n"); 1232 VPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
1233 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1234 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
617 1235
618 /* proceed to init communications via the scr_control reg */ 1236 /* proceed to init communications via the scr_control reg */
619 __sata_phy_reset(ap); 1237 scr_write_flush(ap, SCR_CONTROL, 0x301);
1238 mdelay(1);
1239 scr_write_flush(ap, SCR_CONTROL, 0x300);
1240 timeout = jiffies + (HZ * 1);
1241 do {
1242 mdelay(10);
1243 if ((scr_read(ap, SCR_STATUS) & 0xf) != 1)
1244 break;
1245 } while (time_before(jiffies, timeout));
620 1246
621 if (ap->flags & ATA_FLAG_PORT_DISABLED) { 1247 VPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
622 VPRINTK("Port disabled pre-sig. Exiting.\n"); 1248 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1249 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
1250
1251 if (sata_dev_present(ap)) {
1252 ata_port_probe(ap);
1253 } else {
1254 printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
1255 ap->id, scr_read(ap, SCR_STATUS));
1256 ata_port_disable(ap);
623 return; 1257 return;
624 } 1258 }
1259 ap->cbl = ATA_CBL_SATA;
625 1260
626 tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr); 1261 tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
627 tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr); 1262 tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
@@ -636,37 +1271,118 @@ static void mv_phy_reset(struct ata_port *ap)
636 VPRINTK("EXIT\n"); 1271 VPRINTK("EXIT\n");
637} 1272}
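
The reset loop above polls the low nibble of SStatus (the DET field) until it leaves state 1, and sata_dev_present() effectively checks for the value 3. The standard SATA encodings of that field, as a small decode helper (the sample SStatus value is arbitrary):

    #include <stdio.h>

    /* SStatus DET field, bits [3:0], per the SATA spec */
    static const char *sstatus_det(unsigned sstatus)
    {
        switch (sstatus & 0xf) {
        case 0:  return "no device detected";
        case 1:  return "device detected, no communication";
        case 3:  return "device detected, phy communication established";
        case 4:  return "phy offline";
        default: return "reserved";
        }
    }

    int main(void)
    {
        printf("0x123 -> %s\n", sstatus_det(0x123));
        return 0;
    }
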
638 1273
639static void mv_port_init(struct ata_ioports *port, unsigned long base) 1274/**
1275 * mv_eng_timeout - Routine called by libata when SCSI times out I/O
1276 * @ap: ATA channel to manipulate
1277 *
1278 * Intent is to clear all pending error conditions, reset the
1279 * chip/bus, fail the command, and move on.
1280 *
1281 * LOCKING:
1282 * This routine holds the host_set lock while failing the command.
1283 */
1284static void mv_eng_timeout(struct ata_port *ap)
1285{
1286 struct ata_queued_cmd *qc;
1287 unsigned long flags;
1288
1289 printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id);
1290 DPRINTK("All regs @ start of eng_timeout\n");
1291 mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no,
1292 to_pci_dev(ap->host_set->dev));
1293
1294 qc = ata_qc_from_tag(ap, ap->active_tag);
1295 printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
1296 ap->host_set->mmio_base, ap, qc, qc->scsicmd,
1297 &qc->scsicmd->cmnd);
1298
1299 mv_err_intr(ap);
1300 mv_phy_reset(ap);
1301
1302 if (!qc) {
1303 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
1304 ap->id);
1305 } else {
1306 /* hack alert! We cannot use the supplied completion
1307 * function from inside the ->eh_strategy_handler() thread.
1308 * libata is the only user of ->eh_strategy_handler() in
1309 * any kernel, so the default scsi_done() assumes it is
1310 * not being called from the SCSI EH.
1311 */
1312 spin_lock_irqsave(&ap->host_set->lock, flags);
1313 qc->scsidone = scsi_finish_command;
1314 ata_qc_complete(qc, ATA_ERR);
1315 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1316 }
1317}
1318
1319/**
1320 * mv_port_init - Perform some early initialization on a single port.
1321 * @port: libata data structure storing shadow register addresses
1322 * @port_mmio: base address of the port
1323 *
1324 * Initialize shadow register mmio addresses, clear outstanding
1325 * interrupts on the port, and unmask interrupts for the future
1326 * start of the port.
1327 *
1328 * LOCKING:
1329 * Inherited from caller.
1330 */
1331static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
640{ 1332{
641 /* PIO related setup */ 1333 unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS;
642 port->data_addr = base + SHD_PIO_DATA_OFS; 1334 unsigned serr_ofs;
643 port->error_addr = port->feature_addr = base + SHD_FEA_ERR_OFS; 1335
644 port->nsect_addr = base + SHD_SECT_CNT_OFS; 1336 /* PIO related setup
645 port->lbal_addr = base + SHD_LBA_L_OFS; 1337 */
646 port->lbam_addr = base + SHD_LBA_M_OFS; 1338 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
647 port->lbah_addr = base + SHD_LBA_H_OFS; 1339 port->error_addr =
648 port->device_addr = base + SHD_DEV_HD_OFS; 1340 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
649 port->status_addr = port->command_addr = base + SHD_CMD_STA_OFS; 1341 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
650 port->altstatus_addr = port->ctl_addr = base + SHD_CTL_AST_OFS; 1342 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
651 /* unused */ 1343 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
1344 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
1345 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
1346 port->status_addr =
1347 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
1348 /* special case: control/altstatus doesn't have ATA_REG_ address */
1349 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
1350
1351 /* unused: */
652 port->cmd_addr = port->bmdma_addr = port->scr_addr = 0; 1352 port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;
653 1353
1354 /* Clear any currently outstanding port interrupt conditions */
1355 serr_ofs = mv_scr_offset(SCR_ERROR);
1356 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
1357 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1358
654 /* unmask all EDMA error interrupts */ 1359 /* unmask all EDMA error interrupts */
655 writel(~0, (void __iomem *)base + EDMA_ERR_IRQ_MASK_OFS); 1360 writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
656 1361
657 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", 1362 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
658 readl((void __iomem *)base + EDMA_CFG_OFS), 1363 readl(port_mmio + EDMA_CFG_OFS),
659 readl((void __iomem *)base + EDMA_ERR_IRQ_CAUSE_OFS), 1364 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
660 readl((void __iomem *)base + EDMA_ERR_IRQ_MASK_OFS)); 1365 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
661} 1366}
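/* A sketch (not part of the driver) of the shadow-register layout the
 * assignments above rely on: the shadow block is one 32-bit word per
 * taskfile register, so every address is shd_base + 4 * ATA_REG_x,
 * assuming libata's standard numbering (ATA_REG_DATA == 0 through
 * ATA_REG_STATUS == 7).
 */
static inline unsigned long mv_shadow_reg_addr(unsigned long shd_base,
					       unsigned int ata_reg)
{
	return shd_base + (sizeof(u32) * ata_reg);	/* 4-byte stride */
}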
662 1367
1368/**
1369 * mv_host_init - Perform some early initialization of the host.
1370 * @probe_ent: early data struct representing the host
1371 *
1372 * If possible, do an early global reset of the host. Then do
1373 * our port init and clear/unmask all/relevant host interrupts.
1374 *
1375 * LOCKING:
1376 * Inherited from caller.
1377 */
663static int mv_host_init(struct ata_probe_ent *probe_ent) 1378static int mv_host_init(struct ata_probe_ent *probe_ent)
664{ 1379{
665 int rc = 0, n_hc, port, hc; 1380 int rc = 0, n_hc, port, hc;
666 void __iomem *mmio = probe_ent->mmio_base; 1381 void __iomem *mmio = probe_ent->mmio_base;
667 void __iomem *port_mmio; 1382 void __iomem *port_mmio;
668 1383
669 if (mv_master_reset(probe_ent->mmio_base)) { 1384 if ((MV_FLAG_GLBL_SFT_RST & probe_ent->host_flags) &&
1385 mv_global_soft_reset(probe_ent->mmio_base)) {
670 rc = 1; 1386 rc = 1;
671 goto done; 1387 goto done;
672 } 1388 }
@@ -676,17 +1392,27 @@ static int mv_host_init(struct ata_probe_ent *probe_ent)
676 1392
677 for (port = 0; port < probe_ent->n_ports; port++) { 1393 for (port = 0; port < probe_ent->n_ports; port++) {
678 port_mmio = mv_port_base(mmio, port); 1394 port_mmio = mv_port_base(mmio, port);
679 mv_port_init(&probe_ent->port[port], (unsigned long)port_mmio); 1395 mv_port_init(&probe_ent->port[port], port_mmio);
680 } 1396 }
681 1397
682 for (hc = 0; hc < n_hc; hc++) { 1398 for (hc = 0; hc < n_hc; hc++) {
683 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause=0x%08x\n", hc, 1399 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
684 readl(mv_hc_base(mmio, hc) + HC_CFG_OFS), 1400
685 readl(mv_hc_base(mmio, hc) + HC_IRQ_CAUSE_OFS)); 1401 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
1402 "(before clear)=0x%08x\n", hc,
1403 readl(hc_mmio + HC_CFG_OFS),
1404 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
1405
1406 /* Clear any currently outstanding hc interrupt conditions */
1407 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
686 } 1408 }
687 1409
688 writel(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS); 1410 /* Clear any currently outstanding host interrupt conditions */
689 writel(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS); 1411 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1412
1413 /* and unmask interrupt generation for host regs */
1414 writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
1415 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
690 1416
691 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x " 1417 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
692 "PCI int cause/mask=0x%08x/0x%08x\n", 1418 "PCI int cause/mask=0x%08x/0x%08x\n",
@@ -694,11 +1420,53 @@ static int mv_host_init(struct ata_probe_ent *probe_ent)
694 readl(mmio + HC_MAIN_IRQ_MASK_OFS), 1420 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
695 readl(mmio + PCI_IRQ_CAUSE_OFS), 1421 readl(mmio + PCI_IRQ_CAUSE_OFS),
696 readl(mmio + PCI_IRQ_MASK_OFS)); 1422 readl(mmio + PCI_IRQ_MASK_OFS));
697 1423done:
698 done:
699 return rc; 1424 return rc;
700} 1425}
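/* Summary of the interrupt bring-up order implemented above (outline
 * only, not code):
 *   1. per port:  ack SError and the EDMA error cause, unmask EDMA errors
 *   2. per HC:    ack HC_IRQ_CAUSE_OFS
 *   3. global:    ack PCI_IRQ_CAUSE_OFS, then unmask PCI and main IRQs last
 * Unmasking only after all stale cause bits are acked avoids taking an
 * interrupt for a condition that predates driver load.
 */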
701 1426
1427/**
1428 * mv_print_info - Dump key info to kernel log for perusal.
1429 * @probe_ent: early data struct representing the host
1430 *
1431 * FIXME: complete this.
1432 *
1433 * LOCKING:
1434 * Inherited from caller.
1435 */
1436static void mv_print_info(struct ata_probe_ent *probe_ent)
1437{
1438 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1439 struct mv_host_priv *hpriv = probe_ent->private_data;
1440 u8 rev_id, scc;
1441 const char *scc_s;
1442
1443 /* Use this to determine the HW stepping of the chip so we know
1444 * what errata to workaround
1445 */
1446 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
1447
1448 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
1449 if (scc == 0)
1450 scc_s = "SCSI";
1451 else if (scc == 0x01)
1452 scc_s = "RAID";
1453 else
1454 scc_s = "unknown";
1455
1456 printk(KERN_INFO DRV_NAME
1457 "(%s) %u slots %u ports %s mode IRQ via %s\n",
1458 pci_name(pdev), (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports,
1459 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
1460}
1461
1462/**
1463 * mv_init_one - handle a positive probe of a Marvell host
1464 * @pdev: PCI device found
1465 * @ent: PCI device ID entry for the matched host
1466 *
1467 * LOCKING:
1468 * Inherited from caller.
1469 */
702static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 1470static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
703{ 1471{
704 static int printed_version = 0; 1472 static int printed_version = 0;
@@ -706,16 +1474,12 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
706 struct mv_host_priv *hpriv; 1474 struct mv_host_priv *hpriv;
707 unsigned int board_idx = (unsigned int)ent->driver_data; 1475 unsigned int board_idx = (unsigned int)ent->driver_data;
708 void __iomem *mmio_base; 1476 void __iomem *mmio_base;
709 int pci_dev_busy = 0; 1477 int pci_dev_busy = 0, rc;
710 int rc;
711 1478
712 if (!printed_version++) { 1479 if (!printed_version++) {
713 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); 1480 printk(KERN_INFO DRV_NAME " version " DRV_VERSION "\n");
714 } 1481 }
715 1482
716 VPRINTK("ENTER for PCI Bus:Slot.Func=%u:%u.%u\n", pdev->bus->number,
717 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
718
719 rc = pci_enable_device(pdev); 1483 rc = pci_enable_device(pdev);
720 if (rc) { 1484 if (rc) {
721 return rc; 1485 return rc;
@@ -727,8 +1491,6 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
727 goto err_out; 1491 goto err_out;
728 } 1492 }
729 1493
730 pci_intx(pdev, 1);
731
732 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); 1494 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
733 if (probe_ent == NULL) { 1495 if (probe_ent == NULL) {
734 rc = -ENOMEM; 1496 rc = -ENOMEM;
@@ -739,8 +1501,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
739 probe_ent->dev = pci_dev_to_dev(pdev); 1501 probe_ent->dev = pci_dev_to_dev(pdev);
740 INIT_LIST_HEAD(&probe_ent->node); 1502 INIT_LIST_HEAD(&probe_ent->node);
741 1503
742 mmio_base = ioremap_nocache(pci_resource_start(pdev, MV_PRIMARY_BAR), 1504 mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0);
743 pci_resource_len(pdev, MV_PRIMARY_BAR));
744 if (mmio_base == NULL) { 1505 if (mmio_base == NULL) {
745 rc = -ENOMEM; 1506 rc = -ENOMEM;
746 goto err_out_free_ent; 1507 goto err_out_free_ent;
@@ -769,37 +1530,40 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
769 if (rc) { 1530 if (rc) {
770 goto err_out_hpriv; 1531 goto err_out_hpriv;
771 } 1532 }
772/* mv_print_info(probe_ent); */
773 1533
774 { 1534 /* Enable interrupts */
775 int b, w; 1535 if (pci_enable_msi(pdev) == 0) {
776 u32 dw[4]; /* hold a line of 16b */ 1536 hpriv->hp_flags |= MV_HP_FLAG_MSI;
777 VPRINTK("PCI config space:\n"); 1537 } else {
778 for (b = 0; b < 0x40; ) { 1538 pci_intx(pdev, 1);
779 for (w = 0; w < 4; w++) {
780 (void) pci_read_config_dword(pdev,b,&dw[w]);
781 b += sizeof(*dw);
782 }
783 VPRINTK("%08x %08x %08x %08x\n",
784 dw[0],dw[1],dw[2],dw[3]);
785 }
786 } 1539 }
787 1540
788 /* FIXME: check ata_device_add return value */ 1541 mv_dump_pci_cfg(pdev, 0x68);
789 ata_device_add(probe_ent); 1542 mv_print_info(probe_ent);
790 kfree(probe_ent); 1543
1544 if (ata_device_add(probe_ent) == 0) {
1545 rc = -ENODEV; /* No devices discovered */
1546 goto err_out_dev_add;
1547 }
791 1548
1549 kfree(probe_ent);
792 return 0; 1550 return 0;
793 1551
794 err_out_hpriv: 1552err_out_dev_add:
1553 if (MV_HP_FLAG_MSI & hpriv->hp_flags) {
1554 pci_disable_msi(pdev);
1555 } else {
1556 pci_intx(pdev, 0);
1557 }
1558err_out_hpriv:
795 kfree(hpriv); 1559 kfree(hpriv);
796 err_out_iounmap: 1560err_out_iounmap:
797 iounmap(mmio_base); 1561 pci_iounmap(pdev, mmio_base);
798 err_out_free_ent: 1562err_out_free_ent:
799 kfree(probe_ent); 1563 kfree(probe_ent);
800 err_out_regions: 1564err_out_regions:
801 pci_release_regions(pdev); 1565 pci_release_regions(pdev);
802 err_out: 1566err_out:
803 if (!pci_dev_busy) { 1567 if (!pci_dev_busy) {
804 pci_disable_device(pdev); 1568 pci_disable_device(pdev);
805 } 1569 }
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index c05653c7779d..9fa2535dd937 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -29,6 +29,8 @@
29 * NV-specific details such as register offsets, SATA phy location, 29 * NV-specific details such as register offsets, SATA phy location,
30 * hotplug info, etc. 30 * hotplug info, etc.
31 * 31 *
32 * 0.09
33 * - Fixed bug introduced by 0.08's MCP51 and MCP55 support.
32 * 34 *
33 * 0.08 35 * 0.08
34 * - Added support for MCP51 and MCP55. 36 * - Added support for MCP51 and MCP55.
@@ -132,9 +134,7 @@ enum nv_host_type
132 GENERIC, 134 GENERIC,
133 NFORCE2, 135 NFORCE2,
134 NFORCE3, 136 NFORCE3,
135 CK804, 137 CK804
136 MCP51,
137 MCP55
138}; 138};
139 139
140static struct pci_device_id nv_pci_tbl[] = { 140static struct pci_device_id nv_pci_tbl[] = {
@@ -153,13 +153,13 @@ static struct pci_device_id nv_pci_tbl[] = {
153 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2, 153 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2,
154 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 }, 154 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
155 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA, 155 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA,
156 PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP51 }, 156 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
157 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2, 157 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2,
158 PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP51 }, 158 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
159 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA, 159 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA,
160 PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 }, 160 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
161 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2, 161 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2,
162 PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 }, 162 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
163 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, 163 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
164 PCI_ANY_ID, PCI_ANY_ID, 164 PCI_ANY_ID, PCI_ANY_ID,
165 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC }, 165 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
@@ -405,7 +405,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
405 rc = -ENOMEM; 405 rc = -ENOMEM;
406 406
407 ppi = &nv_port_info; 407 ppi = &nv_port_info;
408 probe_ent = ata_pci_init_native_mode(pdev, &ppi); 408 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
409 if (!probe_ent) 409 if (!probe_ent)
410 goto err_out_regions; 410 goto err_out_regions;
411 411
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index 538ad727bd2e..def7e0d9dacb 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -438,11 +438,11 @@ static inline unsigned int pdc_host_intr( struct ata_port *ap,
438 break; 438 break;
439 439
440 default: 440 default:
441 ap->stats.idle_irq++; 441 ap->stats.idle_irq++;
442 break; 442 break;
443 } 443 }
444 444
445 return handled; 445 return handled;
446} 446}
447 447
448static void pdc_irq_clear(struct ata_port *ap) 448static void pdc_irq_clear(struct ata_port *ap)
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
new file mode 100644
index 000000000000..19857814d69f
--- /dev/null
+++ b/drivers/scsi/sata_sil24.c
@@ -0,0 +1,875 @@
1/*
2 * sata_sil24.c - Driver for Silicon Image 3124/3132 SATA-2 controllers
3 *
4 * Copyright 2005 Tejun Heo
5 *
6 * Based on preview driver from Silicon Image.
7 *
  8 * NOTE: No NCQ/ATAPI support yet.  The preview driver supported
  9 * neither NCQ nor ATAPI, and, unfortunately, I couldn't find out how to make
10 * those work. Enabling those shouldn't be difficult. Basic
11 * structure is all there (in libata-dev tree). If you have any
12 * information about this hardware, please contact me or linux-ide.
13 * Info is needed on...
14 *
15 * - How to issue tagged commands and turn on sactive on issue accordingly.
16 * - Where to put an ATAPI command and how to tell the device to send it.
17 * - How to enable/use 64bit.
18 *
19 * This program is free software; you can redistribute it and/or modify it
20 * under the terms of the GNU General Public License as published by the
21 * Free Software Foundation; either version 2, or (at your option) any
22 * later version.
23 *
24 * This program is distributed in the hope that it will be useful, but
25 * WITHOUT ANY WARRANTY; without even the implied warranty of
26 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
27 * General Public License for more details.
28 *
29 */
30
31#include <linux/kernel.h>
32#include <linux/module.h>
33#include <linux/pci.h>
34#include <linux/blkdev.h>
35#include <linux/delay.h>
36#include <linux/interrupt.h>
37#include <linux/dma-mapping.h>
38#include <scsi/scsi_host.h>
39#include "scsi.h"
40#include <linux/libata.h>
41#include <asm/io.h>
42
43#define DRV_NAME "sata_sil24"
44#define DRV_VERSION "0.22" /* Silicon Image's preview driver was 0.10 */
45
46/*
47 * Port request block (PRB) 32 bytes
48 */
49struct sil24_prb {
50 u16 ctrl;
51 u16 prot;
52 u32 rx_cnt;
53 u8 fis[6 * 4];
54};
55
56/*
57 * Scatter gather entry (SGE) 16 bytes
58 */
59struct sil24_sge {
60 u64 addr;
61 u32 cnt;
62 u32 flags;
63};
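
/* Hypothetical compile-time checks (not in this driver): the hardware
 * requires exactly the 32-byte PRB and 16-byte SGE the comments above
 * describe, so a packing mismatch here would corrupt every command.
 */
static inline void sil24_check_layout(void)
{
	BUILD_BUG_ON(sizeof(struct sil24_prb) != 32);
	BUILD_BUG_ON(sizeof(struct sil24_sge) != 16);
}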
64
65/*
66 * Port multiplier
67 */
68struct sil24_port_multiplier {
69 u32 diag;
70 u32 sactive;
71};
72
73enum {
74 /*
75 * Global controller registers (128 bytes @ BAR0)
76 */
77 /* 32 bit regs */
78 HOST_SLOT_STAT = 0x00, /* 32 bit slot stat * 4 */
79 HOST_CTRL = 0x40,
80 HOST_IRQ_STAT = 0x44,
81 HOST_PHY_CFG = 0x48,
82 HOST_BIST_CTRL = 0x50,
83 HOST_BIST_PTRN = 0x54,
84 HOST_BIST_STAT = 0x58,
85 HOST_MEM_BIST_STAT = 0x5c,
86 HOST_FLASH_CMD = 0x70,
87 /* 8 bit regs */
88 HOST_FLASH_DATA = 0x74,
89 HOST_TRANSITION_DETECT = 0x75,
90 HOST_GPIO_CTRL = 0x76,
91 HOST_I2C_ADDR = 0x78, /* 32 bit */
92 HOST_I2C_DATA = 0x7c,
93 HOST_I2C_XFER_CNT = 0x7e,
94 HOST_I2C_CTRL = 0x7f,
95
96 /* HOST_SLOT_STAT bits */
97 HOST_SSTAT_ATTN = (1 << 31),
98
99 /*
100 * Port registers
101 * (8192 bytes @ +0x0000, +0x2000, +0x4000 and +0x6000 @ BAR2)
102 */
103 PORT_REGS_SIZE = 0x2000,
104 PORT_PRB = 0x0000, /* (32 bytes PRB + 16 bytes SGEs * 6) * 31 (3968 bytes) */
105
106 PORT_PM = 0x0f80, /* 8 bytes PM * 16 (128 bytes) */
107 /* 32 bit regs */
108 PORT_CTRL_STAT = 0x1000, /* write: ctrl-set, read: stat */
109 PORT_CTRL_CLR = 0x1004, /* write: ctrl-clear */
110 PORT_IRQ_STAT = 0x1008, /* high: status, low: interrupt */
111 PORT_IRQ_ENABLE_SET = 0x1010, /* write: enable-set */
112 PORT_IRQ_ENABLE_CLR = 0x1014, /* write: enable-clear */
113 PORT_ACTIVATE_UPPER_ADDR= 0x101c,
114 PORT_EXEC_FIFO = 0x1020, /* command execution fifo */
115 PORT_CMD_ERR = 0x1024, /* command error number */
116 PORT_FIS_CFG = 0x1028,
117 PORT_FIFO_THRES = 0x102c,
118 /* 16 bit regs */
119 PORT_DECODE_ERR_CNT = 0x1040,
120 PORT_DECODE_ERR_THRESH = 0x1042,
121 PORT_CRC_ERR_CNT = 0x1044,
122 PORT_CRC_ERR_THRESH = 0x1046,
123 PORT_HSHK_ERR_CNT = 0x1048,
124 PORT_HSHK_ERR_THRESH = 0x104a,
125 /* 32 bit regs */
126 PORT_PHY_CFG = 0x1050,
127 PORT_SLOT_STAT = 0x1800,
128 PORT_CMD_ACTIVATE = 0x1c00, /* 64 bit cmd activate * 31 (248 bytes) */
129 PORT_EXEC_DIAG = 0x1e00, /* 32bit exec diag * 16 (64 bytes, 0-10 used on 3124) */
130 PORT_PSD_DIAG = 0x1e40, /* 32bit psd diag * 16 (64 bytes, 0-8 used on 3124) */
131 PORT_SCONTROL = 0x1f00,
132 PORT_SSTATUS = 0x1f04,
133 PORT_SERROR = 0x1f08,
134 PORT_SACTIVE = 0x1f0c,
135
136 /* PORT_CTRL_STAT bits */
137 PORT_CS_PORT_RST = (1 << 0), /* port reset */
138 PORT_CS_DEV_RST = (1 << 1), /* device reset */
139 PORT_CS_INIT = (1 << 2), /* port initialize */
140 PORT_CS_IRQ_WOC = (1 << 3), /* interrupt write one to clear */
141 PORT_CS_RESUME = (1 << 6), /* port resume */
142 PORT_CS_32BIT_ACTV = (1 << 10), /* 32-bit activation */
143 PORT_CS_PM_EN = (1 << 13), /* port multiplier enable */
144 PORT_CS_RDY = (1 << 31), /* port ready to accept commands */
145
146 /* PORT_IRQ_STAT/ENABLE_SET/CLR */
147 /* bits[11:0] are masked */
148 PORT_IRQ_COMPLETE = (1 << 0), /* command(s) completed */
149 PORT_IRQ_ERROR = (1 << 1), /* command execution error */
150 PORT_IRQ_PORTRDY_CHG = (1 << 2), /* port ready change */
151 PORT_IRQ_PWR_CHG = (1 << 3), /* power management change */
152 PORT_IRQ_PHYRDY_CHG = (1 << 4), /* PHY ready change */
153 PORT_IRQ_COMWAKE = (1 << 5), /* COMWAKE received */
154 PORT_IRQ_UNK_FIS = (1 << 6), /* Unknown FIS received */
155 PORT_IRQ_SDB_FIS = (1 << 11), /* SDB FIS received */
156
157 /* bits[27:16] are unmasked (raw) */
158 PORT_IRQ_RAW_SHIFT = 16,
159 PORT_IRQ_MASKED_MASK = 0x7ff,
160 PORT_IRQ_RAW_MASK = (0x7ff << PORT_IRQ_RAW_SHIFT),
161
162 /* ENABLE_SET/CLR specific, intr steering - 2 bit field */
163 PORT_IRQ_STEER_SHIFT = 30,
164 PORT_IRQ_STEER_MASK = (3 << PORT_IRQ_STEER_SHIFT),
165
166 /* PORT_CMD_ERR constants */
167 PORT_CERR_DEV = 1, /* Error bit in D2H Register FIS */
168 PORT_CERR_SDB = 2, /* Error bit in SDB FIS */
169 PORT_CERR_DATA = 3, /* Error in data FIS not detected by dev */
170 PORT_CERR_SEND = 4, /* Initial cmd FIS transmission failure */
171 PORT_CERR_INCONSISTENT = 5, /* Protocol mismatch */
172 PORT_CERR_DIRECTION = 6, /* Data direction mismatch */
173 PORT_CERR_UNDERRUN = 7, /* Ran out of SGEs while writing */
174 PORT_CERR_OVERRUN = 8, /* Ran out of SGEs while reading */
175 PORT_CERR_PKT_PROT = 11, /* DIR invalid in 1st PIO setup of ATAPI */
176 PORT_CERR_SGT_BOUNDARY = 16, /* PLD ecode 00 - SGT not on qword boundary */
177 PORT_CERR_SGT_TGTABRT = 17, /* PLD ecode 01 - target abort */
178 PORT_CERR_SGT_MSTABRT = 18, /* PLD ecode 10 - master abort */
179 PORT_CERR_SGT_PCIPERR = 19, /* PLD ecode 11 - PCI parity err while fetching SGT */
180 PORT_CERR_CMD_BOUNDARY = 24, /* ctrl[15:13] 001 - PRB not on qword boundary */
181 PORT_CERR_CMD_TGTABRT = 25, /* ctrl[15:13] 010 - target abort */
182 PORT_CERR_CMD_MSTABRT = 26, /* ctrl[15:13] 100 - master abort */
183 PORT_CERR_CMD_PCIPERR = 27, /* ctrl[15:13] 110 - PCI parity err while fetching PRB */
184 PORT_CERR_XFR_UNDEF = 32, /* PSD ecode 00 - undefined */
185 PORT_CERR_XFR_TGTABRT = 33, /* PSD ecode 01 - target abort */
186 PORT_CERR_XFR_MSGABRT = 34, /* PSD ecode 10 - master abort */
187	PORT_CERR_XFR_PCIPERR	= 35, /* PSD ecode 11 - PCI parity err during transfer */
188 PORT_CERR_SENDSERVICE = 36, /* FIS received while sending service */
189
190 /*
191 * Other constants
192 */
193 SGE_TRM = (1 << 31), /* Last SGE in chain */
194 PRB_SOFT_RST = (1 << 7), /* Soft reset request (ign BSY?) */
195
196 /* board id */
197 BID_SIL3124 = 0,
198 BID_SIL3132 = 1,
199 BID_SIL3131 = 2,
200
201 IRQ_STAT_4PORTS = 0xf,
202};
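
/* Sketch (not driver code) of the PORT_IRQ_STAT split described above:
 * the low bits hold the maskable interrupt status, and the same
 * conditions repeat, unmasked, starting at PORT_IRQ_RAW_SHIFT.
 */
static inline u32 sil24_irq_masked(u32 irq_stat)
{
	return irq_stat & PORT_IRQ_MASKED_MASK;
}

static inline u32 sil24_irq_raw(u32 irq_stat)
{
	return (irq_stat & PORT_IRQ_RAW_MASK) >> PORT_IRQ_RAW_SHIFT;
}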
203
204struct sil24_cmd_block {
205 struct sil24_prb prb;
206 struct sil24_sge sge[LIBATA_MAX_PRD];
207};
208
209/*
210 * ap->private_data
211 *
212 * The preview driver always returned 0 for status.  We instead cache
213 * the taskfile from the most recent interrupt and report status from it.
214 */
215struct sil24_port_priv {
216	struct sil24_cmd_block *cmd_block;	/* cmd block(s); currently one, see sil24_port_start() */
217 dma_addr_t cmd_block_dma; /* DMA base addr for them */
218 struct ata_taskfile tf; /* Cached taskfile registers */
219};
220
221/* ap->host_set->private_data */
222struct sil24_host_priv {
223 void *host_base; /* global controller control (128 bytes @BAR0) */
224 void *port_base; /* port registers (4 * 8192 bytes @BAR2) */
225};
226
227static u8 sil24_check_status(struct ata_port *ap);
228static u8 sil24_check_err(struct ata_port *ap);
229static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg);
230static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
231static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
232static void sil24_phy_reset(struct ata_port *ap);
233static void sil24_qc_prep(struct ata_queued_cmd *qc);
234static int sil24_qc_issue(struct ata_queued_cmd *qc);
235static void sil24_irq_clear(struct ata_port *ap);
236static void sil24_eng_timeout(struct ata_port *ap);
237static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
238static int sil24_port_start(struct ata_port *ap);
239static void sil24_port_stop(struct ata_port *ap);
240static void sil24_host_stop(struct ata_host_set *host_set);
241static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
242
243static struct pci_device_id sil24_pci_tbl[] = {
244 { 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
245 { 0x1095, 0x3132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3132 },
246 { 0x1095, 0x3131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
247 { 0x1095, 0x3531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
248 { } /* terminate list */
249};
250
251static struct pci_driver sil24_pci_driver = {
252 .name = DRV_NAME,
253 .id_table = sil24_pci_tbl,
254 .probe = sil24_init_one,
255 .remove = ata_pci_remove_one, /* safe? */
256};
257
258static Scsi_Host_Template sil24_sht = {
259 .module = THIS_MODULE,
260 .name = DRV_NAME,
261 .ioctl = ata_scsi_ioctl,
262 .queuecommand = ata_scsi_queuecmd,
263 .eh_strategy_handler = ata_scsi_error,
264 .can_queue = ATA_DEF_QUEUE,
265 .this_id = ATA_SHT_THIS_ID,
266 .sg_tablesize = LIBATA_MAX_PRD,
267 .max_sectors = ATA_MAX_SECTORS,
268 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
269 .emulated = ATA_SHT_EMULATED,
270 .use_clustering = ATA_SHT_USE_CLUSTERING,
271 .proc_name = DRV_NAME,
272 .dma_boundary = ATA_DMA_BOUNDARY,
273 .slave_configure = ata_scsi_slave_config,
274 .bios_param = ata_std_bios_param,
275 .ordered_flush = 1, /* NCQ not supported yet */
276};
277
278static struct ata_port_operations sil24_ops = {
279 .port_disable = ata_port_disable,
280
281 .check_status = sil24_check_status,
282 .check_altstatus = sil24_check_status,
283 .check_err = sil24_check_err,
284 .dev_select = ata_noop_dev_select,
285
286 .tf_read = sil24_tf_read,
287
288 .phy_reset = sil24_phy_reset,
289
290 .qc_prep = sil24_qc_prep,
291 .qc_issue = sil24_qc_issue,
292
293 .eng_timeout = sil24_eng_timeout,
294
295 .irq_handler = sil24_interrupt,
296 .irq_clear = sil24_irq_clear,
297
298 .scr_read = sil24_scr_read,
299 .scr_write = sil24_scr_write,
300
301 .port_start = sil24_port_start,
302 .port_stop = sil24_port_stop,
303 .host_stop = sil24_host_stop,
304};
305
306/*
307 * Use bits 30-31 of host_flags to encode available port numbers.
308 * Current maximum is 4.
309 */
310#define SIL24_NPORTS2FLAG(nports) ((((unsigned)(nports) - 1) & 0x3) << 30)
311#define SIL24_FLAG2NPORTS(flag) ((((flag) >> 30) & 0x3) + 1)
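
/* Worked example of the encoding above:
 *   SIL24_NPORTS2FLAG(4) == 0xc0000000   -- (4 - 1) & 0x3 == 3 in bits 30-31
 *   SIL24_NPORTS2FLAG(1) == 0x00000000
 *   SIL24_FLAG2NPORTS(0xc0000000) == ((3 & 0x3) + 1) == 4
 */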
312
313static struct ata_port_info sil24_port_info[] = {
314 /* sil_3124 */
315 {
316 .sht = &sil24_sht,
317 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
318 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
319 ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(4),
320 .pio_mask = 0x1f, /* pio0-4 */
321 .mwdma_mask = 0x07, /* mwdma0-2 */
322 .udma_mask = 0x3f, /* udma0-5 */
323 .port_ops = &sil24_ops,
324 },
325 /* sil_3132 */
326 {
327 .sht = &sil24_sht,
328 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
329 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
330 ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(2),
331 .pio_mask = 0x1f, /* pio0-4 */
332 .mwdma_mask = 0x07, /* mwdma0-2 */
333 .udma_mask = 0x3f, /* udma0-5 */
334 .port_ops = &sil24_ops,
335 },
336 /* sil_3131/sil_3531 */
337 {
338 .sht = &sil24_sht,
339 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
340 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
341 ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(1),
342 .pio_mask = 0x1f, /* pio0-4 */
343 .mwdma_mask = 0x07, /* mwdma0-2 */
344 .udma_mask = 0x3f, /* udma0-5 */
345 .port_ops = &sil24_ops,
346 },
347};
348
349static inline void sil24_update_tf(struct ata_port *ap)
350{
351 struct sil24_port_priv *pp = ap->private_data;
352 void *port = (void *)ap->ioaddr.cmd_addr;
353 struct sil24_prb *prb = port;
354
355 ata_tf_from_fis(prb->fis, &pp->tf);
356}
357
358static u8 sil24_check_status(struct ata_port *ap)
359{
360 struct sil24_port_priv *pp = ap->private_data;
361 return pp->tf.command;
362}
363
364static u8 sil24_check_err(struct ata_port *ap)
365{
366 struct sil24_port_priv *pp = ap->private_data;
367 return pp->tf.feature;
368}
369
370static int sil24_scr_map[] = {
371 [SCR_CONTROL] = 0,
372 [SCR_STATUS] = 1,
373 [SCR_ERROR] = 2,
374 [SCR_ACTIVE] = 3,
375};
376
377static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg)
378{
379 void *scr_addr = (void *)ap->ioaddr.scr_addr;
380 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
381 void *addr;
382 addr = scr_addr + sil24_scr_map[sc_reg] * 4;
383		return readl(addr);
384 }
385 return 0xffffffffU;
386}
387
388static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
389{
390 void *scr_addr = (void *)ap->ioaddr.scr_addr;
391 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
392 void *addr;
393 addr = scr_addr + sil24_scr_map[sc_reg] * 4;
394		writel(val, addr);
395 }
396}
397
398static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
399{
400 struct sil24_port_priv *pp = ap->private_data;
401 *tf = pp->tf;
402}
403
404static void sil24_phy_reset(struct ata_port *ap)
405{
406 __sata_phy_reset(ap);
407 /*
408 * No ATAPI yet. Just unconditionally indicate ATA device.
409 * If ATAPI device is attached, it will fail ATA_CMD_ID_ATA
410 * and libata core will ignore the device.
411 */
412 if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
413 ap->device[0].class = ATA_DEV_ATA;
414}
415
416static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
417 struct sil24_cmd_block *cb)
418{
419 struct scatterlist *sg = qc->sg;
420 struct sil24_sge *sge = cb->sge;
421 unsigned i;
422
423 for (i = 0; i < qc->n_elem; i++, sg++, sge++) {
424 sge->addr = cpu_to_le64(sg_dma_address(sg));
425 sge->cnt = cpu_to_le32(sg_dma_len(sg));
426		/* terminate the chain on the last SGE */
427		sge->flags = i < qc->n_elem - 1 ? 0 : cpu_to_le32(SGE_TRM);
428 }
429}
430
431static void sil24_qc_prep(struct ata_queued_cmd *qc)
432{
433 struct ata_port *ap = qc->ap;
434 struct sil24_port_priv *pp = ap->private_data;
435 struct sil24_cmd_block *cb = pp->cmd_block + qc->tag;
436 struct sil24_prb *prb = &cb->prb;
437
438 switch (qc->tf.protocol) {
439 case ATA_PROT_PIO:
440 case ATA_PROT_DMA:
441 case ATA_PROT_NODATA:
442 break;
443 default:
444 /* ATAPI isn't supported yet */
445 BUG();
446 }
447
448 ata_tf_to_fis(&qc->tf, prb->fis, 0);
449
450 if (qc->flags & ATA_QCFLAG_DMAMAP)
451 sil24_fill_sg(qc, cb);
452}
453
454static int sil24_qc_issue(struct ata_queued_cmd *qc)
455{
456 struct ata_port *ap = qc->ap;
457 void *port = (void *)ap->ioaddr.cmd_addr;
458 struct sil24_port_priv *pp = ap->private_data;
459 dma_addr_t paddr = pp->cmd_block_dma + qc->tag * sizeof(*pp->cmd_block);
460
461 writel((u32)paddr, port + PORT_CMD_ACTIVATE);
462 return 0;
463}
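
/* Speculation only, per the open questions in the header comment: the
 * register map describes PORT_CMD_ACTIVATE as "64 bit cmd activate * 31",
 * i.e. one 64-bit slot per command, so tagged (NCQ) issue would
 * presumably look like:
 *
 *	writel((u32)paddr, port + PORT_CMD_ACTIVATE + qc->tag * 8);
 *
 * This is an untested assumption, not something confirmed by docs.
 */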
464
465static void sil24_irq_clear(struct ata_port *ap)
466{
467 /* unused */
468}
469
470static int __sil24_reset_controller(void *port)
471{
472 int cnt;
473 u32 tmp;
474
475 /* Reset controller state. Is this correct? */
476 writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
477 readl(port + PORT_CTRL_STAT); /* sync */
478
479 /* Max ~100ms */
480 for (cnt = 0; cnt < 1000; cnt++) {
481 udelay(100);
482 tmp = readl(port + PORT_CTRL_STAT);
483 if (!(tmp & PORT_CS_DEV_RST))
484 break;
485 }
486
487 if (tmp & PORT_CS_DEV_RST)
488 return -1;
489 return 0;
490}
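
/* Generic shape of the poll loop above, as a hypothetical helper (not
 * in this driver): wait roughly @timeout_ms for @bits in @reg to clear.
 */
static int sil24_wait_clear(void *reg, u32 bits, int timeout_ms)
{
	int cnt;

	for (cnt = 0; cnt < timeout_ms * 10; cnt++) {
		if (!(readl(reg) & bits))
			return 0;
		udelay(100);	/* ten polls per millisecond */
	}
	return -1;
}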
491
492static void sil24_reset_controller(struct ata_port *ap)
493{
494 printk(KERN_NOTICE DRV_NAME
495 " ata%u: resetting controller...\n", ap->id);
496 if (__sil24_reset_controller((void *)ap->ioaddr.cmd_addr))
497 printk(KERN_ERR DRV_NAME
498 " ata%u: failed to reset controller\n", ap->id);
499}
500
501static void sil24_eng_timeout(struct ata_port *ap)
502{
503 struct ata_queued_cmd *qc;
504
505 qc = ata_qc_from_tag(ap, ap->active_tag);
506 if (!qc) {
507		printk(KERN_ERR "ata%u: BUG: timeout without command\n",
508 ap->id);
509 return;
510 }
511
512 /*
513 * hack alert! We cannot use the supplied completion
514 * function from inside the ->eh_strategy_handler() thread.
515 * libata is the only user of ->eh_strategy_handler() in
516 * any kernel, so the default scsi_done() assumes it is
517 * not being called from the SCSI EH.
518 */
519 printk(KERN_ERR "ata%u: command timeout\n", ap->id);
520 qc->scsidone = scsi_finish_command;
521 ata_qc_complete(qc, ATA_ERR);
522
523 sil24_reset_controller(ap);
524}
525
526static void sil24_error_intr(struct ata_port *ap, u32 slot_stat)
527{
528 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
529 struct sil24_port_priv *pp = ap->private_data;
530 void *port = (void *)ap->ioaddr.cmd_addr;
531 u32 irq_stat, cmd_err, sstatus, serror;
532
533 irq_stat = readl(port + PORT_IRQ_STAT);
534 writel(irq_stat, port + PORT_IRQ_STAT); /* clear irq */
535
536 if (!(irq_stat & PORT_IRQ_ERROR)) {
537 /* ignore non-completion, non-error irqs for now */
538 printk(KERN_WARNING DRV_NAME
539 "ata%u: non-error exception irq (irq_stat %x)\n",
540 ap->id, irq_stat);
541 return;
542 }
543
544 cmd_err = readl(port + PORT_CMD_ERR);
545 sstatus = readl(port + PORT_SSTATUS);
546 serror = readl(port + PORT_SERROR);
547 if (serror)
548 writel(serror, port + PORT_SERROR);
549
550 printk(KERN_ERR DRV_NAME " ata%u: error interrupt on port%d\n"
551 " stat=0x%x irq=0x%x cmd_err=%d sstatus=0x%x serror=0x%x\n",
552 ap->id, ap->port_no, slot_stat, irq_stat, cmd_err, sstatus, serror);
553
554 if (cmd_err == PORT_CERR_DEV || cmd_err == PORT_CERR_SDB) {
555 /*
556 * Device is reporting error, tf registers are valid.
557 */
558 sil24_update_tf(ap);
559 } else {
560 /*
561 * Other errors. libata currently doesn't have any
562 * mechanism to report these errors. Just turn on
563 * ATA_ERR.
564 */
565 pp->tf.command = ATA_ERR;
566 }
567
568 if (qc)
569 ata_qc_complete(qc, pp->tf.command);
570
571 sil24_reset_controller(ap);
572}
573
574static inline void sil24_host_intr(struct ata_port *ap)
575{
576 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
577 void *port = (void *)ap->ioaddr.cmd_addr;
578 u32 slot_stat;
579
580 slot_stat = readl(port + PORT_SLOT_STAT);
581 if (!(slot_stat & HOST_SSTAT_ATTN)) {
582 struct sil24_port_priv *pp = ap->private_data;
583 /*
584		 * !HOST_SSTAT_ATTN guarantees successful completion,
585 * so reading back tf registers is unnecessary for
586 * most commands. TODO: read tf registers for
587 * commands which require these values on successful
588 * completion (EXECUTE DEVICE DIAGNOSTIC, CHECK POWER,
589		 * DEVICE RESET and READ PORT MULTIPLIER; any more?).
590 */
591 sil24_update_tf(ap);
592
593 if (qc)
594 ata_qc_complete(qc, pp->tf.command);
595 } else
596 sil24_error_intr(ap, slot_stat);
597}
598
599static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
600{
601 struct ata_host_set *host_set = dev_instance;
602 struct sil24_host_priv *hpriv = host_set->private_data;
603 unsigned handled = 0;
604 u32 status;
605 int i;
606
607 status = readl(hpriv->host_base + HOST_IRQ_STAT);
608
609 if (status == 0xffffffff) {
610 printk(KERN_ERR DRV_NAME ": IRQ status == 0xffffffff, "
611 "PCI fault or device removal?\n");
612 goto out;
613 }
614
615 if (!(status & IRQ_STAT_4PORTS))
616 goto out;
617
618 spin_lock(&host_set->lock);
619
620 for (i = 0; i < host_set->n_ports; i++)
621 if (status & (1 << i)) {
622 struct ata_port *ap = host_set->ports[i];
623 if (ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
624				sil24_host_intr(ap);
625 handled++;
626 } else
627 printk(KERN_ERR DRV_NAME
628 ": interrupt from disabled port %d\n", i);
629 }
630
631 spin_unlock(&host_set->lock);
632 out:
633 return IRQ_RETVAL(handled);
634}
635
636static int sil24_port_start(struct ata_port *ap)
637{
638 struct device *dev = ap->host_set->dev;
639 struct sil24_port_priv *pp;
640 struct sil24_cmd_block *cb;
641 size_t cb_size = sizeof(*cb);
642 dma_addr_t cb_dma;
643
644 pp = kmalloc(sizeof(*pp), GFP_KERNEL);
645 if (!pp)
646 return -ENOMEM;
647 memset(pp, 0, sizeof(*pp));
648
649 pp->tf.command = ATA_DRDY;
650
651 cb = dma_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL);
652 if (!cb) {
653 kfree(pp);
654 return -ENOMEM;
655 }
656 memset(cb, 0, cb_size);
657
658 pp->cmd_block = cb;
659 pp->cmd_block_dma = cb_dma;
660
661 ap->private_data = pp;
662
663 return 0;
664}
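
/* Note: only a single command block is allocated above, which is fine
 * while at most one command is in flight (libata then always uses tag 0).
 * If NCQ lands, the allocation would presumably become one block per
 * hardware slot (31, per the PORT_PRB comment), e.g.:
 *
 *	size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
 *
 * where SIL24_MAX_CMDS is hypothetical and not defined in this driver.
 */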
665
666static void sil24_port_stop(struct ata_port *ap)
667{
668 struct device *dev = ap->host_set->dev;
669 struct sil24_port_priv *pp = ap->private_data;
670 size_t cb_size = sizeof(*pp->cmd_block);
671
672 dma_free_coherent(dev, cb_size, pp->cmd_block, pp->cmd_block_dma);
673 kfree(pp);
674}
675
676static void sil24_host_stop(struct ata_host_set *host_set)
677{
678 struct sil24_host_priv *hpriv = host_set->private_data;
679
680 iounmap(hpriv->host_base);
681 iounmap(hpriv->port_base);
682 kfree(hpriv);
683}
684
685static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
686{
687 static int printed_version = 0;
688 unsigned int board_id = (unsigned int)ent->driver_data;
689 struct ata_port_info *pinfo = &sil24_port_info[board_id];
690 struct ata_probe_ent *probe_ent = NULL;
691 struct sil24_host_priv *hpriv = NULL;
692 void *host_base = NULL, *port_base = NULL;
693 int i, rc;
694
695 if (!printed_version++)
696 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
697
698 rc = pci_enable_device(pdev);
699 if (rc)
700 return rc;
701
702 rc = pci_request_regions(pdev, DRV_NAME);
703 if (rc)
704 goto out_disable;
705
706 rc = -ENOMEM;
707 /* ioremap mmio registers */
708 host_base = ioremap(pci_resource_start(pdev, 0),
709 pci_resource_len(pdev, 0));
710 if (!host_base)
711 goto out_free;
712 port_base = ioremap(pci_resource_start(pdev, 2),
713 pci_resource_len(pdev, 2));
714 if (!port_base)
715 goto out_free;
716
717 /* allocate & init probe_ent and hpriv */
718 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
719 if (!probe_ent)
720 goto out_free;
721
722 hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
723 if (!hpriv)
724 goto out_free;
725
726 memset(probe_ent, 0, sizeof(*probe_ent));
727 probe_ent->dev = pci_dev_to_dev(pdev);
728 INIT_LIST_HEAD(&probe_ent->node);
729
730 probe_ent->sht = pinfo->sht;
731 probe_ent->host_flags = pinfo->host_flags;
732 probe_ent->pio_mask = pinfo->pio_mask;
733	probe_ent->udma_mask = pinfo->udma_mask;
	probe_ent->mwdma_mask = pinfo->mwdma_mask;	/* pinfo advertises mwdma0-2; copy it too */
734 probe_ent->port_ops = pinfo->port_ops;
735 probe_ent->n_ports = SIL24_FLAG2NPORTS(pinfo->host_flags);
736
737 probe_ent->irq = pdev->irq;
738 probe_ent->irq_flags = SA_SHIRQ;
739 probe_ent->mmio_base = port_base;
740 probe_ent->private_data = hpriv;
741
742 memset(hpriv, 0, sizeof(*hpriv));
743 hpriv->host_base = host_base;
744 hpriv->port_base = port_base;
745
746 /*
747 * Configure the device
748 */
749 /*
750 * FIXME: This device is certainly 64-bit capable. We just
751 * don't know how to use it. After fixing 32bit activation in
752 * this function, enable 64bit masks here.
753 */
754 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
755 if (rc) {
756 printk(KERN_ERR DRV_NAME "(%s): 32-bit DMA enable failed\n",
757 pci_name(pdev));
758 goto out_free;
759 }
760 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
761 if (rc) {
762 printk(KERN_ERR DRV_NAME "(%s): 32-bit consistent DMA enable failed\n",
763 pci_name(pdev));
764 goto out_free;
765 }
766
767 /* GPIO off */
768 writel(0, host_base + HOST_FLASH_CMD);
769
770 /* Mask interrupts during initialization */
771 writel(0, host_base + HOST_CTRL);
772
773 for (i = 0; i < probe_ent->n_ports; i++) {
774 void *port = port_base + i * PORT_REGS_SIZE;
775 unsigned long portu = (unsigned long)port;
776 u32 tmp;
777 int cnt;
778
779 probe_ent->port[i].cmd_addr = portu + PORT_PRB;
780 probe_ent->port[i].scr_addr = portu + PORT_SCONTROL;
781
782 ata_std_ports(&probe_ent->port[i]);
783
784 /* Initial PHY setting */
785 writel(0x20c, port + PORT_PHY_CFG);
786
787 /* Clear port RST */
788 tmp = readl(port + PORT_CTRL_STAT);
789 if (tmp & PORT_CS_PORT_RST) {
790 writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
791 readl(port + PORT_CTRL_STAT); /* sync */
792 for (cnt = 0; cnt < 10; cnt++) {
793 msleep(10);
794 tmp = readl(port + PORT_CTRL_STAT);
795 if (!(tmp & PORT_CS_PORT_RST))
796 break;
797 }
798 if (tmp & PORT_CS_PORT_RST)
799 printk(KERN_ERR DRV_NAME
800 "(%s): failed to clear port RST\n",
801 pci_name(pdev));
802 }
803
804 /* Zero error counters. */
805 writel(0x8000, port + PORT_DECODE_ERR_THRESH);
806 writel(0x8000, port + PORT_CRC_ERR_THRESH);
807 writel(0x8000, port + PORT_HSHK_ERR_THRESH);
808 writel(0x0000, port + PORT_DECODE_ERR_CNT);
809 writel(0x0000, port + PORT_CRC_ERR_CNT);
810 writel(0x0000, port + PORT_HSHK_ERR_CNT);
811
812 /* FIXME: 32bit activation? */
813 writel(0, port + PORT_ACTIVATE_UPPER_ADDR);
814 writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_STAT);
815
816 /* Configure interrupts */
817 writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
818 writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR | PORT_IRQ_SDB_FIS,
819 port + PORT_IRQ_ENABLE_SET);
820
821 /* Clear interrupts */
822 writel(0x0fff0fff, port + PORT_IRQ_STAT);
823 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
824
825 /* Clear port multiplier enable and resume bits */
826 writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR);
827
828 /* Reset itself */
829 if (__sil24_reset_controller(port))
830 printk(KERN_ERR DRV_NAME
831 "(%s): failed to reset controller\n",
832 pci_name(pdev));
833 }
834
835 /* Turn on interrupts */
836 writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL);
837
838 pci_set_master(pdev);
839
840 /* FIXME: check ata_device_add return value */
841 ata_device_add(probe_ent);
842
843 kfree(probe_ent);
844 return 0;
845
846 out_free:
847 if (host_base)
848 iounmap(host_base);
849 if (port_base)
850 iounmap(port_base);
851 kfree(probe_ent);
852 kfree(hpriv);
853 pci_release_regions(pdev);
854 out_disable:
855 pci_disable_device(pdev);
856 return rc;
857}
858
859static int __init sil24_init(void)
860{
861 return pci_module_init(&sil24_pci_driver);
862}
863
864static void __exit sil24_exit(void)
865{
866 pci_unregister_driver(&sil24_pci_driver);
867}
868
869MODULE_AUTHOR("Tejun Heo");
870MODULE_DESCRIPTION("Silicon Image 3124/3132 SATA low-level driver");
871MODULE_LICENSE("GPL");
872MODULE_DEVICE_TABLE(pci, sil24_pci_tbl);
873
874module_init(sil24_init);
875module_exit(sil24_exit);
diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c
index b227e51d12f4..0761a3234fcf 100644
--- a/drivers/scsi/sata_sis.c
+++ b/drivers/scsi/sata_sis.c
@@ -263,7 +263,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
263 goto err_out_regions; 263 goto err_out_regions;
264 264
265 ppi = &sis_port_info; 265 ppi = &sis_port_info;
266 probe_ent = ata_pci_init_native_mode(pdev, &ppi); 266 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
267 if (!probe_ent) { 267 if (!probe_ent) {
268 rc = -ENOMEM; 268 rc = -ENOMEM;
269 goto err_out_regions; 269 goto err_out_regions;
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
index 4c9fb8b71be1..9c06f2abe7f7 100644
--- a/drivers/scsi/sata_uli.c
+++ b/drivers/scsi/sata_uli.c
@@ -202,7 +202,7 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
202 goto err_out_regions; 202 goto err_out_regions;
203 203
204 ppi = &uli_port_info; 204 ppi = &uli_port_info;
205 probe_ent = ata_pci_init_native_mode(pdev, &ppi); 205 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
206 if (!probe_ent) { 206 if (!probe_ent) {
207 rc = -ENOMEM; 207 rc = -ENOMEM;
208 goto err_out_regions; 208 goto err_out_regions;
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index 128b996b07b7..565872479b9a 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -212,7 +212,7 @@ static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev)
212 struct ata_probe_ent *probe_ent; 212 struct ata_probe_ent *probe_ent;
213 struct ata_port_info *ppi = &svia_port_info; 213 struct ata_port_info *ppi = &svia_port_info;
214 214
215 probe_ent = ata_pci_init_native_mode(pdev, &ppi); 215 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
216 if (!probe_ent) 216 if (!probe_ent)
217 return NULL; 217 return NULL;
218 218
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c
index 5959e6755a81..656c0e8d160e 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/serial/sunsu.c
@@ -518,11 +518,7 @@ static void sunsu_change_mouse_baud(struct uart_sunsu_port *up)
518 518
519 quot = up->port.uartclk / (16 * new_baud); 519 quot = up->port.uartclk / (16 * new_baud);
520 520
521 spin_unlock(&up->port.lock);
522
523 sunsu_change_speed(&up->port, up->cflag, 0, quot); 521 sunsu_change_speed(&up->port, up->cflag, 0, quot);
524
525 spin_lock(&up->port.lock);
526} 522}
527 523
528static void receive_kbd_ms_chars(struct uart_sunsu_port *up, struct pt_regs *regs, int is_break) 524static void receive_kbd_ms_chars(struct uart_sunsu_port *up, struct pt_regs *regs, int is_break)