author    Linus Torvalds <torvalds@linux-foundation.org>    2009-10-02 16:37:18 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2009-10-02 16:37:18 -0400
commit    90d5ffc729e92bffc0f84e2447e2e6dc280240a5 (patch)
tree      ee8e912a1e92ea612843af7492199e977f29ee89 /drivers
parent    0efe5e32c8729ef44b00d9a7203e4c99a6378b27 (diff)
parent    6053bbf7bbdbb2c94547f830ad07636c17d7024e (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (46 commits)
  cnic: Fix NETDEV_UP event processing.
  uvesafb/connector: Disallow unpliviged users to send netlink packets
  pohmelfs/connector: Disallow unpliviged users to configure pohmelfs
  dst/connector: Disallow unpliviged users to configure dst
  dm/connector: Only process connector packages from privileged processes
  connector: Removed the destruct_data callback since it is always kfree_skb()
  connector/dm: Fixed a compilation warning
  connector: Provide the sender's credentials to the callback
  connector: Keep the skb in cn_callback_data
  e1000e/igb/ixgbe: Don't report an error if devices don't support AER
  net: Fix wrong sizeof
  net: splice() from tcp to pipe should take into account O_NONBLOCK
  net: Use sk_mark for routing lookup in more places
  sky2: irqname based on pci address
  skge: use unique IRQ name
  IPv4 TCP fails to send window scale option when window scale is zero
  net/ipv4/tcp.c: fix min() type mismatch warning
  Kconfig: STRIP: Remove stale bits of STRIP help text
  NET: mkiss: Fix typo
  tg3: Remove prev_vlan_tag from struct tx_ring_info
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/connector/cn_queue.c  12
-rw-r--r--  drivers/connector/connector.c  22
-rw-r--r--  drivers/md/dm-log-userspace-transfer.c  6
-rw-r--r--  drivers/net/3c59x.c  77
-rw-r--r--  drivers/net/Kconfig  7
-rw-r--r--  drivers/net/Makefile  1
-rw-r--r--  drivers/net/bcm63xx_enet.c  2
-rw-r--r--  drivers/net/benet/be.h  1
-rw-r--r--  drivers/net/benet/be_cmds.c  3
-rw-r--r--  drivers/net/benet/be_cmds.h  3
-rw-r--r--  drivers/net/benet/be_main.c  23
-rw-r--r--  drivers/net/bonding/bond_sysfs.c  1
-rw-r--r--  drivers/net/cnic.c  3
-rw-r--r--  drivers/net/cnic_if.h  4
-rw-r--r--  drivers/net/e1000e/netdev.c  13
-rw-r--r--  drivers/net/hamradio/mkiss.c  4
-rw-r--r--  drivers/net/igb/igb_main.c  13
-rw-r--r--  drivers/net/iseries_veth.c  2
-rw-r--r--  drivers/net/ixgbe/ixgbe_82598.c  2
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.c  232
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c  4
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c  52
-rw-r--r--  drivers/net/ixgbe/ixgbe_type.h  9
-rw-r--r--  drivers/net/ks8851_mll.c  1697
-rw-r--r--  drivers/net/meth.c  2
-rw-r--r--  drivers/net/qlge/qlge.h  18
-rw-r--r--  drivers/net/qlge/qlge_main.c  26
-rw-r--r--  drivers/net/sgiseeq.c  2
-rw-r--r--  drivers/net/skge.c  16
-rw-r--r--  drivers/net/skge.h  2
-rw-r--r--  drivers/net/sky2.c  7
-rw-r--r--  drivers/net/sky2.h  2
-rw-r--r--  drivers/net/tg3.h  1
-rw-r--r--  drivers/net/virtio_net.c  2
-rw-r--r--  drivers/net/wireless/Kconfig  13
-rw-r--r--  drivers/net/wireless/ath/ar9170/phy.c  6
-rw-r--r--  drivers/net/wireless/b43/pio.c  60
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c  3
-rw-r--r--  drivers/net/wireless/rt2x00/rt73usb.c  1
-rw-r--r--  drivers/staging/dst/dcore.c  7
-rw-r--r--  drivers/staging/pohmelfs/config.c  5
-rw-r--r--  drivers/video/uvesafb.c  5
-rw-r--r--  drivers/w1/w1_netlink.c  2
43 files changed, 2128 insertions, 245 deletions
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index 4a1dfe1f4ba9..210338ea222f 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -78,18 +78,20 @@ void cn_queue_wrapper(struct work_struct *work)
78 struct cn_callback_entry *cbq = 78 struct cn_callback_entry *cbq =
79 container_of(work, struct cn_callback_entry, work); 79 container_of(work, struct cn_callback_entry, work);
80 struct cn_callback_data *d = &cbq->data; 80 struct cn_callback_data *d = &cbq->data;
81 struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(d->skb));
82 struct netlink_skb_parms *nsp = &NETLINK_CB(d->skb);
81 83
82 d->callback(d->callback_priv); 84 d->callback(msg, nsp);
83 85
84 d->destruct_data(d->ddata); 86 kfree_skb(d->skb);
85 d->ddata = NULL; 87 d->skb = NULL;
86 88
87 kfree(d->free); 89 kfree(d->free);
88} 90}
89 91
90static struct cn_callback_entry * 92static struct cn_callback_entry *
91cn_queue_alloc_callback_entry(char *name, struct cb_id *id, 93cn_queue_alloc_callback_entry(char *name, struct cb_id *id,
92 void (*callback)(struct cn_msg *)) 94 void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
93{ 95{
94 struct cn_callback_entry *cbq; 96 struct cn_callback_entry *cbq;
95 97
@@ -123,7 +125,7 @@ int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
123} 125}
124 126
125int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, 127int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id,
126 void (*callback)(struct cn_msg *)) 128 void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
127{ 129{
128 struct cn_callback_entry *cbq, *__cbq; 130 struct cn_callback_entry *cbq, *__cbq;
129 int found = 0; 131 int found = 0;
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 74f52af79563..f06024668f99 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -129,21 +129,19 @@ EXPORT_SYMBOL_GPL(cn_netlink_send);
129/* 129/*
130 * Callback helper - queues work and setup destructor for given data. 130 * Callback helper - queues work and setup destructor for given data.
131 */ 131 */
132static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), void *data) 132static int cn_call_callback(struct sk_buff *skb)
133{ 133{
134 struct cn_callback_entry *__cbq, *__new_cbq; 134 struct cn_callback_entry *__cbq, *__new_cbq;
135 struct cn_dev *dev = &cdev; 135 struct cn_dev *dev = &cdev;
136 struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(skb));
136 int err = -ENODEV; 137 int err = -ENODEV;
137 138
138 spin_lock_bh(&dev->cbdev->queue_lock); 139 spin_lock_bh(&dev->cbdev->queue_lock);
139 list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) { 140 list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
140 if (cn_cb_equal(&__cbq->id.id, &msg->id)) { 141 if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
141 if (likely(!work_pending(&__cbq->work) && 142 if (likely(!work_pending(&__cbq->work) &&
142 __cbq->data.ddata == NULL)) { 143 __cbq->data.skb == NULL)) {
143 __cbq->data.callback_priv = msg; 144 __cbq->data.skb = skb;
144
145 __cbq->data.ddata = data;
146 __cbq->data.destruct_data = destruct_data;
147 145
148 if (queue_cn_work(__cbq, &__cbq->work)) 146 if (queue_cn_work(__cbq, &__cbq->work))
149 err = 0; 147 err = 0;
@@ -156,10 +154,8 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v
156 __new_cbq = kzalloc(sizeof(struct cn_callback_entry), GFP_ATOMIC); 154 __new_cbq = kzalloc(sizeof(struct cn_callback_entry), GFP_ATOMIC);
157 if (__new_cbq) { 155 if (__new_cbq) {
158 d = &__new_cbq->data; 156 d = &__new_cbq->data;
159 d->callback_priv = msg; 157 d->skb = skb;
160 d->callback = __cbq->data.callback; 158 d->callback = __cbq->data.callback;
161 d->ddata = data;
162 d->destruct_data = destruct_data;
163 d->free = __new_cbq; 159 d->free = __new_cbq;
164 160
165 __new_cbq->pdev = __cbq->pdev; 161 __new_cbq->pdev = __cbq->pdev;
@@ -191,7 +187,6 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v
191 */ 187 */
192static void cn_rx_skb(struct sk_buff *__skb) 188static void cn_rx_skb(struct sk_buff *__skb)
193{ 189{
194 struct cn_msg *msg;
195 struct nlmsghdr *nlh; 190 struct nlmsghdr *nlh;
196 int err; 191 int err;
197 struct sk_buff *skb; 192 struct sk_buff *skb;
@@ -208,8 +203,7 @@ static void cn_rx_skb(struct sk_buff *__skb)
208 return; 203 return;
209 } 204 }
210 205
211 msg = NLMSG_DATA(nlh); 206 err = cn_call_callback(skb);
212 err = cn_call_callback(msg, (void (*)(void *))kfree_skb, skb);
213 if (err < 0) 207 if (err < 0)
214 kfree_skb(skb); 208 kfree_skb(skb);
215 } 209 }
@@ -270,7 +264,7 @@ static void cn_notify(struct cb_id *id, u32 notify_event)
270 * May sleep. 264 * May sleep.
271 */ 265 */
272int cn_add_callback(struct cb_id *id, char *name, 266int cn_add_callback(struct cb_id *id, char *name,
273 void (*callback)(struct cn_msg *)) 267 void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
274{ 268{
275 int err; 269 int err;
276 struct cn_dev *dev = &cdev; 270 struct cn_dev *dev = &cdev;
@@ -352,7 +346,7 @@ static int cn_ctl_msg_equals(struct cn_ctl_msg *m1, struct cn_ctl_msg *m2)
352 * 346 *
353 * Used for notification of a request's processing. 347 * Used for notification of a request's processing.
354 */ 348 */
355static void cn_callback(struct cn_msg *msg) 349static void cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
356{ 350{
357 struct cn_ctl_msg *ctl; 351 struct cn_ctl_msg *ctl;
358 struct cn_ctl_entry *ent; 352 struct cn_ctl_entry *ent;
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index ba0edad2d048..54abf9e303b7 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -129,11 +129,13 @@ static int fill_pkg(struct cn_msg *msg, struct dm_ulog_request *tfr)
129 * This is the connector callback that delivers data 129 * This is the connector callback that delivers data
130 * that was sent from userspace. 130 * that was sent from userspace.
131 */ 131 */
132static void cn_ulog_callback(void *data) 132static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
133{ 133{
134 struct cn_msg *msg = (struct cn_msg *)data;
135 struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1); 134 struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);
136 135
136 if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
137 return;
138
137 spin_lock(&receiving_list_lock); 139 spin_lock(&receiving_list_lock);
138 if (msg->len == 0) 140 if (msg->len == 0)
139 fill_pkg(msg, NULL); 141 fill_pkg(msg, NULL);
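The connector hunks above move every callback to the two-argument form and hand the sender's struct netlink_skb_parms to the callback, which is what lets dm-log (and uvesafb, pohmelfs and dst elsewhere in this merge) refuse unprivileged senders via cap_raised(). A minimal sketch of a client written against the new signature follows; the module, its name and the cb_id values are hypothetical and only illustrate the API, they are not part of this merge.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/capability.h>
#include <linux/connector.h>

/* Hypothetical connector id; a real client picks its index from connector.h. */
static struct cb_id example_id = { .idx = 0x20, .val = 0x1 };

static void example_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
	/* Drop configuration requests from unprivileged senders, mirroring
	 * the cap_raised() checks added by this merge. */
	if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
		return;

	pr_info("example: %u byte payload, seq %u\n", msg->len, msg->seq);
}

static int __init example_init(void)
{
	return cn_add_callback(&example_id, "example", example_callback);
}

static void __exit example_exit(void)
{
	cn_del_callback(&example_id);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");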
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index b9eeadf01b74..975e25b19ebe 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -805,52 +805,54 @@ static void poll_vortex(struct net_device *dev)
805 805
806#ifdef CONFIG_PM 806#ifdef CONFIG_PM
807 807
808static int vortex_suspend(struct pci_dev *pdev, pm_message_t state) 808static int vortex_suspend(struct device *dev)
809{ 809{
810 struct net_device *dev = pci_get_drvdata(pdev); 810 struct pci_dev *pdev = to_pci_dev(dev);
811 struct net_device *ndev = pci_get_drvdata(pdev);
812
813 if (!ndev || !netif_running(ndev))
814 return 0;
815
816 netif_device_detach(ndev);
817 vortex_down(ndev, 1);
811 818
812 if (dev && netdev_priv(dev)) {
813 if (netif_running(dev)) {
814 netif_device_detach(dev);
815 vortex_down(dev, 1);
816 disable_irq(dev->irq);
817 }
818 pci_save_state(pdev);
819 pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
820 pci_disable_device(pdev);
821 pci_set_power_state(pdev, pci_choose_state(pdev, state));
822 }
823 return 0; 819 return 0;
824} 820}
825 821
826static int vortex_resume(struct pci_dev *pdev) 822static int vortex_resume(struct device *dev)
827{ 823{
828 struct net_device *dev = pci_get_drvdata(pdev); 824 struct pci_dev *pdev = to_pci_dev(dev);
829 struct vortex_private *vp = netdev_priv(dev); 825 struct net_device *ndev = pci_get_drvdata(pdev);
830 int err; 826 int err;
831 827
832 if (dev && vp) { 828 if (!ndev || !netif_running(ndev))
833 pci_set_power_state(pdev, PCI_D0); 829 return 0;
834 pci_restore_state(pdev); 830
835 err = pci_enable_device(pdev); 831 err = vortex_up(ndev);
836 if (err) { 832 if (err)
837 pr_warning("%s: Could not enable device\n", 833 return err;
838 dev->name); 834
839 return err; 835 netif_device_attach(ndev);
840 } 836
841 pci_set_master(pdev);
842 if (netif_running(dev)) {
843 err = vortex_up(dev);
844 if (err)
845 return err;
846 enable_irq(dev->irq);
847 netif_device_attach(dev);
848 }
849 }
850 return 0; 837 return 0;
851} 838}
852 839
853#endif /* CONFIG_PM */ 840static struct dev_pm_ops vortex_pm_ops = {
841 .suspend = vortex_suspend,
842 .resume = vortex_resume,
843 .freeze = vortex_suspend,
844 .thaw = vortex_resume,
845 .poweroff = vortex_suspend,
846 .restore = vortex_resume,
847};
848
849#define VORTEX_PM_OPS (&vortex_pm_ops)
850
851#else /* !CONFIG_PM */
852
853#define VORTEX_PM_OPS NULL
854
855#endif /* !CONFIG_PM */
854 856
855#ifdef CONFIG_EISA 857#ifdef CONFIG_EISA
856static struct eisa_device_id vortex_eisa_ids[] = { 858static struct eisa_device_id vortex_eisa_ids[] = {
@@ -3199,10 +3201,7 @@ static struct pci_driver vortex_driver = {
3199 .probe = vortex_init_one, 3201 .probe = vortex_init_one,
3200 .remove = __devexit_p(vortex_remove_one), 3202 .remove = __devexit_p(vortex_remove_one),
3201 .id_table = vortex_pci_tbl, 3203 .id_table = vortex_pci_tbl,
3202#ifdef CONFIG_PM 3204 .driver.pm = VORTEX_PM_OPS,
3203 .suspend = vortex_suspend,
3204 .resume = vortex_resume,
3205#endif
3206}; 3205};
3207 3206
3208 3207
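The 3c59x change above is the standard conversion from the legacy pci_driver .suspend/.resume hooks to dev_pm_ops, with the PCI core now responsible for saving config space and changing the device power state. Reduced to its generic shape, the resulting pattern looks like the sketch below; every "foo" identifier is a placeholder added here for illustration and is not part of the patch.

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *ndev = pci_get_drvdata(pdev);

	if (!ndev || !netif_running(ndev))
		return 0;

	netif_device_detach(ndev);
	/* quiesce the hardware; no pci_save_state()/pci_set_power_state()
	 * here any more, the PCI core handles that for dev_pm_ops drivers */
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *ndev = pci_get_drvdata(pdev);

	if (!ndev || !netif_running(ndev))
		return 0;

	/* bring the hardware back up, then re-attach */
	netif_device_attach(ndev);
	return 0;
}

static struct dev_pm_ops foo_pm_ops = {
	.suspend  = foo_suspend,
	.resume   = foo_resume,
	.freeze   = foo_suspend,
	.thaw     = foo_resume,
	.poweroff = foo_suspend,
	.restore  = foo_resume,
};

static struct pci_driver foo_driver = {
	.name      = "foo",
	/* .probe, .remove and .id_table elided */
	.driver.pm = &foo_pm_ops,
};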
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 2bea67c134f0..712776089b46 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1738,6 +1738,13 @@ config KS8851
1738 help 1738 help
1739 SPI driver for Micrel KS8851 SPI attached network chip. 1739 SPI driver for Micrel KS8851 SPI attached network chip.
1740 1740
1741config KS8851_MLL
1742 tristate "Micrel KS8851 MLL"
1743 depends on HAS_IOMEM
1744 help
1745 This platform driver is for Micrel KS8851 Address/data bus
1746 multiplexed network chip.
1747
1741config VIA_RHINE 1748config VIA_RHINE
1742 tristate "VIA Rhine support" 1749 tristate "VIA Rhine support"
1743 depends on NET_PCI && PCI 1750 depends on NET_PCI && PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index ae8cd30f13d6..d866b8cf65d1 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -89,6 +89,7 @@ obj-$(CONFIG_SKY2) += sky2.o
89obj-$(CONFIG_SKFP) += skfp/ 89obj-$(CONFIG_SKFP) += skfp/
90obj-$(CONFIG_KS8842) += ks8842.o 90obj-$(CONFIG_KS8842) += ks8842.o
91obj-$(CONFIG_KS8851) += ks8851.o 91obj-$(CONFIG_KS8851) += ks8851.o
92obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o
92obj-$(CONFIG_VIA_RHINE) += via-rhine.o 93obj-$(CONFIG_VIA_RHINE) += via-rhine.o
93obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o 94obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
94obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o 95obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index 09d270913c50..ba29dc319b34 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -90,7 +90,7 @@ static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
90 if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII) 90 if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
91 break; 91 break;
92 udelay(1); 92 udelay(1);
93 } while (limit-- >= 0); 93 } while (limit-- > 0);
94 94
95 return (limit < 0) ? 1 : 0; 95 return (limit < 0) ? 1 : 0;
96} 96}
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 684c6fe24c8d..a80da0e14a52 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -258,6 +258,7 @@ struct be_adapter {
258 bool link_up; 258 bool link_up;
259 u32 port_num; 259 u32 port_num;
260 bool promiscuous; 260 bool promiscuous;
261 u32 cap;
261}; 262};
262 263
263extern const struct ethtool_ops be_ethtool_ops; 264extern const struct ethtool_ops be_ethtool_ops;
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 3dd76c4170bf..79d35d122c08 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1068,7 +1068,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1068} 1068}
1069 1069
1070/* Uses mbox */ 1070/* Uses mbox */
1071int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num) 1071int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
1072{ 1072{
1073 struct be_mcc_wrb *wrb; 1073 struct be_mcc_wrb *wrb;
1074 struct be_cmd_req_query_fw_cfg *req; 1074 struct be_cmd_req_query_fw_cfg *req;
@@ -1088,6 +1088,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num)
1088 if (!status) { 1088 if (!status) {
1089 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb); 1089 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
1090 *port_num = le32_to_cpu(resp->phys_port); 1090 *port_num = le32_to_cpu(resp->phys_port);
1091 *cap = le32_to_cpu(resp->function_cap);
1091 } 1092 }
1092 1093
1093 spin_unlock(&adapter->mbox_lock); 1094 spin_unlock(&adapter->mbox_lock);
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 93e432f3d926..8b4c2cb9ad62 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -760,7 +760,8 @@ extern int be_cmd_set_flow_control(struct be_adapter *adapter,
760 u32 tx_fc, u32 rx_fc); 760 u32 tx_fc, u32 rx_fc);
761extern int be_cmd_get_flow_control(struct be_adapter *adapter, 761extern int be_cmd_get_flow_control(struct be_adapter *adapter,
762 u32 *tx_fc, u32 *rx_fc); 762 u32 *tx_fc, u32 *rx_fc);
763extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num); 763extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
764 u32 *port_num, u32 *cap);
764extern int be_cmd_reset_function(struct be_adapter *adapter); 765extern int be_cmd_reset_function(struct be_adapter *adapter);
765extern int be_process_mcc(struct be_adapter *adapter); 766extern int be_process_mcc(struct be_adapter *adapter);
766extern int be_cmd_write_flashrom(struct be_adapter *adapter, 767extern int be_cmd_write_flashrom(struct be_adapter *adapter,
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 409cf0595903..2f9b50156e0c 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -747,9 +747,16 @@ static void be_rx_compl_process(struct be_adapter *adapter,
747 struct be_eth_rx_compl *rxcp) 747 struct be_eth_rx_compl *rxcp)
748{ 748{
749 struct sk_buff *skb; 749 struct sk_buff *skb;
750 u32 vtp, vid; 750 u32 vlanf, vid;
751 u8 vtm;
751 752
752 vtp = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); 753 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
754 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
755
756 /* vlanf could be wrongly set in some cards.
757 * ignore if vtm is not set */
758 if ((adapter->cap == 0x400) && !vtm)
759 vlanf = 0;
753 760
754 skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN); 761 skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
755 if (!skb) { 762 if (!skb) {
@@ -772,7 +779,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
772 skb->protocol = eth_type_trans(skb, adapter->netdev); 779 skb->protocol = eth_type_trans(skb, adapter->netdev);
773 skb->dev = adapter->netdev; 780 skb->dev = adapter->netdev;
774 781
775 if (vtp) { 782 if (vlanf) {
776 if (!adapter->vlan_grp || adapter->num_vlans == 0) { 783 if (!adapter->vlan_grp || adapter->num_vlans == 0) {
777 kfree_skb(skb); 784 kfree_skb(skb);
778 return; 785 return;
@@ -797,11 +804,18 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
797 struct be_eq_obj *eq_obj = &adapter->rx_eq; 804 struct be_eq_obj *eq_obj = &adapter->rx_eq;
798 u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; 805 u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
799 u16 i, rxq_idx = 0, vid, j; 806 u16 i, rxq_idx = 0, vid, j;
807 u8 vtm;
800 808
801 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); 809 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
802 pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); 810 pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
803 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); 811 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
804 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); 812 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
813 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
814
815 /* vlanf could be wrongly set in some cards.
816 * ignore if vtm is not set */
817 if ((adapter->cap == 0x400) && !vtm)
818 vlanf = 0;
805 819
806 skb = napi_get_frags(&eq_obj->napi); 820 skb = napi_get_frags(&eq_obj->napi);
807 if (!skb) { 821 if (!skb) {
@@ -2045,7 +2059,8 @@ static int be_hw_up(struct be_adapter *adapter)
2045 if (status) 2059 if (status)
2046 return status; 2060 return status;
2047 2061
2048 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num); 2062 status = be_cmd_query_fw_cfg(adapter,
2063 &adapter->port_num, &adapter->cap);
2049 return status; 2064 return status;
2050} 2065}
2051 2066
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 6044e12ff9fc..ff449de6f3c0 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1182,6 +1182,7 @@ static ssize_t bonding_store_primary(struct device *d,
1182 ": %s: Setting %s as primary slave.\n", 1182 ": %s: Setting %s as primary slave.\n",
1183 bond->dev->name, slave->dev->name); 1183 bond->dev->name, slave->dev->name);
1184 bond->primary_slave = slave; 1184 bond->primary_slave = slave;
1185 strcpy(bond->params.primary, slave->dev->name);
1185 bond_select_active_slave(bond); 1186 bond_select_active_slave(bond);
1186 goto out; 1187 goto out;
1187 } 1188 }
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 211c8e9182fc..46c87ec7960c 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -2733,7 +2733,8 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
2733 cnic_ulp_init(dev); 2733 cnic_ulp_init(dev);
2734 else if (event == NETDEV_UNREGISTER) 2734 else if (event == NETDEV_UNREGISTER)
2735 cnic_ulp_exit(dev); 2735 cnic_ulp_exit(dev);
2736 else if (event == NETDEV_UP) { 2736
2737 if (event == NETDEV_UP) {
2737 if (cnic_register_netdev(dev) != 0) { 2738 if (cnic_register_netdev(dev) != 0) {
2738 cnic_put(dev); 2739 cnic_put(dev);
2739 goto done; 2740 goto done;
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index a49235739eef..d8b09efdcb52 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
12#ifndef CNIC_IF_H 12#ifndef CNIC_IF_H
13#define CNIC_IF_H 13#define CNIC_IF_H
14 14
15#define CNIC_MODULE_VERSION "2.0.0" 15#define CNIC_MODULE_VERSION "2.0.1"
16#define CNIC_MODULE_RELDATE "May 21, 2009" 16#define CNIC_MODULE_RELDATE "Oct 01, 2009"
17 17
18#define CNIC_ULP_RDMA 0 18#define CNIC_ULP_RDMA 0
19#define CNIC_ULP_ISCSI 1 19#define CNIC_ULP_ISCSI 1
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 16c193a6c95c..0687c6aa4e46 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4982,12 +4982,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4982 goto err_pci_reg; 4982 goto err_pci_reg;
4983 4983
4984 /* AER (Advanced Error Reporting) hooks */ 4984 /* AER (Advanced Error Reporting) hooks */
4985 err = pci_enable_pcie_error_reporting(pdev); 4985 pci_enable_pcie_error_reporting(pdev);
4986 if (err) {
4987 dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
4988 "0x%x\n", err);
4989 /* non-fatal, continue */
4990 }
4991 4986
4992 pci_set_master(pdev); 4987 pci_set_master(pdev);
4993 /* PCI config space info */ 4988 /* PCI config space info */
@@ -5263,7 +5258,6 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
5263{ 5258{
5264 struct net_device *netdev = pci_get_drvdata(pdev); 5259 struct net_device *netdev = pci_get_drvdata(pdev);
5265 struct e1000_adapter *adapter = netdev_priv(netdev); 5260 struct e1000_adapter *adapter = netdev_priv(netdev);
5266 int err;
5267 5261
5268 /* 5262 /*
5269 * flush_scheduled work may reschedule our watchdog task, so 5263 * flush_scheduled work may reschedule our watchdog task, so
@@ -5299,10 +5293,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
5299 free_netdev(netdev); 5293 free_netdev(netdev);
5300 5294
5301 /* AER disable */ 5295 /* AER disable */
5302 err = pci_disable_pcie_error_reporting(pdev); 5296 pci_disable_pcie_error_reporting(pdev);
5303 if (err)
5304 dev_err(&pdev->dev,
5305 "pci_disable_pcie_error_reporting failed 0x%x\n", err);
5306 5297
5307 pci_disable_device(pdev); 5298 pci_disable_device(pdev);
5308} 5299}
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 33b55f729742..db4b7f1603f6 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -258,7 +258,7 @@ static void ax_bump(struct mkiss *ax)
258 } 258 }
259 if (ax->crcmode != CRC_MODE_SMACK && ax->crcauto) { 259 if (ax->crcmode != CRC_MODE_SMACK && ax->crcauto) {
260 printk(KERN_INFO 260 printk(KERN_INFO
261 "mkiss: %s: Switchting to crc-smack\n", 261 "mkiss: %s: Switching to crc-smack\n",
262 ax->dev->name); 262 ax->dev->name);
263 ax->crcmode = CRC_MODE_SMACK; 263 ax->crcmode = CRC_MODE_SMACK;
264 } 264 }
@@ -272,7 +272,7 @@ static void ax_bump(struct mkiss *ax)
272 } 272 }
273 if (ax->crcmode != CRC_MODE_FLEX && ax->crcauto) { 273 if (ax->crcmode != CRC_MODE_FLEX && ax->crcauto) {
274 printk(KERN_INFO 274 printk(KERN_INFO
275 "mkiss: %s: Switchting to crc-flexnet\n", 275 "mkiss: %s: Switching to crc-flexnet\n",
276 ax->dev->name); 276 ax->dev->name);
277 ax->crcmode = CRC_MODE_FLEX; 277 ax->crcmode = CRC_MODE_FLEX;
278 } 278 }
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 5d6c1530a8c0..714c3a4a44ef 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1246,12 +1246,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1246 if (err) 1246 if (err)
1247 goto err_pci_reg; 1247 goto err_pci_reg;
1248 1248
1249 err = pci_enable_pcie_error_reporting(pdev); 1249 pci_enable_pcie_error_reporting(pdev);
1250 if (err) {
1251 dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
1252 "0x%x\n", err);
1253 /* non-fatal, continue */
1254 }
1255 1250
1256 pci_set_master(pdev); 1251 pci_set_master(pdev);
1257 pci_save_state(pdev); 1252 pci_save_state(pdev);
@@ -1628,7 +1623,6 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1628 struct net_device *netdev = pci_get_drvdata(pdev); 1623 struct net_device *netdev = pci_get_drvdata(pdev);
1629 struct igb_adapter *adapter = netdev_priv(netdev); 1624 struct igb_adapter *adapter = netdev_priv(netdev);
1630 struct e1000_hw *hw = &adapter->hw; 1625 struct e1000_hw *hw = &adapter->hw;
1631 int err;
1632 1626
1633 /* flush_scheduled work may reschedule our watchdog task, so 1627 /* flush_scheduled work may reschedule our watchdog task, so
1634 * explicitly disable watchdog tasks from being rescheduled */ 1628 * explicitly disable watchdog tasks from being rescheduled */
@@ -1682,10 +1676,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1682 1676
1683 free_netdev(netdev); 1677 free_netdev(netdev);
1684 1678
1685 err = pci_disable_pcie_error_reporting(pdev); 1679 pci_disable_pcie_error_reporting(pdev);
1686 if (err)
1687 dev_err(&pdev->dev,
1688 "pci_disable_pcie_error_reporting failed 0x%x\n", err);
1689 1680
1690 pci_disable_device(pdev); 1681 pci_disable_device(pdev);
1691} 1682}
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index e36e951cbc65..aa7286bc4364 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -495,7 +495,7 @@ static void veth_take_cap_ack(struct veth_lpar_connection *cnx,
495 cnx->remote_lp); 495 cnx->remote_lp);
496 } else { 496 } else {
497 memcpy(&cnx->cap_ack_event, event, 497 memcpy(&cnx->cap_ack_event, event,
498 sizeof(&cnx->cap_ack_event)); 498 sizeof(cnx->cap_ack_event));
499 cnx->state |= VETH_STATE_GOTCAPACK; 499 cnx->state |= VETH_STATE_GOTCAPACK;
500 veth_kick_statemachine(cnx); 500 veth_kick_statemachine(cnx);
501 } 501 }
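The iseries_veth fix above ("net: Fix wrong sizeof" in the commit list) removes a classic sizeof-of-a-pointer bug: sizeof(&cnx->cap_ack_event) is the size of a pointer, not of the event structure, so the memcpy() copied only 4 or 8 bytes. A self-contained userspace illustration of the bug class follows; the structure and its size are invented for the example.

#include <stdio.h>
#include <string.h>

struct event { char payload[72]; };	/* made-up size, for illustration */

int main(void)
{
	struct event dst, src = { "capabilities ack" };

	/* Bug pattern: sizeof(&dst) is the size of a pointer, so the copy
	 * is silently truncated to 4 or 8 bytes. */
	memcpy(&dst, &src, sizeof(&dst));

	/* Fixed pattern: sizeof(dst) is the full object size (72 here). */
	memcpy(&dst, &src, sizeof(dst));

	printf("pointer size %zu, struct size %zu\n",
	       sizeof(&dst), sizeof(dst));
	return 0;
}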
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 56b12f3192f1..e2d5343f1275 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -425,7 +425,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
425#endif /* CONFIG_DCB */ 425#endif /* CONFIG_DCB */
426 default: 426 default:
427 hw_dbg(hw, "Flow control param set incorrectly\n"); 427 hw_dbg(hw, "Flow control param set incorrectly\n");
428 ret_val = -IXGBE_ERR_CONFIG; 428 ret_val = IXGBE_ERR_CONFIG;
429 goto out; 429 goto out;
430 break; 430 break;
431 } 431 }
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 6621e172df3d..40ff120a9ad4 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1355,9 +1355,7 @@ static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
1355/** 1355/**
1356 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses 1356 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
1357 * @hw: pointer to hardware structure 1357 * @hw: pointer to hardware structure
1358 * @addr_list: the list of new addresses 1358 * @uc_list: the list of new addresses
1359 * @addr_count: number of addresses
1360 * @next: iterator function to walk the address list
1361 * 1359 *
1362 * The given list replaces any existing list. Clears the secondary addrs from 1360 * The given list replaces any existing list. Clears the secondary addrs from
1363 * receive address registers. Uses unused receive address registers for the 1361 * receive address registers. Uses unused receive address registers for the
@@ -1663,7 +1661,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1663#endif /* CONFIG_DCB */ 1661#endif /* CONFIG_DCB */
1664 default: 1662 default:
1665 hw_dbg(hw, "Flow control param set incorrectly\n"); 1663 hw_dbg(hw, "Flow control param set incorrectly\n");
1666 ret_val = -IXGBE_ERR_CONFIG; 1664 ret_val = IXGBE_ERR_CONFIG;
1667 goto out; 1665 goto out;
1668 break; 1666 break;
1669 } 1667 }
@@ -1734,75 +1732,140 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
1734 s32 ret_val = 0; 1732 s32 ret_val = 0;
1735 ixgbe_link_speed speed; 1733 ixgbe_link_speed speed;
1736 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; 1734 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
1735 u32 links2, anlp1_reg, autoc_reg, links;
1737 bool link_up; 1736 bool link_up;
1738 1737
1739 /* 1738 /*
1740 * AN should have completed when the cable was plugged in. 1739 * AN should have completed when the cable was plugged in.
1741 * Look for reasons to bail out. Bail out if: 1740 * Look for reasons to bail out. Bail out if:
1742 * - FC autoneg is disabled, or if 1741 * - FC autoneg is disabled, or if
1743 * - we don't have multispeed fiber, or if 1742 * - link is not up.
1744 * - we're not running at 1G, or if
1745 * - link is not up, or if
1746 * - link is up but AN did not complete, or if
1747 * - link is up and AN completed but timed out
1748 * 1743 *
1749 * Since we're being called from an LSC, link is already know to be up. 1744 * Since we're being called from an LSC, link is already known to be up.
1750 * So use link_up_wait_to_complete=false. 1745 * So use link_up_wait_to_complete=false.
1751 */ 1746 */
1752 hw->mac.ops.check_link(hw, &speed, &link_up, false); 1747 hw->mac.ops.check_link(hw, &speed, &link_up, false);
1753 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); 1748
1754 1749 if (hw->fc.disable_fc_autoneg || (!link_up)) {
1755 if (hw->fc.disable_fc_autoneg ||
1756 !hw->phy.multispeed_fiber ||
1757 (speed != IXGBE_LINK_SPEED_1GB_FULL) ||
1758 !link_up ||
1759 ((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
1760 ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
1761 hw->fc.fc_was_autonegged = false; 1750 hw->fc.fc_was_autonegged = false;
1762 hw->fc.current_mode = hw->fc.requested_mode; 1751 hw->fc.current_mode = hw->fc.requested_mode;
1763 hw_dbg(hw, "Autoneg FC was skipped.\n");
1764 goto out; 1752 goto out;
1765 } 1753 }
1766 1754
1767 /* 1755 /*
1756 * On backplane, bail out if
1757 * - backplane autoneg was not completed, or if
1758 * - link partner is not AN enabled
1759 */
1760 if (hw->phy.media_type == ixgbe_media_type_backplane) {
1761 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
1762 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
1763 if (((links & IXGBE_LINKS_KX_AN_COMP) == 0) ||
1764 ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)) {
1765 hw->fc.fc_was_autonegged = false;
1766 hw->fc.current_mode = hw->fc.requested_mode;
1767 goto out;
1768 }
1769 }
1770
1771 /*
1772 * On multispeed fiber at 1g, bail out if
1773 * - link is up but AN did not complete, or if
1774 * - link is up and AN completed but timed out
1775 */
1776 if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL)) {
1777 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
1778 if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
1779 ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
1780 hw->fc.fc_was_autonegged = false;
1781 hw->fc.current_mode = hw->fc.requested_mode;
1782 goto out;
1783 }
1784 }
1785
1786 /*
1768 * Read the AN advertisement and LP ability registers and resolve 1787 * Read the AN advertisement and LP ability registers and resolve
1769 * local flow control settings accordingly 1788 * local flow control settings accordingly
1770 */ 1789 */
1771 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 1790 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
1772 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 1791 (hw->phy.media_type != ixgbe_media_type_backplane)) {
1773 if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) && 1792 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
1774 (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) { 1793 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
1794 if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1795 (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
1796 /*
1797 * Now we need to check if the user selected Rx ONLY
1798 * of pause frames. In this case, we had to advertise
1799 * FULL flow control because we could not advertise RX
1800 * ONLY. Hence, we must now check to see if we need to
1801 * turn OFF the TRANSMISSION of PAUSE frames.
1802 */
1803 if (hw->fc.requested_mode == ixgbe_fc_full) {
1804 hw->fc.current_mode = ixgbe_fc_full;
1805 hw_dbg(hw, "Flow Control = FULL.\n");
1806 } else {
1807 hw->fc.current_mode = ixgbe_fc_rx_pause;
1808 hw_dbg(hw, "Flow Control=RX PAUSE only\n");
1809 }
1810 } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1811 (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
1812 (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1813 (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
1814 hw->fc.current_mode = ixgbe_fc_tx_pause;
1815 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
1816 } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1817 (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
1818 !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1819 (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
1820 hw->fc.current_mode = ixgbe_fc_rx_pause;
1821 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
1822 } else {
1823 hw->fc.current_mode = ixgbe_fc_none;
1824 hw_dbg(hw, "Flow Control = NONE.\n");
1825 }
1826 }
1827
1828 if (hw->phy.media_type == ixgbe_media_type_backplane) {
1775 /* 1829 /*
1776 * Now we need to check if the user selected Rx ONLY 1830 * Read the 10g AN autoc and LP ability registers and resolve
1777 * of pause frames. In this case, we had to advertise 1831 * local flow control settings accordingly
1778 * FULL flow control because we could not advertise RX
1779 * ONLY. Hence, we must now check to see if we need to
1780 * turn OFF the TRANSMISSION of PAUSE frames.
1781 */ 1832 */
1782 if (hw->fc.requested_mode == ixgbe_fc_full) { 1833 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1783 hw->fc.current_mode = ixgbe_fc_full; 1834 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
1784 hw_dbg(hw, "Flow Control = FULL.\n"); 1835
1785 } else { 1836 if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
1837 (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE)) {
1838 /*
1839 * Now we need to check if the user selected Rx ONLY
1840 * of pause frames. In this case, we had to advertise
1841 * FULL flow control because we could not advertise RX
1842 * ONLY. Hence, we must now check to see if we need to
1843 * turn OFF the TRANSMISSION of PAUSE frames.
1844 */
1845 if (hw->fc.requested_mode == ixgbe_fc_full) {
1846 hw->fc.current_mode = ixgbe_fc_full;
1847 hw_dbg(hw, "Flow Control = FULL.\n");
1848 } else {
1849 hw->fc.current_mode = ixgbe_fc_rx_pause;
1850 hw_dbg(hw, "Flow Control=RX PAUSE only\n");
1851 }
1852 } else if (!(autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
1853 (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
1854 (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
1855 (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
1856 hw->fc.current_mode = ixgbe_fc_tx_pause;
1857 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
1858 } else if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
1859 (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
1860 !(anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
1861 (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
1786 hw->fc.current_mode = ixgbe_fc_rx_pause; 1862 hw->fc.current_mode = ixgbe_fc_rx_pause;
1787 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n"); 1863 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
1864 } else {
1865 hw->fc.current_mode = ixgbe_fc_none;
1866 hw_dbg(hw, "Flow Control = NONE.\n");
1788 } 1867 }
1789 } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1790 (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
1791 (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1792 (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
1793 hw->fc.current_mode = ixgbe_fc_tx_pause;
1794 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
1795 } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1796 (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
1797 !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1798 (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
1799 hw->fc.current_mode = ixgbe_fc_rx_pause;
1800 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
1801 } else {
1802 hw->fc.current_mode = ixgbe_fc_none;
1803 hw_dbg(hw, "Flow Control = NONE.\n");
1804 } 1868 }
1805
1806 /* Record that current_mode is the result of a successful autoneg */ 1869 /* Record that current_mode is the result of a successful autoneg */
1807 hw->fc.fc_was_autonegged = true; 1870 hw->fc.fc_was_autonegged = true;
1808 1871
@@ -1919,7 +1982,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
1919#endif /* CONFIG_DCB */ 1982#endif /* CONFIG_DCB */
1920 default: 1983 default:
1921 hw_dbg(hw, "Flow control param set incorrectly\n"); 1984 hw_dbg(hw, "Flow control param set incorrectly\n");
1922 ret_val = -IXGBE_ERR_CONFIG; 1985 ret_val = IXGBE_ERR_CONFIG;
1923 goto out; 1986 goto out;
1924 break; 1987 break;
1925 } 1988 }
@@ -1927,9 +1990,6 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
1927 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); 1990 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
1928 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); 1991 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
1929 1992
1930 /* Enable and restart autoneg to inform the link partner */
1931 reg |= IXGBE_PCS1GLCTL_AN_ENABLE | IXGBE_PCS1GLCTL_AN_RESTART;
1932
1933 /* Disable AN timeout */ 1993 /* Disable AN timeout */
1934 if (hw->fc.strict_ieee) 1994 if (hw->fc.strict_ieee)
1935 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; 1995 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
@@ -1937,6 +1997,70 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
1937 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); 1997 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
1938 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg); 1998 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
1939 1999
2000 /*
2001 * Set up the 10G flow control advertisement registers so the HW
2002 * can do fc autoneg once the cable is plugged in. If we end up
2003 * using 1g instead, this is harmless.
2004 */
2005 reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2006
2007 /*
2008 * The possible values of fc.requested_mode are:
2009 * 0: Flow control is completely disabled
2010 * 1: Rx flow control is enabled (we can receive pause frames,
2011 * but not send pause frames).
2012 * 2: Tx flow control is enabled (we can send pause frames but
2013 * we do not support receiving pause frames).
2014 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2015 * other: Invalid.
2016 */
2017 switch (hw->fc.requested_mode) {
2018 case ixgbe_fc_none:
2019 /* Flow control completely disabled by software override. */
2020 reg &= ~(IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
2021 break;
2022 case ixgbe_fc_rx_pause:
2023 /*
2024 * Rx Flow control is enabled and Tx Flow control is
2025 * disabled by software override. Since there really
2026 * isn't a way to advertise that we are capable of RX
2027 * Pause ONLY, we will advertise that we support both
2028 * symmetric and asymmetric Rx PAUSE. Later, we will
2029 * disable the adapter's ability to send PAUSE frames.
2030 */
2031 reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
2032 break;
2033 case ixgbe_fc_tx_pause:
2034 /*
2035 * Tx Flow control is enabled, and Rx Flow control is
2036 * disabled by software override.
2037 */
2038 reg |= (IXGBE_AUTOC_ASM_PAUSE);
2039 reg &= ~(IXGBE_AUTOC_SYM_PAUSE);
2040 break;
2041 case ixgbe_fc_full:
2042 /* Flow control (both Rx and Tx) is enabled by SW override. */
2043 reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
2044 break;
2045#ifdef CONFIG_DCB
2046 case ixgbe_fc_pfc:
2047 goto out;
2048 break;
2049#endif /* CONFIG_DCB */
2050 default:
2051 hw_dbg(hw, "Flow control param set incorrectly\n");
2052 ret_val = IXGBE_ERR_CONFIG;
2053 goto out;
2054 break;
2055 }
2056 /*
2057 * AUTOC restart handles negotiation of 1G and 10G. There is
2058 * no need to set the PCS1GCTL register.
2059 */
2060 reg |= IXGBE_AUTOC_AN_RESTART;
2061 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg);
2062 hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
2063
1940out: 2064out:
1941 return ret_val; 2065 return ret_val;
1942} 2066}
@@ -2000,7 +2124,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2000 2124
2001 while (timeout) { 2125 while (timeout) {
2002 if (ixgbe_get_eeprom_semaphore(hw)) 2126 if (ixgbe_get_eeprom_semaphore(hw))
2003 return -IXGBE_ERR_SWFW_SYNC; 2127 return IXGBE_ERR_SWFW_SYNC;
2004 2128
2005 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); 2129 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2006 if (!(gssr & (fwmask | swmask))) 2130 if (!(gssr & (fwmask | swmask)))
@@ -2017,7 +2141,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2017 2141
2018 if (!timeout) { 2142 if (!timeout) {
2019 hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n"); 2143 hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n");
2020 return -IXGBE_ERR_SWFW_SYNC; 2144 return IXGBE_ERR_SWFW_SYNC;
2021 } 2145 }
2022 2146
2023 gssr |= swmask; 2147 gssr |= swmask;
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 53b0a6680254..fa314cb005a4 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -53,6 +53,10 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
53 {"tx_packets", IXGBE_STAT(net_stats.tx_packets)}, 53 {"tx_packets", IXGBE_STAT(net_stats.tx_packets)},
54 {"rx_bytes", IXGBE_STAT(net_stats.rx_bytes)}, 54 {"rx_bytes", IXGBE_STAT(net_stats.rx_bytes)},
55 {"tx_bytes", IXGBE_STAT(net_stats.tx_bytes)}, 55 {"tx_bytes", IXGBE_STAT(net_stats.tx_bytes)},
56 {"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
57 {"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
58 {"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
59 {"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
56 {"lsc_int", IXGBE_STAT(lsc_int)}, 60 {"lsc_int", IXGBE_STAT(lsc_int)},
57 {"tx_busy", IXGBE_STAT(tx_busy)}, 61 {"tx_busy", IXGBE_STAT(tx_busy)},
58 {"non_eop_descs", IXGBE_STAT(non_eop_descs)}, 62 {"non_eop_descs", IXGBE_STAT(non_eop_descs)},
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index c407bd9de0dd..28fbb9d281f9 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -49,7 +49,7 @@ char ixgbe_driver_name[] = "ixgbe";
49static const char ixgbe_driver_string[] = 49static const char ixgbe_driver_string[] =
50 "Intel(R) 10 Gigabit PCI Express Network Driver"; 50 "Intel(R) 10 Gigabit PCI Express Network Driver";
51 51
52#define DRV_VERSION "2.0.37-k2" 52#define DRV_VERSION "2.0.44-k2"
53const char ixgbe_driver_version[] = DRV_VERSION; 53const char ixgbe_driver_version[] = DRV_VERSION;
54static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation."; 54static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";
55 55
@@ -1885,12 +1885,29 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
1885 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); 1885 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
1886 adapter->tx_ring[i].head = IXGBE_TDH(j); 1886 adapter->tx_ring[i].head = IXGBE_TDH(j);
1887 adapter->tx_ring[i].tail = IXGBE_TDT(j); 1887 adapter->tx_ring[i].tail = IXGBE_TDT(j);
1888 /* Disable Tx Head Writeback RO bit, since this hoses 1888 /*
1889 * Disable Tx Head Writeback RO bit, since this hoses
1889 * bookkeeping if things aren't delivered in order. 1890 * bookkeeping if things aren't delivered in order.
1890 */ 1891 */
1891 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j)); 1892 switch (hw->mac.type) {
1893 case ixgbe_mac_82598EB:
1894 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
1895 break;
1896 case ixgbe_mac_82599EB:
1897 default:
1898 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
1899 break;
1900 }
1892 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; 1901 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1893 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl); 1902 switch (hw->mac.type) {
1903 case ixgbe_mac_82598EB:
1904 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
1905 break;
1906 case ixgbe_mac_82599EB:
1907 default:
1908 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
1909 break;
1910 }
1894 } 1911 }
1895 if (hw->mac.type == ixgbe_mac_82599EB) { 1912 if (hw->mac.type == ixgbe_mac_82599EB) {
1896 /* We enable 8 traffic classes, DCB only */ 1913 /* We enable 8 traffic classes, DCB only */
@@ -4432,10 +4449,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
4432 4449
4433 /* 82598 hardware only has a 32 bit counter in the high register */ 4450 /* 82598 hardware only has a 32 bit counter in the high register */
4434 if (hw->mac.type == ixgbe_mac_82599EB) { 4451 if (hw->mac.type == ixgbe_mac_82599EB) {
4452 u64 tmp;
4435 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); 4453 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
4436 IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */ 4454 tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; /* 4 high bits of GORC */
4455 adapter->stats.gorc += (tmp << 32);
4437 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); 4456 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
4438 IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */ 4457 tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; /* 4 high bits of GOTC */
4458 adapter->stats.gotc += (tmp << 32);
4439 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL); 4459 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
4440 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ 4460 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
4441 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 4461 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
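The statistics hunk above stops discarding the high register: per the comment it adds, GORCH/GOTCH carry the top 4 bits of the good-octets counters on 82599, so the full value is 36 bits wide and the fix folds those bits in at bit 32. A standalone illustration of the same assembly follows; the register values are invented.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t gorcl = 0x89abcdefu;	/* low 32 bits of the counter (invented) */
	uint32_t gorch = 0x00000007u;	/* only bits 3:0 carry counter bits */
	uint64_t good_octets;

	/* Same arithmetic as the hunk: mask the high register to 4 bits and
	 * place them at bit 32 of the accumulated 64-bit value. */
	good_octets = ((uint64_t)(gorch & 0xF) << 32) | gorcl;

	printf("36-bit counter value: 0x%09llx\n",
	       (unsigned long long)good_octets);
	return 0;
}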
@@ -5071,7 +5091,6 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
5071 /* Right now, we support IPv4 only */ 5091 /* Right now, we support IPv4 only */
5072 struct ixgbe_atr_input atr_input; 5092 struct ixgbe_atr_input atr_input;
5073 struct tcphdr *th; 5093 struct tcphdr *th;
5074 struct udphdr *uh;
5075 struct iphdr *iph = ip_hdr(skb); 5094 struct iphdr *iph = ip_hdr(skb);
5076 struct ethhdr *eth = (struct ethhdr *)skb->data; 5095 struct ethhdr *eth = (struct ethhdr *)skb->data;
5077 u16 vlan_id, src_port, dst_port, flex_bytes; 5096 u16 vlan_id, src_port, dst_port, flex_bytes;
@@ -5085,12 +5104,6 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
5085 dst_port = th->dest; 5104 dst_port = th->dest;
5086 l4type |= IXGBE_ATR_L4TYPE_TCP; 5105 l4type |= IXGBE_ATR_L4TYPE_TCP;
5087 /* l4type IPv4 type is 0, no need to assign */ 5106 /* l4type IPv4 type is 0, no need to assign */
5088 } else if(iph->protocol == IPPROTO_UDP) {
5089 uh = udp_hdr(skb);
5090 src_port = uh->source;
5091 dst_port = uh->dest;
5092 l4type |= IXGBE_ATR_L4TYPE_UDP;
5093 /* l4type IPv4 type is 0, no need to assign */
5094 } else { 5107 } else {
5095 /* Unsupported L4 header, just bail here */ 5108 /* Unsupported L4 header, just bail here */
5096 return; 5109 return;
@@ -5494,12 +5507,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
5494 goto err_pci_reg; 5507 goto err_pci_reg;
5495 } 5508 }
5496 5509
5497 err = pci_enable_pcie_error_reporting(pdev); 5510 pci_enable_pcie_error_reporting(pdev);
5498 if (err) {
5499 dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
5500 "0x%x\n", err);
5501 /* non-fatal, continue */
5502 }
5503 5511
5504 pci_set_master(pdev); 5512 pci_set_master(pdev);
5505 pci_save_state(pdev); 5513 pci_save_state(pdev);
@@ -5808,7 +5816,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
5808{ 5816{
5809 struct net_device *netdev = pci_get_drvdata(pdev); 5817 struct net_device *netdev = pci_get_drvdata(pdev);
5810 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5818 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5811 int err;
5812 5819
5813 set_bit(__IXGBE_DOWN, &adapter->state); 5820 set_bit(__IXGBE_DOWN, &adapter->state);
5814 /* clear the module not found bit to make sure the worker won't 5821 /* clear the module not found bit to make sure the worker won't
@@ -5859,10 +5866,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
5859 5866
5860 free_netdev(netdev); 5867 free_netdev(netdev);
5861 5868
5862 err = pci_disable_pcie_error_reporting(pdev); 5869 pci_disable_pcie_error_reporting(pdev);
5863 if (err)
5864 dev_err(&pdev->dev,
5865 "pci_disable_pcie_error_reporting failed 0x%x\n", err);
5866 5870
5867 pci_disable_device(pdev); 5871 pci_disable_device(pdev);
5868} 5872}
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 8761d7899f7d..7c93e923bf2e 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1336,6 +1336,8 @@
1336#define IXGBE_AUTOC_KX4_SUPP 0x80000000 1336#define IXGBE_AUTOC_KX4_SUPP 0x80000000
1337#define IXGBE_AUTOC_KX_SUPP 0x40000000 1337#define IXGBE_AUTOC_KX_SUPP 0x40000000
1338#define IXGBE_AUTOC_PAUSE 0x30000000 1338#define IXGBE_AUTOC_PAUSE 0x30000000
1339#define IXGBE_AUTOC_ASM_PAUSE 0x20000000
1340#define IXGBE_AUTOC_SYM_PAUSE 0x10000000
1339#define IXGBE_AUTOC_RF 0x08000000 1341#define IXGBE_AUTOC_RF 0x08000000
1340#define IXGBE_AUTOC_PD_TMR 0x06000000 1342#define IXGBE_AUTOC_PD_TMR 0x06000000
1341#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000 1343#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000
@@ -1404,6 +1406,8 @@
1404#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ 1406#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
1405#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ 1407#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
1406 1408
1409#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040
1410
1407/* PCS1GLSTA Bit Masks */ 1411/* PCS1GLSTA Bit Masks */
1408#define IXGBE_PCS1GLSTA_LINK_OK 1 1412#define IXGBE_PCS1GLSTA_LINK_OK 1
1409#define IXGBE_PCS1GLSTA_SYNK_OK 0x10 1413#define IXGBE_PCS1GLSTA_SYNK_OK 0x10
@@ -1424,6 +1428,11 @@
1424#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000 1428#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000
1425#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000 1429#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000
1426 1430
1431/* ANLP1 Bit Masks */
1432#define IXGBE_ANLP1_PAUSE 0x0C00
1433#define IXGBE_ANLP1_SYM_PAUSE 0x0400
1434#define IXGBE_ANLP1_ASM_PAUSE 0x0800
1435
1427/* SW Semaphore Register bitmasks */ 1436/* SW Semaphore Register bitmasks */
1428#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ 1437#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
1429#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ 1438#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c
new file mode 100644
index 000000000000..0be14d702beb
--- /dev/null
+++ b/drivers/net/ks8851_mll.c
@@ -0,0 +1,1697 @@
1/**
2 * drivers/net/ks8851_mll.c
3 * Copyright (c) 2009 Micrel Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19/**
20 * Supports:
21 * KS8851 16bit MLL chip from Micrel Inc.
22 */
23
24#include <linux/module.h>
25#include <linux/kernel.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/ethtool.h>
29#include <linux/cache.h>
30#include <linux/crc32.h>
31#include <linux/mii.h>
32#include <linux/platform_device.h>
33#include <linux/delay.h>
34
35#define DRV_NAME "ks8851_mll"
36
37static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
38#define MAX_RECV_FRAMES 32
39#define MAX_BUF_SIZE 2048
40#define TX_BUF_SIZE 2000
41#define RX_BUF_SIZE 2000
42
43#define KS_CCR 0x08
44#define CCR_EEPROM (1 << 9)
45#define CCR_SPI (1 << 8)
46#define CCR_8BIT (1 << 7)
47#define CCR_16BIT (1 << 6)
48#define CCR_32BIT (1 << 5)
49#define CCR_SHARED (1 << 4)
50#define CCR_32PIN (1 << 0)
51
52/* MAC address registers */
53#define KS_MARL 0x10
54#define KS_MARM 0x12
55#define KS_MARH 0x14
56
57#define KS_OBCR 0x20
58#define OBCR_ODS_16MA (1 << 6)
59
60#define KS_EEPCR 0x22
61#define EEPCR_EESA (1 << 4)
62#define EEPCR_EESB (1 << 3)
63#define EEPCR_EEDO (1 << 2)
64#define EEPCR_EESCK (1 << 1)
65#define EEPCR_EECS (1 << 0)
66
67#define KS_MBIR 0x24
68#define MBIR_TXMBF (1 << 12)
69#define MBIR_TXMBFA (1 << 11)
70#define MBIR_RXMBF (1 << 4)
71#define MBIR_RXMBFA (1 << 3)
72
73#define KS_GRR 0x26
74#define GRR_QMU (1 << 1)
75#define GRR_GSR (1 << 0)
76
77#define KS_WFCR 0x2A
78#define WFCR_MPRXE (1 << 7)
79#define WFCR_WF3E (1 << 3)
80#define WFCR_WF2E (1 << 2)
81#define WFCR_WF1E (1 << 1)
82#define WFCR_WF0E (1 << 0)
83
84#define KS_WF0CRC0 0x30
85#define KS_WF0CRC1 0x32
86#define KS_WF0BM0 0x34
87#define KS_WF0BM1 0x36
88#define KS_WF0BM2 0x38
89#define KS_WF0BM3 0x3A
90
91#define KS_WF1CRC0 0x40
92#define KS_WF1CRC1 0x42
93#define KS_WF1BM0 0x44
94#define KS_WF1BM1 0x46
95#define KS_WF1BM2 0x48
96#define KS_WF1BM3 0x4A
97
98#define KS_WF2CRC0 0x50
99#define KS_WF2CRC1 0x52
100#define KS_WF2BM0 0x54
101#define KS_WF2BM1 0x56
102#define KS_WF2BM2 0x58
103#define KS_WF2BM3 0x5A
104
105#define KS_WF3CRC0 0x60
106#define KS_WF3CRC1 0x62
107#define KS_WF3BM0 0x64
108#define KS_WF3BM1 0x66
109#define KS_WF3BM2 0x68
110#define KS_WF3BM3 0x6A
111
112#define KS_TXCR 0x70
113#define TXCR_TCGICMP (1 << 8)
114#define TXCR_TCGUDP (1 << 7)
115#define TXCR_TCGTCP (1 << 6)
116#define TXCR_TCGIP (1 << 5)
117#define TXCR_FTXQ (1 << 4)
118#define TXCR_TXFCE (1 << 3)
119#define TXCR_TXPE (1 << 2)
120#define TXCR_TXCRC (1 << 1)
121#define TXCR_TXE (1 << 0)
122
123#define KS_TXSR 0x72
124#define TXSR_TXLC (1 << 13)
125#define TXSR_TXMC (1 << 12)
126#define TXSR_TXFID_MASK (0x3f << 0)
127#define TXSR_TXFID_SHIFT (0)
128#define TXSR_TXFID_GET(_v) (((_v) >> 0) & 0x3f)
129
130
131#define KS_RXCR1 0x74
132#define RXCR1_FRXQ (1 << 15)
133#define RXCR1_RXUDPFCC (1 << 14)
134#define RXCR1_RXTCPFCC (1 << 13)
135#define RXCR1_RXIPFCC (1 << 12)
136#define RXCR1_RXPAFMA (1 << 11)
137#define RXCR1_RXFCE (1 << 10)
138#define RXCR1_RXEFE (1 << 9)
139#define RXCR1_RXMAFMA (1 << 8)
140#define RXCR1_RXBE (1 << 7)
141#define RXCR1_RXME (1 << 6)
142#define RXCR1_RXUE (1 << 5)
143#define RXCR1_RXAE (1 << 4)
144#define RXCR1_RXINVF (1 << 1)
145#define RXCR1_RXE (1 << 0)
146#define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \
147 RXCR1_RXMAFMA | RXCR1_RXPAFMA)
148
149#define KS_RXCR2 0x76
150#define RXCR2_SRDBL_MASK (0x7 << 5)
151#define RXCR2_SRDBL_SHIFT (5)
152#define RXCR2_SRDBL_4B (0x0 << 5)
153#define RXCR2_SRDBL_8B (0x1 << 5)
154#define RXCR2_SRDBL_16B (0x2 << 5)
155#define RXCR2_SRDBL_32B (0x3 << 5)
156/* #define RXCR2_SRDBL_FRAME (0x4 << 5) */
157#define RXCR2_IUFFP (1 << 4)
158#define RXCR2_RXIUFCEZ (1 << 3)
159#define RXCR2_UDPLFE (1 << 2)
160#define RXCR2_RXICMPFCC (1 << 1)
161#define RXCR2_RXSAF (1 << 0)
162
163#define KS_TXMIR 0x78
164
165#define KS_RXFHSR 0x7C
166#define RXFSHR_RXFV (1 << 15)
167#define RXFSHR_RXICMPFCS (1 << 13)
168#define RXFSHR_RXIPFCS (1 << 12)
169#define RXFSHR_RXTCPFCS (1 << 11)
170#define RXFSHR_RXUDPFCS (1 << 10)
171#define RXFSHR_RXBF (1 << 7)
172#define RXFSHR_RXMF (1 << 6)
173#define RXFSHR_RXUF (1 << 5)
174#define RXFSHR_RXMR (1 << 4)
175#define RXFSHR_RXFT (1 << 3)
176#define RXFSHR_RXFTL (1 << 2)
177#define RXFSHR_RXRF (1 << 1)
178#define RXFSHR_RXCE (1 << 0)
179#define RXFSHR_ERR (RXFSHR_RXCE | RXFSHR_RXRF |\
180 RXFSHR_RXFTL | RXFSHR_RXMR |\
181 RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\
182 RXFSHR_RXTCPFCS)
183#define KS_RXFHBCR 0x7E
184#define RXFHBCR_CNT_MASK 0x0FFF
185
186#define KS_TXQCR 0x80
187#define TXQCR_AETFE (1 << 2)
188#define TXQCR_TXQMAM (1 << 1)
189#define TXQCR_METFE (1 << 0)
190
191#define KS_RXQCR 0x82
192#define RXQCR_RXDTTS (1 << 12)
193#define RXQCR_RXDBCTS (1 << 11)
194#define RXQCR_RXFCTS (1 << 10)
195#define RXQCR_RXIPHTOE (1 << 9)
196#define RXQCR_RXDTTE (1 << 7)
197#define RXQCR_RXDBCTE (1 << 6)
198#define RXQCR_RXFCTE (1 << 5)
199#define RXQCR_ADRFE (1 << 4)
200#define RXQCR_SDA (1 << 3)
201#define RXQCR_RRXEF (1 << 0)
202#define RXQCR_CMD_CNTL (RXQCR_RXFCTE|RXQCR_ADRFE)
203
204#define KS_TXFDPR 0x84
205#define TXFDPR_TXFPAI (1 << 14)
206#define TXFDPR_TXFP_MASK (0x7ff << 0)
207#define TXFDPR_TXFP_SHIFT (0)
208
209#define KS_RXFDPR 0x86
210#define RXFDPR_RXFPAI (1 << 14)
211
212#define KS_RXDTTR 0x8C
213#define KS_RXDBCTR 0x8E
214
215#define KS_IER 0x90
216#define KS_ISR 0x92
217#define IRQ_LCI (1 << 15)
218#define IRQ_TXI (1 << 14)
219#define IRQ_RXI (1 << 13)
220#define IRQ_RXOI (1 << 11)
221#define IRQ_TXPSI (1 << 9)
222#define IRQ_RXPSI (1 << 8)
223#define IRQ_TXSAI (1 << 6)
224#define IRQ_RXWFDI (1 << 5)
225#define IRQ_RXMPDI (1 << 4)
226#define IRQ_LDI (1 << 3)
227#define IRQ_EDI (1 << 2)
228#define IRQ_SPIBEI (1 << 1)
229#define IRQ_DEDI (1 << 0)
230
231#define KS_RXFCTR 0x9C
232#define RXFCTR_THRESHOLD_MASK 0x00FF
233
234#define KS_RXFC 0x9D
235#define RXFCTR_RXFC_MASK (0xff << 8)
236#define RXFCTR_RXFC_SHIFT (8)
237#define RXFCTR_RXFC_GET(_v) (((_v) >> 8) & 0xff)
238#define RXFCTR_RXFCT_MASK (0xff << 0)
239#define RXFCTR_RXFCT_SHIFT (0)
240
241#define KS_TXNTFSR 0x9E
242
243#define KS_MAHTR0 0xA0
244#define KS_MAHTR1 0xA2
245#define KS_MAHTR2 0xA4
246#define KS_MAHTR3 0xA6
247
248#define KS_FCLWR 0xB0
249#define KS_FCHWR 0xB2
250#define KS_FCOWR 0xB4
251
252#define KS_CIDER 0xC0
253#define CIDER_ID 0x8870
254#define CIDER_REV_MASK (0x7 << 1)
255#define CIDER_REV_SHIFT (1)
256#define CIDER_REV_GET(_v) (((_v) >> 1) & 0x7)
257
258#define KS_CGCR 0xC6
259#define KS_IACR 0xC8
260#define IACR_RDEN (1 << 12)
261#define IACR_TSEL_MASK (0x3 << 10)
262#define IACR_TSEL_SHIFT (10)
263#define IACR_TSEL_MIB (0x3 << 10)
264#define IACR_ADDR_MASK (0x1f << 0)
265#define IACR_ADDR_SHIFT (0)
266
267#define KS_IADLR 0xD0
268#define KS_IAHDR 0xD2
269
270#define KS_PMECR 0xD4
271#define PMECR_PME_DELAY (1 << 14)
272#define PMECR_PME_POL (1 << 12)
273#define PMECR_WOL_WAKEUP (1 << 11)
274#define PMECR_WOL_MAGICPKT (1 << 10)
275#define PMECR_WOL_LINKUP (1 << 9)
276#define PMECR_WOL_ENERGY (1 << 8)
277#define PMECR_AUTO_WAKE_EN (1 << 7)
278#define PMECR_WAKEUP_NORMAL (1 << 6)
279#define PMECR_WKEVT_MASK (0xf << 2)
280#define PMECR_WKEVT_SHIFT (2)
281#define PMECR_WKEVT_GET(_v) (((_v) >> 2) & 0xf)
282#define PMECR_WKEVT_ENERGY (0x1 << 2)
283#define PMECR_WKEVT_LINK (0x2 << 2)
284#define PMECR_WKEVT_MAGICPKT (0x4 << 2)
285#define PMECR_WKEVT_FRAME (0x8 << 2)
286#define PMECR_PM_MASK (0x3 << 0)
287#define PMECR_PM_SHIFT (0)
288#define PMECR_PM_NORMAL (0x0 << 0)
289#define PMECR_PM_ENERGY (0x1 << 0)
290#define PMECR_PM_SOFTDOWN (0x2 << 0)
291#define PMECR_PM_POWERSAVE (0x3 << 0)
292
293/* Standard MII PHY data */
294#define KS_P1MBCR 0xE4
295#define P1MBCR_FORCE_FDX (1 << 8)
296
297#define KS_P1MBSR 0xE6
298#define P1MBSR_AN_COMPLETE (1 << 5)
299#define P1MBSR_AN_CAPABLE (1 << 3)
300#define P1MBSR_LINK_UP (1 << 2)
301
302#define KS_PHY1ILR 0xE8
303#define KS_PHY1IHR 0xEA
304#define KS_P1ANAR 0xEC
305#define KS_P1ANLPR 0xEE
306
307#define KS_P1SCLMD 0xF4
308#define P1SCLMD_LEDOFF (1 << 15)
309#define P1SCLMD_TXIDS (1 << 14)
310#define P1SCLMD_RESTARTAN (1 << 13)
311#define P1SCLMD_DISAUTOMDIX (1 << 10)
312#define P1SCLMD_FORCEMDIX (1 << 9)
313#define P1SCLMD_AUTONEGEN (1 << 7)
314#define P1SCLMD_FORCE100 (1 << 6)
315#define P1SCLMD_FORCEFDX (1 << 5)
316#define P1SCLMD_ADV_FLOW (1 << 4)
317#define P1SCLMD_ADV_100BT_FDX (1 << 3)
318#define P1SCLMD_ADV_100BT_HDX (1 << 2)
319#define P1SCLMD_ADV_10BT_FDX (1 << 1)
320#define P1SCLMD_ADV_10BT_HDX (1 << 0)
321
322#define KS_P1CR 0xF6
323#define P1CR_HP_MDIX (1 << 15)
324#define P1CR_REV_POL (1 << 13)
325#define P1CR_OP_100M (1 << 10)
326#define P1CR_OP_FDX (1 << 9)
327#define P1CR_OP_MDI (1 << 7)
328#define P1CR_AN_DONE (1 << 6)
329#define P1CR_LINK_GOOD (1 << 5)
330#define P1CR_PNTR_FLOW (1 << 4)
331#define P1CR_PNTR_100BT_FDX (1 << 3)
332#define P1CR_PNTR_100BT_HDX (1 << 2)
333#define P1CR_PNTR_10BT_FDX (1 << 1)
334#define P1CR_PNTR_10BT_HDX (1 << 0)
335
336/* TX Frame control */
337
338#define TXFR_TXIC (1 << 15)
339#define TXFR_TXFID_MASK (0x3f << 0)
340#define TXFR_TXFID_SHIFT (0)
341
342#define KS_P1SR 0xF8
343#define P1SR_HP_MDIX (1 << 15)
344#define P1SR_REV_POL (1 << 13)
345#define P1SR_OP_100M (1 << 10)
346#define P1SR_OP_FDX (1 << 9)
347#define P1SR_OP_MDI (1 << 7)
348#define P1SR_AN_DONE (1 << 6)
349#define P1SR_LINK_GOOD (1 << 5)
350#define P1SR_PNTR_FLOW (1 << 4)
351#define P1SR_PNTR_100BT_FDX (1 << 3)
352#define P1SR_PNTR_100BT_HDX (1 << 2)
353#define P1SR_PNTR_10BT_FDX (1 << 1)
354#define P1SR_PNTR_10BT_HDX (1 << 0)
355
356#define ENUM_BUS_NONE 0
357#define ENUM_BUS_8BIT 1
358#define ENUM_BUS_16BIT 2
359#define ENUM_BUS_32BIT 3
360
361#define MAX_MCAST_LST 32
362#define HW_MCAST_SIZE 8
363#define MAC_ADDR_LEN 6
364
365/**
366 * union ks_tx_hdr - tx header data
367 * @txb: The header as bytes
368 * @txw: The header as 16bit, little-endian words
369 *
370 * A dual representation of the tx header data to allow
371 * access to individual bytes, and to allow 16bit accesses
372 * with 16bit alignment.
373 */
374union ks_tx_hdr {
375 u8 txb[4];
376 __le16 txw[2];
377};
378
379/**
380 * struct ks_net - KS8851 driver private data
381 * @net_device : The network device we're bound to
382 * @hw_addr : start address of data register.
383 * @hw_addr_cmd : start address of command register.
384 * @txh : temporary buffer used to save status/length.
385 * @lock : Lock to ensure that the device is not accessed when busy.
386 * @pdev : Pointer to platform device.
387 * @mii : The MII state information for the mii calls.
388 * @frame_head_info : frame header information for multi-pkt rx.
389 * @statelock : Lock on this structure for tx list.
390 * @msg_enable : The message flags controlling driver output (see ethtool).
391 * @frame_cnt : number of frames received.
392 * @bus_width : i/o bus width.
393 * @irq : irq number assigned to this device.
394 * @rc_rxqcr : Cached copy of KS_RXQCR.
395 * @rc_txcr : Cached copy of KS_TXCR.
396 * @rc_ier : Cached copy of KS_IER.
397 * @sharedbus : Multiplexed (shared address/data bus) mode indicator.
398 * @cmd_reg_cache : Cached copy of the last value written to the command register.
399 * @cmd_reg_cache_int : Cached copy of the command register, saved/restored by the irq handler.
400 * @promiscuous : promiscuous mode indicator.
401 * @all_mcast : all-multicast mode indicator.
402 * @mcast_lst_size : size of multicast list.
403 * @mcast_lst : multicast list.
404 * @mcast_bits : multicast hash-table bits.
405 * @mac_addr : MAC address assigned to this device.
406 * @fid : frame id.
407 * @extra_byte : number of extra (dummy) bytes prepended to each rx packet.
408 * @enabled : indicates whether the device is currently enabled.
409 *
410 * The @lock ensures that the chip is protected when certain operations are
411 * in progress. When the read or write packet transfer is in progress, most
412 * of the chip registers are not accessible until the transfer is finished and
413 * the DMA has been de-asserted.
414 *
415 * The @statelock is used to protect information in the structure which may
416 * need to be accessed via several sources, such as the network driver layer
417 * or one of the work queues.
418 *
419 */
420
421/* Receive multiplex framer header info */
422struct type_frame_head {
423 u16 sts; /* Frame status */
424 u16 len; /* Byte count */
425};
426
427struct ks_net {
428 struct net_device *netdev;
429 void __iomem *hw_addr;
430 void __iomem *hw_addr_cmd;
431 union ks_tx_hdr txh ____cacheline_aligned;
432	struct mutex lock; /* mutex serialising register access */
433 struct platform_device *pdev;
434 struct mii_if_info mii;
435 struct type_frame_head *frame_head_info;
436 spinlock_t statelock;
437 u32 msg_enable;
438 u32 frame_cnt;
439 int bus_width;
440 int irq;
441
442 u16 rc_rxqcr;
443 u16 rc_txcr;
444 u16 rc_ier;
445 u16 sharedbus;
446 u16 cmd_reg_cache;
447 u16 cmd_reg_cache_int;
448 u16 promiscuous;
449 u16 all_mcast;
450 u16 mcast_lst_size;
451 u8 mcast_lst[MAX_MCAST_LST][MAC_ADDR_LEN];
452 u8 mcast_bits[HW_MCAST_SIZE];
453 u8 mac_addr[6];
454 u8 fid;
455 u8 extra_byte;
456 u8 enabled;
457};
458
459static int msg_enable;
460
461#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg)
462#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg)
463#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg)
464#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg)
465
466#define BE3 0x8000 /* Byte Enable 3 */
467#define BE2 0x4000 /* Byte Enable 2 */
468#define BE1 0x2000 /* Byte Enable 1 */
469#define BE0 0x1000 /* Byte Enable 0 */
470
471/**
472 * register read/write calls.
473 *
474 * All these calls issue transactions to access the chip's registers. They
475 * all require that the necessary lock is held to prevent accesses when the
476 * chip is busy transferring packet data (RX/TX FIFO accesses).
477 */
478
479/**
480 * ks_rdreg8 - read 8 bit register from device
481 * @ks : The chip information
482 * @offset: The register address
483 *
484 * Read an 8-bit register from the chip, returning the result
485 */
486static u8 ks_rdreg8(struct ks_net *ks, int offset)
487{
488 u16 data;
489 u8 shift_bit = offset & 0x03;
490 u8 shift_data = (offset & 1) << 3;
491 ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit);
492 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
493 data = ioread16(ks->hw_addr);
494 return (u8)(data >> shift_data);
495}
496
497/**
498 * ks_rdreg16 - read 16 bit register from device
499 * @ks : The chip information
500 * @offset: The register address
501 *
502 * Read a 16-bit register from the chip, returning the result
503 */
504
505static u16 ks_rdreg16(struct ks_net *ks, int offset)
506{
507 ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
508 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
509 return ioread16(ks->hw_addr);
510}
511
512/**
513 * ks_wrreg8 - write 8bit register value to chip
514 * @ks: The chip information
515 * @offset: The register address
516 * @value: The value to write
517 *
518 */
519static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
520{
521 u8 shift_bit = (offset & 0x03);
522 u16 value_write = (u16)(value << ((offset & 1) << 3));
523 ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit);
524 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
525 iowrite16(value_write, ks->hw_addr);
526}
527
528/**
529 * ks_wrreg16 - write 16bit register value to chip
530 * @ks: The chip information
531 * @offset: The register address
532 * @value: The value to write
533 *
534 */
535
536static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
537{
538 ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
539 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
540 iowrite16(value, ks->hw_addr);
541}
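/*
 * Illustrative sketch, not part of this driver: how the four accessors above
 * encode an access into the command word written to hw_addr_cmd. The low
 * bits carry the register offset and BE0..BE3 select the byte lane(s) being
 * driven; the helper name below is made up.
 */
static inline u16 ks_cmd_word_sketch(int offset, int width16)
{
	if (width16)
		/* 16-bit access: even word -> BE1|BE0, odd word -> BE3|BE2 */
		return (u16)offset | ((BE1 | BE0) << (offset & 0x02));
	/* 8-bit access: offset & 3 picks exactly one of BE0..BE3 */
	return (u16)offset | (BE0 << (offset & 0x03));
}
/*
 * For example, an 8-bit read of KS_CCR (0x08) issues command 0x1008 (BE0),
 * while KS_CCR + 1 issues 0x2009 (BE1) and the data comes back in the high
 * byte of the 16-bit read, hence the shift in ks_rdreg8().
 */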
542
543/**
544 * ks_inblk - read a block of data from the QMU. This is called after pseudo-DMA mode is enabled.
545 * @ks: The chip state
546 * @wptr: buffer address to save data
547 * @len: length in byte to read
548 *
549 */
550static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
551{
552 len >>= 1;
553 while (len--)
554 *wptr++ = (u16)ioread16(ks->hw_addr);
555}
556
557/**
558 * ks_outblk - write a block of data to the QMU. This is called after pseudo-DMA mode is enabled.
559 * @ks: The chip information
560 * @wptr: buffer address
561 * @len: length in byte to write
562 *
563 */
564static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
565{
566 len >>= 1;
567 while (len--)
568 iowrite16(*wptr++, ks->hw_addr);
569}
570
571/**
572 * ks_tx_fifo_space - return the available hardware buffer size.
573 * @ks: The chip information
574 *
575 */
576static inline u16 ks_tx_fifo_space(struct ks_net *ks)
577{
578 return ks_rdreg16(ks, KS_TXMIR) & 0x1fff;
579}
580
581/**
582 * ks_save_cmd_reg - save the cached command register for the irq handler.
583 * @ks: The chip information
584 *
585 */
586static inline void ks_save_cmd_reg(struct ks_net *ks)
587{
588	/* The KS8851 MLL cannot reliably read back the command register,
589	 * so rely on software to keep a cached copy of its contents.
590	 */
591 ks->cmd_reg_cache_int = ks->cmd_reg_cache;
592}
593
594/**
595 * ks_restore_cmd_reg - restore the command register from the cache and
596 * write it back to the hardware register.
597 * @ks: The chip information
598 *
599 */
600static inline void ks_restore_cmd_reg(struct ks_net *ks)
601{
602 ks->cmd_reg_cache = ks->cmd_reg_cache_int;
603 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
604}
605
606/**
607 * ks_set_powermode - set power mode of the device
608 * @ks: The chip information
609 * @pwrmode: The power mode value to write to KS_PMECR.
610 *
611 * Change the power mode of the chip.
612 */
613static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode)
614{
615 unsigned pmecr;
616
617 if (netif_msg_hw(ks))
618 ks_dbg(ks, "setting power mode %d\n", pwrmode);
619
620 ks_rdreg16(ks, KS_GRR);
621 pmecr = ks_rdreg16(ks, KS_PMECR);
622 pmecr &= ~PMECR_PM_MASK;
623 pmecr |= pwrmode;
624
625 ks_wrreg16(ks, KS_PMECR, pmecr);
626}
627
628/**
629 * ks_read_config - read the chip's bus-width configuration.
630 * @ks: The chip information
631 *
632 */
633static void ks_read_config(struct ks_net *ks)
634{
635 u16 reg_data = 0;
636
637 /* Regardless of bus width, 8 bit read should always work.*/
638 reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF;
639 reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8;
640
641 /* addr/data bus are multiplexed */
642 ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;
643
644	/* Reads from the QMU are prefixed with garbage (dummy) bytes,
645	 * the number of which depends on the bus width.
646	 */
647
648 if (reg_data & CCR_8BIT) {
649 ks->bus_width = ENUM_BUS_8BIT;
650 ks->extra_byte = 1;
651 } else if (reg_data & CCR_16BIT) {
652 ks->bus_width = ENUM_BUS_16BIT;
653 ks->extra_byte = 2;
654 } else {
655 ks->bus_width = ENUM_BUS_32BIT;
656 ks->extra_byte = 4;
657 }
658}
659
660/**
661 * ks_soft_reset - issue one of the soft resets to the device
662 * @ks: The device state.
663 * @op: The bit(s) to set in the GRR
664 *
665 * Issue the relevant soft-reset command to the device's GRR register
666 * specified by @op.
667 *
668 * Note, the delays are in there as a caution to ensure that the reset
669 * has time to take effect and then complete. Since the datasheet does
670 * not currently specify the exact sequence, we have chosen something
671 * that seems to work with our device.
672 */
673static void ks_soft_reset(struct ks_net *ks, unsigned op)
674{
675 /* Disable interrupt first */
676 ks_wrreg16(ks, KS_IER, 0x0000);
677 ks_wrreg16(ks, KS_GRR, op);
678 mdelay(10); /* wait a short time to effect reset */
679 ks_wrreg16(ks, KS_GRR, 0);
680 mdelay(1); /* wait for condition to clear */
681}
682
683
684/**
685 * ks_read_qmu - read 1 pkt data from the QMU.
686 * @ks: The chip information
687 * @buf: buffer address to save 1 pkt
688 * @len: Pkt length
689 * Here is the sequence to read 1 pkt:
690 * 1. set pseudo-DMA mode
691 * 2. read prepend data
692 * 3. read pkt data
693 * 4. reset pseudo-DMA mode
694 */
695static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
696{
697 u32 r = ks->extra_byte & 0x1 ;
698 u32 w = ks->extra_byte - r;
699
700	/* 1. set pseudo-DMA mode */
701 ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
702 ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
703
704 /* 2. read prepend data */
705 /**
706	 * read (4 + extra) bytes and discard them:
707	 * extra dummy bytes, then 2 status bytes and 2 length bytes
708 */
709
710	/* an 8-bit bus prepends one odd dummy byte; read it separately */
711 if (unlikely(r))
712 ioread8(ks->hw_addr);
713 ks_inblk(ks, buf, w + 2 + 2);
714
715 /* 3. read pkt data */
716 ks_inblk(ks, buf, ALIGN(len, 4));
717
718	/* 4. reset pseudo-DMA mode */
719 ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
720}
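/*
 * Sketch of the per-frame RX FIFO layout implied by ks_read_qmu() above (an
 * inference from the code, not a datasheet quote): ks->extra_byte dummy
 * bytes, then 2 status bytes, 2 byte-count bytes, and finally the frame data
 * including the 4-byte CRC, padded to a 32-bit boundary. The dummy, status
 * and length words are read into @buf first and then overwritten by the
 * frame data.
 */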
721
722/**
723 * ks_rcv - read multiple pkts data from the QMU.
724 * @ks: The chip information
725 * @netdev: The network device being opened.
726 *
727 * Read all of the header information before reading the packet contents.
728 * It is not allowed to fetch only part of the pending packets from the QMU
729 * after issuing the interrupt ack.
730 */
731static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
732{
733 u32 i;
734 struct type_frame_head *frame_hdr = ks->frame_head_info;
735 struct sk_buff *skb;
736
737 ks->frame_cnt = ks_rdreg16(ks, KS_RXFCTR) >> 8;
738
739 /* read all header information */
740 for (i = 0; i < ks->frame_cnt; i++) {
741 /* Checking Received packet status */
742 frame_hdr->sts = ks_rdreg16(ks, KS_RXFHSR);
743 /* Get packet len from hardware */
744 frame_hdr->len = ks_rdreg16(ks, KS_RXFHBCR);
745 frame_hdr++;
746 }
747
748 frame_hdr = ks->frame_head_info;
749 while (ks->frame_cnt--) {
750 skb = dev_alloc_skb(frame_hdr->len + 16);
751 if (likely(skb && (frame_hdr->sts & RXFSHR_RXFV) &&
752 (frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) {
753 skb_reserve(skb, 2);
754 /* read data block including CRC 4 bytes */
755 ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len + 4);
756 skb_put(skb, frame_hdr->len);
757 skb->dev = netdev;
758 skb->protocol = eth_type_trans(skb, netdev);
759 netif_rx(skb);
760 } else {
761			printk(KERN_ERR "%s: dropping frame (bad status or no memory)\n", __func__);
762 ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
763 if (skb)
764 dev_kfree_skb_irq(skb);
765 }
766 frame_hdr++;
767 }
768}
769
770/**
771 * ks_update_link_status - link status update.
772 * @netdev: The network device being opened.
773 * @ks: The chip information
774 *
775 */
776
777static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
778{
779 /* check the status of the link */
780 u32 link_up_status;
781 if (ks_rdreg16(ks, KS_P1SR) & P1SR_LINK_GOOD) {
782 netif_carrier_on(netdev);
783 link_up_status = true;
784 } else {
785 netif_carrier_off(netdev);
786 link_up_status = false;
787 }
788 if (netif_msg_link(ks))
789 ks_dbg(ks, "%s: %s\n",
790 __func__, link_up_status ? "UP" : "DOWN");
791}
792
793/**
794 * ks_irq - device interrupt handler
795 * @irq: The interrupt number being serviced.
796 * @pw: The private word passed to request_irq(), our struct ks_net.
797 *
798 * This is the handler invoked when the chip signals an interrupt.
799 *
800 * Read the interrupt status, work out what needs to be done and then clear
801 * any of the interrupts that are not needed.
802 */
803
804static irqreturn_t ks_irq(int irq, void *pw)
805{
806 struct ks_net *ks = pw;
807 struct net_device *netdev = ks->netdev;
808 u16 status;
809
810	/* this should be the first thing done in the IRQ handler */
811 ks_save_cmd_reg(ks);
812
813 status = ks_rdreg16(ks, KS_ISR);
814 if (unlikely(!status)) {
815 ks_restore_cmd_reg(ks);
816 return IRQ_NONE;
817 }
818
819 ks_wrreg16(ks, KS_ISR, status);
820
821 if (likely(status & IRQ_RXI))
822 ks_rcv(ks, netdev);
823
824 if (unlikely(status & IRQ_LCI))
825 ks_update_link_status(netdev, ks);
826
827 if (unlikely(status & IRQ_TXI))
828 netif_wake_queue(netdev);
829
830 if (unlikely(status & IRQ_LDI)) {
831
832 u16 pmecr = ks_rdreg16(ks, KS_PMECR);
833 pmecr &= ~PMECR_WKEVT_MASK;
834 ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
835 }
836
837	/* this should be the last thing done in the IRQ handler */
838 ks_restore_cmd_reg(ks);
839 return IRQ_HANDLED;
840}
841
842
843/**
844 * ks_net_open - open network device
845 * @netdev: The network device being opened.
846 *
847 * Called when the network device is marked active, such as a user executing
848 * 'ifconfig up' on the device.
849 */
850static int ks_net_open(struct net_device *netdev)
851{
852 struct ks_net *ks = netdev_priv(netdev);
853 int err;
854
855#define KS_INT_FLAGS (IRQF_DISABLED|IRQF_TRIGGER_LOW)
856 /* lock the card, even if we may not actually do anything
857 * else at the moment.
858 */
859
860 if (netif_msg_ifup(ks))
861 ks_dbg(ks, "%s - entry\n", __func__);
862
863	/* install the interrupt handler */
864 err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, ks);
865
866 if (err) {
867 printk(KERN_ERR "Failed to request IRQ: %d: %d\n",
868 ks->irq, err);
869 return err;
870 }
871
872 if (netif_msg_ifup(ks))
873 ks_dbg(ks, "network device %s up\n", netdev->name);
874
875 return 0;
876}
877
878/**
879 * ks_net_stop - close network device
880 * @netdev: The device being closed.
881 *
882 * Called to close down a network device which has been active. Cancel any
883 * work, shutdown the RX and TX process and then place the chip into a low
884 * power state whilst it is not being used.
885 */
886static int ks_net_stop(struct net_device *netdev)
887{
888 struct ks_net *ks = netdev_priv(netdev);
889
890 if (netif_msg_ifdown(ks))
891 ks_info(ks, "%s: shutting down\n", netdev->name);
892
893 netif_stop_queue(netdev);
894
895 kfree(ks->frame_head_info);
896
897 mutex_lock(&ks->lock);
898
899 /* turn off the IRQs and ack any outstanding */
900 ks_wrreg16(ks, KS_IER, 0x0000);
901 ks_wrreg16(ks, KS_ISR, 0xffff);
902
903 /* shutdown RX process */
904 ks_wrreg16(ks, KS_RXCR1, 0x0000);
905
906 /* shutdown TX process */
907 ks_wrreg16(ks, KS_TXCR, 0x0000);
908
909 /* set powermode to soft power down to save power */
910 ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
911 free_irq(ks->irq, netdev);
912 mutex_unlock(&ks->lock);
913 return 0;
914}
915
916
917/**
918 * ks_write_qmu - write 1 pkt data to the QMU.
919 * @ks: The chip information
920 * @pdata: buffer address to save 1 pkt
921 * @len: Pkt length in byte
922 * Here is the sequence to write 1 pkt:
923 * 1. set pseudo-DMA mode
924 * 2. write status/length
925 * 3. write pkt data
926 * 4. reset pseudo-DMA mode
927 * 5. enqueue the frame (set TXQCR_METFE)
928 * 6. wait until the frame has been sent out of the TX buffer
929 */
930static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
931{
932 unsigned fid = ks->fid;
933
934 fid = ks->fid;
935 ks->fid = (ks->fid + 1) & TXFR_TXFID_MASK;
936
937	/* reduce the number of tx interrupt occurrences */
938 if (!fid)
939 fid |= TXFR_TXIC; /* irq on completion */
940
941 /* start header at txb[0] to align txw entries */
942 ks->txh.txw[0] = cpu_to_le16(fid);
943 ks->txh.txw[1] = cpu_to_le16(len);
944
945	/* 1. set pseudo-DMA mode */
946	ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
947	/* 2. write status/length info */
948	ks_outblk(ks, ks->txh.txw, 4);
949	/* 3. write pkt data */
950	ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
951	/* 4. reset pseudo-DMA mode */
952	ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
953	/* 5. Enqueue Tx (move the pkt from the TX buffer into the TXQ) */
954 ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
955 /* 6. wait until TXQCR_METFE is auto-cleared */
956 while (ks_rdreg16(ks, KS_TXQCR) & TXQCR_METFE)
957 ;
958}
959
960static void ks_disable_int(struct ks_net *ks)
961{
962 ks_wrreg16(ks, KS_IER, 0x0000);
963} /* ks_disable_int */
964
965static void ks_enable_int(struct ks_net *ks)
966{
967 ks_wrreg16(ks, KS_IER, ks->rc_ier);
968} /* ks_enable_int */
969
970/**
971 * ks_start_xmit - transmit packet
972 * @skb : The buffer to transmit
973 * @netdev : The device used to transmit the packet.
974 *
975 * Called by the network layer to transmit the @skb.
976 * TX and RX must be mutually exclusive, so the device interrupt is disabled
977 * and the state lock held while a transmit is in progress.
978 */
979static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
980{
981 int retv = NETDEV_TX_OK;
982 struct ks_net *ks = netdev_priv(netdev);
983
984 disable_irq(netdev->irq);
985 ks_disable_int(ks);
986 spin_lock(&ks->statelock);
987
988	/* Extra space is required:
989	 * 4 bytes for alignment, 4 for status/length, 4 for CRC
990 */
991
992 if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
993 ks_write_qmu(ks, skb->data, skb->len);
994 dev_kfree_skb(skb);
995 } else
996 retv = NETDEV_TX_BUSY;
997 spin_unlock(&ks->statelock);
998 ks_enable_int(ks);
999 enable_irq(netdev->irq);
1000 return retv;
1001}
1002
1003/**
1004 * ks_start_rx - ready to serve pkts
1005 * @ks : The chip information
1006 *
1007 */
1008static void ks_start_rx(struct ks_net *ks)
1009{
1010 u16 cntl;
1011
1012 /* Enables QMU Receive (RXCR1). */
1013 cntl = ks_rdreg16(ks, KS_RXCR1);
1014 cntl |= RXCR1_RXE ;
1015 ks_wrreg16(ks, KS_RXCR1, cntl);
1016} /* ks_start_rx */
1017
1018/**
1019 * ks_stop_rx - disable packet reception.
1020 * @ks : The chip information
1021 *
1022 */
1023static void ks_stop_rx(struct ks_net *ks)
1024{
1025 u16 cntl;
1026
1027 /* Disables QMU Receive (RXCR1). */
1028 cntl = ks_rdreg16(ks, KS_RXCR1);
1029 cntl &= ~RXCR1_RXE ;
1030 ks_wrreg16(ks, KS_RXCR1, cntl);
1031
1032} /* ks_stop_rx */
1033
1034static unsigned long const ethernet_polynomial = 0x04c11db7U;
1035
1036static unsigned long ether_gen_crc(int length, u8 *data)
1037{
1038 long crc = -1;
1039 while (--length >= 0) {
1040 u8 current_octet = *data++;
1041 int bit;
1042
1043 for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
1044 crc = (crc << 1) ^
1045 ((crc < 0) ^ (current_octet & 1) ?
1046 ethernet_polynomial : 0);
1047 }
1048 }
1049 return (unsigned long)crc;
1050} /* ether_gen_crc */
1051
1052/**
1053* ks_set_grpaddr - set multicast information
1054* @ks : The chip information
1055*/
1056
1057static void ks_set_grpaddr(struct ks_net *ks)
1058{
1059 u8 i;
1060 u32 index, position, value;
1061
1062 memset(ks->mcast_bits, 0, sizeof(u8) * HW_MCAST_SIZE);
1063
1064 for (i = 0; i < ks->mcast_lst_size; i++) {
1065 position = (ether_gen_crc(6, ks->mcast_lst[i]) >> 26) & 0x3f;
1066 index = position >> 3;
1067 value = 1 << (position & 7);
1068 ks->mcast_bits[index] |= (u8)value;
1069 }
1070
1071 for (i = 0; i < HW_MCAST_SIZE; i++) {
1072 if (i & 1) {
1073 ks_wrreg16(ks, (u16)((KS_MAHTR0 + i) & ~1),
1074 (ks->mcast_bits[i] << 8) |
1075 ks->mcast_bits[i - 1]);
1076 }
1077 }
1078} /* ks_set_grpaddr */
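/*
 * Illustrative sketch, not part of this driver: the mapping performed inline
 * by ks_set_grpaddr() above, pulled out into a helper (the name is made up).
 * The top 6 bits of the CRC select one of 64 hash-table positions; each pair
 * of bytes in @mcast_bits is later written to one of MAHTR0..MAHTR3.
 */
static inline void ks_hash_mcast_addr(u8 *mcast_bits, const u8 *addr)
{
	u32 position = (ether_gen_crc(6, (u8 *)addr) >> 26) & 0x3f;

	mcast_bits[position >> 3] |= 1 << (position & 7);
}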
1079
1080/*
1081* ks_clear_mcast - clear multicast information
1082*
1083* @ks : The chip information
1084* This routine removes all mcast addresses set in the hardware.
1085*/
1086
1087static void ks_clear_mcast(struct ks_net *ks)
1088{
1089 u16 i, mcast_size;
1090 for (i = 0; i < HW_MCAST_SIZE; i++)
1091 ks->mcast_bits[i] = 0;
1092
1093	mcast_size = HW_MCAST_SIZE >> 1;
1094 for (i = 0; i < mcast_size; i++)
1095 ks_wrreg16(ks, KS_MAHTR0 + (2*i), 0);
1096}
1097
1098static void ks_set_promis(struct ks_net *ks, u16 promiscuous_mode)
1099{
1100 u16 cntl;
1101 ks->promiscuous = promiscuous_mode;
1102 ks_stop_rx(ks); /* Stop receiving for reconfiguration */
1103 cntl = ks_rdreg16(ks, KS_RXCR1);
1104
1105 cntl &= ~RXCR1_FILTER_MASK;
1106 if (promiscuous_mode)
1107 /* Enable Promiscuous mode */
1108 cntl |= RXCR1_RXAE | RXCR1_RXINVF;
1109 else
1110 /* Disable Promiscuous mode (default normal mode) */
1111 cntl |= RXCR1_RXPAFMA;
1112
1113 ks_wrreg16(ks, KS_RXCR1, cntl);
1114
1115 if (ks->enabled)
1116 ks_start_rx(ks);
1117
1118} /* ks_set_promis */
1119
1120static void ks_set_mcast(struct ks_net *ks, u16 mcast)
1121{
1122 u16 cntl;
1123
1124 ks->all_mcast = mcast;
1125 ks_stop_rx(ks); /* Stop receiving for reconfiguration */
1126 cntl = ks_rdreg16(ks, KS_RXCR1);
1127 cntl &= ~RXCR1_FILTER_MASK;
1128 if (mcast)
1129 /* Enable "Perfect with Multicast address passed mode" */
1130 cntl |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
1131 else
1132 /**
1133 * Disable "Perfect with Multicast address passed
1134 * mode" (normal mode).
1135 */
1136 cntl |= RXCR1_RXPAFMA;
1137
1138 ks_wrreg16(ks, KS_RXCR1, cntl);
1139
1140 if (ks->enabled)
1141 ks_start_rx(ks);
1142} /* ks_set_mcast */
1143
1144static void ks_set_rx_mode(struct net_device *netdev)
1145{
1146 struct ks_net *ks = netdev_priv(netdev);
1147 struct dev_mc_list *ptr;
1148
1149 /* Turn on/off promiscuous mode. */
1150 if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC)
1151 ks_set_promis(ks,
1152 (u16)((netdev->flags & IFF_PROMISC) == IFF_PROMISC));
1153 /* Turn on/off all mcast mode. */
1154 else if ((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI)
1155 ks_set_mcast(ks,
1156 (u16)((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI));
1157 else
1158 ks_set_promis(ks, false);
1159
1160 if ((netdev->flags & IFF_MULTICAST) && netdev->mc_count) {
1161 if (netdev->mc_count <= MAX_MCAST_LST) {
1162 int i = 0;
1163 for (ptr = netdev->mc_list; ptr; ptr = ptr->next) {
1164 if (!(*ptr->dmi_addr & 1))
1165 continue;
1166 if (i >= MAX_MCAST_LST)
1167 break;
1168 memcpy(ks->mcast_lst[i++], ptr->dmi_addr,
1169 MAC_ADDR_LEN);
1170 }
1171 ks->mcast_lst_size = (u8)i;
1172 ks_set_grpaddr(ks);
1173 } else {
1174 /**
1175 * List too big to support so
1176 * turn on all mcast mode.
1177 */
1178 ks->mcast_lst_size = MAX_MCAST_LST;
1179 ks_set_mcast(ks, true);
1180 }
1181 } else {
1182 ks->mcast_lst_size = 0;
1183 ks_clear_mcast(ks);
1184 }
1185} /* ks_set_rx_mode */
1186
1187static void ks_set_mac(struct ks_net *ks, u8 *data)
1188{
1189 u16 *pw = (u16 *)data;
1190 u16 w, u;
1191
1192 ks_stop_rx(ks); /* Stop receiving for reconfiguration */
1193
1194 u = *pw++;
1195 w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
1196 ks_wrreg16(ks, KS_MARH, w);
1197
1198 u = *pw++;
1199 w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
1200 ks_wrreg16(ks, KS_MARM, w);
1201
1202 u = *pw;
1203 w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
1204 ks_wrreg16(ks, KS_MARL, w);
1205
1206 memcpy(ks->mac_addr, data, 6);
1207
1208 if (ks->enabled)
1209 ks_start_rx(ks);
1210}
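/*
 * Worked example for ks_set_mac() above, assuming a little-endian host (the
 * u16 loads and byte swaps only make sense together on such a host): with
 * KS_DEFAULT_MAC_ADDRESS 00:10:A1:86:95:11 the writes end up as
 *	KS_MARH = 0x0010, KS_MARM = 0xA186, KS_MARL = 0x9511
 * i.e. MARH holds octets 0-1, MARM octets 2-3 and MARL octets 4-5, MSB first.
 */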
1211
1212static int ks_set_mac_address(struct net_device *netdev, void *paddr)
1213{
1214 struct ks_net *ks = netdev_priv(netdev);
1215 struct sockaddr *addr = paddr;
1216 u8 *da;
1217
1218 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1219
1220 da = (u8 *)netdev->dev_addr;
1221
1222 ks_set_mac(ks, da);
1223 return 0;
1224}
1225
1226static int ks_net_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
1227{
1228 struct ks_net *ks = netdev_priv(netdev);
1229
1230 if (!netif_running(netdev))
1231 return -EINVAL;
1232
1233 return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
1234}
1235
1236static const struct net_device_ops ks_netdev_ops = {
1237 .ndo_open = ks_net_open,
1238 .ndo_stop = ks_net_stop,
1239 .ndo_do_ioctl = ks_net_ioctl,
1240 .ndo_start_xmit = ks_start_xmit,
1241 .ndo_set_mac_address = ks_set_mac_address,
1242 .ndo_set_rx_mode = ks_set_rx_mode,
1243 .ndo_change_mtu = eth_change_mtu,
1244 .ndo_validate_addr = eth_validate_addr,
1245};
1246
1247/* ethtool support */
1248
1249static void ks_get_drvinfo(struct net_device *netdev,
1250 struct ethtool_drvinfo *di)
1251{
1252 strlcpy(di->driver, DRV_NAME, sizeof(di->driver));
1253 strlcpy(di->version, "1.00", sizeof(di->version));
1254 strlcpy(di->bus_info, dev_name(netdev->dev.parent),
1255 sizeof(di->bus_info));
1256}
1257
1258static u32 ks_get_msglevel(struct net_device *netdev)
1259{
1260 struct ks_net *ks = netdev_priv(netdev);
1261 return ks->msg_enable;
1262}
1263
1264static void ks_set_msglevel(struct net_device *netdev, u32 to)
1265{
1266 struct ks_net *ks = netdev_priv(netdev);
1267 ks->msg_enable = to;
1268}
1269
1270static int ks_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1271{
1272 struct ks_net *ks = netdev_priv(netdev);
1273 return mii_ethtool_gset(&ks->mii, cmd);
1274}
1275
1276static int ks_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1277{
1278 struct ks_net *ks = netdev_priv(netdev);
1279 return mii_ethtool_sset(&ks->mii, cmd);
1280}
1281
1282static u32 ks_get_link(struct net_device *netdev)
1283{
1284 struct ks_net *ks = netdev_priv(netdev);
1285 return mii_link_ok(&ks->mii);
1286}
1287
1288static int ks_nway_reset(struct net_device *netdev)
1289{
1290 struct ks_net *ks = netdev_priv(netdev);
1291 return mii_nway_restart(&ks->mii);
1292}
1293
1294static const struct ethtool_ops ks_ethtool_ops = {
1295 .get_drvinfo = ks_get_drvinfo,
1296 .get_msglevel = ks_get_msglevel,
1297 .set_msglevel = ks_set_msglevel,
1298 .get_settings = ks_get_settings,
1299 .set_settings = ks_set_settings,
1300 .get_link = ks_get_link,
1301 .nway_reset = ks_nway_reset,
1302};
1303
1304/* MII interface controls */
1305
1306/**
1307 * ks_phy_reg - convert MII register into a KS8851 register
1308 * @reg: MII register number.
1309 *
1310 * Return the KS8851 register number for the corresponding MII PHY register
1311 * if possible. Return zero if the MII register has no direct mapping to the
1312 * KS8851 register set.
1313 */
1314static int ks_phy_reg(int reg)
1315{
1316 switch (reg) {
1317 case MII_BMCR:
1318 return KS_P1MBCR;
1319 case MII_BMSR:
1320 return KS_P1MBSR;
1321 case MII_PHYSID1:
1322 return KS_PHY1ILR;
1323 case MII_PHYSID2:
1324 return KS_PHY1IHR;
1325 case MII_ADVERTISE:
1326 return KS_P1ANAR;
1327 case MII_LPA:
1328 return KS_P1ANLPR;
1329 }
1330
1331 return 0x0;
1332}
1333
1334/**
1335 * ks_phy_read - MII interface PHY register read.
1336 * @netdev: The network device the PHY is on.
1337 * @phy_addr: Address of PHY (ignored as we only have one)
1338 * @reg: The register to read.
1339 *
1340 * This call reads data from the PHY register specified in @reg. Since the
1341 * device does not support all the MII registers, the non-existent values
1342 * are always returned as zero.
1343 *
1344 * We return zero for unsupported registers as the MII code does not check
1345 * the value returned for any error status, and simply returns it to the
1346 * caller. The mii-tool that the driver was tested with treats any negative
1347 * error as real PHY capabilities, thus displaying incorrect data to the user.
1348 */
1349static int ks_phy_read(struct net_device *netdev, int phy_addr, int reg)
1350{
1351 struct ks_net *ks = netdev_priv(netdev);
1352 int ksreg;
1353 int result;
1354
1355 ksreg = ks_phy_reg(reg);
1356 if (!ksreg)
1357 return 0x0; /* no error return allowed, so use zero */
1358
1359 mutex_lock(&ks->lock);
1360 result = ks_rdreg16(ks, ksreg);
1361 mutex_unlock(&ks->lock);
1362
1363 return result;
1364}
1365
1366static void ks_phy_write(struct net_device *netdev,
1367 int phy, int reg, int value)
1368{
1369 struct ks_net *ks = netdev_priv(netdev);
1370 int ksreg;
1371
1372 ksreg = ks_phy_reg(reg);
1373 if (ksreg) {
1374 mutex_lock(&ks->lock);
1375 ks_wrreg16(ks, ksreg, value);
1376 mutex_unlock(&ks->lock);
1377 }
1378}
1379
1380/**
1381 * ks_read_selftest - read the selftest memory info.
1382 * @ks: The device state
1383 *
1384 * Read and check the TX/RX memory selftest information.
1385 */
1386static int ks_read_selftest(struct ks_net *ks)
1387{
1388 unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
1389 int ret = 0;
1390 unsigned rd;
1391
1392 rd = ks_rdreg16(ks, KS_MBIR);
1393
1394 if ((rd & both_done) != both_done) {
1395 ks_warn(ks, "Memory selftest not finished\n");
1396 return 0;
1397 }
1398
1399 if (rd & MBIR_TXMBFA) {
1400 ks_err(ks, "TX memory selftest fails\n");
1401 ret |= 1;
1402 }
1403
1404 if (rd & MBIR_RXMBFA) {
1405 ks_err(ks, "RX memory selftest fails\n");
1406 ret |= 2;
1407 }
1408
1409	ks_info(ks, "memory selftest done\n");
1410 return ret;
1411}
1412
1413static void ks_disable(struct ks_net *ks)
1414{
1415 u16 w;
1416
1417 w = ks_rdreg16(ks, KS_TXCR);
1418
1419 /* Disables QMU Transmit (TXCR). */
1420 w &= ~TXCR_TXE;
1421 ks_wrreg16(ks, KS_TXCR, w);
1422
1423 /* Disables QMU Receive (RXCR1). */
1424 w = ks_rdreg16(ks, KS_RXCR1);
1425 w &= ~RXCR1_RXE ;
1426 ks_wrreg16(ks, KS_RXCR1, w);
1427
1428 ks->enabled = false;
1429
1430} /* ks_disable */
1431
1432static void ks_setup(struct ks_net *ks)
1433{
1434 u16 w;
1435
1436 /**
1437 * Configure QMU Transmit
1438 */
1439
1440 /* Setup Transmit Frame Data Pointer Auto-Increment (TXFDPR) */
1441 ks_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI);
1442
1443 /* Setup Receive Frame Data Pointer Auto-Increment */
1444 ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
1445
1446 /* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */
1447 ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK);
1448
1449 /* Setup RxQ Command Control (RXQCR) */
1450 ks->rc_rxqcr = RXQCR_CMD_CNTL;
1451 ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
1452
1453 /**
1454	 * set the forced mode to half duplex; the default is full duplex,
1455	 * but if auto-negotiation fails most switches fall back to
1456	 * half-duplex.
1457 */
1458
1459 w = ks_rdreg16(ks, KS_P1MBCR);
1460 w &= ~P1MBCR_FORCE_FDX;
1461 ks_wrreg16(ks, KS_P1MBCR, w);
1462
1463 w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
1464 ks_wrreg16(ks, KS_TXCR, w);
1465
1466 w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE;
1467
1468 if (ks->promiscuous) /* bPromiscuous */
1469 w |= (RXCR1_RXAE | RXCR1_RXINVF);
1470 else if (ks->all_mcast) /* Multicast address passed mode */
1471 w |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
1472 else /* Normal mode */
1473 w |= RXCR1_RXPAFMA;
1474
1475 ks_wrreg16(ks, KS_RXCR1, w);
1476} /*ks_setup */
1477
1478
1479static void ks_setup_int(struct ks_net *ks)
1480{
1481 ks->rc_ier = 0x00;
1482 /* Clear the interrupts status of the hardware. */
1483 ks_wrreg16(ks, KS_ISR, 0xffff);
1484
1485 /* Enables the interrupts of the hardware. */
1486 ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI);
1487} /* ks_setup_int */
1488
1489static void ks_enable(struct ks_net *ks)
1490{
1491 u16 w;
1492
1493 w = ks_rdreg16(ks, KS_TXCR);
1494 /* Enables QMU Transmit (TXCR). */
1495 ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);
1496
1497 /*
1498 * RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame
1499 * Enable
1500 */
1501
1502 w = ks_rdreg16(ks, KS_RXQCR);
1503 ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);
1504
1505 /* Enables QMU Receive (RXCR1). */
1506 w = ks_rdreg16(ks, KS_RXCR1);
1507 ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
1508 ks->enabled = true;
1509} /* ks_enable */
1510
1511static int ks_hw_init(struct ks_net *ks)
1512{
1513#define MHEADER_SIZE (sizeof(struct type_frame_head) * MAX_RECV_FRAMES)
1514 ks->promiscuous = 0;
1515 ks->all_mcast = 0;
1516 ks->mcast_lst_size = 0;
1517
1518 ks->frame_head_info = (struct type_frame_head *) \
1519 kmalloc(MHEADER_SIZE, GFP_KERNEL);
1520 if (!ks->frame_head_info) {
1521		printk(KERN_ERR "Error: Failed to allocate frame memory\n");
1522 return false;
1523 }
1524
1525 ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS);
1526 return true;
1527}
1528
1529
1530static int __devinit ks8851_probe(struct platform_device *pdev)
1531{
1532 int err = -ENOMEM;
1533 struct resource *io_d, *io_c;
1534 struct net_device *netdev;
1535 struct ks_net *ks;
1536 u16 id, data;
1537
1538 io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1539 io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1540
1541 if (!request_mem_region(io_d->start, resource_size(io_d), DRV_NAME))
1542 goto err_mem_region;
1543
1544 if (!request_mem_region(io_c->start, resource_size(io_c), DRV_NAME))
1545 goto err_mem_region1;
1546
1547 netdev = alloc_etherdev(sizeof(struct ks_net));
1548 if (!netdev)
1549 goto err_alloc_etherdev;
1550
1551 SET_NETDEV_DEV(netdev, &pdev->dev);
1552
1553 ks = netdev_priv(netdev);
1554 ks->netdev = netdev;
1555 ks->hw_addr = ioremap(io_d->start, resource_size(io_d));
1556
1557 if (!ks->hw_addr)
1558 goto err_ioremap;
1559
1560 ks->hw_addr_cmd = ioremap(io_c->start, resource_size(io_c));
1561 if (!ks->hw_addr_cmd)
1562 goto err_ioremap1;
1563
1564 ks->irq = platform_get_irq(pdev, 0);
1565
1566 if (ks->irq < 0) {
1567 err = ks->irq;
1568 goto err_get_irq;
1569 }
1570
1571 ks->pdev = pdev;
1572
1573 mutex_init(&ks->lock);
1574 spin_lock_init(&ks->statelock);
1575
1576 netdev->netdev_ops = &ks_netdev_ops;
1577 netdev->ethtool_ops = &ks_ethtool_ops;
1578
1579 /* setup mii state */
1580 ks->mii.dev = netdev;
1581 ks->mii.phy_id = 1,
1582 ks->mii.phy_id_mask = 1;
1583 ks->mii.reg_num_mask = 0xf;
1584 ks->mii.mdio_read = ks_phy_read;
1585 ks->mii.mdio_write = ks_phy_write;
1586
1587 ks_info(ks, "message enable is %d\n", msg_enable);
1588 /* set the default message enable */
1589 ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
1590 NETIF_MSG_PROBE |
1591 NETIF_MSG_LINK));
1592 ks_read_config(ks);
1593
1594 /* simple check for a valid chip being connected to the bus */
1595 if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
1596 ks_err(ks, "failed to read device ID\n");
1597 err = -ENODEV;
1598 goto err_register;
1599 }
1600
1601 if (ks_read_selftest(ks)) {
1602		ks_err(ks, "failed memory selftest\n");
1603 err = -ENODEV;
1604 goto err_register;
1605 }
1606
1607 err = register_netdev(netdev);
1608 if (err)
1609 goto err_register;
1610
1611 platform_set_drvdata(pdev, netdev);
1612
1613 ks_soft_reset(ks, GRR_GSR);
1614 ks_hw_init(ks);
1615 ks_disable(ks);
1616 ks_setup(ks);
1617 ks_setup_int(ks);
1618 ks_enable_int(ks);
1619 ks_enable(ks);
1620 memcpy(netdev->dev_addr, ks->mac_addr, 6);
1621
1622 data = ks_rdreg16(ks, KS_OBCR);
1623 ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);
1624
1625 /**
1626 * If you want to use the default MAC addr,
1627 * comment out the 2 functions below.
1628 */
1629
1630 random_ether_addr(netdev->dev_addr);
1631 ks_set_mac(ks, netdev->dev_addr);
1632
1633 id = ks_rdreg16(ks, KS_CIDER);
1634
1635 printk(KERN_INFO DRV_NAME
1636 " Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
1637 (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
1638 return 0;
1639
1640err_register:
1641err_get_irq:
1642 iounmap(ks->hw_addr_cmd);
1643err_ioremap1:
1644 iounmap(ks->hw_addr);
1645err_ioremap:
1646 free_netdev(netdev);
1647err_alloc_etherdev:
1648 release_mem_region(io_c->start, resource_size(io_c));
1649err_mem_region1:
1650 release_mem_region(io_d->start, resource_size(io_d));
1651err_mem_region:
1652 return err;
1653}
1654
1655static int __devexit ks8851_remove(struct platform_device *pdev)
1656{
1657 struct net_device *netdev = platform_get_drvdata(pdev);
1658 struct ks_net *ks = netdev_priv(netdev);
1659 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1660
1661 unregister_netdev(netdev);
1662 iounmap(ks->hw_addr);
1663 free_netdev(netdev);
1664 release_mem_region(iomem->start, resource_size(iomem));
1665 platform_set_drvdata(pdev, NULL);
1666 return 0;
1667
1668}
1669
1670static struct platform_driver ks8851_platform_driver = {
1671 .driver = {
1672 .name = DRV_NAME,
1673 .owner = THIS_MODULE,
1674 },
1675 .probe = ks8851_probe,
1676 .remove = __devexit_p(ks8851_remove),
1677};
1678
1679static int __init ks8851_init(void)
1680{
1681 return platform_driver_register(&ks8851_platform_driver);
1682}
1683
1684static void __exit ks8851_exit(void)
1685{
1686 platform_driver_unregister(&ks8851_platform_driver);
1687}
1688
1689module_init(ks8851_init);
1690module_exit(ks8851_exit);
1691
1692MODULE_DESCRIPTION("KS8851 MLL Network driver");
1693MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
1694MODULE_LICENSE("GPL");
1695module_param_named(message, msg_enable, int, 0);
1696MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
1697
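/*
 * Usage sketch, not part of this commit: a board file registering the
 * platform device that ks8851_probe() expects -- memory resource 0 for the
 * data register window, resource 1 for the command register window, and one
 * interrupt. The base addresses and IRQ number below are hypothetical;
 * substitute your board's values. The "message" module parameter (0..31)
 * sets the initial netif_msg verbosity, e.g. "modprobe ks8851_mll message=7".
 */
static struct resource ks8851_mll_resources[] = {
	[0] = {
		.start	= 0x30000000,	/* hypothetical data register address */
		.end	= 0x30000001,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 0x30000002,	/* hypothetical command register address */
		.end	= 0x30000003,
		.flags	= IORESOURCE_MEM,
	},
	[2] = {
		.start	= 16,		/* hypothetical interrupt line */
		.end	= 16,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device ks8851_mll_device = {
	.name		= "ks8851_mll",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(ks8851_mll_resources),
	.resource	= ks8851_mll_resources,
};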
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 92ceb689b4d4..2af81735386b 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -828,7 +828,7 @@ static int __exit meth_remove(struct platform_device *pdev)
828 828
829static struct platform_driver meth_driver = { 829static struct platform_driver meth_driver = {
830 .probe = meth_probe, 830 .probe = meth_probe,
831 .remove = __devexit_p(meth_remove), 831 .remove = __exit_p(meth_remove),
832 .driver = { 832 .driver = {
833 .name = "meth", 833 .name = "meth",
834 .owner = THIS_MODULE, 834 .owner = THIS_MODULE,
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index a9845a2f243f..30d5585beeee 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -1381,15 +1381,15 @@ struct intr_context {
1381 1381
1382/* adapter flags definitions. */ 1382/* adapter flags definitions. */
1383enum { 1383enum {
1384 QL_ADAPTER_UP = (1 << 0), /* Adapter has been brought up. */ 1384 QL_ADAPTER_UP = 0, /* Adapter has been brought up. */
1385 QL_LEGACY_ENABLED = (1 << 3), 1385 QL_LEGACY_ENABLED = 1,
1386 QL_MSI_ENABLED = (1 << 3), 1386 QL_MSI_ENABLED = 2,
1387 QL_MSIX_ENABLED = (1 << 4), 1387 QL_MSIX_ENABLED = 3,
1388 QL_DMA64 = (1 << 5), 1388 QL_DMA64 = 4,
1389 QL_PROMISCUOUS = (1 << 6), 1389 QL_PROMISCUOUS = 5,
1390 QL_ALLMULTI = (1 << 7), 1390 QL_ALLMULTI = 6,
1391 QL_PORT_CFG = (1 << 8), 1391 QL_PORT_CFG = 7,
1392 QL_CAM_RT_SET = (1 << 9), 1392 QL_CAM_RT_SET = 8,
1393}; 1393};
1394 1394
1395/* link_status bit definitions */ 1395/* link_status bit definitions */
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 7783c5db81dc..3d0efea32111 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -3142,14 +3142,14 @@ static int ql_route_initialize(struct ql_adapter *qdev)
3142{ 3142{
3143 int status = 0; 3143 int status = 0;
3144 3144
3145 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); 3145 /* Clear all the entries in the routing table. */
3146 status = ql_clear_routing_entries(qdev);
3146 if (status) 3147 if (status)
3147 return status; 3148 return status;
3148 3149
3149 /* Clear all the entries in the routing table. */ 3150 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3150 status = ql_clear_routing_entries(qdev);
3151 if (status) 3151 if (status)
3152 goto exit; 3152 return status;
3153 3153
3154 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1); 3154 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
3155 if (status) { 3155 if (status) {
@@ -3380,12 +3380,10 @@ static int ql_adapter_down(struct ql_adapter *qdev)
3380 3380
3381 ql_free_rx_buffers(qdev); 3381 ql_free_rx_buffers(qdev);
3382 3382
3383 spin_lock(&qdev->hw_lock);
3384 status = ql_adapter_reset(qdev); 3383 status = ql_adapter_reset(qdev);
3385 if (status) 3384 if (status)
3386 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n", 3385 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3387 qdev->func); 3386 qdev->func);
3388 spin_unlock(&qdev->hw_lock);
3389 return status; 3387 return status;
3390} 3388}
3391 3389
@@ -3705,7 +3703,7 @@ static void ql_asic_reset_work(struct work_struct *work)
3705 struct ql_adapter *qdev = 3703 struct ql_adapter *qdev =
3706 container_of(work, struct ql_adapter, asic_reset_work.work); 3704 container_of(work, struct ql_adapter, asic_reset_work.work);
3707 int status; 3705 int status;
3708 3706 rtnl_lock();
3709 status = ql_adapter_down(qdev); 3707 status = ql_adapter_down(qdev);
3710 if (status) 3708 if (status)
3711 goto error; 3709 goto error;
@@ -3713,12 +3711,12 @@ static void ql_asic_reset_work(struct work_struct *work)
3713 status = ql_adapter_up(qdev); 3711 status = ql_adapter_up(qdev);
3714 if (status) 3712 if (status)
3715 goto error; 3713 goto error;
3716 3714 rtnl_unlock();
3717 return; 3715 return;
3718error: 3716error:
3719 QPRINTK(qdev, IFUP, ALERT, 3717 QPRINTK(qdev, IFUP, ALERT,
3720 "Driver up/down cycle failed, closing device\n"); 3718 "Driver up/down cycle failed, closing device\n");
3721 rtnl_lock(); 3719
3722 set_bit(QL_ADAPTER_UP, &qdev->flags); 3720 set_bit(QL_ADAPTER_UP, &qdev->flags);
3723 dev_close(qdev->ndev); 3721 dev_close(qdev->ndev);
3724 rtnl_unlock(); 3722 rtnl_unlock();
@@ -3834,11 +3832,14 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3834 return err; 3832 return err;
3835 } 3833 }
3836 3834
3835 qdev->ndev = ndev;
3836 qdev->pdev = pdev;
3837 pci_set_drvdata(pdev, ndev);
3837 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 3838 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3838 if (pos <= 0) { 3839 if (pos <= 0) {
3839 dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, " 3840 dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
3840 "aborting.\n"); 3841 "aborting.\n");
3841 goto err_out; 3842 return pos;
3842 } else { 3843 } else {
3843 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16); 3844 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
3844 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN; 3845 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
@@ -3851,7 +3852,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3851 err = pci_request_regions(pdev, DRV_NAME); 3852 err = pci_request_regions(pdev, DRV_NAME);
3852 if (err) { 3853 if (err) {
3853 dev_err(&pdev->dev, "PCI region request failed.\n"); 3854 dev_err(&pdev->dev, "PCI region request failed.\n");
3854 goto err_out; 3855 return err;
3855 } 3856 }
3856 3857
3857 pci_set_master(pdev); 3858 pci_set_master(pdev);
@@ -3869,7 +3870,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3869 goto err_out; 3870 goto err_out;
3870 } 3871 }
3871 3872
3872 pci_set_drvdata(pdev, ndev);
3873 qdev->reg_base = 3873 qdev->reg_base =
3874 ioremap_nocache(pci_resource_start(pdev, 1), 3874 ioremap_nocache(pci_resource_start(pdev, 1),
3875 pci_resource_len(pdev, 1)); 3875 pci_resource_len(pdev, 1));
@@ -3889,8 +3889,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3889 goto err_out; 3889 goto err_out;
3890 } 3890 }
3891 3891
3892 qdev->ndev = ndev;
3893 qdev->pdev = pdev;
3894 err = ql_get_board_info(qdev); 3892 err = ql_get_board_info(qdev);
3895 if (err) { 3893 if (err) {
3896 dev_err(&pdev->dev, "Register access failed.\n"); 3894 dev_err(&pdev->dev, "Register access failed.\n");
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index ecf3279fbef5..f4dfd1f679a9 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -826,7 +826,7 @@ static int __exit sgiseeq_remove(struct platform_device *pdev)
826 826
827static struct platform_driver sgiseeq_driver = { 827static struct platform_driver sgiseeq_driver = {
828 .probe = sgiseeq_probe, 828 .probe = sgiseeq_probe,
829 .remove = __devexit_p(sgiseeq_remove), 829 .remove = __exit_p(sgiseeq_remove),
830 .driver = { 830 .driver = {
831 .name = "sgiseeq", 831 .name = "sgiseeq",
832 .owner = THIS_MODULE, 832 .owner = THIS_MODULE,
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 55bad4081966..01f6811f1324 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3935,11 +3935,14 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3935#endif 3935#endif
3936 3936
3937 err = -ENOMEM; 3937 err = -ENOMEM;
3938 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 3938 /* space for skge@pci:0000:04:00.0 */
3939 hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:" )
3940 + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
3939 if (!hw) { 3941 if (!hw) {
3940 dev_err(&pdev->dev, "cannot allocate hardware struct\n"); 3942 dev_err(&pdev->dev, "cannot allocate hardware struct\n");
3941 goto err_out_free_regions; 3943 goto err_out_free_regions;
3942 } 3944 }
3945 sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
3943 3946
3944 hw->pdev = pdev; 3947 hw->pdev = pdev;
3945 spin_lock_init(&hw->hw_lock); 3948 spin_lock_init(&hw->hw_lock);
@@ -3974,7 +3977,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3974 goto err_out_free_netdev; 3977 goto err_out_free_netdev;
3975 } 3978 }
3976 3979
3977 err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, dev->name, hw); 3980 err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, hw->irq_name, hw);
3978 if (err) { 3981 if (err) {
3979 dev_err(&pdev->dev, "%s: cannot assign irq %d\n", 3982 dev_err(&pdev->dev, "%s: cannot assign irq %d\n",
3980 dev->name, pdev->irq); 3983 dev->name, pdev->irq);
@@ -3982,14 +3985,17 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3982 } 3985 }
3983 skge_show_addr(dev); 3986 skge_show_addr(dev);
3984 3987
3985 if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) { 3988 if (hw->ports > 1) {
3986 if (register_netdev(dev1) == 0) 3989 dev1 = skge_devinit(hw, 1, using_dac);
3990 if (dev1 && register_netdev(dev1) == 0)
3987 skge_show_addr(dev1); 3991 skge_show_addr(dev1);
3988 else { 3992 else {
3989 /* Failure to register second port need not be fatal */ 3993 /* Failure to register second port need not be fatal */
3990 dev_warn(&pdev->dev, "register of second port failed\n"); 3994 dev_warn(&pdev->dev, "register of second port failed\n");
3991 hw->dev[1] = NULL; 3995 hw->dev[1] = NULL;
3992 free_netdev(dev1); 3996 hw->ports = 1;
3997 if (dev1)
3998 free_netdev(dev1);
3993 } 3999 }
3994 } 4000 }
3995 pci_set_drvdata(pdev, hw); 4001 pci_set_drvdata(pdev, hw);
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 17caccbb7685..831de1b6e96e 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2423,6 +2423,8 @@ struct skge_hw {
2423 u16 phy_addr; 2423 u16 phy_addr;
2424 spinlock_t phy_lock; 2424 spinlock_t phy_lock;
2425 struct tasklet_struct phy_task; 2425 struct tasklet_struct phy_task;
2426
2427 char irq_name[0]; /* skge@pci:000:04:00.0 */
2426}; 2428};
2427 2429
2428enum pause_control { 2430enum pause_control {
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index ef1165718dd7..2ab5c39f33ca 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -4487,13 +4487,16 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
4487 wol_default = device_may_wakeup(&pdev->dev) ? WAKE_MAGIC : 0; 4487 wol_default = device_may_wakeup(&pdev->dev) ? WAKE_MAGIC : 0;
4488 4488
4489 err = -ENOMEM; 4489 err = -ENOMEM;
4490 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 4490
4491 hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
4492 + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
4491 if (!hw) { 4493 if (!hw) {
4492 dev_err(&pdev->dev, "cannot allocate hardware struct\n"); 4494 dev_err(&pdev->dev, "cannot allocate hardware struct\n");
4493 goto err_out_free_regions; 4495 goto err_out_free_regions;
4494 } 4496 }
4495 4497
4496 hw->pdev = pdev; 4498 hw->pdev = pdev;
4499 sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
4497 4500
4498 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); 4501 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
4499 if (!hw->regs) { 4502 if (!hw->regs) {
@@ -4539,7 +4542,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
4539 4542
4540 err = request_irq(pdev->irq, sky2_intr, 4543 err = request_irq(pdev->irq, sky2_intr,
4541 (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED, 4544 (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED,
4542 dev->name, hw); 4545 hw->irq_name, hw);
4543 if (err) { 4546 if (err) {
4544 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); 4547 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
4545 goto err_out_unregister; 4548 goto err_out_unregister;
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index e0f23a101043..ed54129698b4 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2085,6 +2085,8 @@ struct sky2_hw {
2085 struct timer_list watchdog_timer; 2085 struct timer_list watchdog_timer;
2086 struct work_struct restart_work; 2086 struct work_struct restart_work;
2087 wait_queue_head_t msi_wait; 2087 wait_queue_head_t msi_wait;
2088
2089 char irq_name[0];
2088}; 2090};
2089 2091
2090static inline int sky2_is_copper(const struct sky2_hw *hw) 2092static inline int sky2_is_copper(const struct sky2_hw *hw)
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 82b45d8797b4..524691cd9896 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2412,7 +2412,6 @@ struct ring_info {
2412 2412
2413struct tx_ring_info { 2413struct tx_ring_info {
2414 struct sk_buff *skb; 2414 struct sk_buff *skb;
2415 u32 prev_vlan_tag;
2416}; 2415};
2417 2416
2418struct tg3_config_info { 2417struct tg3_config_info {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d445845f2779..8d009760277c 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -948,7 +948,7 @@ free:
948 return err; 948 return err;
949} 949}
950 950
951static void virtnet_remove(struct virtio_device *vdev) 951static void __devexit virtnet_remove(struct virtio_device *vdev)
952{ 952{
953 struct virtnet_info *vi = vdev->priv; 953 struct virtnet_info *vi = vdev->priv;
954 struct sk_buff *skb; 954 struct sk_buff *skb;
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 49ea9c92b7e6..d7a764a2fc1a 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -31,13 +31,12 @@ config STRIP
 	---help---
 	  Say Y if you have a Metricom radio and intend to use Starmode Radio
 	  IP. STRIP is a radio protocol developed for the MosquitoNet project
-	  (on the WWW at <http://mosquitonet.stanford.edu/>) to send Internet
-	  traffic using Metricom radios. Metricom radios are small, battery
-	  powered, 100kbit/sec packet radio transceivers, about the size and
-	  weight of a cellular telephone. (You may also have heard them called
-	  "Metricom modems" but we avoid the term "modem" because it misleads
-	  many people into thinking that you can plug a Metricom modem into a
-	  phone line and use it as a modem.)
+	  to send Internet traffic using Metricom radios. Metricom radios are
+	  small, battery powered, 100kbit/sec packet radio transceivers, about
+	  the size and weight of a cellular telephone. (You may also have heard
+	  them called "Metricom modems" but we avoid the term "modem" because
+	  it misleads many people into thinking that you can plug a Metricom
+	  modem into a phone line and use it as a modem.)
 
 	  You can use STRIP on any Linux machine with a serial port, although
 	  it is obviously most useful for people with laptop computers. If you
diff --git a/drivers/net/wireless/ath/ar9170/phy.c b/drivers/net/wireless/ath/ar9170/phy.c
index b3e5cf3735b0..dbd488da18b1 100644
--- a/drivers/net/wireless/ath/ar9170/phy.c
+++ b/drivers/net/wireless/ath/ar9170/phy.c
@@ -1141,7 +1141,8 @@ static int ar9170_set_freq_cal_data(struct ar9170 *ar,
 	u8 vpds[2][AR5416_PD_GAIN_ICEPTS];
 	u8 pwrs[2][AR5416_PD_GAIN_ICEPTS];
 	int chain, idx, i;
-	u8 f;
+	u32 phy_data = 0;
+	u8 f, tmp;
 
 	switch (channel->band) {
 	case IEEE80211_BAND_2GHZ:
@@ -1208,9 +1209,6 @@ static int ar9170_set_freq_cal_data(struct ar9170 *ar,
 	}
 
 	for (i = 0; i < 76; i++) {
-		u32 phy_data;
-		u8 tmp;
-
 		if (i < 25) {
 			tmp = ar9170_interpolate_val(i, &pwrs[0][0],
 						     &vpds[0][0]);
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index e96091b31499..9c1397996e0a 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -340,10 +340,15 @@ static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
 			q->mmio_base + B43_PIO_TXDATA,
 			sizeof(u16));
 	if (data_len & 1) {
+		u8 tail[2] = { 0, };
+
 		/* Write the last byte. */
 		ctl &= ~B43_PIO_TXCTL_WRITEHI;
 		b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
-		b43_piotx_write16(q, B43_PIO_TXDATA, data[data_len - 1]);
+		tail[0] = data[data_len - 1];
+		ssb_block_write(dev->dev, tail, 2,
+				q->mmio_base + B43_PIO_TXDATA,
+				sizeof(u16));
 	}
 
 	return ctl;
@@ -386,26 +391,31 @@ static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
 			q->mmio_base + B43_PIO8_TXDATA,
 			sizeof(u32));
 	if (data_len & 3) {
-		u32 value = 0;
+		u8 tail[4] = { 0, };
 
 		/* Write the last few bytes. */
 		ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 |
 			 B43_PIO8_TXCTL_24_31);
-		data = &(data[data_len - 1]);
 		switch (data_len & 3) {
 		case 3:
-			ctl |= B43_PIO8_TXCTL_16_23;
-			value |= (u32)(*data) << 16;
-			data--;
+			ctl |= B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_8_15;
+			tail[0] = data[data_len - 3];
+			tail[1] = data[data_len - 2];
+			tail[2] = data[data_len - 1];
+			break;
 		case 2:
 			ctl |= B43_PIO8_TXCTL_8_15;
-			value |= (u32)(*data) << 8;
-			data--;
+			tail[0] = data[data_len - 2];
+			tail[1] = data[data_len - 1];
+			break;
 		case 1:
-			value |= (u32)(*data);
+			tail[0] = data[data_len - 1];
+			break;
 		}
 		b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
-		b43_piotx_write32(q, B43_PIO8_TXDATA, value);
+		ssb_block_write(dev->dev, tail, 4,
+				q->mmio_base + B43_PIO8_TXDATA,
+				sizeof(u32));
 	}
 
 	return ctl;
@@ -693,21 +703,25 @@ data_ready:
 			       q->mmio_base + B43_PIO8_RXDATA,
 			       sizeof(u32));
 		if (len & 3) {
-			u32 value;
-			char *data;
+			u8 tail[4] = { 0, };
 
 			/* Read the last few bytes. */
-			value = b43_piorx_read32(q, B43_PIO8_RXDATA);
-			data = &(skb->data[len + padding - 1]);
+			ssb_block_read(dev->dev, tail, 4,
+				       q->mmio_base + B43_PIO8_RXDATA,
+				       sizeof(u32));
 			switch (len & 3) {
 			case 3:
-				*data = (value >> 16);
-				data--;
+				skb->data[len + padding - 3] = tail[0];
+				skb->data[len + padding - 2] = tail[1];
+				skb->data[len + padding - 1] = tail[2];
+				break;
 			case 2:
-				*data = (value >> 8);
-				data--;
+				skb->data[len + padding - 2] = tail[0];
+				skb->data[len + padding - 1] = tail[1];
+				break;
 			case 1:
-				*data = value;
+				skb->data[len + padding - 1] = tail[0];
+				break;
 			}
 		}
 	} else {
@@ -715,11 +729,13 @@ data_ready:
 			       q->mmio_base + B43_PIO_RXDATA,
 			       sizeof(u16));
 		if (len & 1) {
-			u16 value;
+			u8 tail[2] = { 0, };
 
 			/* Read the last byte. */
-			value = b43_piorx_read16(q, B43_PIO_RXDATA);
-			skb->data[len + padding - 1] = value;
+			ssb_block_read(dev->dev, tail, 2,
+				       q->mmio_base + B43_PIO_RXDATA,
+				       sizeof(u16));
+			skb->data[len + padding - 1] = tail[0];
 		}
 	}
 
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 896f532182f0..38cfd79e0590 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -631,6 +631,9 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
 		data->beacon_int = 1024 * info->beacon_int / 1000 * HZ / 1000;
 		if (WARN_ON(!data->beacon_int))
 			data->beacon_int = 1;
+		if (data->started)
+			mod_timer(&data->beacon_timer,
+				  jiffies + data->beacon_int);
 	}
 
 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 1cbd9b4a3efc..b8f5ee33445e 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2381,6 +2381,7 @@ static struct usb_device_id rt73usb_device_table[] = {
 	/* Huawei-3Com */
 	{ USB_DEVICE(0x1472, 0x0009), USB_DEVICE_DATA(&rt73usb_ops) },
 	/* Hercules */
+	{ USB_DEVICE(0x06f8, 0xe002), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x06f8, 0xe010), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x06f8, 0xe020), USB_DEVICE_DATA(&rt73usb_ops) },
 	/* Linksys */
diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
index ac8577358ba0..ee1601026fb0 100644
--- a/drivers/staging/dst/dcore.c
+++ b/drivers/staging/dst/dcore.c
@@ -847,7 +847,7 @@ static dst_command_func dst_commands[] = {
 /*
  * Configuration parser.
  */
-static void cn_dst_callback(struct cn_msg *msg)
+static void cn_dst_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
 {
 	struct dst_ctl *ctl;
 	int err;
@@ -855,6 +855,11 @@ static void cn_dst_callback(struct cn_msg *msg)
 	struct dst_node *n = NULL, *tmp;
 	unsigned int hash;
 
+	if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) {
+		err = -EPERM;
+		goto out;
+	}
+
 	if (msg->len < sizeof(struct dst_ctl)) {
 		err = -EBADMSG;
 		goto out;
diff --git a/drivers/staging/pohmelfs/config.c b/drivers/staging/pohmelfs/config.c
index 90f962ee5fd8..5d04bf5b021a 100644
--- a/drivers/staging/pohmelfs/config.c
+++ b/drivers/staging/pohmelfs/config.c
@@ -527,10 +527,13 @@ out_unlock:
 	return err;
 }
 
-static void pohmelfs_cn_callback(struct cn_msg *msg)
+static void pohmelfs_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
 {
 	int err;
 
+	if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
+		return;
+
 	switch (msg->flags) {
 	case POHMELFS_FLAGS_ADD:
 	case POHMELFS_FLAGS_DEL:
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index e98baf6916b8..e35232a18571 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -67,11 +67,14 @@ static DEFINE_MUTEX(uvfb_lock);
  * find the kernel part of the task struct, copy the registers and
  * the buffer contents and then complete the task.
  */
-static void uvesafb_cn_callback(struct cn_msg *msg)
+static void uvesafb_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
 {
 	struct uvesafb_task *utask;
 	struct uvesafb_ktask *task;
 
+	if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
+		return;
+
 	if (msg->seq >= UVESAFB_TASKS_MAX)
 		return;
 
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index 52ccb3d3a963..45c126fea31d 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -306,7 +306,7 @@ static int w1_netlink_send_error(struct cn_msg *rcmsg, struct w1_netlink_msg *rm
 	return error;
 }
 
-static void w1_cn_callback(struct cn_msg *msg)
+static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
 {
 	struct w1_netlink_msg *m = (struct w1_netlink_msg *)(msg + 1);
 	struct w1_netlink_cmd *cmd;