author		Linus Torvalds <torvalds@linux-foundation.org>	2010-10-27 21:28:00 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-10-27 21:28:00 -0400
commit		22cdbd1d5789cc16c37102eb6f62c3ae377b849e (patch)
tree		f86d3d798351c4bde69afbfa80e940aad01abaad
parent		55f335a8857db2ee22c068e7ab7141fc79928296 (diff)
parent		ce45b873028fdf94a24f0850cd554e6fda593e16 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (108 commits)
  ehea: Fixing statistics
  bonding: Fix lockdep warning after bond_vlan_rx_register()
  tunnels: Fix tunnels change rcu protection
  caif-u5500: Build config for CAIF shared mem driver
  caif-u5500: CAIF shared memory mailbox interface
  caif-u5500: CAIF shared memory transport protocol
  caif-u5500: Adding shared memory include
  drivers/isdn: delete double assignment
  drivers/net/typhoon.c: delete double assignment
  drivers/net/sb1000.c: delete double assignment
  qlcnic: define valid vlan id range
  qlcnic: reduce rx ring size
  qlcnic: fix mac learning
  ehea: fix use after free
  inetpeer: __rcu annotations
  fib_rules: __rcu annotates ctarget
  tunnels: add __rcu annotations
  net: add __rcu annotations to protocol
  ipv4: add __rcu annotations to routes.c
  qlge: bugfix: Restoring the vlan setting.
  ...
-rw-r--r--  Documentation/networking/phy.txt | 18
-rw-r--r--  drivers/atm/eni.c | 7
-rw-r--r--  drivers/connector/cn_queue.c | 75
-rw-r--r--  drivers/connector/connector.c | 9
-rw-r--r--  drivers/isdn/hardware/mISDN/mISDNinfineon.c | 2
-rw-r--r--  drivers/isdn/hisax/l3_1tr6.c | 6
-rw-r--r--  drivers/net/atl1c/atl1c.h | 2
-rw-r--r--  drivers/net/atl1c/atl1c_main.c | 6
-rw-r--r--  drivers/net/atlx/atl1.c | 12
-rw-r--r--  drivers/net/atlx/atl1.h | 9
-rw-r--r--  drivers/net/atlx/atlx.c | 4
-rw-r--r--  drivers/net/benet/be_cmds.c | 36
-rw-r--r--  drivers/net/benet/be_cmds.h | 2
-rw-r--r--  drivers/net/benet/be_main.c | 49
-rw-r--r--  drivers/net/bnx2x/bnx2x.h | 5
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.c | 3
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.h | 55
-rw-r--r--  drivers/net/bnx2x/bnx2x_init_ops.h | 34
-rw-r--r--  drivers/net/bnx2x/bnx2x_link.c | 137
-rw-r--r--  drivers/net/bnx2x/bnx2x_link.h | 15
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c | 55
-rw-r--r--  drivers/net/bonding/bond_main.c | 4
-rw-r--r--  drivers/net/caif/Kconfig | 7
-rw-r--r--  drivers/net/caif/Makefile | 4
-rw-r--r--  drivers/net/caif/caif_shm_u5500.c | 129
-rw-r--r--  drivers/net/caif/caif_shmcore.c | 744
-rw-r--r--  drivers/net/can/Kconfig | 8
-rw-r--r--  drivers/net/can/Makefile | 1
-rw-r--r--  drivers/net/can/at91_can.c | 95
-rw-r--r--  drivers/net/can/flexcan.c | 3
-rw-r--r--  drivers/net/can/mcp251x.c | 3
-rw-r--r--  drivers/net/can/pch_can.c | 1463
-rw-r--r--  drivers/net/can/sja1000/Kconfig | 12
-rw-r--r--  drivers/net/can/sja1000/Makefile | 1
-rw-r--r--  drivers/net/can/sja1000/tscan1.c | 216
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c | 8
-rw-r--r--  drivers/net/cxgb4/cxgb4.h | 1
-rw-r--r--  drivers/net/cxgb4/cxgb4_main.c | 33
-rw-r--r--  drivers/net/cxgb4/sge.c | 23
-rw-r--r--  drivers/net/e1000/e1000_main.c | 2
-rw-r--r--  drivers/net/ehea/ehea.h | 2
-rw-r--r--  drivers/net/ehea/ehea_main.c | 42
-rw-r--r--  drivers/net/gianfar.c | 6
-rw-r--r--  drivers/net/jme.c | 45
-rw-r--r--  drivers/net/macb.c | 27
-rw-r--r--  drivers/net/mlx4/icm.c | 28
-rw-r--r--  drivers/net/mlx4/icm.h | 2
-rw-r--r--  drivers/net/mlx4/port.c | 11
-rw-r--r--  drivers/net/phy/phy.c | 13
-rw-r--r--  drivers/net/phy/phy_device.c | 19
-rw-r--r--  drivers/net/qlcnic/qlcnic.h | 7
-rw-r--r--  drivers/net/qlcnic/qlcnic_ethtool.c | 23
-rw-r--r--  drivers/net/qlcnic/qlcnic_main.c | 19
-rw-r--r--  drivers/net/qlge/qlge.h | 12
-rw-r--r--  drivers/net/qlge/qlge_main.c | 24
-rw-r--r--  drivers/net/qlge/qlge_mpi.c | 6
-rw-r--r--  drivers/net/sb1000.c | 6
-rw-r--r--  drivers/net/sgiseeq.c | 2
-rw-r--r--  drivers/net/slhc.c | 15
-rw-r--r--  drivers/net/tg3.c | 10
-rw-r--r--  drivers/net/tokenring/tms380tr.c | 2
-rw-r--r--  drivers/net/typhoon.c | 92
-rw-r--r--  drivers/net/vmxnet3/upt1_defs.h | 8
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_defs.h | 6
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 22
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethtool.c | 14
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 19
-rw-r--r--  drivers/net/vxge/vxge-config.c | 332
-rw-r--r--  drivers/net/vxge/vxge-config.h | 227
-rw-r--r--  drivers/net/vxge/vxge-ethtool.c | 2
-rw-r--r--  drivers/net/vxge/vxge-main.c | 64
-rw-r--r--  drivers/net/vxge/vxge-main.h | 59
-rw-r--r--  drivers/net/vxge/vxge-traffic.c | 101
-rw-r--r--  drivers/net/vxge/vxge-traffic.h | 134
-rw-r--r--  drivers/net/wireless/ath/ath5k/base.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h | 191
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_paprd.c | 14
-rw-r--r--  drivers/net/wireless/ath/ath9k/beacon.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 7
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 8
-rw-r--r--  drivers/net/wireless/ath/carl9170/cmd.h | 51
-rw-r--r--  drivers/net/wireless/ath/carl9170/main.c | 2
-rw-r--r--  drivers/net/wireless/ath/carl9170/usb.c | 25
-rw-r--r--  drivers/net/wireless/b43/phy_n.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-tx.c | 3
-rw-r--r--  drivers/net/wireless/wl1251/Makefile | 8
-rw-r--r--  include/linux/connector.h | 8
-rw-r--r--  include/linux/netdevice.h | 18
-rw-r--r--  include/linux/phy.h | 12
-rw-r--r--  include/net/caif/caif_shm.h | 26
-rw-r--r--  include/net/dst.h | 2
-rw-r--r--  include/net/fib_rules.h | 2
-rw-r--r--  include/net/garp.h | 2
-rw-r--r--  include/net/inetpeer.h | 2
-rw-r--r--  include/net/ip.h | 4
-rw-r--r--  include/net/ip6_tunnel.h | 2
-rw-r--r--  include/net/ipip.h | 6
-rw-r--r--  include/net/net_namespace.h | 2
-rw-r--r--  include/net/protocol.h | 4
-rw-r--r--  include/net/sock.h | 2
-rw-r--r--  include/net/xfrm.h | 4
-rw-r--r--  net/802/garp.c | 18
-rw-r--r--  net/802/stp.c | 4
-rw-r--r--  net/8021q/vlan.c | 6
-rw-r--r--  net/core/dev.c | 38
-rw-r--r--  net/core/fib_rules.c | 21
-rw-r--r--  net/core/filter.c | 4
-rw-r--r--  net/core/net-sysfs.c | 20
-rw-r--r--  net/core/net_namespace.c | 4
-rw-r--r--  net/core/pktgen.c | 30
-rw-r--r--  net/core/sock.c | 2
-rw-r--r--  net/core/sysctl_net_core.c | 3
-rw-r--r--  net/ipv4/fib_hash.c | 36
-rw-r--r--  net/ipv4/gre.c | 5
-rw-r--r--  net/ipv4/inetpeer.c | 138
-rw-r--r--  net/ipv4/ip_gre.c | 1
-rw-r--r--  net/ipv4/ip_sockglue.c | 10
-rw-r--r--  net/ipv4/ipip.c | 1
-rw-r--r--  net/ipv4/protocol.c | 8
-rw-r--r--  net/ipv4/route.c | 75
-rw-r--r--  net/ipv4/tunnel4.c | 29
-rw-r--r--  net/ipv4/udp.c | 2
-rw-r--r--  net/ipv6/addrconf.c | 16
-rw-r--r--  net/ipv6/ip6_tunnel.c | 2
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 4
-rw-r--r--  net/ipv6/netfilter/Kconfig | 5
-rw-r--r--  net/ipv6/netfilter/Makefile | 5
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c | 5
-rw-r--r--  net/ipv6/protocol.c | 8
-rw-r--r--  net/ipv6/raw.c | 2
-rw-r--r--  net/ipv6/sit.c | 1
-rw-r--r--  net/ipv6/tunnel6.c | 24
-rw-r--r--  net/ipv6/udp.c | 2
-rw-r--r--  net/l2tp/l2tp_core.c | 53
-rw-r--r--  net/l2tp/l2tp_core.h | 33
-rw-r--r--  net/l2tp/l2tp_ip.c | 2
-rw-r--r--  net/mac80211/ibss.c | 1
-rw-r--r--  net/mac80211/main.c | 8
-rw-r--r--  net/mac80211/rate.c | 3
-rw-r--r--  net/netfilter/Kconfig | 2
-rw-r--r--  net/netfilter/xt_TPROXY.c | 10
-rw-r--r--  net/netfilter/xt_socket.c | 12
-rw-r--r--  net/netlink/af_netlink.c | 65
145 files changed, 4008 insertions, 1822 deletions
diff --git a/Documentation/networking/phy.txt b/Documentation/networking/phy.txt
index 88bb71b46da4..9eb1ba52013d 100644
--- a/Documentation/networking/phy.txt
+++ b/Documentation/networking/phy.txt
@@ -177,18 +177,6 @@ Doing it all yourself
 
 A convenience function to print out the PHY status neatly.
 
-int phy_clear_interrupt(struct phy_device *phydev);
-int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
-
-Clear the PHY's interrupt, and configure which ones are allowed,
-respectively. Currently only supports all on, or all off.
-
-int phy_enable_interrupts(struct phy_device *phydev);
-int phy_disable_interrupts(struct phy_device *phydev);
-
-Functions which enable/disable PHY interrupts, clearing them
-before and after, respectively.
-
 int phy_start_interrupts(struct phy_device *phydev);
 int phy_stop_interrupts(struct phy_device *phydev);
 
@@ -213,12 +201,6 @@ Doing it all yourself
 Fills the phydev structure with up-to-date information about the current
 settings in the PHY.
 
-void phy_sanitize_settings(struct phy_device *phydev)
-
-Resolves differences between currently desired settings, and
-supported settings for the given PHY device. Does not make
-the changes in the hardware, though.
-
 int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
 int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
 
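For orientation, a minimal sketch of how a driver might use the interrupt
helpers that remain documented above (phy_start_interrupts and
phy_stop_interrupts). The names mydrv_open, mydrv_close and mydrv_priv are
hypothetical illustrations, not part of this patch:

	static int mydrv_open(struct net_device *ndev)
	{
		struct mydrv_priv *priv = netdev_priv(ndev);	/* hypothetical priv */
		int err;

		/* begin servicing PHY interrupts, then start the PHY state machine */
		err = phy_start_interrupts(priv->phydev);
		if (err)
			return err;
		phy_start(priv->phydev);
		return 0;
	}

	static int mydrv_close(struct net_device *ndev)
	{
		struct mydrv_priv *priv = netdev_priv(ndev);

		phy_stop(priv->phydev);
		return phy_stop_interrupts(priv->phydev);
	}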
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 80f9f3659e4d..97c5898cd76e 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1736,9 +1736,10 @@ static int __devinit eni_do_init(struct atm_dev *dev)
 	eprom = (base+EPROM_SIZE-sizeof(struct midway_eprom));
 	if (readl(&eprom->magic) != ENI155_MAGIC) {
 		printk("\n");
-		printk(KERN_ERR KERN_ERR DEV_LABEL "(itf %d): bad "
-		    "magic - expected 0x%x, got 0x%x\n",dev->number,
-		    ENI155_MAGIC,(unsigned) readl(&eprom->magic));
+		printk(KERN_ERR DEV_LABEL
+		       "(itf %d): bad magic - expected 0x%x, got 0x%x\n",
+		       dev->number, ENI155_MAGIC,
+		       (unsigned)readl(&eprom->magic));
 		error = -EINVAL;
 		goto unmap;
 	}
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index 210338ea222f..81270d221e5a 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -31,48 +31,6 @@
 #include <linux/connector.h>
 #include <linux/delay.h>
 
-
-/*
- * This job is sent to the kevent workqueue.
- * While no event is once sent to any callback, the connector workqueue
- * is not created to avoid a useless waiting kernel task.
- * Once the first event is received, we create this dedicated workqueue which
- * is necessary because the flow of data can be high and we don't want
- * to encumber keventd with that.
- */
-static void cn_queue_create(struct work_struct *work)
-{
-	struct cn_queue_dev *dev;
-
-	dev = container_of(work, struct cn_queue_dev, wq_creation);
-
-	dev->cn_queue = create_singlethread_workqueue(dev->name);
-	/* If we fail, we will use keventd for all following connector jobs */
-	WARN_ON(!dev->cn_queue);
-}
-
-/*
- * Queue a data sent to a callback.
- * If the connector workqueue is already created, we queue the job on it.
- * Otherwise, we queue the job to kevent and queue the connector workqueue
- * creation too.
- */
-int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work)
-{
-	struct cn_queue_dev *pdev = cbq->pdev;
-
-	if (likely(pdev->cn_queue))
-		return queue_work(pdev->cn_queue, work);
-
-	/* Don't create the connector workqueue twice */
-	if (atomic_inc_return(&pdev->wq_requested) == 1)
-		schedule_work(&pdev->wq_creation);
-	else
-		atomic_dec(&pdev->wq_requested);
-
-	return schedule_work(work);
-}
-
 void cn_queue_wrapper(struct work_struct *work)
 {
 	struct cn_callback_entry *cbq =
@@ -111,11 +69,7 @@ cn_queue_alloc_callback_entry(char *name, struct cb_id *id,
 
 static void cn_queue_free_callback(struct cn_callback_entry *cbq)
 {
-	/* The first jobs have been sent to kevent, flush them too */
-	flush_scheduled_work();
-	if (cbq->pdev->cn_queue)
-		flush_workqueue(cbq->pdev->cn_queue);
-
+	flush_workqueue(cbq->pdev->cn_queue);
 	kfree(cbq);
 }
 
@@ -193,11 +147,14 @@ struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
 	atomic_set(&dev->refcnt, 0);
 	INIT_LIST_HEAD(&dev->queue_list);
 	spin_lock_init(&dev->queue_lock);
-	init_waitqueue_head(&dev->wq_created);
 
 	dev->nls = nls;
 
-	INIT_WORK(&dev->wq_creation, cn_queue_create);
+	dev->cn_queue = alloc_ordered_workqueue(dev->name, 0);
+	if (!dev->cn_queue) {
+		kfree(dev);
+		return NULL;
+	}
 
 	return dev;
 }
@@ -205,25 +162,9 @@ struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
 void cn_queue_free_dev(struct cn_queue_dev *dev)
 {
 	struct cn_callback_entry *cbq, *n;
-	long timeout;
-	DEFINE_WAIT(wait);
-
-	/* Flush the first pending jobs queued on kevent */
-	flush_scheduled_work();
-
-	/* If the connector workqueue creation is still pending, wait for it */
-	prepare_to_wait(&dev->wq_created, &wait, TASK_UNINTERRUPTIBLE);
-	if (atomic_read(&dev->wq_requested) && !dev->cn_queue) {
-		timeout = schedule_timeout(HZ * 2);
-		if (!timeout && !dev->cn_queue)
-			WARN_ON(1);
-	}
-	finish_wait(&dev->wq_created, &wait);
 
-	if (dev->cn_queue) {
-		flush_workqueue(dev->cn_queue);
-		destroy_workqueue(dev->cn_queue);
-	}
+	flush_workqueue(dev->cn_queue);
+	destroy_workqueue(dev->cn_queue);
 
 	spin_lock_bh(&dev->queue_lock);
 	list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
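For orientation, the cn_queue.c change above drops the lazy, kevent-assisted
workqueue creation and instead allocates the queue once, up front, failing
device allocation cleanly if that fails. A minimal sketch of the same
allocate-up-front pattern; the names my_dev and my_dev_alloc are hypothetical
illustrations, not part of this patch:

	struct my_dev {
		struct workqueue_struct *wq;	/* exists before any work is queued */
	};

	static struct my_dev *my_dev_alloc(const char *name)
	{
		struct my_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

		if (!dev)
			return NULL;
		/* one ordered (single-threaded) queue; fail cleanly rather
		 * than falling back to keventd as the old connector code did */
		dev->wq = alloc_ordered_workqueue(name, 0);
		if (!dev->wq) {
			kfree(dev);
			return NULL;
		}
		return dev;
	}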
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 1d48f40342cb..e16c3fa8d2e3 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -133,7 +133,8 @@ static int cn_call_callback(struct sk_buff *skb)
 		    __cbq->data.skb == NULL)) {
 			__cbq->data.skb = skb;
 
-			if (queue_cn_work(__cbq, &__cbq->work))
+			if (queue_work(dev->cbdev->cn_queue,
+				       &__cbq->work))
 				err = 0;
 			else
 				err = -EINVAL;
@@ -148,13 +149,11 @@ static int cn_call_callback(struct sk_buff *skb)
 				d->callback = __cbq->data.callback;
 				d->free = __new_cbq;
 
-				__new_cbq->pdev = __cbq->pdev;
-
 				INIT_WORK(&__new_cbq->work,
 					  &cn_queue_wrapper);
 
-				if (queue_cn_work(__new_cbq,
-						  &__new_cbq->work))
+				if (queue_work(dev->cbdev->cn_queue,
+					       &__new_cbq->work))
 					err = 0;
 				else {
 					kfree(__new_cbq);
diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
index af25e1f3efd4..e90db8870b6c 100644
--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
@@ -563,7 +563,7 @@ reset_inf(struct inf_hw *hw)
 		mdelay(10);
 		hw->ipac.isac.adf2 = 0x87;
 		hw->ipac.hscx[0].slot = 0x1f;
-		hw->ipac.hscx[0].slot = 0x23;
+		hw->ipac.hscx[1].slot = 0x23;
 		break;
 	case INF_GAZEL_R753:
 		val = inl((u32)hw->cfg.start + GAZEL_CNTRL);
diff --git a/drivers/isdn/hisax/l3_1tr6.c b/drivers/isdn/hisax/l3_1tr6.c
index b0554f80bfb3..ee4dae1382e0 100644
--- a/drivers/isdn/hisax/l3_1tr6.c
+++ b/drivers/isdn/hisax/l3_1tr6.c
@@ -164,11 +164,9 @@ l3_1tr6_setup(struct l3_process *pc, u_char pr, void *arg)
 	char tmp[80];
 	struct sk_buff *skb = arg;
 
-	p = skb->data;
-
 	/* Channel Identification */
-	p = skb->data;
-	if ((p = findie(p, skb->len, WE0_chanID, 0))) {
+	p = findie(skb->data, skb->len, WE0_chanID, 0);
+	if (p) {
 		if (p[1] != 1) {
 			l3_1tr6_error(pc, "setup wrong chanID len", skb);
 			return;
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index ef4115b897bf..9ab58097fa2e 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -631,8 +631,6 @@ struct atl1c_adapter {
 extern char atl1c_driver_name[];
 extern char atl1c_driver_version[];
 
-extern int atl1c_up(struct atl1c_adapter *adapter);
-extern void atl1c_down(struct atl1c_adapter *adapter);
 extern void atl1c_reinit_locked(struct atl1c_adapter *adapter);
 extern s32 atl1c_reset_hw(struct atl1c_hw *hw);
 extern void atl1c_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 99ffcf667d1f..09b099bfab2b 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -66,6 +66,8 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup);
 static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter);
 static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que,
 		   int *work_done, int work_to_do);
+static int atl1c_up(struct atl1c_adapter *adapter);
+static void atl1c_down(struct atl1c_adapter *adapter);
 
 static const u16 atl1c_pay_load_size[] = {
 	128, 256, 512, 1024, 2048, 4096,
@@ -2309,7 +2311,7 @@ static int atl1c_request_irq(struct atl1c_adapter *adapter)
 	return err;
 }
 
-int atl1c_up(struct atl1c_adapter *adapter)
+static int atl1c_up(struct atl1c_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
 	int num;
@@ -2351,7 +2353,7 @@ err_alloc_rx:
 	return err;
 }
 
-void atl1c_down(struct atl1c_adapter *adapter)
+static void atl1c_down(struct atl1c_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
 
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index dbd27b8e66bd..43579b3b24ac 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -91,6 +91,8 @@ MODULE_VERSION(ATLX_DRIVER_VERSION);
 /* Temporary hack for merging atl1 and atl2 */
 #include "atlx.c"
 
+static const struct ethtool_ops atl1_ethtool_ops;
+
 /*
  * This is the only thing that needs to be changed to adjust the
  * maximum number of ports that the driver can manage.
@@ -353,7 +355,7 @@ static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
  * hw - Struct containing variables accessed by shared code
  * reg_addr - address of the PHY register to read
  */
-s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
+static s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
 {
 	u32 val;
 	int i;
@@ -553,7 +555,7 @@ static s32 atl1_read_mac_addr(struct atl1_hw *hw)
  * 1. calcu 32bit CRC for multicast address
  * 2. reverse crc with MSB to LSB
  */
-u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
+static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
 {
 	u32 crc32, value = 0;
 	int i;
@@ -570,7 +572,7 @@ u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
  * hw - Struct containing variables accessed by shared code
  * hash_value - Multicast address hash value
  */
-void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
+static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
 {
 	u32 hash_bit, hash_reg;
 	u32 mta;
@@ -914,7 +916,7 @@ static s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex
 	return 0;
 }
 
-void atl1_set_mac_addr(struct atl1_hw *hw)
+static void atl1_set_mac_addr(struct atl1_hw *hw)
 {
 	u32 value;
 	/*
@@ -3658,7 +3660,7 @@ static int atl1_nway_reset(struct net_device *netdev)
 	return 0;
 }
 
-const struct ethtool_ops atl1_ethtool_ops = {
+static const struct ethtool_ops atl1_ethtool_ops = {
 	.get_settings		= atl1_get_settings,
 	.set_settings		= atl1_set_settings,
 	.get_drvinfo		= atl1_get_drvinfo,
diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h
index 9c0ddb273ac8..68de8cbfb3ec 100644
--- a/drivers/net/atlx/atl1.h
+++ b/drivers/net/atlx/atl1.h
@@ -56,16 +56,13 @@ struct atl1_adapter;
 struct atl1_hw;
 
 /* function prototypes needed by multiple files */
-u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
-void atl1_hash_set(struct atl1_hw *hw, u32 hash_value);
-s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data);
-void atl1_set_mac_addr(struct atl1_hw *hw);
+static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
+static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value);
+static void atl1_set_mac_addr(struct atl1_hw *hw);
 static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
 	int cmd);
 static u32 atl1_check_link(struct atl1_adapter *adapter);
 
-extern const struct ethtool_ops atl1_ethtool_ops;
-
 /* hardware definitions specific to L1 */
 
 /* Block IDLE Status Register */
diff --git a/drivers/net/atlx/atlx.c b/drivers/net/atlx/atlx.c
index f979ea2d6d3c..afb7f7dd1bb1 100644
--- a/drivers/net/atlx/atlx.c
+++ b/drivers/net/atlx/atlx.c
@@ -41,6 +41,10 @@
 
 #include "atlx.h"
 
+static s32 atlx_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data);
+static u32 atlx_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
+static void atlx_set_mac_addr(struct atl1_hw *hw);
+
 static struct atlx_spi_flash_dev flash_table[] = {
 /*	MFR_NAME  WRSR  READ  PRGM  WREN  WRDI  RDSR  RDID  SEC_ERS CHIP_ERS */
 	{"Atmel", 0x00, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52,   0x62},
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 1e7f305ed00b..36eca1ce75d4 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1471,42 +1471,6 @@ err:
 	return status;
 }
 
-/* Uses sync mcc */
-int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
-		u8 *connector)
-{
-	struct be_mcc_wrb *wrb;
-	struct be_cmd_req_port_type *req;
-	int status;
-
-	spin_lock_bh(&adapter->mcc_lock);
-
-	wrb = wrb_from_mccq(adapter);
-	if (!wrb) {
-		status = -EBUSY;
-		goto err;
-	}
-	req = embedded_payload(wrb);
-
-	be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0,
-			OPCODE_COMMON_READ_TRANSRECV_DATA);
-
-	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-		OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));
-
-	req->port = cpu_to_le32(port);
-	req->page_num = cpu_to_le32(TR_PAGE_A0);
-	status = be_mcc_notify_wait(adapter);
-	if (!status) {
-		struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
-		*connector = resp->data.connector;
-	}
-
-err:
-	spin_unlock_bh(&adapter->mcc_lock);
-	return status;
-}
-
 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
 			u32 flash_type, u32 flash_opcode, u32 buf_size)
 {
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index c7f6cdfe1c73..8469ff061f30 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -1022,8 +1022,6 @@ extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
 			u8 port_num, u8 beacon, u8 status, u8 state);
 extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
 			u8 port_num, u32 *state);
-extern int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
-			u8 *connector);
 extern int be_cmd_write_flashrom(struct be_adapter *adapter,
 			struct be_dma_mem *cmd, u32 flash_oper,
 			u32 flash_opcode, u32 buf_size);
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 45b1f6635282..c36cd2ffbadc 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -849,20 +849,16 @@ static void be_rx_stats_update(struct be_rx_obj *rxo,
 		stats->rx_mcast_pkts++;
 }
 
-static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
+static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
 {
-	u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;
+	u8 l4_cksm, ipv6, ipcksm;
 
 	l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
 	ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
-	ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
-	if (ip_version) {
-		tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
-		udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
-	}
-	ipv6_chk = (ip_version && (tcpf || udpf));
+	ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
 
-	return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
+	/* Ignore ipcksm for ipv6 pkts */
+	return l4_cksm && (ipcksm || ipv6);
 }
 
 static struct be_rx_page_info *
@@ -1017,10 +1013,10 @@ static void be_rx_compl_process(struct be_adapter *adapter,
 
 	skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);
 
-	if (do_pkt_csum(rxcp, adapter->rx_csum))
-		skb_checksum_none_assert(skb);
-	else
+	if (likely(adapter->rx_csum && csum_passed(rxcp)))
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	else
+		skb_checksum_none_assert(skb);
 
 	skb->truesize = skb->len + sizeof(struct sk_buff);
 	skb->protocol = eth_type_trans(skb, adapter->netdev);
@@ -1674,7 +1670,7 @@ static inline bool do_gro(struct be_adapter *adapter, struct be_rx_obj *rxo,
 	return (tcp_frame && !err) ? true : false;
 }
 
-int be_poll_rx(struct napi_struct *napi, int budget)
+static int be_poll_rx(struct napi_struct *napi, int budget)
 {
 	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
 	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
@@ -1806,6 +1802,20 @@ static void be_worker(struct work_struct *work)
 	struct be_rx_obj *rxo;
 	int i;
 
+	/* when interrupts are not yet enabled, just reap any pending
+	* mcc completions */
+	if (!netif_running(adapter->netdev)) {
+		int mcc_compl, status = 0;
+
+		mcc_compl = be_process_mcc(adapter, &status);
+
+		if (mcc_compl) {
+			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
+		}
+		goto reschedule;
+	}
+
 	if (!adapter->stats_ioctl_sent)
 		be_cmd_get_stats(adapter, &adapter->stats_cmd);
 
@@ -1824,6 +1834,7 @@ static void be_worker(struct work_struct *work)
 	if (!adapter->ue_detected)
 		be_detect_dump_ue(adapter);
 
+reschedule:
 	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
 }
 
@@ -2019,8 +2030,6 @@ static int be_close(struct net_device *netdev)
 	struct be_eq_obj *tx_eq = &adapter->tx_eq;
 	int vec, i;
 
-	cancel_delayed_work_sync(&adapter->work);
-
 	be_async_mcc_disable(adapter);
 
 	netif_stop_queue(netdev);
@@ -2085,8 +2094,6 @@ static int be_open(struct net_device *netdev)
 	/* Now that interrupts are on we can process async mcc */
 	be_async_mcc_enable(adapter);
 
-	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
-
 	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
 			&link_speed);
 	if (status)
@@ -2299,9 +2306,6 @@ static int be_clear(struct be_adapter *adapter)
 
 
 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
-char flash_cookie[2][16] =	{"*** SE FLAS",
-				"H DIRECTORY *** "};
-
 static bool be_flash_redboot(struct be_adapter *adapter,
 			const u8 *p, u32 img_start, int image_size,
 			int hdr_size)
@@ -2559,7 +2563,6 @@ static void be_netdev_init(struct net_device *netdev)
 	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
 		BE_NAPI_WEIGHT);
 
-	netif_carrier_off(netdev);
 	netif_stop_queue(netdev);
 }
 
@@ -2715,6 +2718,8 @@ static void __devexit be_remove(struct pci_dev *pdev)
 	if (!adapter)
 		return;
 
+	cancel_delayed_work_sync(&adapter->work);
+
 	unregister_netdev(adapter->netdev);
 
 	be_clear(adapter);
@@ -2868,8 +2873,10 @@ static int __devinit be_probe(struct pci_dev *pdev,
 	status = register_netdev(netdev);
 	if (status != 0)
 		goto unsetup;
+	netif_carrier_off(netdev);
 
 	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
+	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
 	return 0;
 
 unsetup:
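For orientation, in the csum_passed() rewrite above the hardware IP-checksum
flag is deliberately ignored for IPv6, since IPv6 headers carry no checksum
field; only the L4 result matters there. The same decision restated as a
stand-alone sketch (rx_csum_ok is an illustration, not driver code):

	/* Trust the frame only if the L4 checksum passed and, for IPv4,
	 * the IP header checksum passed too. IPv6 has no header checksum,
	 * so ip_ok is meaningless when is_ipv6 is set. */
	static bool rx_csum_ok(bool l4_ok, bool ip_ok, bool is_ipv6)
	{
		return l4_ok && (ip_ok || is_ipv6);
	}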
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 9571ecf48f35..9eea225decaf 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -1288,15 +1288,11 @@ struct bnx2x_func_init_params {
 
 #define WAIT_RAMROD_POLL	0x01
 #define WAIT_RAMROD_COMMON	0x02
-int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
-		      int *state_p, int flags);
 
 /* dmae */
 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
 		      u32 len32);
-void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
-			       u32 addr, u32 len);
 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
 u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type);
 u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
@@ -1307,7 +1303,6 @@ int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
-void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
 
 void bnx2x_calc_fc_adv(struct bnx2x *bp);
 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index bc5837514074..459614d2d7bc 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -25,6 +25,7 @@
 
 #include "bnx2x_init.h"
 
+static int bnx2x_setup_irqs(struct bnx2x *bp);
 
 /* free skb in the packet ring at pos idx
  * return idx of last bd freed
@@ -2187,7 +2188,7 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
 }
 
 
-int bnx2x_setup_irqs(struct bnx2x *bp)
+static int bnx2x_setup_irqs(struct bnx2x *bp)
 {
 	int rc = 0;
 	if (bp->flags & USING_MSIX_FLAG) {
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 5bfe0ab1d2d4..6b28739c5302 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -117,13 +117,6 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
 void bnx2x_int_enable(struct bnx2x *bp);
 
 /**
- * Disable HW interrupts.
- *
- * @param bp
- */
-void bnx2x_int_disable(struct bnx2x *bp);
-
-/**
  * Disable interrupts. This function ensures that there are no
  * ISRs or SP DPCs (sp_task) are running after it returns.
  *
@@ -192,17 +185,6 @@ int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 			int is_leading);
 
 /**
- * Bring down an eth client.
- *
- * @param bp
- * @param p
- *
- * @return int
- */
-int bnx2x_stop_fw_client(struct bnx2x *bp,
-			 struct bnx2x_client_ramrod_params *p);
-
-/**
  * Set number of queues according to mode
  *
  * @param bp
@@ -250,34 +232,6 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
  */
 void bnx2x_set_eth_mac(struct bnx2x *bp, int set);
 
-#ifdef BCM_CNIC
-/**
- * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
- * MAC(s). The function will wait until the ramrod completion
- * returns.
- *
- * @param bp driver handle
- * @param set set or clear the CAM entry
- *
- * @return 0 if cussess, -ENODEV if ramrod doesn't return.
- */
-int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set);
-#endif
-
-/**
- * Initialize status block in FW and HW
- *
- * @param bp driver handle
- * @param dma_addr_t mapping
- * @param int sb_id
- * @param int vfid
- * @param u8 vf_valid
- * @param int fw_sb_id
- * @param int igu_sb_id
- */
-void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
-		u8 vf_valid, int fw_sb_id, int igu_sb_id);
-
 /**
  * Set MAC filtering configurations.
  *
@@ -326,7 +280,6 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
  * @return int
  */
 int bnx2x_func_start(struct bnx2x *bp);
-int bnx2x_func_stop(struct bnx2x *bp);
 
 /**
  * Prepare ILT configurations according to current driver
@@ -396,14 +349,6 @@ int bnx2x_enable_msix(struct bnx2x *bp);
 int bnx2x_enable_msi(struct bnx2x *bp);
 
 /**
- * Request IRQ vectors from OS.
- *
- * @param bp
- *
- * @return int
- */
-int bnx2x_setup_irqs(struct bnx2x *bp);
-/**
  * NAPI callback
  *
  * @param napi
diff --git a/drivers/net/bnx2x/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h
index e65de784182c..a306b0e46b61 100644
--- a/drivers/net/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/bnx2x/bnx2x_init_ops.h
@@ -16,7 +16,9 @@
 #define BNX2X_INIT_OPS_H
 
 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
-
+static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
+static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
+				      u32 addr, u32 len);
 
 static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data,
 			      u32 len)
@@ -589,7 +591,7 @@ static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, u8 memop)
 	return rc;
 }
 
-int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
+static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
 {
 	int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop);
 	if (!rc)
@@ -635,7 +637,7 @@ static void bnx2x_ilt_line_init_op(struct bnx2x *bp, struct bnx2x_ilt *ilt,
 	}
 }
 
-void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
+static void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
 			       struct ilt_client_info *ilt_cli,
 			       u32 ilt_start, u8 initop)
 {
@@ -688,8 +690,10 @@ void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
 	}
 }
 
-void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp, struct bnx2x_ilt *ilt,
-				  struct ilt_client_info *ilt_cli, u8 initop)
+static void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp,
+					 struct bnx2x_ilt *ilt,
+					 struct ilt_client_info *ilt_cli,
+					 u8 initop)
 {
 	int i;
 
@@ -703,8 +707,8 @@ void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp, struct bnx2x_ilt *ilt,
 	bnx2x_ilt_boundry_init_op(bp, ilt_cli, ilt->start_line, initop);
 }
 
-void bnx2x_ilt_client_init_op(struct bnx2x *bp,
-			      struct ilt_client_info *ilt_cli, u8 initop)
+static void bnx2x_ilt_client_init_op(struct bnx2x *bp,
+				     struct ilt_client_info *ilt_cli, u8 initop)
 {
 	struct bnx2x_ilt *ilt = BP_ILT(bp);
 
@@ -720,7 +724,7 @@ static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp,
 	bnx2x_ilt_client_init_op(bp, ilt_cli, initop);
 }
 
-void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
+static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
 {
 	bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop);
 	bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop);
@@ -752,7 +756,7 @@ static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
 * called during init common stage, ilt clients should be initialized
 * prioir to calling this function
 */
-void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
+static void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
 {
 	bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_CDU,
 				  PXP2_REG_RQ_CDU_P_SIZE, initop);
@@ -772,8 +776,8 @@ void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
 #define QM_INIT(cid_cnt)	(cid_cnt > QM_INIT_MIN_CID_COUNT)
 
 /* called during init port stage */
-void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
-			     u8 initop)
+static void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
+				    u8 initop)
 {
 	int port = BP_PORT(bp);
 
@@ -814,8 +818,8 @@ static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count)
 }
 
 /* called during init common stage */
-void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
-			     u8 initop)
+static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
+				    u8 initop)
 {
 	if (!QM_INIT(qm_cid_count))
 		return;
@@ -836,8 +840,8 @@ void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
 ****************************************************************************/
 
 /* called during init func stage */
-void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
-		       dma_addr_t t2_mapping, int src_cid_count)
+static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
+			      dma_addr_t t2_mapping, int src_cid_count)
 {
 	int i;
 	int port = BP_PORT(bp);
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 3e99bf9c42b9..2326774df843 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -181,6 +181,12 @@
 				 (_bank + (_addr & 0xf)), \
 				 _val)
 
+static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
+			  u8 devad, u16 reg, u16 *ret_val);
+
+static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+			   u8 devad, u16 reg, u16 val);
+
 static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
 {
 	u32 val = REG_RD(bp, reg);
@@ -594,7 +600,7 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
 	return 0;
 }
 
-u8 bnx2x_bmac_enable(struct link_params *params,
+static u8 bnx2x_bmac_enable(struct link_params *params,
 		     struct link_vars *vars,
 		     u8 is_lb)
 {
@@ -2537,122 +2543,6 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
 	}
 }
 
-/*
- *------------------------------------------------------------------------
- * bnx2x_override_led_value -
- *
- * Override the led value of the requested led
- *
- *------------------------------------------------------------------------
- */
-u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port,
-			    u32 led_idx, u32 value)
-{
-	u32 reg_val;
-
-	/* If port 0 then use EMAC0, else use EMAC1*/
-	u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
-
-	DP(NETIF_MSG_LINK,
-	   "bnx2x_override_led_value() port %x led_idx %d value %d\n",
-	   port, led_idx, value);
-
-	switch (led_idx) {
-	case 0: /* 10MB led */
-		/* Read the current value of the LED register in
-		the EMAC block */
-		reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
-		/* Set the OVERRIDE bit to 1 */
-		reg_val |= EMAC_LED_OVERRIDE;
-		/* If value is 1, set the 10M_OVERRIDE bit,
-		otherwise reset it.*/
-		reg_val = (value == 1) ? (reg_val | EMAC_LED_10MB_OVERRIDE) :
-			(reg_val & ~EMAC_LED_10MB_OVERRIDE);
-		REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
-		break;
-	case 1: /*100MB led */
-		/*Read the current value of the LED register in
-		the EMAC block */
-		reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
-		/* Set the OVERRIDE bit to 1 */
-		reg_val |= EMAC_LED_OVERRIDE;
-		/* If value is 1, set the 100M_OVERRIDE bit,
-		otherwise reset it.*/
-		reg_val = (value == 1) ? (reg_val | EMAC_LED_100MB_OVERRIDE) :
-			(reg_val & ~EMAC_LED_100MB_OVERRIDE);
-		REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
-		break;
-	case 2: /* 1000MB led */
-		/* Read the current value of the LED register in the
-		EMAC block */
-		reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
-		/* Set the OVERRIDE bit to 1 */
-		reg_val |= EMAC_LED_OVERRIDE;
-		/* If value is 1, set the 1000M_OVERRIDE bit, otherwise
-		reset it. */
-		reg_val = (value == 1) ? (reg_val | EMAC_LED_1000MB_OVERRIDE) :
-			(reg_val & ~EMAC_LED_1000MB_OVERRIDE);
-		REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
-		break;
-	case 3: /* 2500MB led */
-		/* Read the current value of the LED register in the
-		EMAC block*/
-		reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
-		/* Set the OVERRIDE bit to 1 */
-		reg_val |= EMAC_LED_OVERRIDE;
-		/* If value is 1, set the 2500M_OVERRIDE bit, otherwise
-		reset it.*/
-		reg_val = (value == 1) ? (reg_val | EMAC_LED_2500MB_OVERRIDE) :
-			(reg_val & ~EMAC_LED_2500MB_OVERRIDE);
-		REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
-		break;
-	case 4: /*10G led */
-		if (port == 0) {
-			REG_WR(bp, NIG_REG_LED_10G_P0,
-			       value);
-		} else {
-			REG_WR(bp, NIG_REG_LED_10G_P1,
-			       value);
-		}
-		break;
-	case 5: /* TRAFFIC led */
-		/* Find if the traffic control is via BMAC or EMAC */
-		if (port == 0)
-			reg_val = REG_RD(bp, NIG_REG_NIG_EMAC0_EN);
-		else
-			reg_val = REG_RD(bp, NIG_REG_NIG_EMAC1_EN);
-
-		/* Override the traffic led in the EMAC:*/
-		if (reg_val == 1) {
-			/* Read the current value of the LED register in
-			the EMAC block */
-			reg_val = REG_RD(bp, emac_base +
-					 EMAC_REG_EMAC_LED);
-			/* Set the TRAFFIC_OVERRIDE bit to 1 */
-			reg_val |= EMAC_LED_OVERRIDE;
-			/* If value is 1, set the TRAFFIC bit, otherwise
-			reset it.*/
-			reg_val = (value == 1) ? (reg_val | EMAC_LED_TRAFFIC) :
-				(reg_val & ~EMAC_LED_TRAFFIC);
-			REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
-		} else { /* Override the traffic led in the BMAC: */
-			REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
-			       + port*4, 1);
-			REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4,
-			       value);
-		}
-		break;
-	default:
-		DP(NETIF_MSG_LINK,
-		   "bnx2x_override_led_value() unknown led index %d "
-		   "(should be 0-5)\n", led_idx);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-
 u8 bnx2x_set_led(struct link_params *params,
 		 struct link_vars *vars, u8 mode, u32 speed)
 {
@@ -4099,9 +3989,9 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 	return -EINVAL;
 }
 
-u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
-				struct link_params *params, u16 addr,
-				u8 byte_cnt, u8 *o_buf)
+static u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+				       struct link_params *params, u16 addr,
+				       u8 byte_cnt, u8 *o_buf)
 {
 	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
 		return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
@@ -6819,13 +6709,6 @@ u8 bnx2x_phy_probe(struct link_params *params)
 	return 0;
 }
 
-u32 bnx2x_supported_attr(struct link_params *params, u8 phy_idx)
-{
-	if (phy_idx < params->num_phys)
-		return params->phy[phy_idx].supported;
-	return 0;
-}
-
 static void set_phy_vars(struct link_params *params)
 {
 	struct bnx2x *bp = params->bp;
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index 58a4c7199276..171abf8097ee 100644
--- a/drivers/net/bnx2x/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -279,12 +279,6 @@ u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
 
 u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
 		   u8 devad, u16 reg, u16 val);
-
-u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
-		   u8 devad, u16 reg, u16 *ret_val);
-
-u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
-		    u8 devad, u16 reg, u16 val);
 /* Reads the link_status from the shmem,
    and update the link vars accordingly */
 void bnx2x_link_status_update(struct link_params *input,
@@ -304,8 +298,6 @@ u8 bnx2x_set_led(struct link_params *params, struct link_vars *vars,
 #define LED_MODE_OPER			2
 #define LED_MODE_FRONT_PANEL_OFF	3
 
-u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port, u32 led_idx, u32 value);
-
 /* bnx2x_handle_module_detect_int should be called upon module detection
    interrupt */
 void bnx2x_handle_module_detect_int(struct link_params *params);
@@ -325,19 +317,12 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
 /* Reset the external of SFX7101 */
 void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
 
-u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
-				struct link_params *params, u16 addr,
-				u8 byte_cnt, u8 *o_buf);
-
 void bnx2x_hw_reset_phy(struct link_params *params);
 
 /* Checks if HW lock is required for this phy/board type */
 u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base,
 			  u32 shmem2_base);
 
-/* Returns the aggregative supported attributes of the phys on board */
-u32 bnx2x_supported_attr(struct link_params *params, u8 phy_idx);
-
 /* Check swap bit and adjust PHY order */
 u32 bnx2x_phy_selection(struct link_params *params);
 
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index ff99a2fc0426..e9ad16f00b56 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -403,7 +403,7 @@ static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
403/* used only at init 403/* used only at init
404 * locking is done by mcp 404 * locking is done by mcp
405 */ 405 */
406void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val) 406static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
407{ 407{
408 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr); 408 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
409 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val); 409 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
@@ -429,7 +429,8 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
429#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]" 429#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
430#define DMAE_DP_DST_NONE "dst_addr [none]" 430#define DMAE_DP_DST_NONE "dst_addr [none]"
431 431
432void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl) 432static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
433 int msglvl)
433{ 434{
434 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC; 435 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
435 436
@@ -551,8 +552,9 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
551 return opcode; 552 return opcode;
552} 553}
553 554
554void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, 555static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
555 u8 src_type, u8 dst_type) 556 struct dmae_command *dmae,
557 u8 src_type, u8 dst_type)
556{ 558{
557 memset(dmae, 0, sizeof(struct dmae_command)); 559 memset(dmae, 0, sizeof(struct dmae_command));
558 560
@@ -567,7 +569,8 @@ void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
567} 569}
568 570
569/* issue a dmae command over the init-channel and wailt for completion */ 571/* issue a dmae command over the init-channel and wailt for completion */
570int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae) 572static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
573 struct dmae_command *dmae)
571{ 574{
572 u32 *wb_comp = bnx2x_sp(bp, wb_comp); 575 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
573 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40; 576 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
@@ -674,8 +677,8 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
 	bnx2x_issue_dmae_with_comp(bp, &dmae);
 }
 
-void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
-			       u32 addr, u32 len)
+static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
+				      u32 addr, u32 len)
 {
 	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
 	int offset = 0;
@@ -1267,7 +1270,7 @@ static void bnx2x_igu_int_disable(struct bnx2x *bp)
 		BNX2X_ERR("BUG! proper val not read from IGU!\n");
 }
 
-void bnx2x_int_disable(struct bnx2x *bp)
+static void bnx2x_int_disable(struct bnx2x *bp)
 {
 	if (bp->common.int_block == INT_BLOCK_HC)
 		bnx2x_hc_int_disable(bp);
@@ -2236,7 +2239,7 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
 }
 
 /* must be called under rtnl_lock */
-void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
+static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
 {
 	u32 mask = (1 << cl_id);
 
@@ -2303,7 +2306,7 @@ void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
 		bp->mac_filters.unmatched_unicast & ~mask;
 }
 
-void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
+static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
 {
 	struct tstorm_eth_function_common_config tcfg = {0};
 	u16 rss_flgs;
@@ -2460,7 +2463,7 @@ static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
 	txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
 }
 
-void bnx2x_pf_init(struct bnx2x *bp)
+static void bnx2x_pf_init(struct bnx2x *bp)
 {
 	struct bnx2x_func_init_params func_init = {0};
 	struct bnx2x_rss_params rss = {0};
@@ -3928,7 +3931,7 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
 	hc_sm->time_to_expire = 0xFFFFFFFF;
 }
 
-void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
-		   u8 vf_valid, int fw_sb_id, int igu_sb_id)
+static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
+			  u8 vf_valid, int fw_sb_id, int igu_sb_id)
 {
 	int igu_seg_id;
@@ -6021,6 +6024,9 @@ alloc_mem_err:
 /*
  * Init service functions
  */
+static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
+			     int *state_p, int flags);
+
 int bnx2x_func_start(struct bnx2x *bp)
 {
 	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
@@ -6030,7 +6036,7 @@ int bnx2x_func_start(struct bnx2x *bp)
 			       WAIT_RAMROD_COMMON);
 }
 
-int bnx2x_func_stop(struct bnx2x *bp)
+static int bnx2x_func_stop(struct bnx2x *bp)
 {
 	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
 
@@ -6103,8 +6109,8 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
 	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
 }
 
-int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
-		      int *state_p, int flags)
+static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
+			     int *state_p, int flags)
 {
 	/* can take a while if any port is running */
 	int cnt = 5000;
@@ -6154,7 +6160,7 @@ int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
 	return -EBUSY;
 }
 
-u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
+static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
 {
 	if (CHIP_IS_E1H(bp))
 		return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
@@ -6273,7 +6279,7 @@ static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
  *
  * @return 0 if success, -ENODEV if ramrod doesn't return.
  */
-int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
+static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
 {
 	u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
 			 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
@@ -6383,11 +6389,11 @@ static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
 				 ETH_CONNECTION_TYPE);
 }
 
-int bnx2x_setup_fw_client(struct bnx2x *bp,
-			  struct bnx2x_client_init_params *params,
-			  u8 activate,
-			  struct client_init_ramrod_data *data,
-			  dma_addr_t data_mapping)
+static int bnx2x_setup_fw_client(struct bnx2x *bp,
+				 struct bnx2x_client_init_params *params,
+				 u8 activate,
+				 struct client_init_ramrod_data *data,
+				 dma_addr_t data_mapping)
 {
 	u16 hc_usec;
 	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
@@ -6633,7 +6639,8 @@ int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	return rc;
 }
 
-int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p)
+static int bnx2x_stop_fw_client(struct bnx2x *bp,
+				struct bnx2x_client_ramrod_params *p)
 {
 	int rc;
 
@@ -7440,7 +7447,7 @@ reset_task_exit:
  * Init service functions
  */
 
-u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
+static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
 {
 	u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
 	u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
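The common thread in the bnx2x_main.c hunks above is visibility reduction: each helper is only called from within this file, so it becomes static, and where a newly static function is used before its definition (bnx2x_wait_ramrod, called from bnx2x_func_start) a forward declaration is placed ahead of the first call site. A minimal, self-contained illustration of that declaration-before-use rule, in plain C with hypothetical names:

#include <stdio.h>

/* Forward declaration: demo_wait() is defined below but already
 * called from demo_start(). Without this line the compiler would
 * reject the call, since implicit function declarations are invalid
 * in modern C.
 */
static int demo_wait(int state);

static int demo_start(void)
{
	return demo_wait(1);
}

static int demo_wait(int state)
{
	return state ? 0 : -1;
}

int main(void)
{
	printf("%d\n", demo_start());	/* prints 0 */
	return 0;
}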
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index beb3b7cecd52..bdb68a600382 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -493,9 +493,9 @@ static void bond_vlan_rx_register(struct net_device *bond_dev,
 	struct slave *slave;
 	int i;
 
-	write_lock(&bond->lock);
+	write_lock_bh(&bond->lock);
 	bond->vlgrp = grp;
-	write_unlock(&bond->lock);
+	write_unlock_bh(&bond->lock);
 
 	bond_for_each_slave(bond, slave, i) {
 		struct net_device *slave_dev = slave->dev;
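This is the lockdep fix named in the merge summary: bond->vlgrp is updated here in process context, but the same rwlock is also taken from networking bottom halves, so the process-context writer must use the _bh variants, which additionally disable local softirqs for the duration of the critical section. Otherwise a softirq preempting the write holder on the same CPU and taking the lock would deadlock. A sketch of the pattern only, with hypothetical names (this is not buildable outside a kernel tree):

#include <linux/spinlock.h>

struct demo {
	rwlock_t lock;	/* initialised elsewhere with rwlock_init() */
	void *cfg;
};

/* Process context (e.g. an ndo_* callback running under rtnl_lock). */
static void demo_update(struct demo *d, void *new_cfg)
{
	write_lock_bh(&d->lock);	/* also blocks local softirqs */
	d->cfg = new_cfg;
	write_unlock_bh(&d->lock);
}

/* Softirq context (e.g. the transmit path). */
static void *demo_read(struct demo *d)
{
	void *cfg;

	read_lock(&d->lock);		/* already in BH context */
	cfg = d->cfg;
	read_unlock(&d->lock);
	return cfg;
}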
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 75bfc3a9d95f..09ed3f42d673 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -31,3 +31,10 @@ config CAIF_SPI_SYNC
 	  Putting the next command and length in the start of the frame can
 	  help to synchronize to the next transfer in case of over or under-runs.
 	  This option also needs to be enabled on the modem.
+
+config CAIF_SHM
+	tristate "CAIF shared memory protocol driver"
+	depends on CAIF && U5500_MBOX
+	default n
+	---help---
+	  The CAIF shared memory protocol driver for the STE UX5500 platform.
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index 3a11d619452b..b38d987da67d 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -8,3 +8,7 @@ obj-$(CONFIG_CAIF_TTY) += caif_serial.o
 # SPI slave physical interfaces module
 cfspi_slave-objs := caif_spi.o caif_spi_slave.o
 obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o
+
+# Shared memory
+caif_shm-objs := caif_shmcore.o caif_shm_u5500.o
+obj-$(CONFIG_CAIF_SHM) += caif_shm.o
diff --git a/drivers/net/caif/caif_shm_u5500.c b/drivers/net/caif/caif_shm_u5500.c
new file mode 100644
index 000000000000..1cd90da86f13
--- /dev/null
+++ b/drivers/net/caif/caif_shm_u5500.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * Author:  Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":" __func__ "():" fmt
+
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <mach/mbox.h>
+#include <net/caif/caif_shm.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CAIF Shared Memory protocol driver");
+
+#define MAX_SHM_INSTANCES	1
+
+enum {
+	MBX_ACC0,
+	MBX_ACC1,
+	MBX_DSP
+};
+
+static struct shmdev_layer shmdev_lyr[MAX_SHM_INSTANCES];
+
+static unsigned int shm_start;
+static unsigned int shm_size;
+
+module_param(shm_size, uint, 0440);
+MODULE_PARM_DESC(shm_size, "Size of SHM shared memory");
+
+module_param(shm_start, uint, 0440);
+MODULE_PARM_DESC(shm_start, "Start of SHM shared memory");
+
+static int shmdev_send_msg(u32 dev_id, u32 mbx_msg)
+{
+	/* Always block until msg is written successfully */
+	mbox_send(shmdev_lyr[dev_id].hmbx, mbx_msg, true);
+	return 0;
+}
+
+static int shmdev_mbx_setup(void *pshmdrv_cb, struct shmdev_layer *pshm_dev,
+							void *pshm_drv)
+{
+	/*
+	 * For UX5500, we have only 1 SHM instance which uses MBX0
+	 * for communication with the peer modem
+	 */
+	pshm_dev->hmbx = mbox_setup(MBX_ACC0, pshmdrv_cb, pshm_drv);
+
+	if (!pshm_dev->hmbx)
+		return -ENODEV;
+	else
+		return 0;
+}
+
+static int __init caif_shmdev_init(void)
+{
+	int i, result;
+
+	/* Loop is currently overkill, there is only one instance */
+	for (i = 0; i < MAX_SHM_INSTANCES; i++) {
+
+		shmdev_lyr[i].shm_base_addr = shm_start;
+		shmdev_lyr[i].shm_total_sz = shm_size;
+
+		if (((char *)shmdev_lyr[i].shm_base_addr == NULL)
+				|| (shmdev_lyr[i].shm_total_sz <= 0)) {
+			pr_warn("ERROR, "
+				"Shared memory Address and/or Size incorrect"
+				", Bailing out ...\n");
+			result = -EINVAL;
+			goto clean;
+		}
+
+		pr_info("SHM AREA (instance %d) STARTS"
+			" AT %p\n", i, (char *)shmdev_lyr[i].shm_base_addr);
+
+		shmdev_lyr[i].shm_id = i;
+		shmdev_lyr[i].pshmdev_mbxsend = shmdev_send_msg;
+		shmdev_lyr[i].pshmdev_mbxsetup = shmdev_mbx_setup;
+
+		/*
+		 * Finally, CAIF core module is called with details in place:
+		 * 1. SHM base address
+		 * 2. SHM size
+		 * 3. MBX handle
+		 */
+		result = caif_shmcore_probe(&shmdev_lyr[i]);
+		if (result) {
+			pr_warn("ERROR[%d], "
+				"Could not probe SHM core (instance %d), "
+				"Bailing out ...\n", result, i);
+			goto clean;
+		}
+	}
+
+	return 0;
+
+clean:
+	/*
+	 * For now, we assume that even if one instance of SHM fails, we bail
+	 * out of the driver support completely. For this, we need to release
+	 * any memory allocated and unregister any instance of SHM net device.
+	 */
+	for (i = 0; i < MAX_SHM_INSTANCES; i++) {
+		if (shmdev_lyr[i].pshm_netdev)
+			unregister_netdev(shmdev_lyr[i].pshm_netdev);
+	}
+	return result;
+}
+
+static void __exit caif_shmdev_exit(void)
+{
+	int i;
+
+	for (i = 0; i < MAX_SHM_INSTANCES; i++) {
+		caif_shmcore_remove(shmdev_lyr[i].pshm_netdev);
+		kfree((void *)shmdev_lyr[i].shm_base_addr);
+	}
+
+}
+
+module_init(caif_shmdev_init);
+module_exit(caif_shmdev_exit);
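One detail worth flagging in caif_shm_u5500.c: module_param() and MODULE_PARM_DESC() must both name the actual variable, and each description has to match its parameter (the shm_size/shm_start pair above is easy to get crossed). A minimal sketch of the convention, with hypothetical parameter names:

#include <linux/module.h>

static unsigned int demo_size;	/* hypothetical parameters */
static unsigned int demo_start;

/* 0440: readable by owner and group via /sys/module/<mod>/parameters/ */
module_param(demo_size, uint, 0440);
MODULE_PARM_DESC(demo_size, "Size of the demo region in bytes");

module_param(demo_start, uint, 0440);
MODULE_PARM_DESC(demo_start, "Physical start address of the demo region");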
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
new file mode 100644
index 000000000000..19f9c0656667
--- /dev/null
+++ b/drivers/net/caif/caif_shmcore.c
@@ -0,0 +1,744 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * Authors: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com,
+ *          Daniel Martensson / daniel.martensson@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":" __func__ "():" fmt
+
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/caif/caif_device.h>
+#include <net/caif/caif_shm.h>
+
+#define NR_TX_BUF		6
+#define NR_RX_BUF		6
+#define TX_BUF_SZ		0x2000
+#define RX_BUF_SZ		0x2000
+
+#define CAIF_NEEDED_HEADROOM	32
+
+#define CAIF_FLOW_ON		1
+#define CAIF_FLOW_OFF		0
+
+#define LOW_WATERMARK		3
+#define HIGH_WATERMARK		4
+
+/* Maximum number of CAIF buffers per shared memory buffer. */
+#define SHM_MAX_FRMS_PER_BUF	10
+
+/*
+ * Size in bytes of the descriptor area
+ * (With end of descriptor signalling)
+ */
+#define SHM_CAIF_DESC_SIZE	((SHM_MAX_FRMS_PER_BUF + 1) * \
+					sizeof(struct shm_pck_desc))
+
+/*
+ * Offset to the first CAIF frame within a shared memory buffer.
+ * Aligned on 32 bytes.
+ */
+#define SHM_CAIF_FRM_OFS	(SHM_CAIF_DESC_SIZE + (SHM_CAIF_DESC_SIZE % 32))
+
+/* Number of bytes for CAIF shared memory header. */
+#define SHM_HDR_LEN		1
+
+/* Number of padding bytes for the complete CAIF frame. */
+#define SHM_FRM_PAD_LEN		4
+
+#define CAIF_MAX_MTU		4096
+
+#define SHM_SET_FULL(x)	(((x+1) & 0x0F) << 0)
+#define SHM_GET_FULL(x)	(((x >> 0) & 0x0F) - 1)
+
+#define SHM_SET_EMPTY(x)	(((x+1) & 0x0F) << 4)
+#define SHM_GET_EMPTY(x)	(((x >> 4) & 0x0F) - 1)
+
+#define SHM_FULL_MASK		(0x0F << 0)
+#define SHM_EMPTY_MASK		(0x0F << 4)
+
+struct shm_pck_desc {
+	/*
+	 * Offset from start of shared memory area to start of
+	 * shared memory CAIF frame.
+	 */
+	u32 frm_ofs;
+	u32 frm_len;
+};
+
+struct buf_list {
+	unsigned char *desc_vptr;
+	u32 phy_addr;
+	u32 index;
+	u32 len;
+	u32 frames;
+	u32 frm_ofs;
+	struct list_head list;
+};
+
+struct shm_caif_frm {
+	/* Number of bytes of padding before the CAIF frame. */
+	u8 hdr_ofs;
+};
+
+struct shmdrv_layer {
+	/* caif_dev_common must always be first in the structure */
+	struct caif_dev_common cfdev;
+
+	u32 shm_tx_addr;
+	u32 shm_rx_addr;
+	u32 shm_base_addr;
+	u32 tx_empty_available;
+	spinlock_t lock;
+
+	struct list_head tx_empty_list;
+	struct list_head tx_pend_list;
+	struct list_head tx_full_list;
+	struct list_head rx_empty_list;
+	struct list_head rx_pend_list;
+	struct list_head rx_full_list;
+
+	struct workqueue_struct *pshm_tx_workqueue;
+	struct workqueue_struct *pshm_rx_workqueue;
+
+	struct work_struct shm_tx_work;
+	struct work_struct shm_rx_work;
+
+	struct sk_buff_head sk_qhead;
+	struct shmdev_layer *pshm_dev;
+};
+
+static int shm_netdev_open(struct net_device *shm_netdev)
+{
+	netif_wake_queue(shm_netdev);
+	return 0;
+}
+
+static int shm_netdev_close(struct net_device *shm_netdev)
+{
+	netif_stop_queue(shm_netdev);
+	return 0;
+}
+
+int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
+{
+	struct buf_list *pbuf;
+	struct shmdrv_layer *pshm_drv;
+	struct list_head *pos;
+	u32 avail_emptybuff = 0;
+	unsigned long flags = 0;
+
+	pshm_drv = (struct shmdrv_layer *)priv;
+
+	/* Check for received buffers. */
+	if (mbx_msg & SHM_FULL_MASK) {
+		int idx;
+
+		spin_lock_irqsave(&pshm_drv->lock, flags);
+
+		/* Check whether we have any outstanding buffers. */
+		if (list_empty(&pshm_drv->rx_empty_list)) {
+
+			/* Release spin lock. */
+			spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+			/* We print even in IRQ context... */
+			pr_warn("No empty Rx buffers to fill: "
+					"mbx_msg:%x\n", mbx_msg);
+
+			/* Bail out. */
+			goto err_sync;
+		}
+
+		pbuf =
+			list_entry(pshm_drv->rx_empty_list.next,
+					struct buf_list, list);
+		idx = pbuf->index;
+
+		/* Check buffer synchronization. */
+		if (idx != SHM_GET_FULL(mbx_msg)) {
+
+			/* We print even in IRQ context... */
+			pr_warn(
+			"phyif_shm_mbx_msg_cb: RX full out of sync:"
+			" idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n",
+					idx, mbx_msg, SHM_GET_FULL(mbx_msg));
+
+			spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+			/* Bail out. */
+			goto err_sync;
+		}
+
+		list_del_init(&pbuf->list);
+		list_add_tail(&pbuf->list, &pshm_drv->rx_full_list);
+
+		spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+		/* Schedule RX work queue. */
+		if (!work_pending(&pshm_drv->shm_rx_work))
+			queue_work(pshm_drv->pshm_rx_workqueue,
+					&pshm_drv->shm_rx_work);
+	}
+
+	/* Check for emptied buffers. */
+	if (mbx_msg & SHM_EMPTY_MASK) {
+		int idx;
+
+		spin_lock_irqsave(&pshm_drv->lock, flags);
+
+		/* Check whether we have any outstanding buffers. */
+		if (list_empty(&pshm_drv->tx_full_list)) {
+
+			/* We print even in IRQ context... */
+			pr_warn("No TX to empty: msg:%x\n", mbx_msg);
+
+			spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+			/* Bail out. */
+			goto err_sync;
+		}
+
+		pbuf =
+			list_entry(pshm_drv->tx_full_list.next,
+					struct buf_list, list);
+		idx = pbuf->index;
+
+		/* Check buffer synchronization. */
+		if (idx != SHM_GET_EMPTY(mbx_msg)) {
+
+			spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+			/* We print even in IRQ context... */
+			pr_warn("TX empty "
+				"out of sync:idx:%d, msg:%x\n", idx, mbx_msg);
+
+			/* Bail out. */
+			goto err_sync;
+		}
+		list_del_init(&pbuf->list);
+
+		/* Reset buffer parameters. */
+		pbuf->frames = 0;
+		pbuf->frm_ofs = SHM_CAIF_FRM_OFS;
+
+		list_add_tail(&pbuf->list, &pshm_drv->tx_empty_list);
+
+		/* Check the available no. of buffers in the empty list */
+		list_for_each(pos, &pshm_drv->tx_empty_list)
+			avail_emptybuff++;
+
+		/* Check whether we have to wake up the transmitter. */
+		if ((avail_emptybuff > HIGH_WATERMARK) &&
+				(!pshm_drv->tx_empty_available)) {
+			pshm_drv->tx_empty_available = 1;
+			pshm_drv->cfdev.flowctrl
+					(pshm_drv->pshm_dev->pshm_netdev,
+								CAIF_FLOW_ON);
+
+			spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+			/* Schedule the work queue if required. */
+			if (!work_pending(&pshm_drv->shm_tx_work))
+				queue_work(pshm_drv->pshm_tx_workqueue,
+						&pshm_drv->shm_tx_work);
+		} else
+			spin_unlock_irqrestore(&pshm_drv->lock, flags);
+	}
+
+	return 0;
+
+err_sync:
+	return -EIO;
+}
+
+static void shm_rx_work_func(struct work_struct *rx_work)
+{
+	struct shmdrv_layer *pshm_drv;
+	struct buf_list *pbuf;
+	unsigned long flags = 0;
+	struct sk_buff *skb;
+	char *p;
+	int ret;
+
+	pshm_drv = container_of(rx_work, struct shmdrv_layer, shm_rx_work);
+
+	while (1) {
+
+		struct shm_pck_desc *pck_desc;
+
+		spin_lock_irqsave(&pshm_drv->lock, flags);
+
+		/* Check for received buffers. */
+		if (list_empty(&pshm_drv->rx_full_list)) {
+			spin_unlock_irqrestore(&pshm_drv->lock, flags);
+			break;
+		}
+
+		pbuf =
+			list_entry(pshm_drv->rx_full_list.next, struct buf_list,
+					list);
+		list_del_init(&pbuf->list);
+
+		/* Retrieve pointer to start of the packet descriptor area. */
+		pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;
+
+		/*
+		 * Check whether descriptor contains a CAIF shared memory
+		 * frame.
+		 */
+		while (pck_desc->frm_ofs) {
+			unsigned int frm_buf_ofs;
+			unsigned int frm_pck_ofs;
+			unsigned int frm_pck_len;
+			/*
+			 * Check whether offset is within buffer limits
+			 * (lower).
+			 */
+			if (pck_desc->frm_ofs <
+				(pbuf->phy_addr - pshm_drv->shm_base_addr))
+				break;
+			/*
+			 * Check whether offset is within buffer limits
+			 * (higher).
+			 */
+			if (pck_desc->frm_ofs >
+				((pbuf->phy_addr - pshm_drv->shm_base_addr) +
+					pbuf->len))
+				break;
+
+			/* Calculate offset from start of buffer. */
+			frm_buf_ofs =
+				pck_desc->frm_ofs - (pbuf->phy_addr -
+						pshm_drv->shm_base_addr);
+
+			/*
+			 * Calculate offset and length of CAIF packet while
+			 * taking care of the shared memory header.
+			 */
+			frm_pck_ofs =
+				frm_buf_ofs + SHM_HDR_LEN +
+					(*(pbuf->desc_vptr + frm_buf_ofs));
+			frm_pck_len =
+				(pck_desc->frm_len - SHM_HDR_LEN -
+					(*(pbuf->desc_vptr + frm_buf_ofs)));
+
+			/* Check whether CAIF packet is within buffer limits */
+			if ((frm_pck_ofs + pck_desc->frm_len) > pbuf->len)
+				break;
+
+			/* Get a suitable CAIF packet and copy in data. */
+			skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
+							frm_pck_len + 1);
+			BUG_ON(skb == NULL);
+
+			p = skb_put(skb, frm_pck_len);
+			memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);
+
+			skb->protocol = htons(ETH_P_CAIF);
+			skb_reset_mac_header(skb);
+			skb->dev = pshm_drv->pshm_dev->pshm_netdev;
+
+			/* Push received packet up the stack. */
+			ret = netif_rx_ni(skb);
+
+			if (!ret) {
+				pshm_drv->pshm_dev->pshm_netdev->stats.
+								rx_packets++;
+				pshm_drv->pshm_dev->pshm_netdev->stats.
+						rx_bytes += pck_desc->frm_len;
+			} else
+				++pshm_drv->pshm_dev->pshm_netdev->stats.
+								rx_dropped;
+			/* Move to next packet descriptor. */
+			pck_desc++;
+		}
+
+		list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
+
+		spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+	}
+
+	/* Schedule the work queue if required. */
+	if (!work_pending(&pshm_drv->shm_tx_work))
+		queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
+
+}
+
+static void shm_tx_work_func(struct work_struct *tx_work)
+{
+	u32 mbox_msg;
+	unsigned int frmlen, avail_emptybuff, append = 0;
+	unsigned long flags = 0;
+	struct buf_list *pbuf = NULL;
+	struct shmdrv_layer *pshm_drv;
+	struct shm_caif_frm *frm;
+	struct sk_buff *skb;
+	struct shm_pck_desc *pck_desc;
+	struct list_head *pos;
+
+	pshm_drv = container_of(tx_work, struct shmdrv_layer, shm_tx_work);
+
+	do {
+		/* Initialize mailbox message. */
+		mbox_msg = 0x00;
+		avail_emptybuff = 0;
+
+		spin_lock_irqsave(&pshm_drv->lock, flags);
+
+		/* Check for pending receive buffers. */
+		if (!list_empty(&pshm_drv->rx_pend_list)) {
+
+			pbuf = list_entry(pshm_drv->rx_pend_list.next,
+						struct buf_list, list);
+
+			list_del_init(&pbuf->list);
+			list_add_tail(&pbuf->list, &pshm_drv->rx_empty_list);
+			/*
+			 * Value index is never changed,
+			 * so read access should be safe.
+			 */
+			mbox_msg |= SHM_SET_EMPTY(pbuf->index);
+		}
+
+		skb = skb_peek(&pshm_drv->sk_qhead);
+
+		if (skb == NULL)
+			goto send_msg;
+
+		/* Check the available no. of buffers in the empty list */
+		list_for_each(pos, &pshm_drv->tx_empty_list)
+			avail_emptybuff++;
+
+		if ((avail_emptybuff < LOW_WATERMARK) &&
+					pshm_drv->tx_empty_available) {
+			/* Update blocking condition. */
+			pshm_drv->tx_empty_available = 0;
+			pshm_drv->cfdev.flowctrl
+					(pshm_drv->pshm_dev->pshm_netdev,
+					CAIF_FLOW_OFF);
+		}
+		/*
+		 * We simply return back to the caller if we do not have space
+		 * either in Tx pending list or Tx empty list. In this case,
+		 * we hold the received skb in the skb list, waiting to
+		 * be transmitted once Tx buffers become available.
+		 */
+		if (list_empty(&pshm_drv->tx_empty_list))
+			goto send_msg;
+
+		/* Get the first free Tx buffer. */
+		pbuf = list_entry(pshm_drv->tx_empty_list.next,
+						struct buf_list, list);
+		do {
+			if (append) {
+				skb = skb_peek(&pshm_drv->sk_qhead);
+				if (skb == NULL)
+					break;
+			}
+
+			frm = (struct shm_caif_frm *)
+					(pbuf->desc_vptr + pbuf->frm_ofs);
+
+			frm->hdr_ofs = 0;
+			frmlen = 0;
+			frmlen += SHM_HDR_LEN + frm->hdr_ofs + skb->len;
+
+			/* Add tail padding if needed. */
+			if (frmlen % SHM_FRM_PAD_LEN)
+				frmlen += SHM_FRM_PAD_LEN -
+						(frmlen % SHM_FRM_PAD_LEN);
+
+			/*
+			 * Verify that packet, header and additional padding
+			 * can fit within the buffer frame area.
+			 */
+			if (frmlen >= (pbuf->len - pbuf->frm_ofs))
+				break;
+
+			if (!append) {
+				list_del_init(&pbuf->list);
+				append = 1;
+			}
+
+			skb = skb_dequeue(&pshm_drv->sk_qhead);
+			/* Copy in CAIF frame. */
+			skb_copy_bits(skb, 0, pbuf->desc_vptr +
+					pbuf->frm_ofs + SHM_HDR_LEN +
+					frm->hdr_ofs, skb->len);
+
+			pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
+			pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
+									frmlen;
+			dev_kfree_skb(skb);
+
+			/* Fill in the shared memory packet descriptor area. */
+			pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
+			/* Forward to current frame. */
+			pck_desc += pbuf->frames;
+			pck_desc->frm_ofs = (pbuf->phy_addr -
+						pshm_drv->shm_base_addr) +
+						pbuf->frm_ofs;
+			pck_desc->frm_len = frmlen;
+			/* Terminate packet descriptor area. */
+			pck_desc++;
+			pck_desc->frm_ofs = 0;
+			/* Update buffer parameters. */
+			pbuf->frames++;
+			pbuf->frm_ofs += frmlen + (frmlen % 32);
+
+		} while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);
+
+		/* Assign buffer as full. */
+		list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
+		append = 0;
+		mbox_msg |= SHM_SET_FULL(pbuf->index);
+send_msg:
+		spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+		if (mbox_msg)
+			pshm_drv->pshm_dev->pshmdev_mbxsend
+					(pshm_drv->pshm_dev->shm_id, mbox_msg);
+	} while (mbox_msg);
+}
+
+static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
+{
+	struct shmdrv_layer *pshm_drv;
+	unsigned long flags = 0;
+
+	pshm_drv = netdev_priv(shm_netdev);
+
+	spin_lock_irqsave(&pshm_drv->lock, flags);
+
+	skb_queue_tail(&pshm_drv->sk_qhead, skb);
+
+	spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+	/* Schedule Tx work queue for deferred processing of skbs. */
+	if (!work_pending(&pshm_drv->shm_tx_work))
+		queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
+
+	return 0;
+}
+
+static const struct net_device_ops netdev_ops = {
+	.ndo_open = shm_netdev_open,
+	.ndo_stop = shm_netdev_close,
+	.ndo_start_xmit = shm_netdev_tx,
+};
+
+static void shm_netdev_setup(struct net_device *pshm_netdev)
+{
+	struct shmdrv_layer *pshm_drv;
+	pshm_netdev->netdev_ops = &netdev_ops;
+
+	pshm_netdev->mtu = CAIF_MAX_MTU;
+	pshm_netdev->type = ARPHRD_CAIF;
+	pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM;
+	pshm_netdev->tx_queue_len = 0;
+	pshm_netdev->destructor = free_netdev;
+
+	pshm_drv = netdev_priv(pshm_netdev);
+
+	/* Initialize structures in a clean state. */
+	memset(pshm_drv, 0, sizeof(struct shmdrv_layer));
+
+	pshm_drv->cfdev.link_select = CAIF_LINK_LOW_LATENCY;
+}
+
+int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
+{
+	int result, j;
+	struct shmdrv_layer *pshm_drv = NULL;
+
+	pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
+						"cfshm%d", shm_netdev_setup);
+	if (!pshm_dev->pshm_netdev)
+		return -ENOMEM;
+
+	pshm_drv = netdev_priv(pshm_dev->pshm_netdev);
+	pshm_drv->pshm_dev = pshm_dev;
+
+	/*
+	 * Initialization starts with the verification of the
+	 * availability of MBX driver by calling its setup function.
+	 * MBX driver must be available by this time for proper
+	 * functioning of SHM driver.
+	 */
+	if ((pshm_dev->pshmdev_mbxsetup
+			(caif_shmdrv_rx_cb, pshm_dev, pshm_drv)) != 0) {
+		pr_warn("Could not config. SHM Mailbox,"
+				" Bailing out.....\n");
+		free_netdev(pshm_dev->pshm_netdev);
+		return -ENODEV;
+	}
+
+	skb_queue_head_init(&pshm_drv->sk_qhead);
+
+	pr_info("SHM DEVICE[%d] PROBED BY DRIVER, NEW SHM DRIVER"
+			" INSTANCE AT pshm_drv = 0x%p\n",
+			pshm_drv->pshm_dev->shm_id, pshm_drv);
+
+	if (pshm_dev->shm_total_sz <
+			(NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) {
+
+		pr_warn("ERROR, Amount of available"
+				" Phys. SHM cannot accommodate current SHM "
+				"driver configuration, Bailing out ...\n");
+		free_netdev(pshm_dev->pshm_netdev);
+		return -ENOMEM;
+	}
+
+	pshm_drv->shm_base_addr = pshm_dev->shm_base_addr;
+	pshm_drv->shm_tx_addr = pshm_drv->shm_base_addr;
+
+	if (pshm_dev->shm_loopback)
+		pshm_drv->shm_rx_addr = pshm_drv->shm_tx_addr;
+	else
+		pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
+						(NR_TX_BUF * TX_BUF_SZ);
+
+	INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
+	INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
+	INIT_LIST_HEAD(&pshm_drv->tx_full_list);
+
+	INIT_LIST_HEAD(&pshm_drv->rx_empty_list);
+	INIT_LIST_HEAD(&pshm_drv->rx_pend_list);
+	INIT_LIST_HEAD(&pshm_drv->rx_full_list);
+
+	INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
+	INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);
+
+	pshm_drv->pshm_tx_workqueue =
+				create_singlethread_workqueue("shm_tx_work");
+	pshm_drv->pshm_rx_workqueue =
+				create_singlethread_workqueue("shm_rx_work");
+
+	for (j = 0; j < NR_TX_BUF; j++) {
+		struct buf_list *tx_buf =
+				kmalloc(sizeof(struct buf_list), GFP_KERNEL);
+
+		if (tx_buf == NULL) {
+			pr_warn("ERROR, Could not"
+					" allocate dynamic mem. for tx_buf,"
+					" Bailing out ...\n");
+			free_netdev(pshm_dev->pshm_netdev);
+			return -ENOMEM;
+		}
+		tx_buf->index = j;
+		tx_buf->phy_addr = pshm_drv->shm_tx_addr + (TX_BUF_SZ * j);
+		tx_buf->len = TX_BUF_SZ;
+		tx_buf->frames = 0;
+		tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;
+
+		if (pshm_dev->shm_loopback)
+			tx_buf->desc_vptr = (char *)tx_buf->phy_addr;
+		else
+			tx_buf->desc_vptr =
+					ioremap(tx_buf->phy_addr, TX_BUF_SZ);
+
+		list_add_tail(&tx_buf->list, &pshm_drv->tx_empty_list);
+	}
+
+	for (j = 0; j < NR_RX_BUF; j++) {
+		struct buf_list *rx_buf =
+				kmalloc(sizeof(struct buf_list), GFP_KERNEL);
+
+		if (rx_buf == NULL) {
+			pr_warn("ERROR, Could not"
+					" allocate dynamic mem. for rx_buf,"
+					" Bailing out ...\n");
+			free_netdev(pshm_dev->pshm_netdev);
+			return -ENOMEM;
+		}
+		rx_buf->index = j;
+		rx_buf->phy_addr = pshm_drv->shm_rx_addr + (RX_BUF_SZ * j);
+		rx_buf->len = RX_BUF_SZ;
+
+		if (pshm_dev->shm_loopback)
+			rx_buf->desc_vptr = (char *)rx_buf->phy_addr;
+		else
+			rx_buf->desc_vptr =
+					ioremap(rx_buf->phy_addr, RX_BUF_SZ);
+		list_add_tail(&rx_buf->list, &pshm_drv->rx_empty_list);
+	}
+
+	pshm_drv->tx_empty_available = 1;
+	result = register_netdev(pshm_dev->pshm_netdev);
+	if (result)
+		pr_warn("ERROR[%d], SHM could not register"
+				" with NW FRMWK, Bailing out ...\n", result);
+
+	return result;
+}
+
+void caif_shmcore_remove(struct net_device *pshm_netdev)
+{
+	struct buf_list *pbuf;
+	struct shmdrv_layer *pshm_drv = NULL;
+
+	pshm_drv = netdev_priv(pshm_netdev);
+
+	while (!(list_empty(&pshm_drv->tx_pend_list))) {
+		pbuf =
+			list_entry(pshm_drv->tx_pend_list.next,
+					struct buf_list, list);
+
+		list_del(&pbuf->list);
+		kfree(pbuf);
+	}
+
+	while (!(list_empty(&pshm_drv->tx_full_list))) {
+		pbuf =
+			list_entry(pshm_drv->tx_full_list.next,
+					struct buf_list, list);
+		list_del(&pbuf->list);
+		kfree(pbuf);
+	}
+
+	while (!(list_empty(&pshm_drv->tx_empty_list))) {
+		pbuf =
+			list_entry(pshm_drv->tx_empty_list.next,
+					struct buf_list, list);
+		list_del(&pbuf->list);
+		kfree(pbuf);
+	}
+
+	while (!(list_empty(&pshm_drv->rx_full_list))) {
+		pbuf =
+			list_entry(pshm_drv->rx_full_list.next,
+					struct buf_list, list);
+		list_del(&pbuf->list);
+		kfree(pbuf);
+	}
+
+	while (!(list_empty(&pshm_drv->rx_pend_list))) {
+		pbuf =
+			list_entry(pshm_drv->rx_pend_list.next,
+					struct buf_list, list);
+		list_del(&pbuf->list);
+		kfree(pbuf);
+	}
+
+	while (!(list_empty(&pshm_drv->rx_empty_list))) {
+		pbuf =
+			list_entry(pshm_drv->rx_empty_list.next,
+					struct buf_list, list);
+		list_del(&pbuf->list);
+		kfree(pbuf);
+	}
+
+	/* Destroy work queues. */
+	destroy_workqueue(pshm_drv->pshm_tx_workqueue);
+	destroy_workqueue(pshm_drv->pshm_rx_workqueue);
+
+	unregister_netdev(pshm_netdev);
+}
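The mailbox word exchanged by caif_shmdrv_rx_cb() and shm_tx_work_func() above multiplexes two 4-bit fields: bits 0-3 carry the index of a buffer the peer filled and bits 4-7 the index of one it emptied, each biased by one so that a zero field means "nothing signalled". The following is a self-contained userspace check of that encoding and of the SHM_FRM_PAD_LEN tail-padding rule; the macro bodies are copied from the driver, the rest is test scaffolding:

#include <assert.h>
#include <stdio.h>

/* Encoding copied from caif_shmcore.c: a buffer index is stored +1
 * in a 4-bit field, so a field value of 0 means "no buffer".
 */
#define SHM_SET_FULL(x)	(((x+1) & 0x0F) << 0)
#define SHM_GET_FULL(x)	(((x >> 0) & 0x0F) - 1)
#define SHM_SET_EMPTY(x)	(((x+1) & 0x0F) << 4)
#define SHM_GET_EMPTY(x)	(((x >> 4) & 0x0F) - 1)
#define SHM_FULL_MASK		(0x0F << 0)
#define SHM_EMPTY_MASK		(0x0F << 4)

#define SHM_FRM_PAD_LEN 4

/* Tail padding as done in shm_tx_work_func(): round the frame length
 * up to the next multiple of SHM_FRM_PAD_LEN.
 */
static unsigned int pad_frame_len(unsigned int frmlen)
{
	if (frmlen % SHM_FRM_PAD_LEN)
		frmlen += SHM_FRM_PAD_LEN - (frmlen % SHM_FRM_PAD_LEN);
	return frmlen;
}

int main(void)
{
	/* Signal "buffer 2 is now full" and "buffer 5 is now empty". */
	unsigned int mbx_msg = SHM_SET_FULL(2) | SHM_SET_EMPTY(5);

	assert(mbx_msg & SHM_FULL_MASK);
	assert(mbx_msg & SHM_EMPTY_MASK);
	assert(SHM_GET_FULL(mbx_msg) == 2);
	assert(SHM_GET_EMPTY(mbx_msg) == 5);

	assert(pad_frame_len(61) == 64);	/* rounded up */
	assert(pad_frame_len(64) == 64);	/* already aligned */

	printf("mbx_msg = 0x%02x\n", mbx_msg);	/* prints 0x63 */
	return 0;
}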
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 9d9e45394433..080574b0fff0 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -82,6 +82,14 @@ config CAN_FLEXCAN
 	---help---
 	  Say Y here if you want support for Freescale FlexCAN.
 
+config PCH_CAN
+	tristate "PCH CAN"
+	depends on CAN_DEV && PCI
+	---help---
+	  This driver is for the PCH CAN controller of Topcliff, an IOH
+	  for x86 embedded processors. It provides access to the CAN bus.
+
 source "drivers/net/can/mscan/Kconfig"
 
 source "drivers/net/can/sja1000/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 00575373bbd0..90af15a4f106 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -17,5 +17,6 @@ obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
 obj-$(CONFIG_CAN_BFIN)		+= bfin_can.o
 obj-$(CONFIG_CAN_JANZ_ICAN3)	+= janz-ican3.o
 obj-$(CONFIG_CAN_FLEXCAN)	+= flexcan.o
+obj-$(CONFIG_PCH_CAN)		+= pch_can.o
 
 ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 2d8bd86bc5e2..cee98fa668bd 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -2,7 +2,7 @@
  * at91_can.c - CAN network driver for AT91 SoC CAN controller
  *
  * (C) 2007 by Hans J. Koch <hjk@linutronix.de>
- * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
+ * (C) 2008, 2009, 2010 by Marc Kleine-Budde <kernel@pengutronix.de>
  *
  * This software may be distributed under the terms of the GNU General
  * Public License ("GPL") version 2 as distributed in the 'COPYING'
@@ -40,7 +40,6 @@
 
 #include <mach/board.h>
 
-#define DRV_NAME "at91_can"
 #define AT91_NAPI_WEIGHT 12
 
 /*
@@ -172,6 +171,7 @@ struct at91_priv {
 };
 
 static struct can_bittiming_const at91_bittiming_const = {
+	.name		= KBUILD_MODNAME,
 	.tseg1_min	= 4,
 	.tseg1_max	= 16,
 	.tseg2_min	= 2,
@@ -199,13 +199,13 @@ static inline int get_tx_echo_mb(const struct at91_priv *priv)
 
 static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
 {
-	return readl(priv->reg_base + reg);
+	return __raw_readl(priv->reg_base + reg);
 }
 
 static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg,
 		u32 value)
 {
-	writel(value, priv->reg_base + reg);
+	__raw_writel(value, priv->reg_base + reg);
 }
 
 static inline void set_mb_mode_prio(const struct at91_priv *priv,
@@ -243,6 +243,12 @@ static void at91_setup_mailboxes(struct net_device *dev)
 		set_mb_mode(priv, i, AT91_MB_MODE_RX);
 	set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);
 
+	/* reset acceptance mask and id register */
+	for (i = AT91_MB_RX_FIRST; i <= AT91_MB_RX_LAST; i++) {
+		at91_write(priv, AT91_MAM(i), 0x0);
+		at91_write(priv, AT91_MID(i), AT91_MID_MIDE);
+	}
+
 	/* The last 4 mailboxes are used for transmitting. */
 	for (i = AT91_MB_TX_FIRST; i <= AT91_MB_TX_LAST; i++)
 		set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
@@ -257,18 +263,30 @@ static int at91_set_bittiming(struct net_device *dev)
 	const struct can_bittiming *bt = &priv->can.bittiming;
 	u32 reg_br;
 
-	reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) << 24) |
+	reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 << 24 : 0) |
 		((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) |
 		((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) |
 		((bt->phase_seg2 - 1) << 0);
 
-	dev_info(dev->dev.parent, "writing AT91_BR: 0x%08x\n", reg_br);
+	netdev_info(dev, "writing AT91_BR: 0x%08x\n", reg_br);
 
 	at91_write(priv, AT91_BR, reg_br);
 
 	return 0;
 }
 
+static int at91_get_berr_counter(const struct net_device *dev,
+		struct can_berr_counter *bec)
+{
+	const struct at91_priv *priv = netdev_priv(dev);
+	u32 reg_ecr = at91_read(priv, AT91_ECR);
+
+	bec->rxerr = reg_ecr & 0xff;
+	bec->txerr = reg_ecr >> 16;
+
+	return 0;
+}
+
 static void at91_chip_start(struct net_device *dev)
 {
 	struct at91_priv *priv = netdev_priv(dev);
@@ -281,6 +299,7 @@ static void at91_chip_start(struct net_device *dev)
 	reg_mr = at91_read(priv, AT91_MR);
 	at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);
 
+	at91_set_bittiming(dev);
 	at91_setup_mailboxes(dev);
 	at91_transceiver_switch(priv, 1);
 
@@ -350,8 +369,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) {
 		netif_stop_queue(dev);
 
-		dev_err(dev->dev.parent,
-			"BUG! TX buffer full when queue awake!\n");
+		netdev_err(dev, "BUG! TX buffer full when queue awake!\n");
 		return NETDEV_TX_BUSY;
 	}
 
@@ -435,7 +453,7 @@ static void at91_rx_overflow_err(struct net_device *dev)
 	struct sk_buff *skb;
 	struct can_frame *cf;
 
-	dev_dbg(dev->dev.parent, "RX buffer overflow\n");
+	netdev_dbg(dev, "RX buffer overflow\n");
 	stats->rx_over_errors++;
 	stats->rx_errors++;
 
@@ -480,6 +498,9 @@ static void at91_read_mb(struct net_device *dev, unsigned int mb,
 	*(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
 	*(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
 
+	/* allow RX of extended frames */
+	at91_write(priv, AT91_MID(mb), AT91_MID_MIDE);
+
 	if (unlikely(mb == AT91_MB_RX_LAST && reg_msr & AT91_MSR_MMI))
 		at91_rx_overflow_err(dev);
 }
@@ -565,8 +586,8 @@ static int at91_poll_rx(struct net_device *dev, int quota)
 
 	if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
 	    reg_sr & AT91_MB_RX_LOW_MASK)
-		dev_info(dev->dev.parent,
-			 "order of incoming frames cannot be guaranteed\n");
+		netdev_info(dev,
+			"order of incoming frames cannot be guaranteed\n");
 
  again:
 	for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next);
@@ -604,7 +625,7 @@ static void at91_poll_err_frame(struct net_device *dev,
 
 	/* CRC error */
 	if (reg_sr & AT91_IRQ_CERR) {
-		dev_dbg(dev->dev.parent, "CERR irq\n");
+		netdev_dbg(dev, "CERR irq\n");
 		dev->stats.rx_errors++;
 		priv->can.can_stats.bus_error++;
 		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -612,7 +633,7 @@ static void at91_poll_err_frame(struct net_device *dev,
 
 	/* Stuffing Error */
 	if (reg_sr & AT91_IRQ_SERR) {
-		dev_dbg(dev->dev.parent, "SERR irq\n");
+		netdev_dbg(dev, "SERR irq\n");
 		dev->stats.rx_errors++;
 		priv->can.can_stats.bus_error++;
 		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -621,14 +642,14 @@ static void at91_poll_err_frame(struct net_device *dev,
 
 	/* Acknowledgement Error */
 	if (reg_sr & AT91_IRQ_AERR) {
-		dev_dbg(dev->dev.parent, "AERR irq\n");
+		netdev_dbg(dev, "AERR irq\n");
 		dev->stats.tx_errors++;
 		cf->can_id |= CAN_ERR_ACK;
 	}
 
 	/* Form error */
 	if (reg_sr & AT91_IRQ_FERR) {
-		dev_dbg(dev->dev.parent, "FERR irq\n");
+		netdev_dbg(dev, "FERR irq\n");
 		dev->stats.rx_errors++;
 		priv->can.can_stats.bus_error++;
 		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -637,7 +658,7 @@ static void at91_poll_err_frame(struct net_device *dev,
 
 	/* Bit Error */
 	if (reg_sr & AT91_IRQ_BERR) {
-		dev_dbg(dev->dev.parent, "BERR irq\n");
+		netdev_dbg(dev, "BERR irq\n");
 		dev->stats.tx_errors++;
 		priv->can.can_stats.bus_error++;
 		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -755,12 +776,10 @@ static void at91_irq_err_state(struct net_device *dev,
 		struct can_frame *cf, enum can_state new_state)
 {
 	struct at91_priv *priv = netdev_priv(dev);
-	u32 reg_idr, reg_ier, reg_ecr;
-	u8 tec, rec;
+	u32 reg_idr = 0, reg_ier = 0;
+	struct can_berr_counter bec;
 
-	reg_ecr = at91_read(priv, AT91_ECR);
-	rec = reg_ecr & 0xff;
-	tec = reg_ecr >> 16;
+	at91_get_berr_counter(dev, &bec);
 
 	switch (priv->can.state) {
 	case CAN_STATE_ERROR_ACTIVE:
@@ -771,11 +790,11 @@ static void at91_irq_err_state(struct net_device *dev,
 		 */
 		if (new_state >= CAN_STATE_ERROR_WARNING &&
 		    new_state <= CAN_STATE_BUS_OFF) {
-			dev_dbg(dev->dev.parent, "Error Warning IRQ\n");
+			netdev_dbg(dev, "Error Warning IRQ\n");
 			priv->can.can_stats.error_warning++;
 
 			cf->can_id |= CAN_ERR_CRTL;
-			cf->data[1] = (tec > rec) ?
+			cf->data[1] = (bec.txerr > bec.rxerr) ?
 					CAN_ERR_CRTL_TX_WARNING :
 					CAN_ERR_CRTL_RX_WARNING;
 		}
@@ -787,11 +806,11 @@ static void at91_irq_err_state(struct net_device *dev,
 		 */
 		if (new_state >= CAN_STATE_ERROR_PASSIVE &&
 		    new_state <= CAN_STATE_BUS_OFF) {
-			dev_dbg(dev->dev.parent, "Error Passive IRQ\n");
+			netdev_dbg(dev, "Error Passive IRQ\n");
 			priv->can.can_stats.error_passive++;
 
 			cf->can_id |= CAN_ERR_CRTL;
-			cf->data[1] = (tec > rec) ?
+			cf->data[1] = (bec.txerr > bec.rxerr) ?
 					CAN_ERR_CRTL_TX_PASSIVE :
 					CAN_ERR_CRTL_RX_PASSIVE;
 		}
@@ -804,7 +823,7 @@ static void at91_irq_err_state(struct net_device *dev,
 		if (new_state <= CAN_STATE_ERROR_PASSIVE) {
 			cf->can_id |= CAN_ERR_RESTARTED;
 
-			dev_dbg(dev->dev.parent, "restarted\n");
+			netdev_dbg(dev, "restarted\n");
 			priv->can.can_stats.restarts++;
 
 			netif_carrier_on(dev);
@@ -825,7 +844,7 @@ static void at91_irq_err_state(struct net_device *dev,
 		 * circumstances. so just enable AT91_IRQ_ERRP, thus
 		 * the "fallthrough"
 		 */
-		dev_dbg(dev->dev.parent, "Error Active\n");
+		netdev_dbg(dev, "Error Active\n");
 		cf->can_id |= CAN_ERR_PROT;
 		cf->data[2] = CAN_ERR_PROT_ACTIVE;
 	case CAN_STATE_ERROR_WARNING:	/* fallthrough */
@@ -843,7 +862,7 @@ static void at91_irq_err_state(struct net_device *dev,
 
 		cf->can_id |= CAN_ERR_BUSOFF;
 
-		dev_dbg(dev->dev.parent, "bus-off\n");
+		netdev_dbg(dev, "bus-off\n");
 		netif_carrier_off(dev);
 		priv->can.can_stats.bus_off++;
 
@@ -881,7 +900,7 @@ static void at91_irq_err(struct net_device *dev)
 	else if (likely(reg_sr & AT91_IRQ_ERRA))
 		new_state = CAN_STATE_ERROR_ACTIVE;
 	else {
-		dev_err(dev->dev.parent, "BUG! hardware in undefined state\n");
+		netdev_err(dev, "BUG! hardware in undefined state\n");
 		return;
 	}
 
@@ -1018,7 +1037,7 @@ static const struct net_device_ops at91_netdev_ops = {
 	.ndo_start_xmit	= at91_start_xmit,
 };
 
-static int __init at91_can_probe(struct platform_device *pdev)
+static int __devinit at91_can_probe(struct platform_device *pdev)
 {
 	struct net_device *dev;
 	struct at91_priv *priv;
@@ -1067,8 +1086,8 @@ static int __init at91_can_probe(struct platform_device *pdev)
 	priv = netdev_priv(dev);
 	priv->can.clock.freq = clk_get_rate(clk);
 	priv->can.bittiming_const = &at91_bittiming_const;
-	priv->can.do_set_bittiming = at91_set_bittiming;
 	priv->can.do_set_mode = at91_set_mode;
+	priv->can.do_get_berr_counter = at91_get_berr_counter;
 	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
 	priv->reg_base = addr;
 	priv->dev = dev;
@@ -1092,7 +1111,7 @@ static int __init at91_can_probe(struct platform_device *pdev)
 	return 0;
 
  exit_free:
-	free_netdev(dev);
+	free_candev(dev);
 exit_iounmap:
 	iounmap(addr);
 exit_release:
@@ -1113,8 +1132,6 @@ static int __devexit at91_can_remove(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, NULL);
 
-	free_netdev(dev);
-
 	iounmap(priv->reg_base);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1122,6 +1139,8 @@ static int __devexit at91_can_remove(struct platform_device *pdev)
 
 	clk_put(priv->clk);
 
+	free_candev(dev);
+
 	return 0;
 }
 
@@ -1129,21 +1148,19 @@ static struct platform_driver at91_can_driver = {
 	.probe		= at91_can_probe,
 	.remove		= __devexit_p(at91_can_remove),
 	.driver		= {
-		.name	= DRV_NAME,
+		.name	= KBUILD_MODNAME,
 		.owner	= THIS_MODULE,
 	},
 };
 
 static int __init at91_can_module_init(void)
 {
-	printk(KERN_INFO "%s netdevice driver\n", DRV_NAME);
 	return platform_driver_register(&at91_can_driver);
 }
 
 static void __exit at91_can_module_exit(void)
 {
 	platform_driver_unregister(&at91_can_driver);
-	printk(KERN_INFO "%s: driver removed\n", DRV_NAME);
 }
 
 module_init(at91_can_module_init);
@@ -1151,4 +1168,4 @@ module_exit(at91_can_module_exit);
 
 MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
 MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION(DRV_NAME " CAN netdevice driver");
+MODULE_DESCRIPTION(KBUILD_MODNAME " CAN netdevice driver");
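The at91_set_bittiming() fix above deserves a note: CAN_CTRLMODE_3_SAMPLES is a flag value (0x04 in <linux/can/netlink.h>), so the old unconditional "ctrlmode << 24" landed it on the wrong bit of AT91_BR; the new code tests the flag and sets bit 24 explicitly. A runnable sketch of the corrected packing, with a local stand-in for the kernel's can_bittiming struct and made-up timing values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CAN_CTRLMODE_3_SAMPLES 0x04	/* from <linux/can/netlink.h> */

struct demo_bittiming {			/* local stand-in for can_bittiming */
	uint32_t brp, sjw, prop_seg, phase_seg1, phase_seg2;
};

/* Pack the AT91 baudrate register as the fixed at91_set_bittiming()
 * does: the triple-sampling flag is mapped to bit 24 explicitly.
 */
static uint32_t at91_pack_br(uint32_t ctrlmode,
			     const struct demo_bittiming *bt)
{
	return ((ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 << 24 : 0) |
		((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) |
		((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) |
		((bt->phase_seg2 - 1) << 0);
}

int main(void)
{
	/* Hypothetical timing values, just to exercise the packing. */
	struct demo_bittiming bt = {
		.brp = 4, .sjw = 1, .prop_seg = 2,
		.phase_seg1 = 7, .phase_seg2 = 2,
	};
	uint32_t reg_br = at91_pack_br(CAN_CTRLMODE_3_SAMPLES, &bt);

	assert(reg_br & (1 << 24));		/* sampling bit set */
	assert(((reg_br >> 16) & 0x3f) == 3);	/* brp - 1 */
	printf("AT91_BR = 0x%08x\n", reg_br);
	return 0;
}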
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index ef443a090ba7..d4990568baee 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -992,7 +992,6 @@ static int __devexit flexcan_remove(struct platform_device *pdev)
 
 	unregister_flexcandev(dev);
 	platform_set_drvdata(pdev, NULL);
-	free_candev(dev);
 	iounmap(priv->base);
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1000,6 +999,8 @@ static int __devexit flexcan_remove(struct platform_device *pdev)
 
 	clk_put(priv->clk);
 
+	free_candev(dev);
+
 	return 0;
 }
 
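Both this flexcan hunk and the matching at91_can change move free_candev() to the very end of the remove path: priv is embedded in the net_device that free_candev() releases, so freeing first would leave priv->base and priv->clk dangling when iounmap() and clk_put() run. The same ordering rule in a self-contained userspace sketch (names hypothetical):

#include <stdlib.h>

struct priv {
	void *base;		/* stands in for the ioremap'ed registers */
};

struct device {
	struct priv p;		/* private data is embedded in the device */
};

static void device_remove(struct device *dev)
{
	struct priv *priv = &dev->p;

	/* Release resources reachable through priv first... */
	free(priv->base);

	/* ...and free the enclosing object last. Freeing dev before
	 * touching priv would be a use-after-free, which is exactly
	 * what these hunks fix.
	 */
	free(dev);
}

int main(void)
{
	struct device *dev = malloc(sizeof(*dev));

	if (!dev)
		return 1;
	dev->p.base = malloc(64);
	device_remove(dev);
	return 0;
}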
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 6aadc3e32bd5..7ab534aee452 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -169,6 +169,7 @@
 #  define RXBSIDH_SHIFT 3
 #define RXBSIDL(n)  (((n) * 0x10) + 0x60 + RXBSIDL_OFF)
 #  define RXBSIDL_IDE  0x08
+#  define RXBSIDL_SRR  0x10
 #  define RXBSIDL_EID  3
 #  define RXBSIDL_SHIFT 5
 #define RXBEID8(n)  (((n) * 0x10) + 0x60 + RXBEID8_OFF)
@@ -475,6 +476,8 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
 		frame->can_id =
 			(buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) |
 			(buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT);
+		if (buf[RXBSIDL_OFF] & RXBSIDL_SRR)
+			frame->can_id |= CAN_RTR_FLAG;
 	}
 	/* Data length */
 	frame->can_dlc = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
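The hunk above ORs CAN_RTR_FLAG into can_id when the SRR bit of RXBSIDL is set, so received standard remote frames are reported as RTR. A runnable sketch of the same bit surgery; the register values in main() are made up, and CAN_RTR_FLAG is the SocketCAN value from <linux/can.h>:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RXBSIDH_SHIFT	3
#define RXBSIDL_SRR	0x10
#define RXBSIDL_SHIFT	5
#define CAN_RTR_FLAG	0x40000000U	/* from <linux/can.h> */

/* Rebuild an 11-bit standard CAN ID from the MCP251x RXBnSIDH/SIDL
 * register pair, flagging remote frames via the SRR bit, as
 * mcp251x_hw_rx() now does.
 */
static uint32_t mcp251x_std_id(uint8_t sidh, uint8_t sidl)
{
	uint32_t can_id = (sidh << RXBSIDH_SHIFT) | (sidl >> RXBSIDL_SHIFT);

	if (sidl & RXBSIDL_SRR)
		can_id |= CAN_RTR_FLAG;
	return can_id;
}

int main(void)
{
	/* Hypothetical register contents: ID 0x123 with SRR set. */
	uint8_t sidh = 0x24, sidl = 0x60 | RXBSIDL_SRR;
	uint32_t id = mcp251x_std_id(sidh, sidl);

	assert((id & 0x7FF) == 0x123);
	assert(id & CAN_RTR_FLAG);
	printf("can_id = 0x%08x\n", id);
	return 0;
}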
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
new file mode 100644
index 000000000000..55ec324caaf4
--- /dev/null
+++ b/drivers/net/can/pch_can.c
@@ -0,0 +1,1463 @@
1/*
2 * Copyright (C) 1999 - 2010 Intel Corporation.
3 * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
17 */
18
19#include <linux/interrupt.h>
20#include <linux/delay.h>
21#include <linux/io.h>
22#include <linux/module.h>
23#include <linux/sched.h>
24#include <linux/pci.h>
25#include <linux/init.h>
26#include <linux/kernel.h>
27#include <linux/types.h>
28#include <linux/errno.h>
29#include <linux/netdevice.h>
30#include <linux/skbuff.h>
31#include <linux/can.h>
32#include <linux/can/dev.h>
33#include <linux/can/error.h>
34
35#define MAX_MSG_OBJ 32
36#define MSG_OBJ_RX 0 /* The receive message object flag. */
37#define MSG_OBJ_TX 1 /* The transmit message object flag. */
38
39#define ENABLE 1 /* The enable flag */
40#define DISABLE 0 /* The disable flag */
41#define CAN_CTRL_INIT 0x0001 /* The INIT bit of CANCONT register. */
42#define CAN_CTRL_IE 0x0002 /* The IE bit of CAN control register */
43#define CAN_CTRL_IE_SIE_EIE 0x000e
44#define CAN_CTRL_CCE 0x0040
45#define CAN_CTRL_OPT 0x0080 /* The OPT bit of CANCONT register. */
46#define CAN_OPT_SILENT 0x0008 /* The Silent bit of CANOPT reg. */
47#define CAN_OPT_LBACK 0x0010 /* The LoopBack bit of CANOPT reg. */
48#define CAN_CMASK_RX_TX_SET 0x00f3
49#define CAN_CMASK_RX_TX_GET 0x0073
50#define CAN_CMASK_ALL 0xff
51#define CAN_CMASK_RDWR 0x80
52#define CAN_CMASK_ARB 0x20
53#define CAN_CMASK_CTRL 0x10
54#define CAN_CMASK_MASK 0x40
55#define CAN_CMASK_NEWDAT 0x04
56#define CAN_CMASK_CLRINTPND 0x08
57
58#define CAN_IF_MCONT_NEWDAT 0x8000
59#define CAN_IF_MCONT_INTPND 0x2000
60#define CAN_IF_MCONT_UMASK 0x1000
61#define CAN_IF_MCONT_TXIE 0x0800
62#define CAN_IF_MCONT_RXIE 0x0400
63#define CAN_IF_MCONT_RMTEN 0x0200
64#define CAN_IF_MCONT_TXRQXT 0x0100
65#define CAN_IF_MCONT_EOB 0x0080
66#define CAN_IF_MCONT_DLC 0x000f
67#define CAN_IF_MCONT_MSGLOST 0x4000
68#define CAN_MASK2_MDIR_MXTD 0xc000
69#define CAN_ID2_DIR 0x2000
70#define CAN_ID_MSGVAL 0x8000
71
72#define CAN_STATUS_INT 0x8000
73#define CAN_IF_CREQ_BUSY 0x8000
74#define CAN_ID2_XTD 0x4000
75
76#define CAN_REC 0x00007f00
77#define CAN_TEC 0x000000ff
78
79#define PCH_RX_OK 0x00000010
80#define PCH_TX_OK 0x00000008
81#define PCH_BUS_OFF 0x00000080
82#define PCH_EWARN 0x00000040
83#define PCH_EPASSIV 0x00000020
84#define PCH_LEC0 0x00000001
85#define PCH_LEC1 0x00000002
86#define PCH_LEC2 0x00000004
87#define PCH_LEC_ALL (PCH_LEC0 | PCH_LEC1 | PCH_LEC2)
88#define PCH_STUF_ERR PCH_LEC0
89#define PCH_FORM_ERR PCH_LEC1
90#define PCH_ACK_ERR (PCH_LEC0 | PCH_LEC1)
91#define PCH_BIT1_ERR PCH_LEC2
92#define PCH_BIT0_ERR (PCH_LEC0 | PCH_LEC2)
93#define PCH_CRC_ERR (PCH_LEC1 | PCH_LEC2)
94
95/* bit position of certain controller bits. */
96#define BIT_BITT_BRP 0
97#define BIT_BITT_SJW 6
98#define BIT_BITT_TSEG1 8
99#define BIT_BITT_TSEG2 12
100#define BIT_IF1_MCONT_RXIE 10
101#define BIT_IF2_MCONT_TXIE 11
102#define BIT_BRPE_BRPE 6
103#define BIT_ES_TXERRCNT 0
104#define BIT_ES_RXERRCNT 8
105#define MSK_BITT_BRP 0x3f
106#define MSK_BITT_SJW 0xc0
107#define MSK_BITT_TSEG1 0xf00
108#define MSK_BITT_TSEG2 0x7000
109#define MSK_BRPE_BRPE 0x3c0
110#define MSK_BRPE_GET 0x0f
111#define MSK_CTRL_IE_SIE_EIE 0x07
112#define MSK_MCONT_TXIE 0x08
113#define MSK_MCONT_RXIE 0x10
114#define PCH_CAN_NO_TX_BUFF 1
115#define COUNTER_LIMIT 10
116
117#define PCH_CAN_CLK 50000000 /* 50MHz */
118
119/* Define the number of message objects.
120 * PCH CAN communication is done via the Message RAM,
121 * which consists of 32 message objects. */
122#define PCH_RX_OBJ_NUM		26  /* Objects 1 to PCH_RX_OBJ_NUM are Rx */
123#define PCH_TX_OBJ_NUM		6   /* Objects PCH_RX_OBJ_NUM+1 to PCH_OBJ_NUM are Tx */
124#define PCH_OBJ_NUM (PCH_TX_OBJ_NUM + PCH_RX_OBJ_NUM)
125
126#define PCH_FIFO_THRESH 16
127
128enum pch_can_mode {
129 PCH_CAN_ENABLE,
130 PCH_CAN_DISABLE,
131 PCH_CAN_ALL,
132 PCH_CAN_NONE,
133 PCH_CAN_STOP,
134 PCH_CAN_RUN
135};
136
137struct pch_can_regs {
138 u32 cont;
139 u32 stat;
140 u32 errc;
141 u32 bitt;
142 u32 intr;
143 u32 opt;
144 u32 brpe;
145 u32 reserve1;
146 u32 if1_creq;
147 u32 if1_cmask;
148 u32 if1_mask1;
149 u32 if1_mask2;
150 u32 if1_id1;
151 u32 if1_id2;
152 u32 if1_mcont;
153 u32 if1_dataa1;
154 u32 if1_dataa2;
155 u32 if1_datab1;
156 u32 if1_datab2;
157 u32 reserve2;
158 u32 reserve3[12];
159 u32 if2_creq;
160 u32 if2_cmask;
161 u32 if2_mask1;
162 u32 if2_mask2;
163 u32 if2_id1;
164 u32 if2_id2;
165 u32 if2_mcont;
166 u32 if2_dataa1;
167 u32 if2_dataa2;
168 u32 if2_datab1;
169 u32 if2_datab2;
170 u32 reserve4;
171 u32 reserve5[20];
172 u32 treq1;
173 u32 treq2;
174 u32 reserve6[2];
175 u32 reserve7[56];
176 u32 reserve8[3];
177 u32 srst;
178};
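/* This layout mirrors the hardware register map: two identical message
 * interface register sets (this driver uses IF1 for the receive path and
 * IF2 for the transmit path) plus the transmission-request registers
 * (treq1/treq2) and the software-reset register (srst). */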
179
180struct pch_can_priv {
181 struct can_priv can;
182 unsigned int can_num;
183 struct pci_dev *dev;
184 unsigned int tx_enable[MAX_MSG_OBJ];
185 unsigned int rx_enable[MAX_MSG_OBJ];
186 unsigned int rx_link[MAX_MSG_OBJ];
187 unsigned int int_enables;
188 unsigned int int_stat;
189 struct net_device *ndev;
190 spinlock_t msgif_reg_lock; /* Message Interface Registers Access Lock*/
191 unsigned int msg_obj[MAX_MSG_OBJ];
192 struct pch_can_regs __iomem *regs;
193 struct napi_struct napi;
194	unsigned int tx_obj;	/* Index of the next Tx Obj to use */
195 unsigned int use_msi;
196};
197
198static struct can_bittiming_const pch_can_bittiming_const = {
199 .name = KBUILD_MODNAME,
200 .tseg1_min = 1,
201 .tseg1_max = 16,
202 .tseg2_min = 1,
203 .tseg2_max = 8,
204 .sjw_max = 4,
205 .brp_min = 1,
206 .brp_max = 1024, /* 6bit + extended 4bit */
207 .brp_inc = 1,
208};
209
210static DEFINE_PCI_DEVICE_TABLE(pch_pci_tbl) = {
211 {PCI_VENDOR_ID_INTEL, 0x8818, PCI_ANY_ID, PCI_ANY_ID,},
212 {0,}
213};
214MODULE_DEVICE_TABLE(pci, pch_pci_tbl);
215
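/* Read-modify-write helpers for the memory-mapped CAN registers. */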
216static inline void pch_can_bit_set(u32 *addr, u32 mask)
217{
218 iowrite32(ioread32(addr) | mask, addr);
219}
220
221static inline void pch_can_bit_clear(u32 *addr, u32 mask)
222{
223 iowrite32(ioread32(addr) & ~mask, addr);
224}
225
226static void pch_can_set_run_mode(struct pch_can_priv *priv,
227 enum pch_can_mode mode)
228{
229 switch (mode) {
230 case PCH_CAN_RUN:
231 pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_INIT);
232 break;
233
234 case PCH_CAN_STOP:
235 pch_can_bit_set(&priv->regs->cont, CAN_CTRL_INIT);
236 break;
237
238 default:
239 dev_err(&priv->ndev->dev, "%s -> Invalid Mode.\n", __func__);
240 break;
241 }
242}
243
244static void pch_can_set_optmode(struct pch_can_priv *priv)
245{
246 u32 reg_val = ioread32(&priv->regs->opt);
247
248 if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
249 reg_val |= CAN_OPT_SILENT;
250
251 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
252 reg_val |= CAN_OPT_LBACK;
253
254 pch_can_bit_set(&priv->regs->cont, CAN_CTRL_OPT);
255 iowrite32(reg_val, &priv->regs->opt);
256}
257
258static void pch_can_set_int_custom(struct pch_can_priv *priv)
259{
260	/* Clearing the IE, SIE and EIE bits of the CAN control register. */
261 pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
262
263 /* Appropriately setting them. */
264 pch_can_bit_set(&priv->regs->cont,
265 ((priv->int_enables & MSK_CTRL_IE_SIE_EIE) << 1));
266}
267
268/* This function retrieves the interrupt enable bits of the CAN device. */
269static void pch_can_get_int_enables(struct pch_can_priv *priv, u32 *enables)
270{
271 /* Obtaining the status of IE, SIE and EIE interrupt bits. */
272 *enables = ((ioread32(&priv->regs->cont) & CAN_CTRL_IE_SIE_EIE) >> 1);
273}
274
275static void pch_can_set_int_enables(struct pch_can_priv *priv,
276 enum pch_can_mode interrupt_no)
277{
278 switch (interrupt_no) {
279 case PCH_CAN_ENABLE:
280 pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE);
281 break;
282
283 case PCH_CAN_DISABLE:
284 pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE);
285 break;
286
287 case PCH_CAN_ALL:
288 pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
289 break;
290
291 case PCH_CAN_NONE:
292 pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
293 break;
294
295 default:
296 dev_err(&priv->ndev->dev, "Invalid interrupt number.\n");
297 break;
298 }
299}
300
301static void pch_can_check_if_busy(u32 __iomem *creq_addr, u32 num)
302{
303 u32 counter = COUNTER_LIMIT;
304 u32 ifx_creq;
305
306 iowrite32(num, creq_addr);
307 while (counter) {
308 ifx_creq = ioread32(creq_addr) & CAN_IF_CREQ_BUSY;
309 if (!ifx_creq)
310 break;
311 counter--;
312 udelay(1);
313 }
314 if (!counter)
315		pr_err("%s: IF BUSY flag did not clear.\n", __func__);
316}
317
318static void pch_can_set_rx_enable(struct pch_can_priv *priv, u32 buff_num,
319 u32 set)
320{
321 unsigned long flags;
322
323 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
324 /* Reading the receive buffer data from RAM to Interface1 registers */
325 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
326 pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
327
328 /* Setting the IF1MASK1 register to access MsgVal and RxIE bits */
329 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL,
330 &priv->regs->if1_cmask);
331
332 if (set == ENABLE) {
333 /* Setting the MsgVal and RxIE bits */
334 pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE);
335 pch_can_bit_set(&priv->regs->if1_id2, CAN_ID_MSGVAL);
336
337 } else if (set == DISABLE) {
338 /* Resetting the MsgVal and RxIE bits */
339 pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE);
340 pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID_MSGVAL);
341 }
342
343 pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
344 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
345}
346
347static void pch_can_rx_enable_all(struct pch_can_priv *priv)
348{
349 int i;
350
351	/* Traversing to obtain the objects configured as receivers. */
352 for (i = 0; i < PCH_OBJ_NUM; i++) {
353 if (priv->msg_obj[i] == MSG_OBJ_RX)
354 pch_can_set_rx_enable(priv, i + 1, ENABLE);
355 }
356}
357
358static void pch_can_rx_disable_all(struct pch_can_priv *priv)
359{
360 int i;
361
362	/* Traversing to obtain the objects configured as receivers. */
363 for (i = 0; i < PCH_OBJ_NUM; i++) {
364 if (priv->msg_obj[i] == MSG_OBJ_RX)
365 pch_can_set_rx_enable(priv, i + 1, DISABLE);
366 }
367}
368
369static void pch_can_set_tx_enable(struct pch_can_priv *priv, u32 buff_num,
370 u32 set)
371{
372 unsigned long flags;
373
374 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
375 /* Reading the Msg buffer from Message RAM to Interface2 registers. */
376 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
377 pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
378
379 /* Setting the IF2CMASK register for accessing the
380 MsgVal and TxIE bits */
381 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL,
382 &priv->regs->if2_cmask);
383
384 if (set == ENABLE) {
385 /* Setting the MsgVal and TxIE bits */
386 pch_can_bit_set(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
387 pch_can_bit_set(&priv->regs->if2_id2, CAN_ID_MSGVAL);
388 } else if (set == DISABLE) {
389 /* Resetting the MsgVal and TxIE bits. */
390 pch_can_bit_clear(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
391 pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID_MSGVAL);
392 }
393
394 pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
395 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
396}
397
398static void pch_can_tx_enable_all(struct pch_can_priv *priv)
399{
400 int i;
401
402	/* Traversing to obtain the objects configured as transmitters. */
403 for (i = 0; i < PCH_OBJ_NUM; i++) {
404 if (priv->msg_obj[i] == MSG_OBJ_TX)
405 pch_can_set_tx_enable(priv, i + 1, ENABLE);
406 }
407}
408
409static void pch_can_tx_disable_all(struct pch_can_priv *priv)
410{
411 int i;
412
413	/* Traversing to obtain the objects configured as transmitters. */
414 for (i = 0; i < PCH_OBJ_NUM; i++) {
415 if (priv->msg_obj[i] == MSG_OBJ_TX)
416 pch_can_set_tx_enable(priv, i + 1, DISABLE);
417 }
418}
419
420static void pch_can_get_rx_enable(struct pch_can_priv *priv, u32 buff_num,
421 u32 *enable)
422{
423 unsigned long flags;
424
425 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
426 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
427 pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
428
429 if (((ioread32(&priv->regs->if1_id2)) & CAN_ID_MSGVAL) &&
430 ((ioread32(&priv->regs->if1_mcont)) &
431 CAN_IF_MCONT_RXIE))
432 *enable = ENABLE;
433 else
434 *enable = DISABLE;
435 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
436}
437
438static void pch_can_get_tx_enable(struct pch_can_priv *priv, u32 buff_num,
439 u32 *enable)
440{
441 unsigned long flags;
442
443 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
444 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
445 pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
446
447 if (((ioread32(&priv->regs->if2_id2)) & CAN_ID_MSGVAL) &&
448 ((ioread32(&priv->regs->if2_mcont)) &
449 CAN_IF_MCONT_TXIE)) {
450 *enable = ENABLE;
451 } else {
452 *enable = DISABLE;
453 }
454 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
455}
456
457static int pch_can_int_pending(struct pch_can_priv *priv)
458{
459 return ioread32(&priv->regs->intr) & 0xffff;
460}
461
462static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
463 u32 buffer_num, u32 set)
464{
465 unsigned long flags;
466
467 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
468 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
469 pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
470 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL, &priv->regs->if1_cmask);
471 if (set == ENABLE)
472 pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB);
473 else
474 pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB);
475
476 pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
477 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
478}
479
480static void pch_can_get_rx_buffer_link(struct pch_can_priv *priv,
481 u32 buffer_num, u32 *link)
482{
483 unsigned long flags;
484
485 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
486 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
487 pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
488
489 if (ioread32(&priv->regs->if1_mcont) & CAN_IF_MCONT_EOB)
490 *link = DISABLE;
491 else
492 *link = ENABLE;
493 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
494}
495
496static void pch_can_clear_buffers(struct pch_can_priv *priv)
497{
498 int i;
499
500 for (i = 0; i < PCH_RX_OBJ_NUM; i++) {
501 iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if1_cmask);
502 iowrite32(0xffff, &priv->regs->if1_mask1);
503 iowrite32(0xffff, &priv->regs->if1_mask2);
504 iowrite32(0x0, &priv->regs->if1_id1);
505 iowrite32(0x0, &priv->regs->if1_id2);
506 iowrite32(0x0, &priv->regs->if1_mcont);
507 iowrite32(0x0, &priv->regs->if1_dataa1);
508 iowrite32(0x0, &priv->regs->if1_dataa2);
509 iowrite32(0x0, &priv->regs->if1_datab1);
510 iowrite32(0x0, &priv->regs->if1_datab2);
511 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
512 CAN_CMASK_ARB | CAN_CMASK_CTRL,
513 &priv->regs->if1_cmask);
514 pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
515 }
516
517	for (; i < PCH_OBJ_NUM; i++) {
518 iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if2_cmask);
519 iowrite32(0xffff, &priv->regs->if2_mask1);
520 iowrite32(0xffff, &priv->regs->if2_mask2);
521 iowrite32(0x0, &priv->regs->if2_id1);
522 iowrite32(0x0, &priv->regs->if2_id2);
523 iowrite32(0x0, &priv->regs->if2_mcont);
524 iowrite32(0x0, &priv->regs->if2_dataa1);
525 iowrite32(0x0, &priv->regs->if2_dataa2);
526 iowrite32(0x0, &priv->regs->if2_datab1);
527 iowrite32(0x0, &priv->regs->if2_datab2);
528 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
529 CAN_CMASK_ARB | CAN_CMASK_CTRL,
530 &priv->regs->if2_cmask);
531 pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
532 }
533}
534
535static void pch_can_config_rx_tx_buffers(struct pch_can_priv *priv)
536{
537 int i;
538 unsigned long flags;
539
540 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
541
542 for (i = 0; i < PCH_OBJ_NUM; i++) {
543 if (priv->msg_obj[i] == MSG_OBJ_RX) {
544 iowrite32(CAN_CMASK_RX_TX_GET,
545 &priv->regs->if1_cmask);
546 pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
547
548 iowrite32(0x0, &priv->regs->if1_id1);
549 iowrite32(0x0, &priv->regs->if1_id2);
550
551 pch_can_bit_set(&priv->regs->if1_mcont,
552 CAN_IF_MCONT_UMASK);
553
554			/* Clear the EOB bit (FIFO mode) for all but the last Rx Obj */
555			pch_can_bit_clear(&priv->regs->if1_mcont,
556					  CAN_IF_MCONT_EOB);
557			/* In FIFO mode the EOB bit of the last Rx Obj must be set */
558 if (i == (PCH_RX_OBJ_NUM - 1))
559 pch_can_bit_set(&priv->regs->if1_mcont,
560 CAN_IF_MCONT_EOB);
561
562 iowrite32(0, &priv->regs->if1_mask1);
563 pch_can_bit_clear(&priv->regs->if1_mask2,
564 0x1fff | CAN_MASK2_MDIR_MXTD);
565
566 /* Setting CMASK for writing */
567 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
568 CAN_CMASK_ARB | CAN_CMASK_CTRL,
569 &priv->regs->if1_cmask);
570
571 pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
572 } else if (priv->msg_obj[i] == MSG_OBJ_TX) {
573 iowrite32(CAN_CMASK_RX_TX_GET,
574 &priv->regs->if2_cmask);
575 pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
576
577 /* Resetting DIR bit for reception */
578 iowrite32(0x0, &priv->regs->if2_id1);
579 iowrite32(0x0, &priv->regs->if2_id2);
580 pch_can_bit_set(&priv->regs->if2_id2, CAN_ID2_DIR);
581
582 /* Setting EOB bit for transmitter */
583 iowrite32(CAN_IF_MCONT_EOB, &priv->regs->if2_mcont);
584
585 pch_can_bit_set(&priv->regs->if2_mcont,
586 CAN_IF_MCONT_UMASK);
587
588 iowrite32(0, &priv->regs->if2_mask1);
589 pch_can_bit_clear(&priv->regs->if2_mask2, 0x1fff);
590
591 /* Setting CMASK for writing */
592 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
593 CAN_CMASK_ARB | CAN_CMASK_CTRL,
594 &priv->regs->if2_cmask);
595
596 pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
597 }
598 }
599 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
600}
601
602static void pch_can_init(struct pch_can_priv *priv)
603{
604	/* Stopping the CAN device. */
605 pch_can_set_run_mode(priv, PCH_CAN_STOP);
606
607 /* Clearing all the message object buffers. */
608 pch_can_clear_buffers(priv);
609
610 /* Configuring the respective message object as either rx/tx object. */
611 pch_can_config_rx_tx_buffers(priv);
612
613 /* Enabling the interrupts. */
614 pch_can_set_int_enables(priv, PCH_CAN_ALL);
615}
616
617static void pch_can_release(struct pch_can_priv *priv)
618{
619	/* Stopping the CAN device. */
620 pch_can_set_run_mode(priv, PCH_CAN_STOP);
621
622 /* Disabling the interrupts. */
623 pch_can_set_int_enables(priv, PCH_CAN_NONE);
624
625	/* Disabling all the receive objects. */
626	pch_can_rx_disable_all(priv);
627
628	/* Disabling all the transmit objects. */
629	pch_can_tx_disable_all(priv);
630}
631
632/* This function clears interrupt(s) from the CAN device. */
633static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask)
634{
635 if (mask == CAN_STATUS_INT) {
636 ioread32(&priv->regs->stat);
637 return;
638 }
639
640 /* Clear interrupt for transmit object */
641 if (priv->msg_obj[mask - 1] == MSG_OBJ_TX) {
642 /* Setting CMASK for clearing interrupts for
643 frame transmission. */
644 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB,
645 &priv->regs->if2_cmask);
646
647 /* Resetting the ID registers. */
648 pch_can_bit_set(&priv->regs->if2_id2,
649 CAN_ID2_DIR | (0x7ff << 2));
650 iowrite32(0x0, &priv->regs->if2_id1);
651
652		/* Clearing NewDat, TxRqst & IntPnd */
653 pch_can_bit_clear(&priv->regs->if2_mcont,
654 CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND |
655 CAN_IF_MCONT_TXRQXT);
656 pch_can_check_if_busy(&priv->regs->if2_creq, mask);
657 } else if (priv->msg_obj[mask - 1] == MSG_OBJ_RX) {
658 /* Setting CMASK for clearing the reception interrupts. */
659 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB,
660 &priv->regs->if1_cmask);
661
662 /* Clearing the Dir bit. */
663 pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR);
664
665 /* Clearing NewDat & IntPnd */
666 pch_can_bit_clear(&priv->regs->if1_mcont,
667 CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND);
668
669 pch_can_check_if_busy(&priv->regs->if1_creq, mask);
670 }
671}
672
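/* Returns the 32-bit transmission-request bitmap (treq2:treq1), one bit per
 * message object; a set bit means the object still has a transmit pending. */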
673static int pch_can_get_buffer_status(struct pch_can_priv *priv)
674{
675 return (ioread32(&priv->regs->treq1) & 0xffff) |
676 ((ioread32(&priv->regs->treq2) & 0xffff) << 16);
677}
678
679static void pch_can_reset(struct pch_can_priv *priv)
680{
681 /* write to sw reset register */
682 iowrite32(1, &priv->regs->srst);
683 iowrite32(0, &priv->regs->srst);
684}
685
686static void pch_can_error(struct net_device *ndev, u32 status)
687{
688 struct sk_buff *skb;
689 struct pch_can_priv *priv = netdev_priv(ndev);
690 struct can_frame *cf;
691 u32 errc;
692 struct net_device_stats *stats = &(priv->ndev->stats);
693 enum can_state state = priv->can.state;
694
695 skb = alloc_can_err_skb(ndev, &cf);
696 if (!skb)
697 return;
698
699 if (status & PCH_BUS_OFF) {
700 pch_can_tx_disable_all(priv);
701 pch_can_rx_disable_all(priv);
702 state = CAN_STATE_BUS_OFF;
703 cf->can_id |= CAN_ERR_BUSOFF;
704 can_bus_off(ndev);
705 pch_can_set_run_mode(priv, PCH_CAN_RUN);
706		dev_err(&ndev->dev, "%s -> Bus Off occurred.\n", __func__);
707 }
708
709 /* Warning interrupt. */
710 if (status & PCH_EWARN) {
711 state = CAN_STATE_ERROR_WARNING;
712 priv->can.can_stats.error_warning++;
713 cf->can_id |= CAN_ERR_CRTL;
714 errc = ioread32(&priv->regs->errc);
715 if (((errc & CAN_REC) >> 8) > 96)
716 cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
717 if ((errc & CAN_TEC) > 96)
718 cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
719 dev_warn(&ndev->dev,
720 "%s -> Error Counter is more than 96.\n", __func__);
721 }
722 /* Error passive interrupt. */
723 if (status & PCH_EPASSIV) {
724 priv->can.can_stats.error_passive++;
725 state = CAN_STATE_ERROR_PASSIVE;
726 cf->can_id |= CAN_ERR_CRTL;
727 errc = ioread32(&priv->regs->errc);
728 if (((errc & CAN_REC) >> 8) > 127)
729 cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
730 if ((errc & CAN_TEC) > 127)
731 cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
732 dev_err(&ndev->dev,
733			"%s -> CAN controller is ERROR PASSIVE.\n", __func__);
734 }
735
736 if (status & PCH_LEC_ALL) {
737 priv->can.can_stats.bus_error++;
738 stats->rx_errors++;
739 switch (status & PCH_LEC_ALL) {
740 case PCH_STUF_ERR:
741 cf->data[2] |= CAN_ERR_PROT_STUFF;
742 break;
743 case PCH_FORM_ERR:
744 cf->data[2] |= CAN_ERR_PROT_FORM;
745 break;
746 case PCH_ACK_ERR:
747 cf->data[2] |= CAN_ERR_PROT_LOC_ACK |
748 CAN_ERR_PROT_LOC_ACK_DEL;
749 break;
750 case PCH_BIT1_ERR:
751 case PCH_BIT0_ERR:
752 cf->data[2] |= CAN_ERR_PROT_BIT;
753 break;
754 case PCH_CRC_ERR:
755 cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
756 CAN_ERR_PROT_LOC_CRC_DEL;
757 break;
758 default:
759 iowrite32(status | PCH_LEC_ALL, &priv->regs->stat);
760 break;
761 }
762
763 }
764
765 priv->can.state = state;
766 netif_rx(skb);
767
768 stats->rx_packets++;
769 stats->rx_bytes += cf->can_dlc;
770}
771
772static irqreturn_t pch_can_interrupt(int irq, void *dev_id)
773{
774 struct net_device *ndev = (struct net_device *)dev_id;
775 struct pch_can_priv *priv = netdev_priv(ndev);
776
777 pch_can_set_int_enables(priv, PCH_CAN_NONE);
778
779 napi_schedule(&priv->napi);
780
781 return IRQ_HANDLED;
782}
783
784static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
785{
786 u32 reg;
787 canid_t id;
788 u32 ide;
789 u32 rtr;
790 int i, j, k;
791 int rcv_pkts = 0;
792 struct sk_buff *skb;
793 struct can_frame *cf;
794 struct pch_can_priv *priv = netdev_priv(ndev);
795 struct net_device_stats *stats = &(priv->ndev->stats);
796
797	/* Reading the message object from the Message RAM */
798 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
799 pch_can_check_if_busy(&priv->regs->if1_creq, int_stat);
800
801 /* Reading the MCONT register. */
802 reg = ioread32(&priv->regs->if1_mcont);
803 reg &= 0xffff;
804
805 for (k = int_stat; !(reg & CAN_IF_MCONT_EOB); k++) {
806 /* If MsgLost bit set. */
807 if (reg & CAN_IF_MCONT_MSGLOST) {
808 dev_err(&priv->ndev->dev, "Msg Obj is overwritten.\n");
809 pch_can_bit_clear(&priv->regs->if1_mcont,
810 CAN_IF_MCONT_MSGLOST);
811 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL,
812 &priv->regs->if1_cmask);
813 pch_can_check_if_busy(&priv->regs->if1_creq, k);
814
815 skb = alloc_can_err_skb(ndev, &cf);
816 if (!skb)
817 return -ENOMEM;
818
819 priv->can.can_stats.error_passive++;
820 priv->can.state = CAN_STATE_ERROR_PASSIVE;
821 cf->can_id |= CAN_ERR_CRTL;
822 cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
823 cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
824 stats->rx_packets++;
825 stats->rx_bytes += cf->can_dlc;
826
827 netif_receive_skb(skb);
828 rcv_pkts++;
829 goto RX_NEXT;
830 }
831 if (!(reg & CAN_IF_MCONT_NEWDAT))
832 goto RX_NEXT;
833
834 skb = alloc_can_skb(priv->ndev, &cf);
835 if (!skb)
836 return -ENOMEM;
837
838 /* Get Received data */
839 ide = ((ioread32(&priv->regs->if1_id2)) & CAN_ID2_XTD) >> 14;
840 if (ide) {
841 id = (ioread32(&priv->regs->if1_id1) & 0xffff);
842 id |= (((ioread32(&priv->regs->if1_id2)) &
843 0x1fff) << 16);
844 cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
845 } else {
846 id = (((ioread32(&priv->regs->if1_id2)) &
847 (CAN_SFF_MASK << 2)) >> 2);
848 cf->can_id = (id & CAN_SFF_MASK);
849 }
850
851 rtr = (ioread32(&priv->regs->if1_id2) & CAN_ID2_DIR);
852 if (rtr) {
853 cf->can_dlc = 0;
854 cf->can_id |= CAN_RTR_FLAG;
855 } else {
856 cf->can_dlc = ((ioread32(&priv->regs->if1_mcont)) &
857 0x0f);
858 }
859
860		for (i = 0, j = 0; i < cf->can_dlc; i += 2, j++) {
861			reg = ioread32(&priv->regs->if1_dataa1 + j*4);
862			cf->data[i] = reg & 0xff;
863			if (i + 1 < cf->can_dlc)
864				cf->data[i + 1] = (reg >> 8) & 0xff;
865		}
867
868 netif_receive_skb(skb);
869 rcv_pkts++;
870 stats->rx_packets++;
871 stats->rx_bytes += cf->can_dlc;
872
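		/* Acknowledge the object: below the FIFO threshold IntPnd is
		 * cleared directly, above it via pch_can_int_clr(); at the
		 * threshold all of the first PCH_FIFO_THRESH objects are
		 * cleared in one pass. */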
873 if (k < PCH_FIFO_THRESH) {
874 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL |
875 CAN_CMASK_ARB, &priv->regs->if1_cmask);
876
877 /* Clearing the Dir bit. */
878 pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR);
879
880 /* Clearing NewDat & IntPnd */
881 pch_can_bit_clear(&priv->regs->if1_mcont,
882 CAN_IF_MCONT_INTPND);
883 pch_can_check_if_busy(&priv->regs->if1_creq, k);
884 } else if (k > PCH_FIFO_THRESH) {
885 pch_can_int_clr(priv, k);
886 } else if (k == PCH_FIFO_THRESH) {
887 int cnt;
888 for (cnt = 0; cnt < PCH_FIFO_THRESH; cnt++)
889 pch_can_int_clr(priv, cnt+1);
890 }
891RX_NEXT:
892		/* Reading the message object from the Message RAM */
893 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
894 pch_can_check_if_busy(&priv->regs->if1_creq, k + 1);
895 reg = ioread32(&priv->regs->if1_mcont);
896 }
897
898 return rcv_pkts;
899}
900static int pch_can_rx_poll(struct napi_struct *napi, int quota)
901{
902 struct net_device *ndev = napi->dev;
903 struct pch_can_priv *priv = netdev_priv(ndev);
904 struct net_device_stats *stats = &(priv->ndev->stats);
905 u32 dlc;
906 u32 int_stat;
907 int rcv_pkts = 0;
908 u32 reg_stat;
909 unsigned long flags;
910
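	/* The interrupt register reads 1..32 for a pending message object
	 * interrupt and CAN_STATUS_INT (0x8000) for a status interrupt. */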
911 int_stat = pch_can_int_pending(priv);
912 if (!int_stat)
913 return 0;
914
915INT_STAT:
916 if (int_stat == CAN_STATUS_INT) {
917 reg_stat = ioread32(&priv->regs->stat);
918 if (reg_stat & (PCH_BUS_OFF | PCH_LEC_ALL)) {
919 if ((reg_stat & PCH_LEC_ALL) != PCH_LEC_ALL)
920 pch_can_error(ndev, reg_stat);
921 }
922
923 if (reg_stat & PCH_TX_OK) {
924 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
925 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
926 pch_can_check_if_busy(&priv->regs->if2_creq,
927 ioread32(&priv->regs->intr));
928 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
929 pch_can_bit_clear(&priv->regs->stat, PCH_TX_OK);
930 }
931
932 if (reg_stat & PCH_RX_OK)
933 pch_can_bit_clear(&priv->regs->stat, PCH_RX_OK);
934
935 int_stat = pch_can_int_pending(priv);
936 if (int_stat == CAN_STATUS_INT)
937 goto INT_STAT;
938 }
939
940MSG_OBJ:
941 if ((int_stat >= 1) && (int_stat <= PCH_RX_OBJ_NUM)) {
942 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
943 rcv_pkts = pch_can_rx_normal(ndev, int_stat);
944 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
945 if (rcv_pkts < 0)
946 return 0;
947 } else if ((int_stat > PCH_RX_OBJ_NUM) && (int_stat <= PCH_OBJ_NUM)) {
948 if (priv->msg_obj[int_stat - 1] == MSG_OBJ_TX) {
949 /* Handle transmission interrupt */
950 can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_NUM - 1);
951 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
952 iowrite32(CAN_CMASK_RX_TX_GET | CAN_CMASK_CLRINTPND,
953 &priv->regs->if2_cmask);
954 dlc = ioread32(&priv->regs->if2_mcont) &
955 CAN_IF_MCONT_DLC;
956 pch_can_check_if_busy(&priv->regs->if2_creq, int_stat);
957 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
958 if (dlc > 8)
959 dlc = 8;
960 stats->tx_bytes += dlc;
961 stats->tx_packets++;
962 }
963 }
964
965 int_stat = pch_can_int_pending(priv);
966 if (int_stat == CAN_STATUS_INT)
967 goto INT_STAT;
968 else if (int_stat >= 1 && int_stat <= 32)
969 goto MSG_OBJ;
970
971 napi_complete(napi);
972 pch_can_set_int_enables(priv, PCH_CAN_ALL);
973
974 return rcv_pkts;
975}
976
977static int pch_set_bittiming(struct net_device *ndev)
978{
979 struct pch_can_priv *priv = netdev_priv(ndev);
980 const struct can_bittiming *bt = &priv->can.bittiming;
981 u32 canbit;
982 u32 bepe;
983 u32 brp;
984
985 /* Setting the CCE bit for accessing the Can Timing register. */
986 pch_can_bit_set(&priv->regs->cont, CAN_CTRL_CCE);
987
988 brp = (bt->tq) / (1000000000/PCH_CAN_CLK) - 1;
989 canbit = brp & MSK_BITT_BRP;
990 canbit |= (bt->sjw - 1) << BIT_BITT_SJW;
991 canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << BIT_BITT_TSEG1;
992 canbit |= (bt->phase_seg2 - 1) << BIT_BITT_TSEG2;
993 bepe = (brp & MSK_BRPE_BRPE) >> BIT_BRPE_BRPE;
994 iowrite32(canbit, &priv->regs->bitt);
995 iowrite32(bepe, &priv->regs->brpe);
996 pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_CCE);
997
998 return 0;
999}
1000
1001static void pch_can_start(struct net_device *ndev)
1002{
1003 struct pch_can_priv *priv = netdev_priv(ndev);
1004
1005 if (priv->can.state != CAN_STATE_STOPPED)
1006 pch_can_reset(priv);
1007
1008 pch_set_bittiming(ndev);
1009 pch_can_set_optmode(priv);
1010
1011 pch_can_tx_enable_all(priv);
1012 pch_can_rx_enable_all(priv);
1013
1014 /* Setting the CAN to run mode. */
1015 pch_can_set_run_mode(priv, PCH_CAN_RUN);
1016
1017 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1018
1019 return;
1020}
1021
1022static int pch_can_do_set_mode(struct net_device *ndev, enum can_mode mode)
1023{
1024 int ret = 0;
1025
1026 switch (mode) {
1027 case CAN_MODE_START:
1028 pch_can_start(ndev);
1029 netif_wake_queue(ndev);
1030 break;
1031 default:
1032 ret = -EOPNOTSUPP;
1033 break;
1034 }
1035
1036 return ret;
1037}
1038
1039static int pch_can_open(struct net_device *ndev)
1040{
1041 struct pch_can_priv *priv = netdev_priv(ndev);
1042 int retval;
1043
1044 retval = pci_enable_msi(priv->dev);
1045 if (retval) {
1046 dev_info(&ndev->dev, "PCH CAN opened without MSI\n");
1047 priv->use_msi = 0;
1048 } else {
1049 dev_info(&ndev->dev, "PCH CAN opened with MSI\n");
1050 priv->use_msi = 1;
1051 }
1052
1053	/* Registering the interrupt. */
1054 retval = request_irq(priv->dev->irq, pch_can_interrupt, IRQF_SHARED,
1055 ndev->name, ndev);
1056 if (retval) {
1057 dev_err(&ndev->dev, "request_irq failed.\n");
1058 goto req_irq_err;
1059 }
1060
1061 /* Open common can device */
1062 retval = open_candev(ndev);
1063 if (retval) {
1064 dev_err(ndev->dev.parent, "open_candev() failed %d\n", retval);
1065 goto err_open_candev;
1066 }
1067
1068 pch_can_init(priv);
1069 pch_can_start(ndev);
1070 napi_enable(&priv->napi);
1071 netif_start_queue(ndev);
1072
1073 return 0;
1074
1075err_open_candev:
1076 free_irq(priv->dev->irq, ndev);
1077req_irq_err:
1078 if (priv->use_msi)
1079 pci_disable_msi(priv->dev);
1080
1081 pch_can_release(priv);
1082
1083 return retval;
1084}
1085
1086static int pch_close(struct net_device *ndev)
1087{
1088 struct pch_can_priv *priv = netdev_priv(ndev);
1089
1090 netif_stop_queue(ndev);
1091 napi_disable(&priv->napi);
1092 pch_can_release(priv);
1093 free_irq(priv->dev->irq, ndev);
1094 if (priv->use_msi)
1095 pci_disable_msi(priv->dev);
1096 close_candev(ndev);
1097 priv->can.state = CAN_STATE_STOPPED;
1098 return 0;
1099}
1100
1101static int pch_get_msg_obj_sts(struct net_device *ndev, u32 obj_id)
1102{
1103 u32 buffer_status = 0;
1104 struct pch_can_priv *priv = netdev_priv(ndev);
1105
1106 /* Getting the message object status. */
1107 buffer_status = (u32) pch_can_get_buffer_status(priv);
1108
1109 return buffer_status & obj_id;
1110}
1111
1112
1113static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
1114{
1115 int i, j;
1116 unsigned long flags;
1117 struct pch_can_priv *priv = netdev_priv(ndev);
1118 struct can_frame *cf = (struct can_frame *)skb->data;
1119 int tx_buffer_avail = 0;
1120
1121 if (can_dropped_invalid_skb(ndev, skb))
1122 return NETDEV_TX_OK;
1123
1124	if (priv->tx_obj == (PCH_OBJ_NUM + 1)) { /* Wrapped past last Tx Obj */
1125		while (pch_get_msg_obj_sts(ndev, (((1 << PCH_TX_OBJ_NUM)-1) <<
1126				PCH_RX_OBJ_NUM)))
1127			udelay(500);
1128
1129		priv->tx_obj = PCH_RX_OBJ_NUM + 1; /* Wrap to first Tx Obj ID */
1130		tx_buffer_avail = priv->tx_obj; /* Use the first Tx Obj */
1131	} else {
1132		tx_buffer_avail = priv->tx_obj;
1133	}
1134 priv->tx_obj++;
1135
1136 /* Attaining the lock. */
1137 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
1138
1139 /* Reading the Msg Obj from the Msg RAM to the Interface register. */
1140 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
1141 pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail);
1142
1143 /* Setting the CMASK register. */
1144 pch_can_bit_set(&priv->regs->if2_cmask, CAN_CMASK_ALL);
1145
1146	/* Setting the frame ID (extended or standard format). */
1147 pch_can_bit_clear(&priv->regs->if2_id1, 0xffff);
1148 pch_can_bit_clear(&priv->regs->if2_id2, 0x1fff | CAN_ID2_XTD);
1149 if (cf->can_id & CAN_EFF_FLAG) {
1150 pch_can_bit_set(&priv->regs->if2_id1, cf->can_id & 0xffff);
1151 pch_can_bit_set(&priv->regs->if2_id2,
1152 ((cf->can_id >> 16) & 0x1fff) | CAN_ID2_XTD);
1153 } else {
1154 pch_can_bit_set(&priv->regs->if2_id1, 0);
1155 pch_can_bit_set(&priv->regs->if2_id2,
1156 (cf->can_id & CAN_SFF_MASK) << 2);
1157 }
1158
1159	/* If a remote frame has to be transmitted. */
1160 if (cf->can_id & CAN_RTR_FLAG)
1161 pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID2_DIR);
1162
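	/* The payload is packed two bytes per 16-bit data register
	 * (dataa1, dataa2, datab1, datab2), low byte first. */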
1163	for (i = 0, j = 0; i < cf->can_dlc; i += 2, j++) {
1164		u32 data = cf->data[i];
1165
1166		if (i + 1 < cf->can_dlc)
1167			data |= cf->data[i + 1] << 8;
1168		iowrite32(data, (&priv->regs->if2_dataa1) + j*4);
1169	}
1171
1172 can_put_echo_skb(skb, ndev, tx_buffer_avail - PCH_RX_OBJ_NUM - 1);
1173
1174 /* Updating the size of the data. */
1175 pch_can_bit_clear(&priv->regs->if2_mcont, 0x0f);
1176 pch_can_bit_set(&priv->regs->if2_mcont, cf->can_dlc);
1177
1178 /* Clearing IntPend, NewDat & TxRqst */
1179 pch_can_bit_clear(&priv->regs->if2_mcont,
1180 CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND |
1181 CAN_IF_MCONT_TXRQXT);
1182
1183 /* Setting NewDat, TxRqst bits */
1184 pch_can_bit_set(&priv->regs->if2_mcont,
1185 CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_TXRQXT);
1186
1187 pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail);
1188
1189 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
1190
1191 return NETDEV_TX_OK;
1192}
1193
1194static const struct net_device_ops pch_can_netdev_ops = {
1195 .ndo_open = pch_can_open,
1196 .ndo_stop = pch_close,
1197 .ndo_start_xmit = pch_xmit,
1198};
1199
1200static void __devexit pch_can_remove(struct pci_dev *pdev)
1201{
1202 struct net_device *ndev = pci_get_drvdata(pdev);
1203 struct pch_can_priv *priv = netdev_priv(ndev);
1204
1205 unregister_candev(priv->ndev);
1206 free_candev(priv->ndev);
1207 pci_iounmap(pdev, priv->regs);
1208 pci_release_regions(pdev);
1209 pci_disable_device(pdev);
1210 pci_set_drvdata(pdev, NULL);
1211 pch_can_reset(priv);
1212}
1213
1214#ifdef CONFIG_PM
1215static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
1216{
1217 int i; /* Counter variable. */
1218 int retval; /* Return value. */
1219 u32 buf_stat; /* Variable for reading the transmit buffer status. */
1220 u32 counter = 0xFFFFFF;
1221
1222 struct net_device *dev = pci_get_drvdata(pdev);
1223 struct pch_can_priv *priv = netdev_priv(dev);
1224
1225 /* Stop the CAN controller */
1226 pch_can_set_run_mode(priv, PCH_CAN_STOP);
1227
1228	/* Indicate that we are about to enter suspend */
1229 priv->can.state = CAN_STATE_SLEEPING;
1230
1231 /* Waiting for all transmission to complete. */
1232 while (counter) {
1233 buf_stat = pch_can_get_buffer_status(priv);
1234 if (!buf_stat)
1235 break;
1236 counter--;
1237 udelay(1);
1238 }
1239 if (!counter)
1240 dev_err(&pdev->dev, "%s -> Transmission time out.\n", __func__);
1241
1242 /* Save interrupt configuration and then disable them */
1243 pch_can_get_int_enables(priv, &(priv->int_enables));
1244 pch_can_set_int_enables(priv, PCH_CAN_DISABLE);
1245
1246 /* Save Tx buffer enable state */
1247 for (i = 0; i < PCH_OBJ_NUM; i++) {
1248 if (priv->msg_obj[i] == MSG_OBJ_TX)
1249 pch_can_get_tx_enable(priv, i + 1,
1250 &(priv->tx_enable[i]));
1251 }
1252
1253 /* Disable all Transmit buffers */
1254 pch_can_tx_disable_all(priv);
1255
1256 /* Save Rx buffer enable state */
1257 for (i = 0; i < PCH_OBJ_NUM; i++) {
1258 if (priv->msg_obj[i] == MSG_OBJ_RX) {
1259 pch_can_get_rx_enable(priv, i + 1,
1260 &(priv->rx_enable[i]));
1261 pch_can_get_rx_buffer_link(priv, i + 1,
1262 &(priv->rx_link[i]));
1263 }
1264 }
1265
1266 /* Disable all Receive buffers */
1267 pch_can_rx_disable_all(priv);
1268 retval = pci_save_state(pdev);
1269 if (retval) {
1270 dev_err(&pdev->dev, "pci_save_state failed.\n");
1271 } else {
1272 pci_enable_wake(pdev, PCI_D3hot, 0);
1273 pci_disable_device(pdev);
1274 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1275 }
1276
1277 return retval;
1278}
1279
1280static int pch_can_resume(struct pci_dev *pdev)
1281{
1282 int i; /* Counter variable. */
1283 int retval; /* Return variable. */
1284 struct net_device *dev = pci_get_drvdata(pdev);
1285 struct pch_can_priv *priv = netdev_priv(dev);
1286
1287 pci_set_power_state(pdev, PCI_D0);
1288 pci_restore_state(pdev);
1289 retval = pci_enable_device(pdev);
1290 if (retval) {
1291 dev_err(&pdev->dev, "pci_enable_device failed.\n");
1292 return retval;
1293 }
1294
1295 pci_enable_wake(pdev, PCI_D3hot, 0);
1296
1297 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1298
1299 /* Disabling all interrupts. */
1300 pch_can_set_int_enables(priv, PCH_CAN_DISABLE);
1301
1302 /* Setting the CAN device in Stop Mode. */
1303 pch_can_set_run_mode(priv, PCH_CAN_STOP);
1304
1305 /* Configuring the transmit and receive buffers. */
1306 pch_can_config_rx_tx_buffers(priv);
1307
1308 /* Restore the CAN state */
1309 pch_set_bittiming(dev);
1310
1311 /* Listen/Active */
1312 pch_can_set_optmode(priv);
1313
1314 /* Enabling the transmit buffer. */
1315 for (i = 0; i < PCH_OBJ_NUM; i++) {
1316 if (priv->msg_obj[i] == MSG_OBJ_TX) {
1317 pch_can_set_tx_enable(priv, i + 1,
1318 priv->tx_enable[i]);
1319 }
1320 }
1321
1322 /* Configuring the receive buffer and enabling them. */
1323 for (i = 0; i < PCH_OBJ_NUM; i++) {
1324 if (priv->msg_obj[i] == MSG_OBJ_RX) {
1325 /* Restore buffer link */
1326 pch_can_set_rx_buffer_link(priv, i + 1,
1327 priv->rx_link[i]);
1328
1329 /* Restore buffer enables */
1330 pch_can_set_rx_enable(priv, i + 1, priv->rx_enable[i]);
1331 }
1332 }
1333
1334 /* Enable CAN Interrupts */
1335 pch_can_set_int_custom(priv);
1336
1337 /* Restore Run Mode */
1338 pch_can_set_run_mode(priv, PCH_CAN_RUN);
1339
1340 return retval;
1341}
1342#else
1343#define pch_can_suspend NULL
1344#define pch_can_resume NULL
1345#endif
1346
1347static int pch_can_get_berr_counter(const struct net_device *dev,
1348 struct can_berr_counter *bec)
1349{
1350 struct pch_can_priv *priv = netdev_priv(dev);
1351
1352 bec->txerr = ioread32(&priv->regs->errc) & CAN_TEC;
1353 bec->rxerr = (ioread32(&priv->regs->errc) & CAN_REC) >> 8;
1354
1355 return 0;
1356}
1357
1358static int __devinit pch_can_probe(struct pci_dev *pdev,
1359 const struct pci_device_id *id)
1360{
1361 struct net_device *ndev;
1362 struct pch_can_priv *priv;
1363 int rc;
1364 int index;
1365 void __iomem *addr;
1366
1367 rc = pci_enable_device(pdev);
1368 if (rc) {
1369 dev_err(&pdev->dev, "Failed pci_enable_device %d\n", rc);
1370 goto probe_exit_endev;
1371 }
1372
1373 rc = pci_request_regions(pdev, KBUILD_MODNAME);
1374 if (rc) {
1375 dev_err(&pdev->dev, "Failed pci_request_regions %d\n", rc);
1376 goto probe_exit_pcireq;
1377 }
1378
1379 addr = pci_iomap(pdev, 1, 0);
1380 if (!addr) {
1381 rc = -EIO;
1382 dev_err(&pdev->dev, "Failed pci_iomap\n");
1383 goto probe_exit_ipmap;
1384 }
1385
1386 ndev = alloc_candev(sizeof(struct pch_can_priv), PCH_TX_OBJ_NUM);
1387 if (!ndev) {
1388 rc = -ENOMEM;
1389 dev_err(&pdev->dev, "Failed alloc_candev\n");
1390 goto probe_exit_alloc_candev;
1391 }
1392
1393 priv = netdev_priv(ndev);
1394 priv->ndev = ndev;
1395 priv->regs = addr;
1396 priv->dev = pdev;
1397 priv->can.bittiming_const = &pch_can_bittiming_const;
1398 priv->can.do_set_mode = pch_can_do_set_mode;
1399 priv->can.do_get_berr_counter = pch_can_get_berr_counter;
1400 priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
1401 CAN_CTRLMODE_LOOPBACK;
1402	priv->tx_obj = PCH_RX_OBJ_NUM + 1; /* Start at the first Tx Obj */
1403
1404 ndev->irq = pdev->irq;
1405 ndev->flags |= IFF_ECHO;
1406
1407 pci_set_drvdata(pdev, ndev);
1408 SET_NETDEV_DEV(ndev, &pdev->dev);
1409 ndev->netdev_ops = &pch_can_netdev_ops;
1410
1411 priv->can.clock.freq = PCH_CAN_CLK; /* Hz */
1412 for (index = 0; index < PCH_RX_OBJ_NUM;)
1413 priv->msg_obj[index++] = MSG_OBJ_RX;
1414
1415	for (; index < PCH_OBJ_NUM;)
1416 priv->msg_obj[index++] = MSG_OBJ_TX;
1417
1418 netif_napi_add(ndev, &priv->napi, pch_can_rx_poll, PCH_RX_OBJ_NUM);
1419
1420 rc = register_candev(ndev);
1421 if (rc) {
1422 dev_err(&pdev->dev, "Failed register_candev %d\n", rc);
1423 goto probe_exit_reg_candev;
1424 }
1425
1426 return 0;
1427
1428probe_exit_reg_candev:
1429 free_candev(ndev);
1430probe_exit_alloc_candev:
1431 pci_iounmap(pdev, addr);
1432probe_exit_ipmap:
1433 pci_release_regions(pdev);
1434probe_exit_pcireq:
1435 pci_disable_device(pdev);
1436probe_exit_endev:
1437 return rc;
1438}
1439
1440static struct pci_driver pch_can_pcidev = {
1441 .name = "pch_can",
1442 .id_table = pch_pci_tbl,
1443 .probe = pch_can_probe,
1444 .remove = __devexit_p(pch_can_remove),
1445 .suspend = pch_can_suspend,
1446 .resume = pch_can_resume,
1447};
1448
1449static int __init pch_can_pci_init(void)
1450{
1451 return pci_register_driver(&pch_can_pcidev);
1452}
1453module_init(pch_can_pci_init);
1454
1455static void __exit pch_can_pci_exit(void)
1456{
1457 pci_unregister_driver(&pch_can_pcidev);
1458}
1459module_exit(pch_can_pci_exit);
1460
1461MODULE_DESCRIPTION("Controller Area Network Driver");
1462MODULE_LICENSE("GPL v2");
1463MODULE_VERSION("0.94");
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index ae3505afd682..6fdc031daaae 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -58,4 +58,16 @@ config CAN_PLX_PCI
 	  - esd CAN-PCIe/2000
 	  - Marathon CAN-bus-PCI card (http://www.marathon.ru/)
 	  - TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
+
+config CAN_TSCAN1
+	tristate "TS-CAN1 PC104 boards"
+	depends on ISA
+	help
+	  This driver is for Technologic Systems' TSCAN-1 PC104 boards.
+	  http://www.embeddedarm.com/products/board-detail.php?product=TS-CAN1
+	  The driver supports multiple boards and automatically configures them:
+	  PLD IO base addresses are read from jumpers JP1 and JP2,
+	  IRQ numbers are read from jumpers JP4 and JP5,
+	  SJA1000 IO base addresses are chosen heuristically (first that works).
+
 endif
diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile
index ce924553995d..2c591eb321c7 100644
--- a/drivers/net/can/sja1000/Makefile
+++ b/drivers/net/can/sja1000/Makefile
@@ -9,5 +9,6 @@ obj-$(CONFIG_CAN_SJA1000_OF_PLATFORM) += sja1000_of_platform.o
 obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o
 obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o
 obj-$(CONFIG_CAN_PLX_PCI) += plx_pci.o
+obj-$(CONFIG_CAN_TSCAN1) += tscan1.o
 
 ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/sja1000/tscan1.c b/drivers/net/can/sja1000/tscan1.c
new file mode 100644
index 000000000000..9756099a883a
--- /dev/null
+++ b/drivers/net/can/sja1000/tscan1.c
@@ -0,0 +1,216 @@
1/*
2 * tscan1.c: driver for Technologic Systems TS-CAN1 PC104 boards
3 *
4 * Copyright 2010 Andre B. Oliveira
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * References:
22 * - Getting started with TS-CAN1, Technologic Systems, Jun 2009
23 * http://www.embeddedarm.com/documentation/ts-can1-manual.pdf
24 */
25
26#include <linux/init.h>
27#include <linux/io.h>
28#include <linux/ioport.h>
29#include <linux/isa.h>
30#include <linux/module.h>
31#include <linux/netdevice.h>
32#include "sja1000.h"
33
34MODULE_DESCRIPTION("Driver for Technologic Systems TS-CAN1 PC104 boards");
35MODULE_AUTHOR("Andre B. Oliveira <anbadeol@gmail.com>");
36MODULE_LICENSE("GPL");
37
38/* Maximum number of boards (one in each JP1:JP2 setting of IO address) */
39#define TSCAN1_MAXDEV 4
40
41/* PLD registers address offsets */
42#define TSCAN1_ID1 0
43#define TSCAN1_ID2 1
44#define TSCAN1_VERSION 2
45#define TSCAN1_LED 3
46#define TSCAN1_PAGE 4
47#define TSCAN1_MODE 5
48#define TSCAN1_JUMPERS 6
49
50/* PLD board identifier registers magic values */
51#define TSCAN1_ID1_VALUE 0xf6
52#define TSCAN1_ID2_VALUE 0xb9
53
54/* PLD mode register SJA1000 IO enable bit */
55#define TSCAN1_MODE_ENABLE 0x40
56
57/* PLD jumpers register bits */
58#define TSCAN1_JP4 0x10
59#define TSCAN1_JP5 0x20
60
61/* PLD IO base addresses start */
62#define TSCAN1_PLD_ADDRESS 0x150
63
64/* PLD register space size */
65#define TSCAN1_PLD_SIZE 8
66
67/* SJA1000 register space size */
68#define TSCAN1_SJA1000_SIZE 32
69
70/* SJA1000 crystal frequency (16MHz) */
71#define TSCAN1_SJA1000_XTAL 16000000
72
73/* SJA1000 IO base addresses */
74static const unsigned short tscan1_sja1000_addresses[] __devinitconst = {
75 0x100, 0x120, 0x180, 0x1a0, 0x200, 0x240, 0x280, 0x320
76};
77
78/* Read SJA1000 register */
79static u8 tscan1_read(const struct sja1000_priv *priv, int reg)
80{
81 return inb((unsigned long)priv->reg_base + reg);
82}
83
84/* Write SJA1000 register */
85static void tscan1_write(const struct sja1000_priv *priv, int reg, u8 val)
86{
87 outb(val, (unsigned long)priv->reg_base + reg);
88}
89
90/* Probe for a TS-CAN1 board with JP2:JP1 jumper setting ID */
91static int __devinit tscan1_probe(struct device *dev, unsigned id)
92{
93 struct net_device *netdev;
94 struct sja1000_priv *priv;
95 unsigned long pld_base, sja1000_base;
96 int irq, i;
97
98 pld_base = TSCAN1_PLD_ADDRESS + id * TSCAN1_PLD_SIZE;
99 if (!request_region(pld_base, TSCAN1_PLD_SIZE, dev_name(dev)))
100 return -EBUSY;
101
102 if (inb(pld_base + TSCAN1_ID1) != TSCAN1_ID1_VALUE ||
103 inb(pld_base + TSCAN1_ID2) != TSCAN1_ID2_VALUE) {
104 release_region(pld_base, TSCAN1_PLD_SIZE);
105 return -ENODEV;
106 }
107
108 switch (inb(pld_base + TSCAN1_JUMPERS) & (TSCAN1_JP4 | TSCAN1_JP5)) {
109 case TSCAN1_JP4:
110 irq = 6;
111 break;
112 case TSCAN1_JP5:
113 irq = 7;
114 break;
115 case TSCAN1_JP4 | TSCAN1_JP5:
116 irq = 5;
117 break;
118 default:
119 dev_err(dev, "invalid JP4:JP5 setting (no IRQ)\n");
120 release_region(pld_base, TSCAN1_PLD_SIZE);
121 return -EINVAL;
122 }
123
124 netdev = alloc_sja1000dev(0);
125 if (!netdev) {
126 release_region(pld_base, TSCAN1_PLD_SIZE);
127 return -ENOMEM;
128 }
129
130 dev_set_drvdata(dev, netdev);
131 SET_NETDEV_DEV(netdev, dev);
132
133 netdev->base_addr = pld_base;
134 netdev->irq = irq;
135
136 priv = netdev_priv(netdev);
137 priv->read_reg = tscan1_read;
138 priv->write_reg = tscan1_write;
139 priv->can.clock.freq = TSCAN1_SJA1000_XTAL / 2;
140 priv->cdr = CDR_CBP | CDR_CLK_OFF;
141 priv->ocr = OCR_TX0_PUSHPULL;
142
143 /* Select the first SJA1000 IO address that is free and that works */
144 for (i = 0; i < ARRAY_SIZE(tscan1_sja1000_addresses); i++) {
145 sja1000_base = tscan1_sja1000_addresses[i];
146 if (!request_region(sja1000_base, TSCAN1_SJA1000_SIZE,
147 dev_name(dev)))
148 continue;
149
150 /* Set SJA1000 IO base address and enable it */
151 outb(TSCAN1_MODE_ENABLE | i, pld_base + TSCAN1_MODE);
152
153 priv->reg_base = (void __iomem *)sja1000_base;
154 if (!register_sja1000dev(netdev)) {
155 /* SJA1000 probe succeeded; turn LED off and return */
156 outb(0, pld_base + TSCAN1_LED);
157 netdev_info(netdev, "TS-CAN1 at 0x%lx 0x%lx irq %d\n",
158 pld_base, sja1000_base, irq);
159 return 0;
160 }
161
162 /* SJA1000 probe failed; release and try next address */
163 outb(0, pld_base + TSCAN1_MODE);
164 release_region(sja1000_base, TSCAN1_SJA1000_SIZE);
165 }
166
167 dev_err(dev, "failed to assign SJA1000 IO address\n");
168 dev_set_drvdata(dev, NULL);
169 free_sja1000dev(netdev);
170 release_region(pld_base, TSCAN1_PLD_SIZE);
171 return -ENXIO;
172}
173
174static int __devexit tscan1_remove(struct device *dev, unsigned id /*unused*/)
175{
176 struct net_device *netdev;
177 struct sja1000_priv *priv;
178 unsigned long pld_base, sja1000_base;
179
180 netdev = dev_get_drvdata(dev);
181 unregister_sja1000dev(netdev);
182 dev_set_drvdata(dev, NULL);
183
184 priv = netdev_priv(netdev);
185 pld_base = netdev->base_addr;
186 sja1000_base = (unsigned long)priv->reg_base;
187
188 outb(0, pld_base + TSCAN1_MODE); /* disable SJA1000 IO space */
189
190 release_region(sja1000_base, TSCAN1_SJA1000_SIZE);
191 release_region(pld_base, TSCAN1_PLD_SIZE);
192
193 free_sja1000dev(netdev);
194
195 return 0;
196}
197
198static struct isa_driver tscan1_isa_driver = {
199 .probe = tscan1_probe,
200 .remove = __devexit_p(tscan1_remove),
201 .driver = {
202 .name = "tscan1",
203 },
204};
205
206static int __init tscan1_init(void)
207{
208 return isa_register_driver(&tscan1_isa_driver, TSCAN1_MAXDEV);
209}
210module_init(tscan1_init);
211
212static void __exit tscan1_exit(void)
213{
214 isa_unregister_driver(&tscan1_isa_driver);
215}
216module_exit(tscan1_exit);
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index a04ce6a5f637..4e3c12371aae 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1266,11 +1266,13 @@ static int cxgb_up(struct adapter *adap)
 	}
 
 	if (!(adap->flags & QUEUES_BOUND)) {
-		err = bind_qsets(adap);
-		if (err) {
-			CH_ERR(adap, "failed to bind qsets, err %d\n", err);
+		int ret = bind_qsets(adap);
+
+		if (ret < 0) {
+			CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
 			t3_intr_disable(adap);
 			free_irq_resources(adap);
+			err = ret;
 			goto out;
 		}
 		adap->flags |= QUEUES_BOUND;
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index eaa49e4119f1..3d4253d311eb 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -281,7 +281,6 @@ struct sge_rspq;
 
 struct port_info {
 	struct adapter *adapter;
-	struct vlan_group *vlan_grp;
 	u16 viid;
 	s16 xact_addr_filt;		/* index of exact MAC address filter */
 	u16 rss_size;			/* size of VI's RSS table slice */
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index 87054e0a5746..f17703f410b3 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -403,7 +403,7 @@ static int link_start(struct net_device *dev)
 	 * that step explicitly.
 	 */
 	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
-			    pi->vlan_grp != NULL, true);
+			    !!(dev->features & NETIF_F_HW_VLAN_RX), true);
 	if (ret == 0) {
 		ret = t4_change_mac(pi->adapter, mb, pi->viid,
 				    pi->xact_addr_filt, dev->dev_addr, true,
@@ -1881,7 +1881,24 @@ static int set_tso(struct net_device *dev, u32 value)
 
 static int set_flags(struct net_device *dev, u32 flags)
 {
-	return ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH);
+	int err;
+	unsigned long old_feat = dev->features;
+
+	err = ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH |
+				   ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
+	if (err)
+		return err;
+
+	if ((old_feat ^ dev->features) & NETIF_F_HW_VLAN_RX) {
+		const struct port_info *pi = netdev_priv(dev);
+
+		err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
+				    -1, -1, -1, !!(flags & ETH_FLAG_RXVLAN),
+				    true);
+		if (err)
+			dev->features = old_feat;
+	}
+	return err;
 }
1886 1903
1887static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p) 1904static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p)
@@ -2842,15 +2859,6 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
 	return 0;
 }
 
-static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
-	struct port_info *pi = netdev_priv(dev);
-
-	pi->vlan_grp = grp;
-	t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1, -1, -1, -1,
-		      grp != NULL, true);
-}
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void cxgb_netpoll(struct net_device *dev)
 {
@@ -2878,7 +2886,6 @@ static const struct net_device_ops cxgb4_netdev_ops = {
 	.ndo_validate_addr    = eth_validate_addr,
 	.ndo_do_ioctl         = cxgb_ioctl,
 	.ndo_change_mtu       = cxgb_change_mtu,
-	.ndo_vlan_rx_register = vlan_rx_register,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller  = cxgb_netpoll,
 #endif
@@ -3658,7 +3665,6 @@ static int __devinit init_one(struct pci_dev *pdev,
 		pi->rx_offload = RX_CSO;
 		pi->port_id = i;
 		netif_carrier_off(netdev);
-		netif_tx_stop_all_queues(netdev);
 		netdev->irq = pdev->irq;
 
 		netdev->features |= NETIF_F_SG | TSO_FLAGS;
@@ -3730,6 +3736,7 @@ static int __devinit init_one(struct pci_dev *pdev,
 
 			__set_bit(i, &adapter->registered_device_map);
 			adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
+			netif_tx_stop_all_queues(adapter->port[i]);
 		}
 	}
 	if (!adapter->registered_device_map) {
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
index 9967f3debce7..17022258ed68 100644
--- a/drivers/net/cxgb4/sge.c
+++ b/drivers/net/cxgb4/sge.c
@@ -1530,18 +1530,11 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
 	skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
 
 	if (unlikely(pkt->vlan_ex)) {
-		struct port_info *pi = netdev_priv(rxq->rspq.netdev);
-		struct vlan_group *grp = pi->vlan_grp;
-
+		__vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
 		rxq->stats.vlan_ex++;
-		if (likely(grp)) {
-			ret = vlan_gro_frags(&rxq->rspq.napi, grp,
-					     ntohs(pkt->vlan));
-			goto stats;
-		}
 	}
 	ret = napi_gro_frags(&rxq->rspq.napi);
-stats:	if (ret == GRO_HELD)
+	if (ret == GRO_HELD)
 		rxq->stats.lro_pkts++;
 	else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
 		rxq->stats.lro_merged++;
@@ -1608,16 +1601,10 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
 	skb_checksum_none_assert(skb);
 
 	if (unlikely(pkt->vlan_ex)) {
-		struct vlan_group *grp = pi->vlan_grp;
-
+		__vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
 		rxq->stats.vlan_ex++;
-		if (likely(grp))
-			vlan_hwaccel_receive_skb(skb, grp, ntohs(pkt->vlan));
-		else
-			dev_kfree_skb_any(skb);
-	} else
-		netif_receive_skb(skb);
-
+	}
+	netif_receive_skb(skb);
 	return 0;
 }
 
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index a117f2a0252e..4686c3983fc3 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -521,7 +521,7 @@ void e1000_down(struct e1000_adapter *adapter)
521 e1000_clean_all_rx_rings(adapter); 521 e1000_clean_all_rx_rings(adapter);
522} 522}
523 523
524void e1000_reinit_safe(struct e1000_adapter *adapter) 524static void e1000_reinit_safe(struct e1000_adapter *adapter)
525{ 525{
526 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) 526 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
527 msleep(1); 527 msleep(1);
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 1321cb6401cf..8e745e74828d 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -396,7 +396,9 @@ struct ehea_port_res {
 	int swqe_ll_count;
 	u32 swqe_id_counter;
 	u64 tx_packets;
+	u64 tx_bytes;
 	u64 rx_packets;
+	u64 rx_bytes;
 	u32 poll_counter;
 	struct net_lro_mgr lro_mgr;
 	struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index bb7d306fb446..182b2a7be8dc 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -330,7 +330,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
 	struct ehea_port *port = netdev_priv(dev);
 	struct net_device_stats *stats = &port->stats;
 	struct hcp_ehea_port_cb2 *cb2;
-	u64 hret, rx_packets, tx_packets;
+	u64 hret, rx_packets, tx_packets, rx_bytes = 0, tx_bytes = 0;
 	int i;
 
 	memset(stats, 0, sizeof(*stats));
@@ -353,18 +353,22 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
353 ehea_dump(cb2, sizeof(*cb2), "net_device_stats"); 353 ehea_dump(cb2, sizeof(*cb2), "net_device_stats");
354 354
355 rx_packets = 0; 355 rx_packets = 0;
356 for (i = 0; i < port->num_def_qps; i++) 356 for (i = 0; i < port->num_def_qps; i++) {
357 rx_packets += port->port_res[i].rx_packets; 357 rx_packets += port->port_res[i].rx_packets;
358 rx_bytes += port->port_res[i].rx_bytes;
359 }
358 360
359 tx_packets = 0; 361 tx_packets = 0;
360 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) 362 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
361 tx_packets += port->port_res[i].tx_packets; 363 tx_packets += port->port_res[i].tx_packets;
364 tx_bytes += port->port_res[i].tx_bytes;
365 }
362 366
363 stats->tx_packets = tx_packets; 367 stats->tx_packets = tx_packets;
364 stats->multicast = cb2->rxmcp; 368 stats->multicast = cb2->rxmcp;
365 stats->rx_errors = cb2->rxuerr; 369 stats->rx_errors = cb2->rxuerr;
366 stats->rx_bytes = cb2->rxo; 370 stats->rx_bytes = rx_bytes;
367 stats->tx_bytes = cb2->txo; 371 stats->tx_bytes = tx_bytes;
368 stats->rx_packets = rx_packets; 372 stats->rx_packets = rx_packets;
369 373
370out_herr: 374out_herr:
@@ -703,6 +707,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
703 int skb_arr_rq2_len = pr->rq2_skba.len; 707 int skb_arr_rq2_len = pr->rq2_skba.len;
704 int skb_arr_rq3_len = pr->rq3_skba.len; 708 int skb_arr_rq3_len = pr->rq3_skba.len;
705 int processed, processed_rq1, processed_rq2, processed_rq3; 709 int processed, processed_rq1, processed_rq2, processed_rq3;
710 u64 processed_bytes = 0;
706 int wqe_index, last_wqe_index, rq, port_reset; 711 int wqe_index, last_wqe_index, rq, port_reset;
707 712
708 processed = processed_rq1 = processed_rq2 = processed_rq3 = 0; 713 processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
@@ -760,6 +765,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
760 processed_rq3++; 765 processed_rq3++;
761 } 766 }
762 767
768 processed_bytes += skb->len;
763 ehea_proc_skb(pr, cqe, skb); 769 ehea_proc_skb(pr, cqe, skb);
764 } else { 770 } else {
765 pr->p_stats.poll_receive_errors++; 771 pr->p_stats.poll_receive_errors++;
@@ -775,6 +781,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
775 lro_flush_all(&pr->lro_mgr); 781 lro_flush_all(&pr->lro_mgr);
776 782
777 pr->rx_packets += processed; 783 pr->rx_packets += processed;
784 pr->rx_bytes += processed_bytes;
778 785
779 ehea_refill_rq1(pr, last_wqe_index, processed_rq1); 786 ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
780 ehea_refill_rq2(pr, processed_rq2); 787 ehea_refill_rq2(pr, processed_rq2);
@@ -1509,9 +1516,20 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
1509 enum ehea_eq_type eq_type = EHEA_EQ; 1516 enum ehea_eq_type eq_type = EHEA_EQ;
1510 struct ehea_qp_init_attr *init_attr = NULL; 1517 struct ehea_qp_init_attr *init_attr = NULL;
1511 int ret = -EIO; 1518 int ret = -EIO;
1519 u64 tx_bytes, rx_bytes, tx_packets, rx_packets;
1520
1521 tx_bytes = pr->tx_bytes;
1522 tx_packets = pr->tx_packets;
1523 rx_bytes = pr->rx_bytes;
1524 rx_packets = pr->rx_packets;
1512 1525
1513 memset(pr, 0, sizeof(struct ehea_port_res)); 1526 memset(pr, 0, sizeof(struct ehea_port_res));
1514 1527
1528 pr->tx_bytes = rx_bytes;
1529 pr->tx_packets = tx_packets;
1530 pr->rx_bytes = rx_bytes;
1531 pr->rx_packets = rx_packets;
1532
1515 pr->port = port; 1533 pr->port = port;
1516 spin_lock_init(&pr->xmit_lock); 1534 spin_lock_init(&pr->xmit_lock);
1517 spin_lock_init(&pr->netif_queue); 1535 spin_lock_init(&pr->netif_queue);
@@ -2249,6 +2267,14 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
2249 memset(swqe, 0, SWQE_HEADER_SIZE); 2267 memset(swqe, 0, SWQE_HEADER_SIZE);
2250 atomic_dec(&pr->swqe_avail); 2268 atomic_dec(&pr->swqe_avail);
2251 2269
2270 if (vlan_tx_tag_present(skb)) {
2271 swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
2272 swqe->vlan_tag = vlan_tx_tag_get(skb);
2273 }
2274
2275 pr->tx_packets++;
2276 pr->tx_bytes += skb->len;
2277
2252 if (skb->len <= SWQE3_MAX_IMM) { 2278 if (skb->len <= SWQE3_MAX_IMM) {
2253 u32 sig_iv = port->sig_comp_iv; 2279 u32 sig_iv = port->sig_comp_iv;
2254 u32 swqe_num = pr->swqe_id_counter; 2280 u32 swqe_num = pr->swqe_id_counter;
@@ -2279,11 +2305,6 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
2279 } 2305 }
2280 pr->swqe_id_counter += 1; 2306 pr->swqe_id_counter += 1;
2281 2307
2282 if (vlan_tx_tag_present(skb)) {
2283 swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
2284 swqe->vlan_tag = vlan_tx_tag_get(skb);
2285 }
2286
2287 if (netif_msg_tx_queued(port)) { 2308 if (netif_msg_tx_queued(port)) {
2288 ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr); 2309 ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
2289 ehea_dump(swqe, 512, "swqe"); 2310 ehea_dump(swqe, 512, "swqe");
@@ -2295,7 +2316,6 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
2295 } 2316 }
2296 2317
2297 ehea_post_swqe(pr->qp, swqe); 2318 ehea_post_swqe(pr->qp, swqe);
2298 pr->tx_packets++;
2299 2319
2300 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) { 2320 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
2301 spin_lock_irqsave(&pr->netif_queue, flags); 2321 spin_lock_irqsave(&pr->netif_queue, flags);
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 4c4cc80ec0a1..49e4ce1246a7 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -2511,7 +2511,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 		    skb_recycle_check(skb, priv->rx_buffer_size +
 				RXBUF_ALIGNMENT)) {
 			gfar_align_skb(skb);
-			__skb_queue_head(&priv->rx_recycle, skb);
+			skb_queue_head(&priv->rx_recycle, skb);
 		} else
 			dev_kfree_skb_any(skb);
 
@@ -2594,7 +2594,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev)
 	struct gfar_private *priv = netdev_priv(dev);
 	struct sk_buff *skb = NULL;
 
-	skb = __skb_dequeue(&priv->rx_recycle);
+	skb = skb_dequeue(&priv->rx_recycle);
 	if (!skb)
 		skb = gfar_alloc_skb(dev);
 
@@ -2750,7 +2750,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 			if (unlikely(!newskb))
 				newskb = skb;
 			else if (skb)
-				__skb_queue_head(&priv->rx_recycle, skb);
+				skb_queue_head(&priv->rx_recycle, skb);
 		} else {
 			/* Increment the number of packets */
 			rx_queue->stats.rx_packets++;
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index d7a975ee2add..d85edf3119c2 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -1623,12 +1623,12 @@ err_out:
 	return rc;
 }
 
-#ifdef CONFIG_PM
 static void
 jme_set_100m_half(struct jme_adapter *jme)
 {
 	u32 bmcr, tmp;
 
+	jme_phy_on(jme);
 	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
 	tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
 		       BMCR_SPEED1000 | BMCR_FULLDPLX);
@@ -1656,7 +1656,6 @@ jme_wait_link(struct jme_adapter *jme)
 		phylink = jme_linkstat_from_phy(jme);
 	}
 }
-#endif
 
 static inline void
 jme_phy_off(struct jme_adapter *jme)
@@ -1664,6 +1663,21 @@ jme_phy_off(struct jme_adapter *jme)
 	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
 }
 
+static void
+jme_powersave_phy(struct jme_adapter *jme)
+{
+	if (jme->reg_pmcs) {
+		jme_set_100m_half(jme);
+
+		if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
+			jme_wait_link(jme);
+
+		jwrite32(jme, JME_PMCS, jme->reg_pmcs);
+	} else {
+		jme_phy_off(jme);
+	}
+}
+
 static int
 jme_close(struct net_device *netdev)
 {
@@ -2991,6 +3005,16 @@ jme_remove_one(struct pci_dev *pdev)
 
 }
 
+static void
+jme_shutdown(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	jme_powersave_phy(jme);
+	pci_pme_active(pdev, true);
+}
+
 #ifdef CONFIG_PM
 static int
 jme_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -3028,19 +3052,9 @@ jme_suspend(struct pci_dev *pdev, pm_message_t state)
 	tasklet_hi_enable(&jme->rxempty_task);
 
 	pci_save_state(pdev);
-	if (jme->reg_pmcs) {
-		jme_set_100m_half(jme);
-
-		if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
-			jme_wait_link(jme);
-
-		jwrite32(jme, JME_PMCS, jme->reg_pmcs);
-
-		pci_enable_wake(pdev, PCI_D3cold, true);
-	} else {
-		jme_phy_off(jme);
-	}
-	pci_set_power_state(pdev, PCI_D3cold);
+	jme_powersave_phy(jme);
+	pci_enable_wake(jme->pdev, PCI_D3hot, true);
+	pci_set_power_state(pdev, PCI_D3hot);
 
 	return 0;
 }
@@ -3087,6 +3101,7 @@ static struct pci_driver jme_driver = {
 	.suspend	= jme_suspend,
 	.resume		= jme_resume,
 #endif /* CONFIG_PM */
+	.shutdown	= jme_shutdown,
};
 
 static int __init
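Factoring the wake-on-LAN sequence into jme_powersave_phy() lets the driver run the same PHY programming from both the CONFIG_PM suspend path and a new pci_driver .shutdown hook, so WoL also works across a plain power-off. The general shape of such a hook, sketched for a hypothetical driver:

    #include <linux/pci.h>
    #include <linux/netdevice.h>

    /* Hypothetical driver: prepare the device for system power-off.
     * Unlike .suspend, .shutdown runs on halt/reboot even without
     * CONFIG_PM, so WoL programming belongs in shared code.
     */
    static void example_shutdown(struct pci_dev *pdev)
    {
	    struct net_device *netdev = pci_get_drvdata(pdev);

	    /* device-specific PHY/WoL programming would go here,
	     * typically the same helper the suspend path calls */
	    pci_pme_active(pdev, true);	/* arm PME generation */
    }

    static struct pci_driver example_driver = {
	    /* .name/.probe/.remove/.suspend/.resume elided */
	    .shutdown = example_shutdown,
    };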
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 4297f6e8c4bc..f69e73e2191e 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -515,14 +515,15 @@ static int macb_poll(struct napi_struct *napi, int budget)
 		(unsigned long)status, budget);
 
 	work_done = macb_rx(bp, budget);
-	if (work_done < budget)
+	if (work_done < budget) {
 		napi_complete(napi);
 
-	/*
-	 * We've done what we can to clean the buffers. Make sure we
-	 * get notified when new packets arrive.
-	 */
-	macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+		/*
+		 * We've done what we can to clean the buffers. Make sure we
+		 * get notified when new packets arrive.
+		 */
+		macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+	}
 
 	/* TODO: Handle errors */
 
@@ -550,12 +551,16 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
 		}
 
 		if (status & MACB_RX_INT_FLAGS) {
+			/*
+			 * There's no point taking any more interrupts
+			 * until we have processed the buffers. The
+			 * scheduling call may fail if the poll routine
+			 * is already scheduled, so disable interrupts
+			 * now.
+			 */
+			macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
+
 			if (napi_schedule_prep(&bp->napi)) {
-				/*
-				 * There's no point taking any more interrupts
-				 * until we have processed the buffers
-				 */
-				macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
 				dev_dbg(&bp->pdev->dev,
					"scheduling RX softirq\n");
				__napi_schedule(&bp->napi);
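The macb fix moves the interrupt-disable write ahead of napi_schedule_prep(): if poll is already scheduled, prep fails, and under the old ordering the RX interrupt would have stayed enabled and stormed. The canonical ordering, as a sketch that does not use macb's actual register API:

    #include <linux/io.h>
    #include <linux/netdevice.h>

    /* Mask RX interrupts before napi_schedule_prep(), so a failed
     * prep (poll already pending) still leaves the source masked and
     * the interrupt line cannot storm; the poll routine re-enables
     * the source once it runs out of work.
     */
    static void example_rx_irq(void __iomem *int_disable_reg,
			       struct napi_struct *napi, u32 rx_flags)
    {
	    writel(rx_flags, int_disable_reg);	/* disable RX interrupts */
	    if (napi_schedule_prep(napi))
		    __napi_schedule(napi);	/* poll will re-enable them */
    }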
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
index b07e4dee80aa..02393fdf44c1 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -210,38 +210,12 @@ static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
 	return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
 }
 
-int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
+static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
 {
 	return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
 			MLX4_CMD_TIME_CLASS_B);
 }
 
-int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt)
-{
-	struct mlx4_cmd_mailbox *mailbox;
-	__be64 *inbox;
-	int err;
-
-	mailbox = mlx4_alloc_cmd_mailbox(dev);
-	if (IS_ERR(mailbox))
-		return PTR_ERR(mailbox);
-	inbox = mailbox->buf;
-
-	inbox[0] = cpu_to_be64(virt);
-	inbox[1] = cpu_to_be64(dma_addr);
-
-	err = mlx4_cmd(dev, mailbox->dma, 1, 0, MLX4_CMD_MAP_ICM,
-		       MLX4_CMD_TIME_CLASS_B);
-
-	mlx4_free_cmd_mailbox(dev, mailbox);
-
-	if (!err)
-		mlx4_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
-			 (unsigned long long) dma_addr, (unsigned long long) virt);
-
-	return err;
-}
-
 int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
 {
 	return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
diff --git a/drivers/net/mlx4/icm.h b/drivers/net/mlx4/icm.h
index ab56a2f89b65..b10c07a1dc1a 100644
--- a/drivers/net/mlx4/icm.h
+++ b/drivers/net/mlx4/icm.h
@@ -128,8 +128,6 @@ static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
 	return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
 }
 
-int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count);
-int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt);
 int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
 int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
 
135 133
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
index 56371ef328ef..451339559bdc 100644
--- a/drivers/net/mlx4/port.c
+++ b/drivers/net/mlx4/port.c
@@ -111,6 +111,12 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
 			goto out;
 		}
 	}
+
+	if (free < 0) {
+		err = -ENOMEM;
+		goto out;
+	}
+
 	mlx4_dbg(dev, "Free MAC index is %d\n", free);
 
 	if (table->total == table->max) {
@@ -224,6 +230,11 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
 		}
 	}
 
+	if (free < 0) {
+		err = -ENOMEM;
+		goto out;
+	}
+
 	if (table->total == table->max) {
 		/* No free vlan entries */
 		err = -ENOSPC;
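Both mlx4 hunks close the same hole: free starts at -1 and is only set when the table scan finds an empty slot, so on a full table the old code went on to index the table with -1. Checking the sentinel before first use is the whole fix; the pattern in isolation, simplified with an illustrative table layout:

    #include <linux/errno.h>
    #include <linux/types.h>

    /* Simplified scan-for-free-slot pattern. The sentinel must be
     * tested before it is ever used as an index.
     */
    static int example_register(u64 *entries, int size, u64 val)
    {
	    int i, free = -1;

	    for (i = 0; i < size; i++) {
		    if (!entries[i] && free < 0)
			    free = i;		/* remember first empty slot */
		    if (entries[i] == val)
			    return i;		/* already registered */
	    }

	    if (free < 0)
		    return -ENOMEM;		/* table full: fail, don't index -1 */

	    entries[free] = val;
	    return free;
    }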
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1bb16cb79433..7670aac0e93f 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -65,7 +65,7 @@ EXPORT_SYMBOL(phy_print_status);
 *
 * Returns 0 on success on < 0 on error.
 */
-int phy_clear_interrupt(struct phy_device *phydev)
+static int phy_clear_interrupt(struct phy_device *phydev)
 {
 	int err = 0;
 
@@ -82,7 +82,7 @@ int phy_clear_interrupt(struct phy_device *phydev)
 *
 * Returns 0 on success on < 0 on error.
 */
-int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
+static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
 {
 	int err = 0;
 
@@ -208,7 +208,7 @@ static inline int phy_find_valid(int idx, u32 features)
 * duplexes. Drop down by one in this order: 1000/FULL,
 * 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
 */
-void phy_sanitize_settings(struct phy_device *phydev)
+static void phy_sanitize_settings(struct phy_device *phydev)
 {
 	u32 features = phydev->supported;
 	int idx;
@@ -223,7 +223,6 @@ void phy_sanitize_settings(struct phy_device *phydev)
 	phydev->speed = settings[idx].speed;
 	phydev->duplex = settings[idx].duplex;
 }
-EXPORT_SYMBOL(phy_sanitize_settings);
 
 /**
  * phy_ethtool_sset - generic ethtool sset function, handles all the details
@@ -532,7 +531,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
 * phy_enable_interrupts - Enable the interrupts from the PHY side
 * @phydev: target phy_device struct
 */
-int phy_enable_interrupts(struct phy_device *phydev)
+static int phy_enable_interrupts(struct phy_device *phydev)
 {
 	int err;
 
@@ -545,13 +544,12 @@ int phy_enable_interrupts(struct phy_device *phydev)
 
 	return err;
 }
-EXPORT_SYMBOL(phy_enable_interrupts);
 
 /**
  * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
  * @phydev: target phy_device struct
  */
-int phy_disable_interrupts(struct phy_device *phydev)
+static int phy_disable_interrupts(struct phy_device *phydev)
 {
 	int err;
 
@@ -574,7 +572,6 @@ phy_err:
 
 	return err;
 }
-EXPORT_SYMBOL(phy_disable_interrupts);
 
 /**
  * phy_start_interrupts - request and enable interrupts for a PHY device
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 16ddc77313cb..993c52c82aeb 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -57,6 +57,9 @@ extern void mdio_bus_exit(void);
 static LIST_HEAD(phy_fixup_list);
 static DEFINE_MUTEX(phy_fixup_lock);
 
+static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
+			     u32 flags, phy_interface_t interface);
+
 /*
  * Creates a new phy_fixup and adds it to the list
  * @bus_id: A string which matches phydev->dev.bus_id (or PHY_ANY_ID)
@@ -146,7 +149,8 @@ int phy_scan_fixups(struct phy_device *phydev)
 }
 EXPORT_SYMBOL(phy_scan_fixups);
 
-struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
+static struct phy_device* phy_device_create(struct mii_bus *bus,
+					    int addr, int phy_id)
 {
 	struct phy_device *dev;
 
@@ -193,7 +197,6 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
 
 	return dev;
 }
-EXPORT_SYMBOL(phy_device_create);
 
 /**
  * get_phy_id - reads the specified addr for its ID.
@@ -316,7 +319,7 @@ EXPORT_SYMBOL(phy_find_first);
 * If you want to monitor your own link state, don't call
 * this function.
 */
-void phy_prepare_link(struct phy_device *phydev,
+static void phy_prepare_link(struct phy_device *phydev,
 		      void (*handler)(struct net_device *))
 {
 	phydev->adjust_link = handler;
@@ -435,8 +438,8 @@ int phy_init_hw(struct phy_device *phydev)
 * the attaching device, and given a callback for link status
 * change. The phy_device is returned to the attaching driver.
 */
-int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
-		      u32 flags, phy_interface_t interface)
+static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
+			     u32 flags, phy_interface_t interface)
 {
 	struct device *d = &phydev->dev;
 
@@ -473,7 +476,6 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
 	 * (dev_flags and interface) */
 	return phy_init_hw(phydev);
 }
-EXPORT_SYMBOL(phy_attach_direct);
 
 /**
  * phy_attach - attach a network device to a particular PHY device
@@ -540,7 +542,7 @@ EXPORT_SYMBOL(phy_detach);
 * what is supported. Returns < 0 on error, 0 if the PHY's advertisement
 * hasn't changed, and > 0 if it has changed.
 */
-int genphy_config_advert(struct phy_device *phydev)
+static int genphy_config_advert(struct phy_device *phydev)
 {
 	u32 advertise;
 	int oldadv, adv;
@@ -605,7 +607,6 @@ int genphy_config_advert(struct phy_device *phydev)
 
 	return changed;
 }
-EXPORT_SYMBOL(genphy_config_advert);
 
 /**
  * genphy_setup_forced - configures/forces speed/duplex from @phydev
@@ -615,7 +616,7 @@ EXPORT_SYMBOL(genphy_config_advert);
 * to the values in phydev. Assumes that the values are valid.
 * Please see phy_sanitize_settings().
 */
-int genphy_setup_forced(struct phy_device *phydev)
+static int genphy_setup_forced(struct phy_device *phydev)
 {
 	int err;
 	int ctl = 0;
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 26c37d3a5868..8ecc170c9b74 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -146,11 +146,13 @@
 #define MAX_CMD_DESCRIPTORS		1024
 #define MAX_RCV_DESCRIPTORS_1G		4096
 #define MAX_RCV_DESCRIPTORS_10G		8192
+#define MAX_RCV_DESCRIPTORS_VF		2048
 #define MAX_JUMBO_RCV_DESCRIPTORS_1G	512
 #define MAX_JUMBO_RCV_DESCRIPTORS_10G	1024
 
 #define DEFAULT_RCV_DESCRIPTORS_1G	2048
 #define DEFAULT_RCV_DESCRIPTORS_10G	4096
+#define DEFAULT_RCV_DESCRIPTORS_VF	1024
 #define MAX_RDS_RINGS			2
 
 #define get_next_index(index, length)	\
@@ -942,6 +944,7 @@ struct qlcnic_ipaddr {
 #define QLCNIC_LOOPBACK_TEST		2
 
 #define QLCNIC_FILTER_AGE	80
+#define QLCNIC_READD_AGE	20
 #define QLCNIC_LB_MAX_FILTERS	64
 
 struct qlcnic_filter {
@@ -970,6 +973,8 @@ struct qlcnic_adapter {
 	u16 num_txd;
 	u16 num_rxd;
 	u16 num_jumbo_rxd;
+	u16 max_rxd;
+	u16 max_jumbo_rxd;
 
 	u8 max_rds_rings;
 	u8 max_sds_rings;
@@ -1129,7 +1134,7 @@ struct qlcnic_eswitch {
 #define MAX_RX_QUEUES		4
 #define DEFAULT_MAC_LEARN	1
 
-#define IS_VALID_VLAN(vlan)	(vlan >= MIN_VLAN_ID && vlan <= MAX_VLAN_ID)
+#define IS_VALID_VLAN(vlan)	(vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID)
 #define IS_VALID_BW(bw)		(bw >= MIN_BW && bw <= MAX_BW)
 #define IS_VALID_TX_QUEUES(que)	(que > 0 && que <= MAX_TX_QUEUES)
 #define IS_VALID_RX_QUEUES(que)	(que > 0 && que <= MAX_RX_QUEUES)
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 25e93a53fca0..ec21d24015c4 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -437,14 +437,8 @@ qlcnic_get_ringparam(struct net_device *dev,
 	ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
 	ring->tx_pending = adapter->num_txd;
 
-	if (adapter->ahw.port_type == QLCNIC_GBE) {
-		ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G;
-		ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G;
-	} else {
-		ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G;
-		ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_10G;
-	}
-
+	ring->rx_max_pending = adapter->max_rxd;
+	ring->rx_jumbo_max_pending = adapter->max_jumbo_rxd;
 	ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
 
 	ring->rx_mini_max_pending = 0;
@@ -472,24 +466,17 @@ qlcnic_set_ringparam(struct net_device *dev,
 		struct ethtool_ringparam *ring)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(dev);
-	u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G;
-	u16 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
 	u16 num_rxd, num_jumbo_rxd, num_txd;
 
-
 	if (ring->rx_mini_pending)
 		return -EOPNOTSUPP;
 
-	if (adapter->ahw.port_type == QLCNIC_GBE) {
-		max_rcv_desc = MAX_RCV_DESCRIPTORS_1G;
-		max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
-	}
-
 	num_rxd = qlcnic_validate_ringparam(ring->rx_pending,
-			MIN_RCV_DESCRIPTORS, max_rcv_desc, "rx");
+			MIN_RCV_DESCRIPTORS, adapter->max_rxd, "rx");
 
 	num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending,
-			MIN_JUMBO_DESCRIPTORS, max_jumbo_desc, "rx jumbo");
+			MIN_JUMBO_DESCRIPTORS, adapter->max_jumbo_rxd,
+			"rx jumbo");
 
 	num_txd = qlcnic_validate_ringparam(ring->tx_pending,
 			MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx");
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index f047c7c48314..7a298cdf9ab3 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -656,13 +656,23 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
 
 	dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
 			fw_major, fw_minor, fw_build);
-
 	if (adapter->ahw.port_type == QLCNIC_XGBE) {
-		adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+		if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
+			adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
+			adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
+		} else {
+			adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+			adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
+		}
+
 		adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+		adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+
 	} else if (adapter->ahw.port_type == QLCNIC_GBE) {
 		adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
 		adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+		adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+		adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
 	}
 
 	adapter->msix_supported = !!use_msi_x;
@@ -1860,6 +1870,11 @@ qlcnic_send_filter(struct qlcnic_adapter *adapter,
 	hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
 		if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
 		    tmp_fil->vlan_id == vlan_id) {
+
+			if (jiffies >
+			    (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
+				qlcnic_change_filter(adapter, src_addr, vlan_id,
+								tx_ring);
 			tmp_fil->ftime = jiffies;
 			return;
 		}
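QLCNIC_READD_AGE acts as a refresh threshold: when a known MAC/VLAN pair is seen again and its entry is older than 20 seconds, the filter is re-programmed into hardware rather than merely re-timestamped. Comparisons against jiffies are usually written with time_after() so they survive counter wraparound; a sketch of the equivalent test:

    #include <linux/jiffies.h>
    #include <linux/types.h>

    #define EXAMPLE_READD_AGE	20	/* seconds, mirrors QLCNIC_READD_AGE */

    /* Return true when an entry stamped at 'ftime' should be re-added.
     * time_after() handles jiffies wraparound, unlike a plain '>'.
     */
    static bool example_needs_readd(unsigned long ftime)
    {
	    return time_after(jiffies, ftime + EXAMPLE_READD_AGE * HZ);
    }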
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index a478786840a6..22821398fc63 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -2226,7 +2226,6 @@ int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
 int ql_core_dump(struct ql_adapter *qdev,
		struct ql_mpi_coredump *mpi_coredump);
 int ql_mb_about_fw(struct ql_adapter *qdev);
-int ql_wol(struct ql_adapter *qdev);
 int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
 int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
 int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config);
@@ -2243,16 +2242,13 @@ netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
 void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
 int ql_own_firmware(struct ql_adapter *qdev);
 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
-void qlge_set_multicast_list(struct net_device *ndev);
 
-#if 1
-#define QL_ALL_DUMP
-#define QL_REG_DUMP
-#define QL_DEV_DUMP
-#define QL_CB_DUMP
+/* #define QL_ALL_DUMP */
+/* #define QL_REG_DUMP */
+/* #define QL_DEV_DUMP */
+/* #define QL_CB_DUMP */
 /* #define QL_IB_DUMP */
 /* #define QL_OB_DUMP */
-#endif
 
 #ifdef QL_REG_DUMP
 extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index ba0053d8515e..c30e0fe55a31 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -94,6 +94,9 @@ static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
 
 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
 
+static int ql_wol(struct ql_adapter *qdev);
+static void qlge_set_multicast_list(struct net_device *ndev);
+
 /* This hardware semaphore causes exclusive access to
  * resources shared between the NIC driver, MPI firmware,
  * FCOE firmware and the FC driver.
@@ -2382,6 +2385,20 @@ static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
 
 }
 
+static void qlge_restore_vlan(struct ql_adapter *qdev)
+{
+	qlge_vlan_rx_register(qdev->ndev, qdev->vlgrp);
+
+	if (qdev->vlgrp) {
+		u16 vid;
+		for (vid = 0; vid < VLAN_N_VID; vid++) {
+			if (!vlan_group_get_device(qdev->vlgrp, vid))
+				continue;
+			qlge_vlan_rx_add_vid(qdev->ndev, vid);
+		}
+	}
+}
+
 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
 {
@@ -3842,7 +3859,7 @@ static void ql_display_dev_info(struct net_device *ndev)
		 "MAC address %pM\n", ndev->dev_addr);
 }
 
-int ql_wol(struct ql_adapter *qdev)
+static int ql_wol(struct ql_adapter *qdev)
 {
 	int status = 0;
 	u32 wol = MB_WOL_DISABLE;
@@ -3957,6 +3974,9 @@ static int ql_adapter_up(struct ql_adapter *qdev)
 	clear_bit(QL_PROMISCUOUS, &qdev->flags);
 	qlge_set_multicast_list(qdev->ndev);
 
+	/* Restore vlan setting. */
+	qlge_restore_vlan(qdev);
+
 	ql_enable_interrupts(qdev);
 	ql_enable_all_completion_interrupts(qdev);
 	netif_tx_start_all_queues(qdev->ndev);
@@ -4242,7 +4262,7 @@ static struct net_device_stats *qlge_get_stats(struct net_device
 	return &ndev->stats;
 }
 
-void qlge_set_multicast_list(struct net_device *ndev)
+static void qlge_set_multicast_list(struct net_device *ndev)
 {
 	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
 	struct netdev_hw_addr *ha;
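qlge_restore_vlan() exists because bringing the adapter back up wipes the hardware VLAN filter while the stack's vlan_group still holds the configured VIDs; walking the group and re-adding each configured device replays that state. The generic shape of the replay loop, with the driver callback as a placeholder:

    #include <linux/if_vlan.h>

    /* Replay stack-side VLAN state into freshly reset hardware. 'grp'
     * is the vlan_group the stack registered earlier; add_vid() stands
     * in for the driver's ndo_vlan_rx_add_vid implementation.
     */
    static void example_restore_vlan(struct net_device *ndev,
				     struct vlan_group *grp,
				     void (*add_vid)(struct net_device *, u16))
    {
	    u16 vid;

	    if (!grp)
		    return;
	    for (vid = 0; vid < VLAN_N_VID; vid++)
		    if (vlan_group_get_device(grp, vid))
			    add_vid(ndev, vid);	/* re-program this VID */
    }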
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index f84e8570c7cb..0e7c7c7ee164 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -87,7 +87,7 @@ exit:
 	return status;
 }
 
-int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
+static int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
 {
 	int status;
 	status = ql_write_mpi_reg(qdev, 0x00001010, 1);
@@ -681,7 +681,7 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev)
 /* Send and ACK mailbox command to the firmware to
 * let it continue with the change.
 */
-int ql_mb_idc_ack(struct ql_adapter *qdev)
+static int ql_mb_idc_ack(struct ql_adapter *qdev)
 {
 	struct mbox_params mbc;
 	struct mbox_params *mbcp = &mbc;
@@ -744,7 +744,7 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev)
 	return status;
 }
 
-int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
+static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
	u32 size)
 {
 	int status = 0;
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index a9ae505e1baf..66c2f1a01963 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -961,9 +961,9 @@ sb1000_open(struct net_device *dev)
 	lp->rx_error_count = 0;
 	lp->rx_error_dpc_count = 0;
 	lp->rx_session_id[0] = 0x50;
-	lp->rx_session_id[0] = 0x48;
-	lp->rx_session_id[0] = 0x44;
-	lp->rx_session_id[0] = 0x42;
+	lp->rx_session_id[1] = 0x48;
+	lp->rx_session_id[2] = 0x44;
+	lp->rx_session_id[3] = 0x42;
 	lp->rx_frame_id[0] = 0;
 	lp->rx_frame_id[1] = 0;
 	lp->rx_frame_id[2] = 0;
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index 9265315baa0b..3a0cc63428ee 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -531,7 +531,7 @@ static int sgiseeq_open(struct net_device *dev)
 
 	if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
 		printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq);
-		err = -EAGAIN;
+		return -EAGAIN;
 	}
 
 	err = init_seeq(dev, sp, sregs);
diff --git a/drivers/net/slhc.c b/drivers/net/slhc.c
index ac279fad9d45..ab9e3b785b5b 100644
--- a/drivers/net/slhc.c
+++ b/drivers/net/slhc.c
@@ -688,18 +688,8 @@ slhc_toss(struct slcompress *comp)
 	return 0;
 }
 
-
-/* VJ header compression */
-EXPORT_SYMBOL(slhc_init);
-EXPORT_SYMBOL(slhc_free);
-EXPORT_SYMBOL(slhc_remember);
-EXPORT_SYMBOL(slhc_compress);
-EXPORT_SYMBOL(slhc_uncompress);
-EXPORT_SYMBOL(slhc_toss);
-
 #else /* CONFIG_INET */
 
-
 int
 slhc_toss(struct slcompress *comp)
 {
@@ -738,6 +728,10 @@ slhc_init(int rslots, int tslots)
	printk(KERN_DEBUG "Called IP function on non IP-system: slhc_init");
	return NULL;
 }
+
+#endif /* CONFIG_INET */
+
+/* VJ header compression */
 EXPORT_SYMBOL(slhc_init);
 EXPORT_SYMBOL(slhc_free);
 EXPORT_SYMBOL(slhc_remember);
@@ -745,5 +739,4 @@ EXPORT_SYMBOL(slhc_compress);
 EXPORT_SYMBOL(slhc_uncompress);
 EXPORT_SYMBOL(slhc_toss);
 
-#endif /* CONFIG_INET */
 MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 852e917778f8..30ccbb6d097a 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -9948,16 +9948,16 @@ static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 	    !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
 		return -EINVAL;
 
+	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
+
 	spin_lock_bh(&tp->lock);
-	if (wol->wolopts & WAKE_MAGIC) {
+	if (device_may_wakeup(dp))
 		tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
-		device_set_wakeup_enable(dp, true);
-	} else {
+	else
 		tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
-		device_set_wakeup_enable(dp, false);
-	}
 	spin_unlock_bh(&tp->lock);
 
+
 	return 0;
 }
 
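The tg3 change fixes ordering on two fronts: the PM-core wakeup state is configured before the spinlock section (device_set_wakeup_enable() is not safe to call with the lock held), and the driver flag is then derived from what the PM core actually accepted via device_may_wakeup(). In outline, with generic names:

    #include <linux/device.h>
    #include <linux/spinlock.h>

    /* Sketch of the set_wol ordering: configure the PM core first,
     * outside the lock, then mirror the accepted state into
     * driver-private flags under the lock.
     */
    static void example_set_wol(struct device *dev, bool want_magic,
				spinlock_t *lock, u32 *flags, u32 wol_bit)
    {
	    device_set_wakeup_enable(dev, want_magic);	/* lock-free */

	    spin_lock_bh(lock);
	    if (device_may_wakeup(dev))
		    *flags |= wol_bit;
	    else
		    *flags &= ~wol_bit;
	    spin_unlock_bh(lock);
    }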
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 663b8860a531..793020347e54 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -1220,7 +1220,7 @@ void tms380tr_wait(unsigned long time)
		tmp = schedule_timeout_interruptible(tmp);
	} while(time_after(tmp, jiffies));
 #else
-	udelay(time);
+	mdelay(time / 1000);
 #endif
 }
 
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 1cc67138adbf..5b83c3f35f47 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -24,10 +24,6 @@
	3XP Processor. It has been tested on x86 and sparc64.
 
	KNOWN ISSUES:
-	*) The current firmware always strips the VLAN tag off, even if
-		we tell it not to. You should filter VLANs at the switch
-		as a workaround (good practice in any event) until we can
-		get this fixed.
	*) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
		issue. Hopefully 3Com will fix it.
	*) Waiting for a command response takes 8ms due to non-preemptable
@@ -280,8 +276,6 @@ struct typhoon {
	struct pci_dev *	pdev;
	struct net_device *	dev;
	struct napi_struct	napi;
-	spinlock_t		state_lock;
-	struct vlan_group *	vlgrp;
	struct basic_ring	rxHiRing;
	struct basic_ring	rxBuffRing;
	struct rxbuff_ent	rxbuffers[RXENT_ENTRIES];
@@ -695,44 +689,6 @@ out:
	return err;
 }
 
-static void
-typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
-	struct typhoon *tp = netdev_priv(dev);
-	struct cmd_desc xp_cmd;
-	int err;
-
-	spin_lock_bh(&tp->state_lock);
-	if(!tp->vlgrp != !grp) {
-		/* We've either been turned on for the first time, or we've
-		 * been turned off. Update the 3XP.
-		 */
-		if(grp)
-			tp->offload |= TYPHOON_OFFLOAD_VLAN;
-		else
-			tp->offload &= ~TYPHOON_OFFLOAD_VLAN;
-
-		/* If the interface is up, the runtime is running -- and we
-		 * must be up for the vlan core to call us.
-		 *
-		 * Do the command outside of the spin lock, as it is slow.
-		 */
-		INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
-					TYPHOON_CMD_SET_OFFLOAD_TASKS);
-		xp_cmd.parm2 = tp->offload;
-		xp_cmd.parm3 = tp->offload;
-		spin_unlock_bh(&tp->state_lock);
-		err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
-		if(err < 0)
-			netdev_err(tp->dev, "vlan offload error %d\n", -err);
-		spin_lock_bh(&tp->state_lock);
-	}
-
-	/* now make the change visible */
-	tp->vlgrp = grp;
-	spin_unlock_bh(&tp->state_lock);
-}
-
 static inline void
 typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
		u32 ring_dma)
@@ -818,7 +774,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
		first_txd->processFlags |=
		    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
		first_txd->processFlags |=
-		    cpu_to_le32(ntohs(vlan_tx_tag_get(skb)) <<
+		    cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
	}
 
@@ -936,7 +892,7 @@ typhoon_set_rx_mode(struct net_device *dev)
		filter |= TYPHOON_RX_FILTER_MCAST_HASH;
	}
 
-	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
+	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = filter;
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 }
@@ -1198,6 +1154,20 @@ typhoon_get_rx_csum(struct net_device *dev)
	return 1;
 }
 
+static int
+typhoon_set_flags(struct net_device *dev, u32 data)
+{
+	/* There's no way to turn off the RX VLAN offloading and stripping
+	 * on the current 3XP firmware -- it does not respect the offload
+	 * settings -- so we only allow the user to toggle the TX processing.
+	 */
+	if (!(data & ETH_FLAG_RXVLAN))
+		return -EINVAL;
+
+	return ethtool_op_set_flags(dev, data,
+				    ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
+}
+
 static void
 typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
 {
@@ -1224,6 +1194,8 @@ static const struct ethtool_ops typhoon_ethtool_ops = {
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= ethtool_op_set_tso,
	.get_ringparam		= typhoon_get_ringparam,
+	.set_flags		= typhoon_set_flags,
+	.get_flags		= ethtool_op_get_flags,
 };
 
 static int
@@ -1309,9 +1281,9 @@ typhoon_init_interface(struct typhoon *tp)
 
	tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
	tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
+	tp->offload |= TYPHOON_OFFLOAD_VLAN;
 
	spin_lock_init(&tp->command_lock);
-	spin_lock_init(&tp->state_lock);
 
	/* Force the writes to the shared memory area out before continuing. */
	wmb();
@@ -1328,7 +1300,7 @@ typhoon_init_rings(struct typhoon *tp)
	tp->rxHiRing.lastWrite = 0;
	tp->rxBuffRing.lastWrite = 0;
	tp->cmdRing.lastWrite = 0;
-	tp->cmdRing.lastWrite = 0;
+	tp->respRing.lastWrite = 0;
 
	tp->txLoRing.lastRead = 0;
	tp->txHiRing.lastRead = 0;
@@ -1762,13 +1734,10 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read
		} else
			skb_checksum_none_assert(new_skb);
 
-		spin_lock(&tp->state_lock);
-		if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
-			vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
-						 ntohl(rx->vlanTag) & 0xffff);
-		else
-			netif_receive_skb(new_skb);
-		spin_unlock(&tp->state_lock);
+		if (rx->rxStatus & TYPHOON_RX_VLAN)
+			__vlan_hwaccel_put_tag(new_skb,
+					       ntohl(rx->vlanTag) & 0xffff);
+		netif_receive_skb(new_skb);
 
		received++;
		budget--;
@@ -1989,11 +1958,9 @@ typhoon_start_runtime(struct typhoon *tp)
		goto error_out;
 
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
-	spin_lock_bh(&tp->state_lock);
	xp_cmd.parm2 = tp->offload;
	xp_cmd.parm3 = tp->offload;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
-	spin_unlock_bh(&tp->state_lock);
	if(err < 0)
		goto error_out;
 
@@ -2231,13 +2198,9 @@ typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
	if(!netif_running(dev))
		return 0;
 
-	spin_lock_bh(&tp->state_lock);
-	if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
-		spin_unlock_bh(&tp->state_lock);
-		netdev_err(dev, "cannot do WAKE_MAGIC with VLANS\n");
-		return -EBUSY;
-	}
-	spin_unlock_bh(&tp->state_lock);
+	/* TYPHOON_OFFLOAD_VLAN is always on now, so this doesn't work */
+	if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
+		netdev_warn(dev, "cannot do WAKE_MAGIC with VLAN offloading\n");
 
	netif_device_detach(dev);
 
@@ -2338,7 +2301,6 @@ static const struct net_device_ops typhoon_netdev_ops = {
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= typhoon_set_mac_address,
	.ndo_change_mtu		= eth_change_mtu,
-	.ndo_vlan_rx_register	= typhoon_vlan_rx_register,
 };
 
 static int __devinit
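With vlgrp and state_lock gone, typhoon's RX VLAN stripping is simply always on, and typhoon_set_flags() enforces that at the ethtool layer by refusing to clear ETH_FLAG_RXVLAN. A hedged, generic rendering of that guard for hardware whose firmware cannot stop stripping tags:

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    /* Sketch of an ethtool .set_flags handler: RX VLAN offload may not
     * be turned off (firmware strips tags regardless), while TX VLAN
     * insertion remains toggleable.
     */
    static int example_set_flags(struct net_device *dev, u32 data)
    {
	    if (!(data & ETH_FLAG_RXVLAN))
		    return -EINVAL;

	    return ethtool_op_set_flags(dev, data,
					ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
    }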
diff --git a/drivers/net/vmxnet3/upt1_defs.h b/drivers/net/vmxnet3/upt1_defs.h
index 37108fb226d3..969c751ee404 100644
--- a/drivers/net/vmxnet3/upt1_defs.h
+++ b/drivers/net/vmxnet3/upt1_defs.h
@@ -88,9 +88,9 @@ struct UPT1_RSSConf {
 
 /* features */
 enum {
-	UPT1_F_RXCSUM		= 0x0001,   /* rx csum verification */
-	UPT1_F_RSS		= 0x0002,
-	UPT1_F_RXVLAN		= 0x0004,   /* VLAN tag stripping */
-	UPT1_F_LRO		= 0x0008,
+	UPT1_F_RXCSUM		= cpu_to_le64(0x0001),   /* rx csum verification */
+	UPT1_F_RSS		= cpu_to_le64(0x0002),
+	UPT1_F_RXVLAN		= cpu_to_le64(0x0004),   /* VLAN tag stripping */
+	UPT1_F_LRO		= cpu_to_le64(0x0008),
 };
 #endif
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index ca7727b940ad..4d84912c99ba 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -523,9 +523,9 @@ struct Vmxnet3_RxFilterConf {
 #define VMXNET3_PM_MAX_PATTERN_SIZE   128
 #define VMXNET3_PM_MAX_MASK_SIZE      (VMXNET3_PM_MAX_PATTERN_SIZE / 8)
 
-#define VMXNET3_PM_WAKEUP_MAGIC       0x01  /* wake up on magic pkts */
-#define VMXNET3_PM_WAKEUP_FILTER      0x02  /* wake up on pkts matching
-					     * filters */
+#define VMXNET3_PM_WAKEUP_MAGIC       cpu_to_le16(0x01)  /* wake up on magic pkts */
+#define VMXNET3_PM_WAKEUP_FILTER      cpu_to_le16(0x02)  /* wake up on pkts matching
							  * filters */
 
 
 struct Vmxnet3_PM_PktFilter {
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 3f60e0e3097b..e3658e10db39 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1563,8 +1563,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
			adapter->vlan_grp = grp;
 
			/* update FEATURES to device */
-			set_flag_le64(&devRead->misc.uptFeatures,
-				      UPT1_F_RXVLAN);
+			devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_FEATURE);
			/*
@@ -1587,7 +1586,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
		struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
		adapter->vlan_grp = NULL;
 
-		if (le64_to_cpu(devRead->misc.uptFeatures) & UPT1_F_RXVLAN) {
+		if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) {
			int i;
 
			for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
@@ -1600,8 +1599,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
					       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
 
			/* update FEATURES to device */
-			reset_flag_le64(&devRead->misc.uptFeatures,
-					UPT1_F_RXVLAN);
+			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_FEATURE);
		}
@@ -1762,15 +1760,15 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
 
	/* set up feature flags */
	if (adapter->rxcsum)
-		set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXCSUM);
+		devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
 
	if (adapter->lro) {
-		set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_LRO);
+		devRead->misc.uptFeatures |= UPT1_F_LRO;
		devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
	}
	if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) &&
	    adapter->vlan_grp) {
-		set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXVLAN);
+		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
	}
 
	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
@@ -2577,7 +2575,7 @@ vmxnet3_suspend(struct device *device)
		memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
		pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
 
-		set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
+		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}
 
@@ -2619,13 +2617,13 @@ vmxnet3_suspend(struct device *device)
		pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
		in_dev_put(in_dev);
 
-		set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
+		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}
 
 skip_arp:
	if (adapter->wol & WAKE_MAGIC)
-		set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_MAGIC);
+		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
 
	pmConf->numFilters = i;
 
@@ -2667,7 +2665,7 @@ vmxnet3_resume(struct device *device)
	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
		*pmConf));
-	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le32(virt_to_phys(
+	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
		pmConf));
 
	netif_device_attach(netdev);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 7e4b5a89165a..b79070bcc92e 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -50,13 +50,11 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
50 adapter->rxcsum = val; 50 adapter->rxcsum = val;
51 if (netif_running(netdev)) { 51 if (netif_running(netdev)) {
52 if (val) 52 if (val)
53 set_flag_le64( 53 adapter->shared->devRead.misc.uptFeatures |=
54 &adapter->shared->devRead.misc.uptFeatures, 54 UPT1_F_RXCSUM;
55 UPT1_F_RXCSUM);
56 else 55 else
57 reset_flag_le64( 56 adapter->shared->devRead.misc.uptFeatures &=
58 &adapter->shared->devRead.misc.uptFeatures, 57 ~UPT1_F_RXCSUM;
59 UPT1_F_RXCSUM);
60 58
61 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 59 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
62 VMXNET3_CMD_UPDATE_FEATURE); 60 VMXNET3_CMD_UPDATE_FEATURE);
@@ -292,10 +290,10 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
292 /* update hardware LRO capability accordingly */ 290 /* update hardware LRO capability accordingly */
293 if (lro_requested) 291 if (lro_requested)
294 adapter->shared->devRead.misc.uptFeatures |= 292 adapter->shared->devRead.misc.uptFeatures |=
295 cpu_to_le64(UPT1_F_LRO); 293 UPT1_F_LRO;
296 else 294 else
297 adapter->shared->devRead.misc.uptFeatures &= 295 adapter->shared->devRead.misc.uptFeatures &=
298 cpu_to_le64(~UPT1_F_LRO); 296 ~UPT1_F_LRO;
299 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 297 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
300 VMXNET3_CMD_UPDATE_FEATURE); 298 VMXNET3_CMD_UPDATE_FEATURE);
301 } 299 }
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index c88ea5cbba0d..8a2f4712284c 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -301,8 +301,8 @@ struct vmxnet3_adapter {
301 struct net_device *netdev; 301 struct net_device *netdev;
302 struct pci_dev *pdev; 302 struct pci_dev *pdev;
303 303
304 u8 *hw_addr0; /* for BAR 0 */ 304 u8 __iomem *hw_addr0; /* for BAR 0 */
305 u8 *hw_addr1; /* for BAR 1 */ 305 u8 __iomem *hw_addr1; /* for BAR 1 */
306 306
307 /* feature control */ 307 /* feature control */
308 bool rxcsum; 308 bool rxcsum;
@@ -353,21 +353,6 @@ struct vmxnet3_adapter {
353#define VMXNET3_MAX_ETH_HDR_SIZE 22 353#define VMXNET3_MAX_ETH_HDR_SIZE 22
354#define VMXNET3_MAX_SKB_BUF_SIZE (3*1024) 354#define VMXNET3_MAX_SKB_BUF_SIZE (3*1024)
355 355
356static inline void set_flag_le16(__le16 *data, u16 flag)
357{
358 *data = cpu_to_le16(le16_to_cpu(*data) | flag);
359}
360
361static inline void set_flag_le64(__le64 *data, u64 flag)
362{
363 *data = cpu_to_le64(le64_to_cpu(*data) | flag);
364}
365
366static inline void reset_flag_le64(__le64 *data, u64 flag)
367{
368 *data = cpu_to_le64(le64_to_cpu(*data) & ~flag);
369}
370
371int 356int
372vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter); 357vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);
373 358
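
The vmxnet3_int.h hunk annotates both BAR pointers with __iomem. The attribute is erased at compile time, but it lets sparse verify statically that MMIO is only touched through readl()/writel() and friends rather than by direct dereference. A minimal illustration with a made-up adapter:

    #include <linux/io.h>
    #include <linux/types.h>

    struct sketch_adapter {
            u8 __iomem *bar0;       /* mapped via pci_iomap()/ioremap() */
    };

    static u32 sketch_read_reg(struct sketch_adapter *ad, unsigned long off)
    {
            return readl(ad->bar0 + off);   /* ok: accessor takes __iomem */
    }

    static void sketch_write_reg(struct sketch_adapter *ad,
                                 unsigned long off, u32 val)
    {
            writel(val, ad->bar0 + off);
    }
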
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 0e6db5935609..906a3ca3676b 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -20,6 +20,179 @@
20#include "vxge-traffic.h" 20#include "vxge-traffic.h"
21#include "vxge-config.h" 21#include "vxge-config.h"
22 22
23static enum vxge_hw_status
24__vxge_hw_fifo_create(
25 struct __vxge_hw_vpath_handle *vpath_handle,
26 struct vxge_hw_fifo_attr *attr);
27
28static enum vxge_hw_status
29__vxge_hw_fifo_abort(
30 struct __vxge_hw_fifo *fifoh);
31
32static enum vxge_hw_status
33__vxge_hw_fifo_reset(
34 struct __vxge_hw_fifo *ringh);
35
36static enum vxge_hw_status
37__vxge_hw_fifo_delete(
38 struct __vxge_hw_vpath_handle *vpath_handle);
39
40static struct __vxge_hw_blockpool_entry *
41__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
42 u32 size);
43
44static void
45__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
46 struct __vxge_hw_blockpool_entry *entry);
47
48static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
49 void *block_addr,
50 u32 length,
51 struct pci_dev *dma_h,
52 struct pci_dev *acc_handle);
53
54static enum vxge_hw_status
55__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
56 struct __vxge_hw_blockpool *blockpool,
57 u32 pool_size,
58 u32 pool_max);
59
60static void
61__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool);
62
63static void *
64__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
65 u32 size,
66 struct vxge_hw_mempool_dma *dma_object);
67
68static void
69__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
70 void *memblock,
71 u32 size,
72 struct vxge_hw_mempool_dma *dma_object);
73
74
75static struct __vxge_hw_channel*
76__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
77 enum __vxge_hw_channel_type type, u32 length,
78 u32 per_dtr_space, void *userdata);
79
80static void
81__vxge_hw_channel_free(
82 struct __vxge_hw_channel *channel);
83
84static enum vxge_hw_status
85__vxge_hw_channel_initialize(
86 struct __vxge_hw_channel *channel);
87
88static enum vxge_hw_status
89__vxge_hw_channel_reset(
90 struct __vxge_hw_channel *channel);
91
92static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp);
93
94static enum vxge_hw_status
95__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
96
97static enum vxge_hw_status
98__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
99
100static void
101__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
102
103static void
104__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
105
106static enum vxge_hw_status
107__vxge_hw_vpath_card_info_get(
108 u32 vp_id,
109 struct vxge_hw_vpath_reg __iomem *vpath_reg,
110 struct vxge_hw_device_hw_info *hw_info);
111
112static enum vxge_hw_status
113__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
114
115static void
116__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
117
118static enum vxge_hw_status
119__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
120
121static enum vxge_hw_status
122__vxge_hw_device_register_poll(
123 void __iomem *reg,
124 u64 mask, u32 max_millis);
125
126static inline enum vxge_hw_status
127__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
128 u64 mask, u32 max_millis)
129{
130 __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
131 wmb();
132
133 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
134 wmb();
135
136 return __vxge_hw_device_register_poll(addr, mask, max_millis);
137}
138
139static struct vxge_hw_mempool*
140__vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size,
141 u32 item_size, u32 private_size, u32 items_initial,
142 u32 items_max, struct vxge_hw_mempool_cbs *mp_callback,
143 void *userdata);
144static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool);
145
146static enum vxge_hw_status
147__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
148 struct vxge_hw_vpath_stats_hw_info *hw_stats);
149
150static enum vxge_hw_status
151vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vpath_handle);
152
153static enum vxge_hw_status
154__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
155
156static u64
157__vxge_hw_vpath_pci_func_mode_get(u32 vp_id,
158 struct vxge_hw_vpath_reg __iomem *vpath_reg);
159
160static u32
161__vxge_hw_vpath_func_id_get(u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
162
163static enum vxge_hw_status
164__vxge_hw_vpath_addr_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
165 u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]);
166
167static enum vxge_hw_status
168__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
169
170
171static enum vxge_hw_status
172__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *devh, u32 vp_id);
173
174static enum vxge_hw_status
175__vxge_hw_vpath_fw_ver_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
176 struct vxge_hw_device_hw_info *hw_info);
177
178static enum vxge_hw_status
179__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *devh, u32 vp_id);
180
181static void
182__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id);
183
184static enum vxge_hw_status
185__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
186 u32 operation, u32 offset, u64 *stat);
187
188static enum vxge_hw_status
189__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
190 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
191
192static enum vxge_hw_status
193__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
194 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
195
23/* 196/*
24 * __vxge_hw_channel_allocate - Allocate memory for channel 197 * __vxge_hw_channel_allocate - Allocate memory for channel
25 * This function allocates required memory for the channel and various arrays 198 * This function allocates required memory for the channel and various arrays
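
Of the declarations moved above, __vxge_hw_pio_mem_write64() shows a common MMIO idiom: a 64-bit register that can only be programmed as two 32-bit halves, each followed by a write barrier so the halves reach the device in order, with a completion poll afterwards. A generic sketch of the split-write part (invented names; which half goes to which offset is device-specific):

    #include <linux/io.h>

    static void sketch_post_reg64(void __iomem *addr, u64 val64)
    {
            writel((u32)(val64 >> 32), addr);       /* first half */
            wmb();                                  /* keep ordering */
            writel((u32)val64, addr + 4);           /* second half */
            wmb();
            /* the real helper then polls a mask bit until the device acks */
    }
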
@@ -190,7 +363,7 @@ __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
190 * Will poll a certain register for a specified amount of time. 363 * Will poll a certain register for a specified amount of time.
191 * Will poll until masked bit is not cleared. 364 * Will poll until masked bit is not cleared.
192 */ 365 */
193enum vxge_hw_status 366static enum vxge_hw_status
194__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis) 367__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
195{ 368{
196 u64 val64; 369 u64 val64;
@@ -221,7 +394,7 @@ __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
221 * in progress 394 * in progress
222 * This routine checks the vpath reset in progress register is turned zero 395 * This routine checks the vpath reset in progress register is turned zero
223 */ 396 */
224enum vxge_hw_status 397static enum vxge_hw_status
225__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog) 398__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
226{ 399{
227 enum vxge_hw_status status; 400 enum vxge_hw_status status;
@@ -236,7 +409,7 @@ __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
236 * This routine sets the swapper and reads the toc pointer and returns the 409 * This routine sets the swapper and reads the toc pointer and returns the
237 * memory mapped address of the toc 410 * memory mapped address of the toc
238 */ 411 */
239struct vxge_hw_toc_reg __iomem * 412static struct vxge_hw_toc_reg __iomem *
240__vxge_hw_device_toc_get(void __iomem *bar0) 413__vxge_hw_device_toc_get(void __iomem *bar0)
241{ 414{
242 u64 val64; 415 u64 val64;
@@ -779,7 +952,7 @@ exit:
779 * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port 952 * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
780 * Get the Statistics on aggregate port 953 * Get the Statistics on aggregate port
781 */ 954 */
782enum vxge_hw_status 955static enum vxge_hw_status
783vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port, 956vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
784 struct vxge_hw_xmac_aggr_stats *aggr_stats) 957 struct vxge_hw_xmac_aggr_stats *aggr_stats)
785{ 958{
@@ -814,7 +987,7 @@ exit:
814 * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port 987 * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
815 * Get the Statistics on port 988 * Get the Statistics on port
816 */ 989 */
817enum vxge_hw_status 990static enum vxge_hw_status
818vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port, 991vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
819 struct vxge_hw_xmac_port_stats *port_stats) 992 struct vxge_hw_xmac_port_stats *port_stats)
820{ 993{
@@ -952,20 +1125,6 @@ u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
952 return 0; 1125 return 0;
953#endif 1126#endif
954} 1127}
955/*
956 * vxge_hw_device_debug_mask_get - Get the debug mask
957 * This routine returns the current debug mask set
958 */
959u32 vxge_hw_device_debug_mask_get(struct __vxge_hw_device *hldev)
960{
961#if defined(VXGE_DEBUG_TRACE_MASK) || defined(VXGE_DEBUG_ERR_MASK)
962 if (hldev == NULL)
963 return 0;
964 return hldev->debug_module_mask;
965#else
966 return 0;
967#endif
968}
969 1128
970/* 1129/*
971 * vxge_hw_getpause_data - Pause frame generation and reception. 1130 * vxge_hw_getpause_data - Pause frame generation and reception.
@@ -1090,7 +1249,7 @@ __vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
1090 * first block 1249 * first block
1091 * Returns the dma address of the first RxD block 1250 * Returns the dma address of the first RxD block
1092 */ 1251 */
1093u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring) 1252static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
1094{ 1253{
1095 struct vxge_hw_mempool_dma *dma_object; 1254 struct vxge_hw_mempool_dma *dma_object;
1096 1255
@@ -1252,7 +1411,7 @@ exit:
1252 * This function creates Ring and initializes it. 1411 * This function creates Ring and initializes it.
1253 * 1412 *
1254 */ 1413 */
1255enum vxge_hw_status 1414static enum vxge_hw_status
1256__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp, 1415__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
1257 struct vxge_hw_ring_attr *attr) 1416 struct vxge_hw_ring_attr *attr)
1258{ 1417{
@@ -1363,7 +1522,7 @@ exit:
1363 * __vxge_hw_ring_abort - Returns the RxD 1522 * __vxge_hw_ring_abort - Returns the RxD
1364 * This function terminates the RxDs of ring 1523 * This function terminates the RxDs of ring
1365 */ 1524 */
1366enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring) 1525static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
1367{ 1526{
1368 void *rxdh; 1527 void *rxdh;
1369 struct __vxge_hw_channel *channel; 1528 struct __vxge_hw_channel *channel;
@@ -1392,7 +1551,7 @@ enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
1392 * __vxge_hw_ring_reset - Resets the ring 1551 * __vxge_hw_ring_reset - Resets the ring
1393 * This function resets the ring during vpath reset operation 1552 * This function resets the ring during vpath reset operation
1394 */ 1553 */
1395enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring) 1554static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
1396{ 1555{
1397 enum vxge_hw_status status = VXGE_HW_OK; 1556 enum vxge_hw_status status = VXGE_HW_OK;
1398 struct __vxge_hw_channel *channel; 1557 struct __vxge_hw_channel *channel;
@@ -1419,7 +1578,7 @@ exit:
1419 * __vxge_hw_ring_delete - Removes the ring 1578 * __vxge_hw_ring_delete - Removes the ring
1420 * This function frees up the memory pool and removes the ring 1579 * This function frees up the memory pool and removes the ring
1421 */ 1580 */
1422enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp) 1581static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
1423{ 1582{
1424 struct __vxge_hw_ring *ring = vp->vpath->ringh; 1583 struct __vxge_hw_ring *ring = vp->vpath->ringh;
1425 1584
@@ -1438,7 +1597,7 @@ enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
1438 * __vxge_hw_mempool_grow 1597 * __vxge_hw_mempool_grow
1439 * Will resize mempool up to %num_allocate value. 1598 * Will resize mempool up to %num_allocate value.
1440 */ 1599 */
1441enum vxge_hw_status 1600static enum vxge_hw_status
1442__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate, 1601__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
1443 u32 *num_allocated) 1602 u32 *num_allocated)
1444{ 1603{
@@ -1527,7 +1686,7 @@ exit:
1527 * with size enough to hold %items_initial number of items. Memory is 1686 * with size enough to hold %items_initial number of items. Memory is
1528 * DMA-able but client must map/unmap before interoperating with the device. 1687 * DMA-able but client must map/unmap before interoperating with the device.
1529 */ 1688 */
1530struct vxge_hw_mempool* 1689static struct vxge_hw_mempool*
1531__vxge_hw_mempool_create( 1690__vxge_hw_mempool_create(
1532 struct __vxge_hw_device *devh, 1691 struct __vxge_hw_device *devh,
1533 u32 memblock_size, 1692 u32 memblock_size,
@@ -1644,7 +1803,7 @@ exit:
1644/* 1803/*
1645 * vxge_hw_mempool_destroy 1804 * vxge_hw_mempool_destroy
1646 */ 1805 */
1647void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool) 1806static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
1648{ 1807{
1649 u32 i, j; 1808 u32 i, j;
1650 struct __vxge_hw_device *devh = mempool->devh; 1809 struct __vxge_hw_device *devh = mempool->devh;
@@ -1700,7 +1859,7 @@ __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
1700 * __vxge_hw_device_vpath_config_check - Check vpath configuration. 1859 * __vxge_hw_device_vpath_config_check - Check vpath configuration.
1701 * Check the vpath configuration 1860 * Check the vpath configuration
1702 */ 1861 */
1703enum vxge_hw_status 1862static enum vxge_hw_status
1704__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config) 1863__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
1705{ 1864{
1706 enum vxge_hw_status status; 1865 enum vxge_hw_status status;
@@ -1922,7 +2081,7 @@ vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
1922 * _hw_legacy_swapper_set - Set the swapper bits for the legacy section. 2081 * _hw_legacy_swapper_set - Set the swapper bits for the legacy section.
1923 * Set the swapper bits appropriately for the legacy section. 2082 * Set the swapper bits appropriately for the legacy section.
1924 */ 2083 */
1925enum vxge_hw_status 2084static enum vxge_hw_status
1926__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg) 2085__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
1927{ 2086{
1928 u64 val64; 2087 u64 val64;
@@ -1977,7 +2136,7 @@ __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
1977 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath. 2136 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
1978 * Set the swapper bits appropriately for the vpath. 2137 * Set the swapper bits appropriately for the vpath.
1979 */ 2138 */
1980enum vxge_hw_status 2139static enum vxge_hw_status
1981__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg) 2140__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
1982{ 2141{
1983#ifndef __BIG_ENDIAN 2142#ifndef __BIG_ENDIAN
@@ -1996,7 +2155,7 @@ __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
1996 * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc. 2155 * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
1997 * Set the swapper bits appropriately for the vpath. 2156 * Set the swapper bits appropriately for the vpath.
1998 */ 2157 */
1999enum vxge_hw_status 2158static enum vxge_hw_status
2000__vxge_hw_kdfc_swapper_set( 2159__vxge_hw_kdfc_swapper_set(
2001 struct vxge_hw_legacy_reg __iomem *legacy_reg, 2160 struct vxge_hw_legacy_reg __iomem *legacy_reg,
2002 struct vxge_hw_vpath_reg __iomem *vpath_reg) 2161 struct vxge_hw_vpath_reg __iomem *vpath_reg)
@@ -2021,28 +2180,6 @@ __vxge_hw_kdfc_swapper_set(
2021} 2180}
2022 2181
2023/* 2182/*
2024 * vxge_hw_mgmt_device_config - Retrieve device configuration.
2025 * Get device configuration. Permits to retrieve at run-time configuration
2026 * values that were used to initialize and configure the device.
2027 */
2028enum vxge_hw_status
2029vxge_hw_mgmt_device_config(struct __vxge_hw_device *hldev,
2030 struct vxge_hw_device_config *dev_config, int size)
2031{
2032
2033 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC))
2034 return VXGE_HW_ERR_INVALID_DEVICE;
2035
2036 if (size != sizeof(struct vxge_hw_device_config))
2037 return VXGE_HW_ERR_VERSION_CONFLICT;
2038
2039 memcpy(dev_config, &hldev->config,
2040 sizeof(struct vxge_hw_device_config));
2041
2042 return VXGE_HW_OK;
2043}
2044
2045/*
2046 * vxge_hw_mgmt_reg_read - Read Titan register. 2183 * vxge_hw_mgmt_reg_read - Read Titan register.
2047 */ 2184 */
2048enum vxge_hw_status 2185enum vxge_hw_status
@@ -2438,7 +2575,7 @@ exit:
2438 * __vxge_hw_fifo_abort - Returns the TxD 2575 * __vxge_hw_fifo_abort - Returns the TxD
2439 * This function terminates the TxDs of fifo 2576 * This function terminates the TxDs of fifo
2440 */ 2577 */
2441enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo) 2578static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
2442{ 2579{
2443 void *txdlh; 2580 void *txdlh;
2444 2581
@@ -2466,7 +2603,7 @@ enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
2466 * __vxge_hw_fifo_reset - Resets the fifo 2603 * __vxge_hw_fifo_reset - Resets the fifo
2467 * This function resets the fifo during vpath reset operation 2604 * This function resets the fifo during vpath reset operation
2468 */ 2605 */
2469enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo) 2606static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
2470{ 2607{
2471 enum vxge_hw_status status = VXGE_HW_OK; 2608 enum vxge_hw_status status = VXGE_HW_OK;
2472 2609
@@ -2501,7 +2638,7 @@ enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
2501 * in pci config space. 2638 * in pci config space.
2502 * Read from the vpath pci config space. 2639 * Read from the vpath pci config space.
2503 */ 2640 */
2504enum vxge_hw_status 2641static enum vxge_hw_status
2505__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath, 2642__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
2506 u32 phy_func_0, u32 offset, u32 *val) 2643 u32 phy_func_0, u32 offset, u32 *val)
2507{ 2644{
@@ -2542,7 +2679,7 @@ exit:
2542 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath. 2679 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
2543 * Returns the function number of the vpath. 2680 * Returns the function number of the vpath.
2544 */ 2681 */
2545u32 2682static u32
2546__vxge_hw_vpath_func_id_get(u32 vp_id, 2683__vxge_hw_vpath_func_id_get(u32 vp_id,
2547 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg) 2684 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
2548{ 2685{
@@ -2573,7 +2710,7 @@ __vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
2573 * __vxge_hw_vpath_card_info_get - Get the serial numbers, 2710 * __vxge_hw_vpath_card_info_get - Get the serial numbers,
2574 * part number and product description. 2711 * part number and product description.
2575 */ 2712 */
2576enum vxge_hw_status 2713static enum vxge_hw_status
2577__vxge_hw_vpath_card_info_get( 2714__vxge_hw_vpath_card_info_get(
2578 u32 vp_id, 2715 u32 vp_id,
2579 struct vxge_hw_vpath_reg __iomem *vpath_reg, 2716 struct vxge_hw_vpath_reg __iomem *vpath_reg,
@@ -2695,7 +2832,7 @@ __vxge_hw_vpath_card_info_get(
2695 * __vxge_hw_vpath_fw_ver_get - Get the fw version 2832 * __vxge_hw_vpath_fw_ver_get - Get the fw version
2696 * Returns FW Version 2833 * Returns FW Version
2697 */ 2834 */
2698enum vxge_hw_status 2835static enum vxge_hw_status
2699__vxge_hw_vpath_fw_ver_get( 2836__vxge_hw_vpath_fw_ver_get(
2700 u32 vp_id, 2837 u32 vp_id,
2701 struct vxge_hw_vpath_reg __iomem *vpath_reg, 2838 struct vxge_hw_vpath_reg __iomem *vpath_reg,
@@ -2789,7 +2926,7 @@ exit:
2789 * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode 2926 * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
2790 * Returns pci function mode 2927 * Returns pci function mode
2791 */ 2928 */
2792u64 2929static u64
2793__vxge_hw_vpath_pci_func_mode_get( 2930__vxge_hw_vpath_pci_func_mode_get(
2794 u32 vp_id, 2931 u32 vp_id,
2795 struct vxge_hw_vpath_reg __iomem *vpath_reg) 2932 struct vxge_hw_vpath_reg __iomem *vpath_reg)
@@ -2995,7 +3132,7 @@ exit:
2995 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath 3132 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
2996 * from MAC address table. 3133 * from MAC address table.
2997 */ 3134 */
2998enum vxge_hw_status 3135static enum vxge_hw_status
2999__vxge_hw_vpath_addr_get( 3136__vxge_hw_vpath_addr_get(
3000 u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg, 3137 u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
3001 u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]) 3138 u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
@@ -3347,7 +3484,7 @@ __vxge_hw_vpath_mgmt_read(
3347 * This routine checks the vpath_rst_in_prog register to see if 3484 * This routine checks the vpath_rst_in_prog register to see if
3348 * the adapter completed the reset process for the vpath 3485 * the adapter completed the reset process for the vpath
3349 */ 3486 */
3350enum vxge_hw_status 3487static enum vxge_hw_status
3351__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath) 3488__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
3352{ 3489{
3353 enum vxge_hw_status status; 3490 enum vxge_hw_status status;
@@ -3365,7 +3502,7 @@ __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
3365 * __vxge_hw_vpath_reset 3502 * __vxge_hw_vpath_reset
3366 * This routine resets the vpath on the device 3503 * This routine resets the vpath on the device
3367 */ 3504 */
3368enum vxge_hw_status 3505static enum vxge_hw_status
3369__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id) 3506__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
3370{ 3507{
3371 u64 val64; 3508 u64 val64;
@@ -3383,7 +3520,7 @@ __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
3383 * __vxge_hw_vpath_sw_reset 3520 * __vxge_hw_vpath_sw_reset
3384 * This routine resets the vpath structures 3521 * This routine resets the vpath structures
3385 */ 3522 */
3386enum vxge_hw_status 3523static enum vxge_hw_status
3387__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id) 3524__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
3388{ 3525{
3389 enum vxge_hw_status status = VXGE_HW_OK; 3526 enum vxge_hw_status status = VXGE_HW_OK;
@@ -3408,7 +3545,7 @@ exit:
3408 * This routine configures the prc registers of virtual path using the config 3545 * This routine configures the prc registers of virtual path using the config
3409 * passed 3546 * passed
3410 */ 3547 */
3411void 3548static void
3412__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id) 3549__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3413{ 3550{
3414 u64 val64; 3551 u64 val64;
@@ -3480,7 +3617,7 @@ __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3480 * This routine configures the kdfc registers of virtual path using the 3617 * This routine configures the kdfc registers of virtual path using the
3481 * config passed 3618 * config passed
3482 */ 3619 */
3483enum vxge_hw_status 3620static enum vxge_hw_status
3484__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id) 3621__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3485{ 3622{
3486 u64 val64; 3623 u64 val64;
@@ -3553,7 +3690,7 @@ exit:
3553 * __vxge_hw_vpath_mac_configure 3690 * __vxge_hw_vpath_mac_configure
3554 * This routine configures the mac of virtual path using the config passed 3691 * This routine configures the mac of virtual path using the config passed
3555 */ 3692 */
3556enum vxge_hw_status 3693static enum vxge_hw_status
3557__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id) 3694__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3558{ 3695{
3559 u64 val64; 3696 u64 val64;
@@ -3621,7 +3758,7 @@ __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3621 * This routine configures the tim registers of virtual path using the config 3758 * This routine configures the tim registers of virtual path using the config
3622 * passed 3759 * passed
3623 */ 3760 */
3624enum vxge_hw_status 3761static enum vxge_hw_status
3625__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) 3762__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3626{ 3763{
3627 u64 val64; 3764 u64 val64;
@@ -3897,7 +4034,7 @@ vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
3897 * This routine is the final phase of init which initializes the 4034 * This routine is the final phase of init which initializes the
3898 * registers of the vpath using the configuration passed. 4035 * registers of the vpath using the configuration passed.
3899 */ 4036 */
3900enum vxge_hw_status 4037static enum vxge_hw_status
3901__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id) 4038__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
3902{ 4039{
3903 u64 val64; 4040 u64 val64;
@@ -3966,7 +4103,7 @@ exit:
3966 * This routine is the initial phase of init which resets the vpath and 4103 * This routine is the initial phase of init which resets the vpath and
3967 * initializes the software support structures. 4104 * initializes the software support structures.
3968 */ 4105 */
3969enum vxge_hw_status 4106static enum vxge_hw_status
3970__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id, 4107__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
3971 struct vxge_hw_vp_config *config) 4108 struct vxge_hw_vp_config *config)
3972{ 4109{
@@ -4022,7 +4159,7 @@ exit:
4022 * __vxge_hw_vp_terminate - Terminate Virtual Path structure 4159 * __vxge_hw_vp_terminate - Terminate Virtual Path structure
4023 * This routine closes all channels it opened and frees up memory 4160 * This routine closes all channels it opened and frees up memory
4024 */ 4161 */
4025void 4162static void
4026__vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id) 4163__vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
4027{ 4164{
4028 struct __vxge_hw_virtualpath *vpath; 4165 struct __vxge_hw_virtualpath *vpath;
@@ -4384,7 +4521,7 @@ vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
4384 * Enable the DMA vpath statistics. The function is to be called to re-enable 4521 * Enable the DMA vpath statistics. The function is to be called to re-enable
4385 * the adapter to update stats into the host memory 4522 * the adapter to update stats into the host memory
4386 */ 4523 */
4387enum vxge_hw_status 4524static enum vxge_hw_status
4388vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp) 4525vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
4389{ 4526{
4390 enum vxge_hw_status status = VXGE_HW_OK; 4527 enum vxge_hw_status status = VXGE_HW_OK;
@@ -4409,7 +4546,7 @@ exit:
4409 * __vxge_hw_vpath_stats_access - Get the statistics from the given location 4546 * __vxge_hw_vpath_stats_access - Get the statistics from the given location
4410 * and offset and perform an operation 4547 * and offset and perform an operation
4411 */ 4548 */
4412enum vxge_hw_status 4549static enum vxge_hw_status
4413__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath, 4550__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
4414 u32 operation, u32 offset, u64 *stat) 4551 u32 operation, u32 offset, u64 *stat)
4415{ 4552{
@@ -4445,7 +4582,7 @@ vpath_stats_access_exit:
4445/* 4582/*
4446 * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath 4583 * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
4447 */ 4584 */
4448enum vxge_hw_status 4585static enum vxge_hw_status
4449__vxge_hw_vpath_xmac_tx_stats_get( 4586__vxge_hw_vpath_xmac_tx_stats_get(
4450 struct __vxge_hw_virtualpath *vpath, 4587 struct __vxge_hw_virtualpath *vpath,
4451 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats) 4588 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
@@ -4478,9 +4615,9 @@ exit:
4478/* 4615/*
4479 * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath 4616 * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
4480 */ 4617 */
4481enum vxge_hw_status 4618static enum vxge_hw_status
4482__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath, 4619__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
4483 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats) 4620 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
4484{ 4621{
4485 u64 *val64; 4622 u64 *val64;
4486 enum vxge_hw_status status = VXGE_HW_OK; 4623 enum vxge_hw_status status = VXGE_HW_OK;
@@ -4509,9 +4646,9 @@ exit:
4509/* 4646/*
4510 * __vxge_hw_vpath_stats_get - Get the vpath hw statistics. 4647 * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
4511 */ 4648 */
4512enum vxge_hw_status __vxge_hw_vpath_stats_get( 4649static enum vxge_hw_status
4513 struct __vxge_hw_virtualpath *vpath, 4650__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
4514 struct vxge_hw_vpath_stats_hw_info *hw_stats) 4651 struct vxge_hw_vpath_stats_hw_info *hw_stats)
4515{ 4652{
4516 u64 val64; 4653 u64 val64;
4517 enum vxge_hw_status status = VXGE_HW_OK; 4654 enum vxge_hw_status status = VXGE_HW_OK;
@@ -4643,6 +4780,32 @@ exit:
4643 return status; 4780 return status;
4644} 4781}
4645 4782
4783
4784static void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
4785 unsigned long size)
4786{
4787 gfp_t flags;
4788 void *vaddr;
4789
4790 if (in_interrupt())
4791 flags = GFP_ATOMIC | GFP_DMA;
4792 else
4793 flags = GFP_KERNEL | GFP_DMA;
4794
4795 vaddr = kmalloc((size), flags);
4796
4797 vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
4798}
4799
4800static void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
4801 struct pci_dev **p_dma_acch)
4802{
4803 unsigned long misaligned = *(unsigned long *)p_dma_acch;
4804 u8 *tmp = (u8 *)vaddr;
4805 tmp -= misaligned;
4806 kfree((void *)tmp);
4807}
4808
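
vxge_os_dma_malloc_async(), now file-local, chooses its allocation flags by calling context: GFP_KERNEL may sleep, so it is only legal in process context, while interrupt context must fall back to GFP_ATOMIC. A condensed sketch of the rule (later kernels discourage in_interrupt() tests in favour of passing a gfp_t down from the caller, but this was the idiom of the era):

    #include <linux/hardirq.h>
    #include <linux/slab.h>

    static void *sketch_context_safe_alloc(unsigned long size)
    {
            /* GFP_ATOMIC never sleeps; GFP_KERNEL may. */
            gfp_t flags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;

            return kmalloc(size, flags | GFP_DMA);
    }
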
4646/* 4809/*
4647 * __vxge_hw_blockpool_create - Create block pool 4810 * __vxge_hw_blockpool_create - Create block pool
4648 */ 4811 */
@@ -4845,12 +5008,11 @@ void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
4845 * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async 5008 * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
4846 * Adds a block to block pool 5009 * Adds a block to block pool
4847 */ 5010 */
4848void vxge_hw_blockpool_block_add( 5011static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
4849 struct __vxge_hw_device *devh, 5012 void *block_addr,
4850 void *block_addr, 5013 u32 length,
4851 u32 length, 5014 struct pci_dev *dma_h,
4852 struct pci_dev *dma_h, 5015 struct pci_dev *acc_handle)
4853 struct pci_dev *acc_handle)
4854{ 5016{
4855 struct __vxge_hw_blockpool *blockpool; 5017 struct __vxge_hw_blockpool *blockpool;
4856 struct __vxge_hw_blockpool_entry *entry = NULL; 5018 struct __vxge_hw_blockpool_entry *entry = NULL;
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index 1a94343023cb..5c00861b6c2c 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -183,11 +183,6 @@ struct vxge_hw_device_version {
183 char version[VXGE_HW_FW_STRLEN]; 183 char version[VXGE_HW_FW_STRLEN];
184}; 184};
185 185
186u64
187__vxge_hw_vpath_pci_func_mode_get(
188 u32 vp_id,
189 struct vxge_hw_vpath_reg __iomem *vpath_reg);
190
191/** 186/**
192 * struct vxge_hw_fifo_config - Configuration of fifo. 187 * struct vxge_hw_fifo_config - Configuration of fifo.
193 * @enable: Is this fifo to be commissioned 188 * @enable: Is this fifo to be commissioned
@@ -1426,9 +1421,6 @@ struct vxge_hw_rth_hash_types {
1426 u8 hash_type_ipv6ex_en; 1421 u8 hash_type_ipv6ex_en;
1427}; 1422};
1428 1423
1429u32
1430vxge_hw_device_debug_mask_get(struct __vxge_hw_device *devh);
1431
1432void vxge_hw_device_debug_set( 1424void vxge_hw_device_debug_set(
1433 struct __vxge_hw_device *devh, 1425 struct __vxge_hw_device *devh,
1434 enum vxge_debug_level level, 1426 enum vxge_debug_level level,
@@ -1440,9 +1432,6 @@ vxge_hw_device_error_level_get(struct __vxge_hw_device *devh);
1440u32 1432u32
1441vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh); 1433vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh);
1442 1434
1443u32
1444vxge_hw_device_debug_mask_get(struct __vxge_hw_device *devh);
1445
1446/** 1435/**
1447 * vxge_hw_ring_rxd_size_get - Get the size of ring descriptor. 1436 * vxge_hw_ring_rxd_size_get - Get the size of ring descriptor.
1448 * @buf_mode: Buffer mode (1, 3 or 5) 1437 * @buf_mode: Buffer mode (1, 3 or 5)
@@ -1817,60 +1806,10 @@ struct vxge_hw_vpath_attr {
1817 struct vxge_hw_fifo_attr fifo_attr; 1806 struct vxge_hw_fifo_attr fifo_attr;
1818}; 1807};
1819 1808
1820enum vxge_hw_status
1821__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
1822 struct __vxge_hw_blockpool *blockpool,
1823 u32 pool_size,
1824 u32 pool_max);
1825
1826void
1827__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool);
1828
1829struct __vxge_hw_blockpool_entry *
1830__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
1831 u32 size);
1832
1833void
1834__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
1835 struct __vxge_hw_blockpool_entry *entry);
1836
1837void *
1838__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
1839 u32 size,
1840 struct vxge_hw_mempool_dma *dma_object);
1841
1842void
1843__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
1844 void *memblock,
1845 u32 size,
1846 struct vxge_hw_mempool_dma *dma_object);
1847
1848enum vxge_hw_status
1849__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
1850
1851enum vxge_hw_status
1852__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
1853
1854enum vxge_hw_status
1855vxge_hw_mgmt_device_config(struct __vxge_hw_device *devh,
1856 struct vxge_hw_device_config *dev_config, int size);
1857
1858enum vxge_hw_status __devinit vxge_hw_device_hw_info_get( 1809enum vxge_hw_status __devinit vxge_hw_device_hw_info_get(
1859 void __iomem *bar0, 1810 void __iomem *bar0,
1860 struct vxge_hw_device_hw_info *hw_info); 1811 struct vxge_hw_device_hw_info *hw_info);
1861 1812
1862enum vxge_hw_status
1863__vxge_hw_vpath_fw_ver_get(
1864 u32 vp_id,
1865 struct vxge_hw_vpath_reg __iomem *vpath_reg,
1866 struct vxge_hw_device_hw_info *hw_info);
1867
1868enum vxge_hw_status
1869__vxge_hw_vpath_card_info_get(
1870 u32 vp_id,
1871 struct vxge_hw_vpath_reg __iomem *vpath_reg,
1872 struct vxge_hw_device_hw_info *hw_info);
1873
1874enum vxge_hw_status __devinit vxge_hw_device_config_default_get( 1813enum vxge_hw_status __devinit vxge_hw_device_config_default_get(
1875 struct vxge_hw_device_config *device_config); 1814 struct vxge_hw_device_config *device_config);
1876 1815
@@ -1954,38 +1893,6 @@ out:
1954 return vaddr; 1893 return vaddr;
1955} 1894}
1956 1895
1957extern void vxge_hw_blockpool_block_add(
1958 struct __vxge_hw_device *devh,
1959 void *block_addr,
1960 u32 length,
1961 struct pci_dev *dma_h,
1962 struct pci_dev *acc_handle);
1963
1964static inline void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
1965 unsigned long size)
1966{
1967 gfp_t flags;
1968 void *vaddr;
1969
1970 if (in_interrupt())
1971 flags = GFP_ATOMIC | GFP_DMA;
1972 else
1973 flags = GFP_KERNEL | GFP_DMA;
1974
1975 vaddr = kmalloc((size), flags);
1976
1977 vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
1978}
1979
1980static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
1981 struct pci_dev **p_dma_acch)
1982{
1983 unsigned long misaligned = *(unsigned long *)p_dma_acch;
1984 u8 *tmp = (u8 *)vaddr;
1985 tmp -= misaligned;
1986 kfree((void *)tmp);
1987}
1988
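
The removed vxge_os_dma_free() also explains its odd `misaligned` arithmetic: the allocation side over-allocates, rounds the returned pointer up to the required alignment, and records the offset so that the free path can step back to the address kmalloc() actually returned. A self-contained sketch of the bookkeeping, with invented names:

    #include <linux/kernel.h>
    #include <linux/slab.h>

    static void *sketch_alloc_aligned(size_t size, size_t align,
                                      unsigned long *offset)
    {
            u8 *raw = kmalloc(size + align, GFP_KERNEL);
            u8 *aligned;

            if (!raw)
                    return NULL;
            aligned = PTR_ALIGN(raw, align);
            *offset = aligned - raw;        /* remembered for the free path */
            return aligned;
    }

    static void sketch_free_aligned(void *vaddr, unsigned long offset)
    {
            kfree((u8 *)vaddr - offset);    /* back to kmalloc()'s pointer */
    }
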
1989/* 1896/*
1990 * __vxge_hw_mempool_item_priv - will return pointer on per item private space 1897 * __vxge_hw_mempool_item_priv - will return pointer on per item private space
1991 */ 1898 */
@@ -2010,40 +1917,6 @@ __vxge_hw_mempool_item_priv(
2010 (*memblock_item_idx) * mempool->items_priv_size; 1917 (*memblock_item_idx) * mempool->items_priv_size;
2011} 1918}
2012 1919
2013enum vxge_hw_status
2014__vxge_hw_mempool_grow(
2015 struct vxge_hw_mempool *mempool,
2016 u32 num_allocate,
2017 u32 *num_allocated);
2018
2019struct vxge_hw_mempool*
2020__vxge_hw_mempool_create(
2021 struct __vxge_hw_device *devh,
2022 u32 memblock_size,
2023 u32 item_size,
2024 u32 private_size,
2025 u32 items_initial,
2026 u32 items_max,
2027 struct vxge_hw_mempool_cbs *mp_callback,
2028 void *userdata);
2029
2030struct __vxge_hw_channel*
2031__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
2032 enum __vxge_hw_channel_type type, u32 length,
2033 u32 per_dtr_space, void *userdata);
2034
2035void
2036__vxge_hw_channel_free(
2037 struct __vxge_hw_channel *channel);
2038
2039enum vxge_hw_status
2040__vxge_hw_channel_initialize(
2041 struct __vxge_hw_channel *channel);
2042
2043enum vxge_hw_status
2044__vxge_hw_channel_reset(
2045 struct __vxge_hw_channel *channel);
2046
2047/* 1920/*
2048 * __vxge_hw_fifo_txdl_priv - Return the max fragments allocated 1921 * __vxge_hw_fifo_txdl_priv - Return the max fragments allocated
2049 * for the fifo. 1922 * for the fifo.
@@ -2065,9 +1938,6 @@ enum vxge_hw_status vxge_hw_vpath_open(
2065 struct vxge_hw_vpath_attr *attr, 1938 struct vxge_hw_vpath_attr *attr,
2066 struct __vxge_hw_vpath_handle **vpath_handle); 1939 struct __vxge_hw_vpath_handle **vpath_handle);
2067 1940
2068enum vxge_hw_status
2069__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog);
2070
2071enum vxge_hw_status vxge_hw_vpath_close( 1941enum vxge_hw_status vxge_hw_vpath_close(
2072 struct __vxge_hw_vpath_handle *vpath_handle); 1942 struct __vxge_hw_vpath_handle *vpath_handle);
2073 1943
@@ -2089,54 +1959,9 @@ enum vxge_hw_status vxge_hw_vpath_mtu_set(
2089 struct __vxge_hw_vpath_handle *vpath_handle, 1959 struct __vxge_hw_vpath_handle *vpath_handle,
2090 u32 new_mtu); 1960 u32 new_mtu);
2091 1961
2092enum vxge_hw_status vxge_hw_vpath_stats_enable(
2093 struct __vxge_hw_vpath_handle *vpath_handle);
2094
2095enum vxge_hw_status
2096__vxge_hw_vpath_stats_access(
2097 struct __vxge_hw_virtualpath *vpath,
2098 u32 operation,
2099 u32 offset,
2100 u64 *stat);
2101
2102enum vxge_hw_status
2103__vxge_hw_vpath_xmac_tx_stats_get(
2104 struct __vxge_hw_virtualpath *vpath,
2105 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
2106
2107enum vxge_hw_status
2108__vxge_hw_vpath_xmac_rx_stats_get(
2109 struct __vxge_hw_virtualpath *vpath,
2110 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
2111
2112enum vxge_hw_status
2113__vxge_hw_vpath_stats_get(
2114 struct __vxge_hw_virtualpath *vpath,
2115 struct vxge_hw_vpath_stats_hw_info *hw_stats);
2116
2117void 1962void
2118vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp); 1963vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);
2119 1964
2120enum vxge_hw_status
2121__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config);
2122
2123void
2124__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
2125
2126enum vxge_hw_status
2127__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
2128
2129enum vxge_hw_status
2130__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg);
2131
2132enum vxge_hw_status
2133__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
2134 struct vxge_hw_vpath_reg __iomem *vpath_reg);
2135
2136enum vxge_hw_status
2137__vxge_hw_device_register_poll(
2138 void __iomem *reg,
2139 u64 mask, u32 max_millis);
2140 1965
2141#ifndef readq 1966#ifndef readq
2142static inline u64 readq(void __iomem *addr) 1967static inline u64 readq(void __iomem *addr)
@@ -2168,62 +1993,12 @@ static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
2168 writel(val, addr); 1993 writel(val, addr);
2169} 1994}
2170 1995
2171static inline enum vxge_hw_status
2172__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
2173 u64 mask, u32 max_millis)
2174{
2175 enum vxge_hw_status status = VXGE_HW_OK;
2176
2177 __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
2178 wmb();
2179 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
2180 wmb();
2181
2182 status = __vxge_hw_device_register_poll(addr, mask, max_millis);
2183 return status;
2184}
2185
2186struct vxge_hw_toc_reg __iomem *
2187__vxge_hw_device_toc_get(void __iomem *bar0);
2188
2189enum vxge_hw_status
2190__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
2191
2192void
2193__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
2194
2195void
2196__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
2197
2198enum vxge_hw_status 1996enum vxge_hw_status
2199vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off); 1997vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off);
2200 1998
2201enum vxge_hw_status 1999enum vxge_hw_status
2202__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
2203
2204enum vxge_hw_status
2205__vxge_hw_vpath_pci_read(
2206 struct __vxge_hw_virtualpath *vpath,
2207 u32 phy_func_0,
2208 u32 offset,
2209 u32 *val);
2210
2211enum vxge_hw_status
2212__vxge_hw_vpath_addr_get(
2213 u32 vp_id,
2214 struct vxge_hw_vpath_reg __iomem *vpath_reg,
2215 u8 (macaddr)[ETH_ALEN],
2216 u8 (macaddr_mask)[ETH_ALEN]);
2217
2218u32
2219__vxge_hw_vpath_func_id_get(
2220 u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
2221
2222enum vxge_hw_status
2223__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
2224
2225enum vxge_hw_status
2226vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask); 2000vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
2001
2227/** 2002/**
2228 * vxge_debug 2003 * vxge_debug
2229 * @level: level of debug verbosity. 2004 * @level: level of debug verbosity.
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
index 05679e306fdd..b67746eef923 100644
--- a/drivers/net/vxge/vxge-ethtool.c
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -1142,7 +1142,7 @@ static const struct ethtool_ops vxge_ethtool_ops = {
1142 .get_ethtool_stats = vxge_get_ethtool_stats, 1142 .get_ethtool_stats = vxge_get_ethtool_stats,
1143}; 1143};
1144 1144
1145void initialize_ethtool_ops(struct net_device *ndev) 1145void vxge_initialize_ethtool_ops(struct net_device *ndev)
1146{ 1146{
1147 SET_ETHTOOL_OPS(ndev, &vxge_ethtool_ops); 1147 SET_ETHTOOL_OPS(ndev, &vxge_ethtool_ops);
1148} 1148}
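
The rename above is not cosmetic. initialize_ethtool_ops() is a global symbol (vxge-main.c calls it), and an unprefixed name in the kernel's single flat namespace invites a link-time collision with any other driver that picks the same name. The shape of the fix, with placeholder names (SET_ETHTOOL_OPS was the registration macro of this era):

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    static const struct ethtool_ops sketch_ethtool_ops = {
            /* .get_drvinfo, .get_settings, ... as the driver requires */
    };

    /* Driver-prefixed, because this symbol crosses file boundaries. */
    void sketch_initialize_ethtool_ops(struct net_device *ndev)
    {
            SET_ETHTOOL_OPS(ndev, &sketch_ethtool_ops);
    }
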
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index a69542ecb68d..813829f3d024 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -82,6 +82,16 @@ module_param_array(bw_percentage, uint, NULL, 0);
82 82
83static struct vxge_drv_config *driver_config; 83static struct vxge_drv_config *driver_config;
84 84
85static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
86 struct macInfo *mac);
87static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
88 struct macInfo *mac);
89static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac);
90static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
91static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
92static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
93static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
94
85static inline int is_vxge_card_up(struct vxgedev *vdev) 95static inline int is_vxge_card_up(struct vxgedev *vdev)
86{ 96{
87 return test_bit(__VXGE_STATE_CARD_UP, &vdev->state); 97 return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
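
Most of the vxge-main.c churn that follows applies one recipe: a function loses its public declaration in vxge-main.h, becomes static, and gains a forward declaration near the top of the file so that callers placed before its definition still compile, with no need to reorder hundreds of lines. In miniature:

    /* Sketch: keep a helper file-local without moving its definition. */
    static int sketch_helper(int x);        /* forward declaration */

    int sketch_caller(void)
    {
            return sketch_helper(41);       /* used before it is defined */
    }

    static int sketch_helper(int x)
    {
            return x + 1;
    }
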
@@ -138,7 +148,7 @@ static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
138 * This function is called during interrupt context to notify link up state 148 * This function is called during interrupt context to notify link up state
139 * change. 149 * change.
140 */ 150 */
141void 151static void
142vxge_callback_link_up(struct __vxge_hw_device *hldev) 152vxge_callback_link_up(struct __vxge_hw_device *hldev)
143{ 153{
144 struct net_device *dev = hldev->ndev; 154 struct net_device *dev = hldev->ndev;
@@ -162,7 +172,7 @@ vxge_callback_link_up(struct __vxge_hw_device *hldev)
162 * This function is called during interrupt context to notify link down state 172 * This function is called during interrupt context to notify link down state
163 * change. 173 * change.
164 */ 174 */
165void 175static void
166vxge_callback_link_down(struct __vxge_hw_device *hldev) 176vxge_callback_link_down(struct __vxge_hw_device *hldev)
167{ 177{
168 struct net_device *dev = hldev->ndev; 178 struct net_device *dev = hldev->ndev;
@@ -354,7 +364,7 @@ static inline void vxge_post(int *dtr_cnt, void **first_dtr,
354 * If the interrupt is because of a received frame or if the receive ring 364 * If the interrupt is because of a received frame or if the receive ring
355 * contains fresh as yet un-processed frames, this function is called. 365 * contains fresh as yet un-processed frames, this function is called.
356 */ 366 */
357enum vxge_hw_status 367static enum vxge_hw_status
358vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, 368vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
359 u8 t_code, void *userdata) 369 u8 t_code, void *userdata)
360{ 370{
@@ -531,7 +541,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
531 * freed and frees all skbs whose data have already been DMA'ed into the NIC's 541 * freed and frees all skbs whose data have already been DMA'ed into the NIC's
532 * internal memory. 542 * internal memory.
533 */ 543 */
534enum vxge_hw_status 544static enum vxge_hw_status
535vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr, 545vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
536 enum vxge_hw_fifo_tcode t_code, void *userdata, 546 enum vxge_hw_fifo_tcode t_code, void *userdata,
537 struct sk_buff ***skb_ptr, int nr_skb, int *more) 547 struct sk_buff ***skb_ptr, int nr_skb, int *more)
@@ -1246,7 +1256,7 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
1246 * 1256 *
1247 * Enables the interrupts for the vpath 1257 * Enables the interrupts for the vpath
1248*/ 1258*/
1249void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id) 1259static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
1250{ 1260{
1251 struct vxge_vpath *vpath = &vdev->vpaths[vp_id]; 1261 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1252 int msix_id = 0; 1262 int msix_id = 0;
@@ -1279,7 +1289,7 @@ void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
1279 * 1289 *
1280 * Disables the interrupts for the vpath 1290 * Disables the interrupts for the vpath
1281*/ 1291*/
1282void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id) 1292static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
1283{ 1293{
1284 struct vxge_vpath *vpath = &vdev->vpaths[vp_id]; 1294 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1285 int msix_id; 1295 int msix_id;
@@ -1553,7 +1563,7 @@ out:
1553 * 1563 *
1554 * driver may reset the chip on events of serr, eccerr, etc 1564 * driver may reset the chip on events of serr, eccerr, etc
1555 */ 1565 */
1556int vxge_reset(struct vxgedev *vdev) 1566static int vxge_reset(struct vxgedev *vdev)
1557{ 1567{
1558 return do_vxge_reset(vdev, VXGE_LL_FULL_RESET); 1568 return do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
1559} 1569}
@@ -1724,7 +1734,7 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
1724 return status; 1734 return status;
1725} 1735}
1726 1736
1727int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac) 1737static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
1728{ 1738{
1729 struct vxge_mac_addrs *new_mac_entry; 1739 struct vxge_mac_addrs *new_mac_entry;
1730 u8 *mac_address = NULL; 1740 u8 *mac_address = NULL;
@@ -1757,7 +1767,8 @@ int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
1757} 1767}
1758 1768
1759/* Add a mac address to DA table */ 1769/* Add a mac address to DA table */
1760enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac) 1770static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
1771 struct macInfo *mac)
1761{ 1772{
1762 enum vxge_hw_status status = VXGE_HW_OK; 1773 enum vxge_hw_status status = VXGE_HW_OK;
1763 struct vxge_vpath *vpath; 1774 struct vxge_vpath *vpath;
@@ -1782,7 +1793,7 @@ enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
1782 return status; 1793 return status;
1783} 1794}
1784 1795
1785int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac) 1796static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
1786{ 1797{
1787 struct list_head *entry, *next; 1798 struct list_head *entry, *next;
1788 u64 del_mac = 0; 1799 u64 del_mac = 0;
@@ -1807,7 +1818,8 @@ int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
1807 return FALSE; 1818 return FALSE;
1808} 1819}
1809/* delete a mac address from DA table */ 1820/* delete a mac address from DA table */
1810enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac) 1821static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
1822 struct macInfo *mac)
1811{ 1823{
1812 enum vxge_hw_status status = VXGE_HW_OK; 1824 enum vxge_hw_status status = VXGE_HW_OK;
1813 struct vxge_vpath *vpath; 1825 struct vxge_vpath *vpath;
@@ -1854,7 +1866,7 @@ static vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath,
1854} 1866}
1855 1867
1856/* Store all vlan ids from the list to the vid table */ 1868/* Store all vlan ids from the list to the vid table */
1857enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath) 1869static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
1858{ 1870{
1859 enum vxge_hw_status status = VXGE_HW_OK; 1871 enum vxge_hw_status status = VXGE_HW_OK;
1860 struct vxgedev *vdev = vpath->vdev; 1872 struct vxgedev *vdev = vpath->vdev;
@@ -1874,7 +1886,7 @@ enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
1874} 1886}
1875 1887
1876/* Store all mac addresses from the list to the DA table */ 1888/* Store all mac addresses from the list to the DA table */
1877enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath) 1889static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
1878{ 1890{
1879 enum vxge_hw_status status = VXGE_HW_OK; 1891 enum vxge_hw_status status = VXGE_HW_OK;
1880 struct macInfo mac_info; 1892 struct macInfo mac_info;
@@ -1916,7 +1928,7 @@ enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
1916} 1928}
1917 1929
1918/* reset vpaths */ 1930/* reset vpaths */
1919enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev) 1931static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
1920{ 1932{
1921 enum vxge_hw_status status = VXGE_HW_OK; 1933 enum vxge_hw_status status = VXGE_HW_OK;
1922 struct vxge_vpath *vpath; 1934 struct vxge_vpath *vpath;
@@ -1948,7 +1960,7 @@ enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
1948} 1960}
1949 1961
1950/* close vpaths */ 1962/* close vpaths */
1951void vxge_close_vpaths(struct vxgedev *vdev, int index) 1963static void vxge_close_vpaths(struct vxgedev *vdev, int index)
1952{ 1964{
1953 struct vxge_vpath *vpath; 1965 struct vxge_vpath *vpath;
1954 int i; 1966 int i;
@@ -1966,7 +1978,7 @@ void vxge_close_vpaths(struct vxgedev *vdev, int index)
1966} 1978}
1967 1979
1968/* open vpaths */ 1980/* open vpaths */
1969int vxge_open_vpaths(struct vxgedev *vdev) 1981static int vxge_open_vpaths(struct vxgedev *vdev)
1970{ 1982{
1971 struct vxge_hw_vpath_attr attr; 1983 struct vxge_hw_vpath_attr attr;
1972 enum vxge_hw_status status; 1984 enum vxge_hw_status status;
@@ -2517,7 +2529,7 @@ static void vxge_poll_vp_lockup(unsigned long data)
2517 * Return value: '0' on success and an appropriate (-)ve integer as 2529 * Return value: '0' on success and an appropriate (-)ve integer as
2518 * defined in errno.h file on failure. 2530 * defined in errno.h file on failure.
2519 */ 2531 */
2520int 2532static int
2521vxge_open(struct net_device *dev) 2533vxge_open(struct net_device *dev)
2522{ 2534{
2523 enum vxge_hw_status status; 2535 enum vxge_hw_status status;
@@ -2721,7 +2733,7 @@ out0:
2721} 2733}
2722 2734
2723/* Loop through the mac address list and delete all the entries */ 2735/* Loop through the mac address list and delete all the entries */
2724void vxge_free_mac_add_list(struct vxge_vpath *vpath) 2736static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
2725{ 2737{
2726 2738
2727 struct list_head *entry, *next; 2739 struct list_head *entry, *next;
@@ -2745,7 +2757,7 @@ static void vxge_napi_del_all(struct vxgedev *vdev)
2745 } 2757 }
2746} 2758}
2747 2759
2748int do_vxge_close(struct net_device *dev, int do_io) 2760static int do_vxge_close(struct net_device *dev, int do_io)
2749{ 2761{
2750 enum vxge_hw_status status; 2762 enum vxge_hw_status status;
2751 struct vxgedev *vdev; 2763 struct vxgedev *vdev;
@@ -2856,7 +2868,7 @@ int do_vxge_close(struct net_device *dev, int do_io)
2856 * Return value: '0' on success and an appropriate (-)ve integer as 2868 * Return value: '0' on success and an appropriate (-)ve integer as
2857 * defined in errno.h file on failure. 2869 * defined in errno.h file on failure.
2858 */ 2870 */
2859int 2871static int
2860vxge_close(struct net_device *dev) 2872vxge_close(struct net_device *dev)
2861{ 2873{
2862 do_vxge_close(dev, 1); 2874 do_vxge_close(dev, 1);
@@ -3113,10 +3125,10 @@ static const struct net_device_ops vxge_netdev_ops = {
3113#endif 3125#endif
3114}; 3126};
3115 3127
3116int __devinit vxge_device_register(struct __vxge_hw_device *hldev, 3128static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3117 struct vxge_config *config, 3129 struct vxge_config *config,
3118 int high_dma, int no_of_vpath, 3130 int high_dma, int no_of_vpath,
3119 struct vxgedev **vdev_out) 3131 struct vxgedev **vdev_out)
3120{ 3132{
3121 struct net_device *ndev; 3133 struct net_device *ndev;
3122 enum vxge_hw_status status = VXGE_HW_OK; 3134 enum vxge_hw_status status = VXGE_HW_OK;
@@ -3164,7 +3176,7 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3164 3176
3165 ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT; 3177 ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
3166 3178
3167 initialize_ethtool_ops(ndev); 3179 vxge_initialize_ethtool_ops(ndev);
3168 3180
3169 /* Allocate memory for vpath */ 3181 /* Allocate memory for vpath */
3170 vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) * 3182 vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
@@ -3249,7 +3261,7 @@ _out0:
3249 * 3261 *
3250 * This function will unregister and free network device 3262 * This function will unregister and free network device
3251 */ 3263 */
3252void 3264static void
3253vxge_device_unregister(struct __vxge_hw_device *hldev) 3265vxge_device_unregister(struct __vxge_hw_device *hldev)
3254{ 3266{
3255 struct vxgedev *vdev; 3267 struct vxgedev *vdev;
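The run of hunks above is the standard kernel cleanup for file-local symbols: every function that is only called from within vxge-main.c gains the static qualifier, and (next hunk) its prototype disappears from vxge-main.h, so the compiler can prove the symbol is private and warn if it ever goes unused. A minimal sketch of the before/after shape, using an illustrative function name rather than one from the driver:

/* before: exported via the header, callable from any file */
int example_open_paths(struct vxgedev *vdev);

/* after: private to the defining .c file, no header prototype */
static int example_open_paths(struct vxgedev *vdev)
{
        /* body unchanged; only the linkage narrows */
        return 0;
}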
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index d4be07eaacd7..de64536cb7d0 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -396,64 +396,7 @@ struct vxge_tx_priv {
396 mod_timer(&timer, (jiffies + exp)); \ 396 mod_timer(&timer, (jiffies + exp)); \
397 } while (0); 397 } while (0);
398 398
399int __devinit vxge_device_register(struct __vxge_hw_device *devh, 399extern void vxge_initialize_ethtool_ops(struct net_device *ndev);
400 struct vxge_config *config,
401 int high_dma, int no_of_vpath,
402 struct vxgedev **vdev);
403
404void vxge_device_unregister(struct __vxge_hw_device *devh);
405
406void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id);
407
408void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id);
409
410void vxge_callback_link_up(struct __vxge_hw_device *devh);
411
412void vxge_callback_link_down(struct __vxge_hw_device *devh);
413
414enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
415 struct macInfo *mac);
416
417int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
418
419int vxge_reset(struct vxgedev *vdev);
420
421enum vxge_hw_status
422vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
423 u8 t_code, void *userdata);
424
425enum vxge_hw_status
426vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
427 enum vxge_hw_fifo_tcode t_code, void *userdata,
428 struct sk_buff ***skb_ptr, int nr_skbs, int *more);
429
430int vxge_close(struct net_device *dev);
431
432int vxge_open(struct net_device *dev);
433
434void vxge_close_vpaths(struct vxgedev *vdev, int index);
435
436int vxge_open_vpaths(struct vxgedev *vdev);
437
438enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
439
440enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
441 struct macInfo *mac);
442
443enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
444 struct macInfo *mac);
445
446int vxge_mac_list_add(struct vxge_vpath *vpath,
447 struct macInfo *mac);
448
449void vxge_free_mac_add_list(struct vxge_vpath *vpath);
450
451enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
452
453enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
454
455int do_vxge_close(struct net_device *dev, int do_io);
456extern void initialize_ethtool_ops(struct net_device *ndev);
457/** 400/**
458 * #define VXGE_DEBUG_INIT: debug for initialization functions 401 * #define VXGE_DEBUG_INIT: debug for initialization functions
459 * #define VXGE_DEBUG_TX : debug transmit related functions 402 * #define VXGE_DEBUG_TX : debug transmit related functions
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index cedf08f99cb3..4bdb611a6842 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -17,6 +17,13 @@
17#include "vxge-config.h" 17#include "vxge-config.h"
18#include "vxge-main.h" 18#include "vxge-main.h"
19 19
20static enum vxge_hw_status
21__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev,
22 u32 vp_id, enum vxge_hw_event type);
23static enum vxge_hw_status
24__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
25 u32 skip_alarms);
26
20/* 27/*
21 * vxge_hw_vpath_intr_enable - Enable vpath interrupts. 28 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
22 * @vp: Virtual Path handle. 29 * @vp: Virtual Path handle.
@@ -513,7 +520,7 @@ exit:
513 * Link up indication handler. The function is invoked by HW when 520 * Link up indication handler. The function is invoked by HW when
514 * Titan indicates that the link is up for programmable amount of time. 521 * Titan indicates that the link is up for programmable amount of time.
515 */ 522 */
516enum vxge_hw_status 523static enum vxge_hw_status
517__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev) 524__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
518{ 525{
519 /* 526 /*
@@ -538,7 +545,7 @@ exit:
538 * Link down indication handler. The function is invoked by HW when 545 * Link down indication handler. The function is invoked by HW when
539 * Titan indicates that the link is down. 546 * Titan indicates that the link is down.
540 */ 547 */
541enum vxge_hw_status 548static enum vxge_hw_status
542__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev) 549__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
543{ 550{
544 /* 551 /*
@@ -564,7 +571,7 @@ exit:
564 * 571 *
565 * Handle error. 572 * Handle error.
566 */ 573 */
567enum vxge_hw_status 574static enum vxge_hw_status
568__vxge_hw_device_handle_error( 575__vxge_hw_device_handle_error(
569 struct __vxge_hw_device *hldev, 576 struct __vxge_hw_device *hldev,
570 u32 vp_id, 577 u32 vp_id,
@@ -646,7 +653,7 @@ void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
646 * it swaps the reserve and free arrays. 653 * it swaps the reserve and free arrays.
647 * 654 *
648 */ 655 */
649enum vxge_hw_status 656static enum vxge_hw_status
650vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh) 657vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
651{ 658{
652 void **tmp_arr; 659 void **tmp_arr;
@@ -692,7 +699,8 @@ _alloc_after_swap:
692 * Posts a dtr to work array. 699 * Posts a dtr to work array.
693 * 700 *
694 */ 701 */
695void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh) 702static void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel,
703 void *dtrh)
696{ 704{
697 vxge_assert(channel->work_arr[channel->post_index] == NULL); 705 vxge_assert(channel->work_arr[channel->post_index] == NULL);
698 706
@@ -1658,37 +1666,6 @@ exit:
1658} 1666}
1659 1667
1660/** 1668/**
1661 * vxge_hw_vpath_vid_get_next - Get the next vid entry for this vpath
1662 * from the vlan id table.
1663 * @vp: Vpath handle.
1664 * @vid: Buffer to return vlan id
1665 *
1666 * Returns the next vlan id in the list for this vpath.
1667 * see also: vxge_hw_vpath_vid_get
1668 *
1669 */
1670enum vxge_hw_status
1671vxge_hw_vpath_vid_get_next(struct __vxge_hw_vpath_handle *vp, u64 *vid)
1672{
1673 u64 data;
1674 enum vxge_hw_status status = VXGE_HW_OK;
1675
1676 if (vp == NULL) {
1677 status = VXGE_HW_ERR_INVALID_HANDLE;
1678 goto exit;
1679 }
1680
1681 status = __vxge_hw_vpath_rts_table_get(vp,
1682 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1683 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1684 0, vid, &data);
1685
1686 *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
1687exit:
1688 return status;
1689}
1690
1691/**
1692 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath 1669 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
1693 * from the vlan id table. 1670 * from the vlan id table.
1694 * @vp: Vpath handle. 1671 * @vp: Vpath handle.
@@ -1898,9 +1875,9 @@ exit:
1898 * Process vpath alarms. 1875 * Process vpath alarms.
1899 * 1876 *
1900 */ 1877 */
1901enum vxge_hw_status __vxge_hw_vpath_alarm_process( 1878static enum vxge_hw_status
1902 struct __vxge_hw_virtualpath *vpath, 1879__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
1903 u32 skip_alarms) 1880 u32 skip_alarms)
1904{ 1881{
1905 u64 val64; 1882 u64 val64;
1906 u64 alarm_status; 1883 u64 alarm_status;
@@ -2265,36 +2242,6 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2265} 2242}
2266 2243
2267/** 2244/**
2268 * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
2269 * @vp: Virtual Path handle.
2270 * @msix_id: MSI ID
2271 *
2272 * The function clears the msix interrupt for the given msix_id
2273 *
2274 * Returns: 0,
2275 * Otherwise, VXGE_HW_ERR_WRONG_IRQ if the msix index is out of range
2276 * status.
2277 * See also:
2278 */
2279void
2280vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
2281{
2282 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2283 if (hldev->config.intr_mode ==
2284 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2285 __vxge_hw_pio_mem_write32_upper(
2286 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2287 &hldev->common_reg->
2288 clr_msix_one_shot_vec[msix_id%4]);
2289 } else {
2290 __vxge_hw_pio_mem_write32_upper(
2291 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2292 &hldev->common_reg->
2293 clear_msix_mask_vect[msix_id%4]);
2294 }
2295}
2296
2297/**
2298 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector. 2245 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
2299 * @vp: Virtual Path handle. 2246 * @vp: Virtual Path handle.
2300 * @msix_id: MSI ID 2247 * @msix_id: MSI ID
@@ -2316,22 +2263,6 @@ vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2316} 2263}
2317 2264
2318/** 2265/**
2319 * vxge_hw_vpath_msix_mask_all - Mask all MSIX vectors for the vpath.
2320 * @vp: Virtual Path handle.
2321 *
2322 * The function masks all msix interrupts for the given vpath
2323 *
2324 */
2325void
2326vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
2327{
2328
2329 __vxge_hw_pio_mem_write32_upper(
2330 (u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
2331 &vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
2332}
2333
2334/**
2335 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts. 2266 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
2336 * @vp: Virtual Path handle. 2267 * @vp: Virtual Path handle.
2337 * 2268 *
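One wrinkle of the static conversion is visible at the top of this file: __vxge_hw_device_handle_error() and __vxge_hw_vpath_alarm_process() are called before the point where they are defined, so the dropped header prototypes are replaced by file-local forward declarations. A sketch of the idiom, with hypothetical names:

static int helper(int arg);     /* forward declaration, file scope */

static void caller(void)
{
        helper(42);             /* legal: the declaration above suffices */
}

static int helper(int arg)      /* definition can come later in the file */
{
        return arg * 2;
}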
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 6fa07d13798e..9890d4d596d0 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -1749,14 +1749,6 @@ vxge_hw_mrpcim_stats_access(
1749 u64 *stat); 1749 u64 *stat);
1750 1750
1751enum vxge_hw_status 1751enum vxge_hw_status
1752vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *devh, u32 port,
1753 struct vxge_hw_xmac_aggr_stats *aggr_stats);
1754
1755enum vxge_hw_status
1756vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *devh, u32 port,
1757 struct vxge_hw_xmac_port_stats *port_stats);
1758
1759enum vxge_hw_status
1760vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *devh, 1752vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *devh,
1761 struct vxge_hw_xmac_stats *xmac_stats); 1753 struct vxge_hw_xmac_stats *xmac_stats);
1762 1754
@@ -2117,49 +2109,10 @@ struct __vxge_hw_ring_rxd_priv {
2117#endif 2109#endif
2118}; 2110};
2119 2111
2120/* ========================= RING PRIVATE API ============================= */
2121u64
2122__vxge_hw_ring_first_block_address_get(
2123 struct __vxge_hw_ring *ringh);
2124
2125enum vxge_hw_status
2126__vxge_hw_ring_create(
2127 struct __vxge_hw_vpath_handle *vpath_handle,
2128 struct vxge_hw_ring_attr *attr);
2129
2130enum vxge_hw_status
2131__vxge_hw_ring_abort(
2132 struct __vxge_hw_ring *ringh);
2133
2134enum vxge_hw_status
2135__vxge_hw_ring_reset(
2136 struct __vxge_hw_ring *ringh);
2137
2138enum vxge_hw_status
2139__vxge_hw_ring_delete(
2140 struct __vxge_hw_vpath_handle *vpath_handle);
2141
2142/* ========================= FIFO PRIVATE API ============================= */ 2112/* ========================= FIFO PRIVATE API ============================= */
2143 2113
2144struct vxge_hw_fifo_attr; 2114struct vxge_hw_fifo_attr;
2145 2115
2146enum vxge_hw_status
2147__vxge_hw_fifo_create(
2148 struct __vxge_hw_vpath_handle *vpath_handle,
2149 struct vxge_hw_fifo_attr *attr);
2150
2151enum vxge_hw_status
2152__vxge_hw_fifo_abort(
2153 struct __vxge_hw_fifo *fifoh);
2154
2155enum vxge_hw_status
2156__vxge_hw_fifo_reset(
2157 struct __vxge_hw_fifo *ringh);
2158
2159enum vxge_hw_status
2160__vxge_hw_fifo_delete(
2161 struct __vxge_hw_vpath_handle *vpath_handle);
2162
2163struct vxge_hw_mempool_cbs { 2116struct vxge_hw_mempool_cbs {
2164 void (*item_func_alloc)( 2117 void (*item_func_alloc)(
2165 struct vxge_hw_mempool *mempoolh, 2118 struct vxge_hw_mempool *mempoolh,
@@ -2169,10 +2122,6 @@ struct vxge_hw_mempool_cbs {
2169 u32 is_last); 2122 u32 is_last);
2170}; 2123};
2171 2124
2172void
2173__vxge_hw_mempool_destroy(
2174 struct vxge_hw_mempool *mempool);
2175
2176#define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \ 2125#define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
2177 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next) 2126 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
2178 2127
@@ -2195,61 +2144,10 @@ __vxge_hw_vpath_rts_table_set(
2195 u64 data2); 2144 u64 data2);
2196 2145
2197enum vxge_hw_status 2146enum vxge_hw_status
2198__vxge_hw_vpath_reset(
2199 struct __vxge_hw_device *devh,
2200 u32 vp_id);
2201
2202enum vxge_hw_status
2203__vxge_hw_vpath_sw_reset(
2204 struct __vxge_hw_device *devh,
2205 u32 vp_id);
2206
2207enum vxge_hw_status
2208__vxge_hw_vpath_enable( 2147__vxge_hw_vpath_enable(
2209 struct __vxge_hw_device *devh, 2148 struct __vxge_hw_device *devh,
2210 u32 vp_id); 2149 u32 vp_id);
2211 2150
2212void
2213__vxge_hw_vpath_prc_configure(
2214 struct __vxge_hw_device *devh,
2215 u32 vp_id);
2216
2217enum vxge_hw_status
2218__vxge_hw_vpath_kdfc_configure(
2219 struct __vxge_hw_device *devh,
2220 u32 vp_id);
2221
2222enum vxge_hw_status
2223__vxge_hw_vpath_mac_configure(
2224 struct __vxge_hw_device *devh,
2225 u32 vp_id);
2226
2227enum vxge_hw_status
2228__vxge_hw_vpath_tim_configure(
2229 struct __vxge_hw_device *devh,
2230 u32 vp_id);
2231
2232enum vxge_hw_status
2233__vxge_hw_vpath_initialize(
2234 struct __vxge_hw_device *devh,
2235 u32 vp_id);
2236
2237enum vxge_hw_status
2238__vxge_hw_vp_initialize(
2239 struct __vxge_hw_device *devh,
2240 u32 vp_id,
2241 struct vxge_hw_vp_config *config);
2242
2243void
2244__vxge_hw_vp_terminate(
2245 struct __vxge_hw_device *devh,
2246 u32 vp_id);
2247
2248enum vxge_hw_status
2249__vxge_hw_vpath_alarm_process(
2250 struct __vxge_hw_virtualpath *vpath,
2251 u32 skip_alarms);
2252
2253void vxge_hw_device_intr_enable( 2151void vxge_hw_device_intr_enable(
2254 struct __vxge_hw_device *devh); 2152 struct __vxge_hw_device *devh);
2255 2153
@@ -2321,11 +2219,6 @@ vxge_hw_vpath_vid_get(
2321 u64 *vid); 2219 u64 *vid);
2322 2220
2323enum vxge_hw_status 2221enum vxge_hw_status
2324vxge_hw_vpath_vid_get_next(
2325 struct __vxge_hw_vpath_handle *vpath_handle,
2326 u64 *vid);
2327
2328enum vxge_hw_status
2329vxge_hw_vpath_vid_delete( 2222vxge_hw_vpath_vid_delete(
2330 struct __vxge_hw_vpath_handle *vpath_handle, 2223 struct __vxge_hw_vpath_handle *vpath_handle,
2331 u64 vid); 2224 u64 vid);
@@ -2387,16 +2280,9 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
2387void vxge_hw_device_flush_io(struct __vxge_hw_device *devh); 2280void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
2388 2281
2389void 2282void
2390vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vpath_handle,
2391 int msix_id);
2392
2393void
2394vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vpath_handle, 2283vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vpath_handle,
2395 int msix_id); 2284 int msix_id);
2396 2285
2397void
2398vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vpath_handle);
2399
2400enum vxge_hw_status vxge_hw_vpath_intr_enable( 2286enum vxge_hw_status vxge_hw_vpath_intr_enable(
2401 struct __vxge_hw_vpath_handle *vpath_handle); 2287 struct __vxge_hw_vpath_handle *vpath_handle);
2402 2288
@@ -2415,12 +2301,6 @@ vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channelh, int msix_id);
2415void 2301void
2416vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id); 2302vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
2417 2303
2418enum vxge_hw_status
2419vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh);
2420
2421void
2422vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh);
2423
2424void 2304void
2425vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, 2305vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
2426 void **dtrh); 2306 void **dtrh);
@@ -2436,18 +2316,4 @@ vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
2436void 2316void
2437vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id); 2317vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id);
2438 2318
2439/* ========================== PRIVATE API ================================= */
2440
2441enum vxge_hw_status
2442__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev);
2443
2444enum vxge_hw_status
2445__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev);
2446
2447enum vxge_hw_status
2448__vxge_hw_device_handle_error(
2449 struct __vxge_hw_device *hldev,
2450 u32 vp_id,
2451 enum vxge_hw_event type);
2452
2453#endif 2319#endif
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index f1ae75d35d5d..8251946842e6 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -3580,6 +3580,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
3580 common->ah = sc->ah; 3580 common->ah = sc->ah;
3581 common->hw = hw; 3581 common->hw = hw;
3582 common->cachelsz = csz << 2; /* convert to bytes */ 3582 common->cachelsz = csz << 2; /* convert to bytes */
3583 spin_lock_init(&common->cc_lock);
3583 3584
3584 /* Initialize device */ 3585 /* Initialize device */
3585 ret = ath5k_hw_attach(sc); 3586 ret = ath5k_hw_attach(sc);
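This one-liner, and the matching hunk in ath9k's init.c further down, cures the same omission: the shared cc_lock spinlock was being taken by the cycle-counter code without ever being initialized. A spinlock must pass through spin_lock_init() before its first spin_lock(); with CONFIG_DEBUG_SPINLOCK an uninitialized lock triggers a runtime complaint. The general shape, as a minimal sketch with hypothetical types:

struct foo {
        spinlock_t lock;
        int shared_count;
};

static void foo_init(struct foo *f)
{
        spin_lock_init(&f->lock);       /* must precede any spin_lock() */
}

static void foo_bump(struct foo *f)
{
        spin_lock(&f->lock);
        f->shared_count++;              /* protected update */
        spin_unlock(&f->lock);
}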
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index ec98ab50748a..a14a5e43cf56 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -34,6 +34,10 @@ static const u32 ar9300_2p2_radio_postamble[][5] = {
34 34
35static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = { 35static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = {
36 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 36 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
37 {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
38 {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
39 {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
40 {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
37 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, 41 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
38 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 42 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
39 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002}, 43 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
@@ -99,6 +103,30 @@ static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = {
99 {0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, 103 {0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
100 {0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, 104 {0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
101 {0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, 105 {0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
106 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
107 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
108 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
109 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
110 {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
111 {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
112 {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
113 {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
114 {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
115 {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
116 {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
117 {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
118 {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
119 {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
120 {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
121 {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
122 {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
123 {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
124 {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
125 {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
126 {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
127 {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
128 {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
129 {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
102 {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4}, 130 {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
103 {0x00016048, 0x62480001, 0x62480001, 0x62480001, 0x62480001}, 131 {0x00016048, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
104 {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, 132 {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
@@ -118,7 +146,7 @@ static const u32 ar9300Modes_fast_clock_2p2[][3] = {
118 {0x00008014, 0x044c044c, 0x08980898}, 146 {0x00008014, 0x044c044c, 0x08980898},
119 {0x0000801c, 0x148ec02b, 0x148ec057}, 147 {0x0000801c, 0x148ec02b, 0x148ec057},
120 {0x00008318, 0x000044c0, 0x00008980}, 148 {0x00008318, 0x000044c0, 0x00008980},
121 {0x00009e00, 0x03721821, 0x03721821}, 149 {0x00009e00, 0x0372131c, 0x0372131c},
122 {0x0000a230, 0x0000000b, 0x00000016}, 150 {0x0000a230, 0x0000000b, 0x00000016},
123 {0x0000a254, 0x00000898, 0x00001130}, 151 {0x0000a254, 0x00000898, 0x00001130},
124}; 152};
@@ -595,15 +623,16 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = {
595 {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4}, 623 {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
596 {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c}, 624 {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
597 {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4}, 625 {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
598 {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0}, 626 {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
599 {0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020}, 627 {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
600 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2}, 628 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
601 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e}, 629 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
602 {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e}, 630 {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
603 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 631 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
604 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 632 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
605 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, 633 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
606 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, 634 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
635 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
607 {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27}, 636 {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
608 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, 637 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
609 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, 638 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -624,16 +653,16 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = {
624 {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222}, 653 {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
625 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, 654 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
626 {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071982}, 655 {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071982},
627 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a}, 656 {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
628 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 657 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
629 {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c}, 658 {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
630 {0x0000ae04, 0x00800000, 0x00800000, 0x00800000, 0x00800000}, 659 {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
631 {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 660 {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
632 {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c}, 661 {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
633 {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce}, 662 {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
634 {0x0000b284, 0x00000000, 0x00000000, 0x00000150, 0x00000150}, 663 {0x0000b284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
635 {0x0000b830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c}, 664 {0x0000b830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
636 {0x0000be04, 0x00800000, 0x00800000, 0x00800000, 0x00800000}, 665 {0x0000be04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
637 {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 666 {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
638 {0x0000be1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c}, 667 {0x0000be1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
639 {0x0000be20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce}, 668 {0x0000be20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
@@ -649,13 +678,13 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
649 {0x00009814, 0x9280c00a}, 678 {0x00009814, 0x9280c00a},
650 {0x00009818, 0x00000000}, 679 {0x00009818, 0x00000000},
651 {0x0000981c, 0x00020028}, 680 {0x0000981c, 0x00020028},
652 {0x00009834, 0x5f3ca3de}, 681 {0x00009834, 0x6400a290},
653 {0x00009838, 0x0108ecff}, 682 {0x00009838, 0x0108ecff},
654 {0x0000983c, 0x14750600}, 683 {0x0000983c, 0x14750600},
655 {0x00009880, 0x201fff00}, 684 {0x00009880, 0x201fff00},
656 {0x00009884, 0x00001042}, 685 {0x00009884, 0x00001042},
657 {0x000098a4, 0x00200400}, 686 {0x000098a4, 0x00200400},
658 {0x000098b0, 0x52440bbe}, 687 {0x000098b0, 0x32840bbe},
659 {0x000098d0, 0x004b6a8e}, 688 {0x000098d0, 0x004b6a8e},
660 {0x000098d4, 0x00000820}, 689 {0x000098d4, 0x00000820},
661 {0x000098dc, 0x00000000}, 690 {0x000098dc, 0x00000000},
@@ -681,7 +710,6 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
681 {0x00009e30, 0x06336f77}, 710 {0x00009e30, 0x06336f77},
682 {0x00009e34, 0x6af6532f}, 711 {0x00009e34, 0x6af6532f},
683 {0x00009e38, 0x0cc80c00}, 712 {0x00009e38, 0x0cc80c00},
684 {0x00009e3c, 0xcf946222},
685 {0x00009e40, 0x0d261820}, 713 {0x00009e40, 0x0d261820},
686 {0x00009e4c, 0x00001004}, 714 {0x00009e4c, 0x00001004},
687 {0x00009e50, 0x00ff03f1}, 715 {0x00009e50, 0x00ff03f1},
@@ -694,7 +722,7 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
694 {0x0000a220, 0x00000000}, 722 {0x0000a220, 0x00000000},
695 {0x0000a224, 0x00000000}, 723 {0x0000a224, 0x00000000},
696 {0x0000a228, 0x10002310}, 724 {0x0000a228, 0x10002310},
697 {0x0000a22c, 0x01036a1e}, 725 {0x0000a22c, 0x01036a27},
698 {0x0000a23c, 0x00000000}, 726 {0x0000a23c, 0x00000000},
699 {0x0000a244, 0x0c000000}, 727 {0x0000a244, 0x0c000000},
700 {0x0000a2a0, 0x00000001}, 728 {0x0000a2a0, 0x00000001},
@@ -702,10 +730,6 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
702 {0x0000a2c8, 0x00000000}, 730 {0x0000a2c8, 0x00000000},
703 {0x0000a2cc, 0x18c43433}, 731 {0x0000a2cc, 0x18c43433},
704 {0x0000a2d4, 0x00000000}, 732 {0x0000a2d4, 0x00000000},
705 {0x0000a2dc, 0x00000000},
706 {0x0000a2e0, 0x00000000},
707 {0x0000a2e4, 0x00000000},
708 {0x0000a2e8, 0x00000000},
709 {0x0000a2ec, 0x00000000}, 733 {0x0000a2ec, 0x00000000},
710 {0x0000a2f0, 0x00000000}, 734 {0x0000a2f0, 0x00000000},
711 {0x0000a2f4, 0x00000000}, 735 {0x0000a2f4, 0x00000000},
@@ -753,33 +777,17 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
753 {0x0000a430, 0x1ce739ce}, 777 {0x0000a430, 0x1ce739ce},
754 {0x0000a434, 0x00000000}, 778 {0x0000a434, 0x00000000},
755 {0x0000a438, 0x00001801}, 779 {0x0000a438, 0x00001801},
756 {0x0000a43c, 0x00000000}, 780 {0x0000a43c, 0x00100000},
757 {0x0000a440, 0x00000000}, 781 {0x0000a440, 0x00000000},
758 {0x0000a444, 0x00000000}, 782 {0x0000a444, 0x00000000},
759 {0x0000a448, 0x06000080}, 783 {0x0000a448, 0x06000080},
760 {0x0000a44c, 0x00000001}, 784 {0x0000a44c, 0x00000001},
761 {0x0000a450, 0x00010000}, 785 {0x0000a450, 0x00010000},
762 {0x0000a458, 0x00000000}, 786 {0x0000a458, 0x00000000},
763 {0x0000a600, 0x00000000},
764 {0x0000a604, 0x00000000},
765 {0x0000a608, 0x00000000},
766 {0x0000a60c, 0x00000000},
767 {0x0000a610, 0x00000000},
768 {0x0000a614, 0x00000000},
769 {0x0000a618, 0x00000000},
770 {0x0000a61c, 0x00000000},
771 {0x0000a620, 0x00000000},
772 {0x0000a624, 0x00000000},
773 {0x0000a628, 0x00000000},
774 {0x0000a62c, 0x00000000},
775 {0x0000a630, 0x00000000},
776 {0x0000a634, 0x00000000},
777 {0x0000a638, 0x00000000},
778 {0x0000a63c, 0x00000000},
779 {0x0000a640, 0x00000000}, 787 {0x0000a640, 0x00000000},
780 {0x0000a644, 0x3fad9d74}, 788 {0x0000a644, 0x3fad9d74},
781 {0x0000a648, 0x0048060a}, 789 {0x0000a648, 0x0048060a},
782 {0x0000a64c, 0x00000637}, 790 {0x0000a64c, 0x00003c37},
783 {0x0000a670, 0x03020100}, 791 {0x0000a670, 0x03020100},
784 {0x0000a674, 0x09080504}, 792 {0x0000a674, 0x09080504},
785 {0x0000a678, 0x0d0c0b0a}, 793 {0x0000a678, 0x0d0c0b0a},
@@ -802,10 +810,6 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
802 {0x0000a8f4, 0x00000000}, 810 {0x0000a8f4, 0x00000000},
803 {0x0000b2d0, 0x00000080}, 811 {0x0000b2d0, 0x00000080},
804 {0x0000b2d4, 0x00000000}, 812 {0x0000b2d4, 0x00000000},
805 {0x0000b2dc, 0x00000000},
806 {0x0000b2e0, 0x00000000},
807 {0x0000b2e4, 0x00000000},
808 {0x0000b2e8, 0x00000000},
809 {0x0000b2ec, 0x00000000}, 813 {0x0000b2ec, 0x00000000},
810 {0x0000b2f0, 0x00000000}, 814 {0x0000b2f0, 0x00000000},
811 {0x0000b2f4, 0x00000000}, 815 {0x0000b2f4, 0x00000000},
@@ -820,10 +824,6 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
820 {0x0000b8f4, 0x00000000}, 824 {0x0000b8f4, 0x00000000},
821 {0x0000c2d0, 0x00000080}, 825 {0x0000c2d0, 0x00000080},
822 {0x0000c2d4, 0x00000000}, 826 {0x0000c2d4, 0x00000000},
823 {0x0000c2dc, 0x00000000},
824 {0x0000c2e0, 0x00000000},
825 {0x0000c2e4, 0x00000000},
826 {0x0000c2e8, 0x00000000},
827 {0x0000c2ec, 0x00000000}, 827 {0x0000c2ec, 0x00000000},
828 {0x0000c2f0, 0x00000000}, 828 {0x0000c2f0, 0x00000000},
829 {0x0000c2f4, 0x00000000}, 829 {0x0000c2f4, 0x00000000},
@@ -835,6 +835,10 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
835 835
836static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = { 836static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
837 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 837 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
838 {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
839 {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
840 {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
841 {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
838 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9}, 842 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
839 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, 843 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
840 {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002}, 844 {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
@@ -855,7 +859,7 @@ static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
855 {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660}, 859 {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
856 {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861}, 860 {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
857 {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81}, 861 {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
858 {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83}, 862 {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
859 {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84}, 863 {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
860 {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3}, 864 {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
861 {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5}, 865 {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
@@ -900,6 +904,30 @@ static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
900 {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 904 {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
901 {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 905 {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
902 {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 906 {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
907 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
908 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
909 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
910 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
911 {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
912 {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
913 {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
914 {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
915 {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
916 {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
917 {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
918 {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
919 {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
920 {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
921 {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
922 {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
923 {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
924 {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
925 {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
926 {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
927 {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
928 {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
929 {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
930 {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
903 {0x00016044, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6}, 931 {0x00016044, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
904 {0x00016048, 0xae480001, 0xae480001, 0xae480001, 0xae480001}, 932 {0x00016048, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
905 {0x00016068, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c}, 933 {0x00016068, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
@@ -913,6 +941,10 @@ static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
913 941
914static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = { 942static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
915 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 943 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
944 {0x0000a2dc, 0x01feee00, 0x01feee00, 0x00637800, 0x00637800},
945 {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03838000, 0x03838000},
946 {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03fc0000, 0x03fc0000},
947 {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
916 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9}, 948 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
917 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, 949 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
918 {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002}, 950 {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
@@ -933,7 +965,7 @@ static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
933 {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660}, 965 {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
934 {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861}, 966 {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
935 {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81}, 967 {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
936 {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83}, 968 {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
937 {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84}, 969 {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
938 {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3}, 970 {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
939 {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5}, 971 {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
@@ -978,6 +1010,30 @@ static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
978 {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 1010 {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
979 {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 1011 {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
980 {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 1012 {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
1013 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1014 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1015 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1016 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1017 {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
1018 {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
1019 {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
1020 {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
1021 {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
1022 {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
1023 {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
1024 {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
1025 {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
1026 {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
1027 {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
1028 {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
1029 {0x0000b2dc, 0x01feee00, 0x01feee00, 0x00637800, 0x00637800},
1030 {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03838000, 0x03838000},
1031 {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03fc0000, 0x03fc0000},
1032 {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1033 {0x0000c2dc, 0x01feee00, 0x01feee00, 0x00637800, 0x00637800},
1034 {0x0000c2e0, 0x0000f000, 0x0000f000, 0x03838000, 0x03838000},
1035 {0x0000c2e4, 0x01ff0000, 0x01ff0000, 0x03fc0000, 0x03fc0000},
1036 {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
981 {0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4}, 1037 {0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
982 {0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001}, 1038 {0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
983 {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, 1039 {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
@@ -1151,14 +1207,14 @@ static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
1151 {0x0000b074, 0x00000000}, 1207 {0x0000b074, 0x00000000},
1152 {0x0000b078, 0x00000000}, 1208 {0x0000b078, 0x00000000},
1153 {0x0000b07c, 0x00000000}, 1209 {0x0000b07c, 0x00000000},
1154 {0x0000b080, 0x32323232}, 1210 {0x0000b080, 0x2a2d2f32},
1155 {0x0000b084, 0x2f2f3232}, 1211 {0x0000b084, 0x21232328},
1156 {0x0000b088, 0x23282a2d}, 1212 {0x0000b088, 0x19191c1e},
1157 {0x0000b08c, 0x1c1e2123}, 1213 {0x0000b08c, 0x12141417},
1158 {0x0000b090, 0x14171919}, 1214 {0x0000b090, 0x07070e0e},
1159 {0x0000b094, 0x0e0e1214}, 1215 {0x0000b094, 0x03030305},
1160 {0x0000b098, 0x03050707}, 1216 {0x0000b098, 0x00000003},
1161 {0x0000b09c, 0x00030303}, 1217 {0x0000b09c, 0x00000000},
1162 {0x0000b0a0, 0x00000000}, 1218 {0x0000b0a0, 0x00000000},
1163 {0x0000b0a4, 0x00000000}, 1219 {0x0000b0a4, 0x00000000},
1164 {0x0000b0a8, 0x00000000}, 1220 {0x0000b0a8, 0x00000000},
@@ -1251,6 +1307,10 @@ static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
1251 1307
1252static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p2[][5] = { 1308static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p2[][5] = {
1253 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 1309 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1310 {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
1311 {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
1312 {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
1313 {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1254 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, 1314 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
1255 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1315 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1256 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002}, 1316 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
@@ -1316,6 +1376,30 @@ static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p2[][5] = {
1316 {0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, 1376 {0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
1317 {0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, 1377 {0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
1318 {0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, 1378 {0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
1379 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1380 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1381 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1382 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1383 {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1384 {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
1385 {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
1386 {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
1387 {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
1388 {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
1389 {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
1390 {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
1391 {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
1392 {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
1393 {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
1394 {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
1395 {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
1396 {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
1397 {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
1398 {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1399 {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
1400 {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
1401 {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
1402 {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1319 {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4}, 1403 {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
1320 {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001}, 1404 {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
1321 {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, 1405 {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
@@ -1414,15 +1498,10 @@ static const u32 ar9300_2p2_mac_core[][2] = {
1414 {0x00008144, 0xffffffff}, 1498 {0x00008144, 0xffffffff},
1415 {0x00008168, 0x00000000}, 1499 {0x00008168, 0x00000000},
1416 {0x0000816c, 0x00000000}, 1500 {0x0000816c, 0x00000000},
1417 {0x00008170, 0x18486200},
1418 {0x00008174, 0x33332210},
1419 {0x00008178, 0x00000000},
1420 {0x0000817c, 0x00020000},
1421 {0x000081c0, 0x00000000}, 1501 {0x000081c0, 0x00000000},
1422 {0x000081c4, 0x33332210}, 1502 {0x000081c4, 0x33332210},
1423 {0x000081c8, 0x00000000}, 1503 {0x000081c8, 0x00000000},
1424 {0x000081cc, 0x00000000}, 1504 {0x000081cc, 0x00000000},
1425 {0x000081d4, 0x00000000},
1426 {0x000081ec, 0x00000000}, 1505 {0x000081ec, 0x00000000},
1427 {0x000081f0, 0x00000000}, 1506 {0x000081f0, 0x00000000},
1428 {0x000081f4, 0x00000000}, 1507 {0x000081f4, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index 7c38229ba670..716db414c258 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -347,6 +347,10 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
347 (((Y[6] - Y[3]) * 1 << scale_factor) + 347 (((Y[6] - Y[3]) * 1 << scale_factor) +
348 (x_est[6] - x_est[3])) / (x_est[6] - x_est[3]); 348 (x_est[6] - x_est[3])) / (x_est[6] - x_est[3]);
349 349
350 /* prevent division by zero */
351 if (G_fxp == 0)
352 return false;
353
350 Y_intercept = 354 Y_intercept =
351 (G_fxp * (x_est[0] - x_est[3]) + 355 (G_fxp * (x_est[0] - x_est[3]) +
352 (1 << scale_factor)) / (1 << scale_factor) + Y[3]; 356 (1 << scale_factor)) / (1 << scale_factor) + Y[3];
@@ -356,14 +360,12 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
356 360
357 for (i = 0; i <= 3; i++) { 361 for (i = 0; i <= 3; i++) {
358 y_est[i] = i * 32; 362 y_est[i] = i * 32;
359
360 /* prevent division by zero */
361 if (G_fxp == 0)
362 return false;
363
364 x_est[i] = ((y_est[i] * 1 << scale_factor) + G_fxp) / G_fxp; 363 x_est[i] = ((y_est[i] * 1 << scale_factor) + G_fxp) / G_fxp;
365 } 364 }
366 365
366 if (y_est[max_index] == 0)
367 return false;
368
367 x_est_fxp1_nonlin = 369 x_est_fxp1_nonlin =
368 x_est[max_index] - ((1 << scale_factor) * y_est[max_index] + 370 x_est[max_index] - ((1 << scale_factor) * y_est[max_index] +
369 G_fxp) / G_fxp; 371 G_fxp) / G_fxp;
@@ -457,6 +459,8 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
457 459
458 Q_scale_B = find_proper_scale(find_expn(abs(scale_B)), 10); 460 Q_scale_B = find_proper_scale(find_expn(abs(scale_B)), 10);
459 scale_B = scale_B / (1 << Q_scale_B); 461 scale_B = scale_B / (1 << Q_scale_B);
462 if (scale_B == 0)
463 return false;
460 Q_beta = find_proper_scale(find_expn(abs(beta_raw)), 10); 464 Q_beta = find_proper_scale(find_expn(abs(beta_raw)), 10);
461 Q_alpha = find_proper_scale(find_expn(abs(alpha_raw)), 10); 465 Q_alpha = find_proper_scale(find_expn(abs(alpha_raw)), 10);
462 beta_raw = beta_raw / (1 << Q_beta); 466 beta_raw = beta_raw / (1 << Q_beta);
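All three hunks above follow one rule: check a divisor before its first use, not after. The G_fxp == 0 test used to sit inside a loop well after G_fxp was computed; hoisting it to right after the computation makes the guard unconditional, and the new y_est[max_index] and scale_B checks guard later divisions that previously had no zero check at all. Reduced to a sketch with a hypothetical helper:

/* corrected ordering: reject the degenerate input up front */
static bool fixed_point_div(int num, int den, int *quot)
{
        if (den == 0)           /* guard first, divide second */
                return false;
        *quot = num / den;
        return true;
}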
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 4ed010d4ef96..19891e7d49ae 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -370,7 +370,7 @@ void ath_beacon_tasklet(unsigned long data)
370 ath_print(common, ATH_DBG_BSTUCK, 370 ath_print(common, ATH_DBG_BSTUCK,
371 "beacon is officially stuck\n"); 371 "beacon is officially stuck\n");
372 sc->sc_flags |= SC_OP_TSF_RESET; 372 sc->sc_flags |= SC_OP_TSF_RESET;
373 ath_reset(sc, false); 373 ath_reset(sc, true);
374 } 374 }
375 375
376 return; 376 return;
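This hunk and the three later ath9k ones (two in main.c, one in xmit.c) all flip the second argument of ath_reset() from false to true. That parameter is retry_tx: when set, frames still sitting in the hardware queues at reset time are re-queued afterwards instead of being completed as dropped. For resets that are recovery actions, a stuck beacon here, a failed hardware check, a fatal interrupt, or a hung tx queue, retrying the pending traffic is the behaviour you want; silently discarding it cost throughput for no benefit.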
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index bc6c4df9712c..95b41db0d86b 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -577,6 +577,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
577 common->hw = sc->hw; 577 common->hw = sc->hw;
578 common->priv = sc; 578 common->priv = sc;
579 common->debug_mask = ath9k_debug; 579 common->debug_mask = ath9k_debug;
580 spin_lock_init(&common->cc_lock);
580 581
581 spin_lock_init(&sc->wiphy_lock); 582 spin_lock_init(&sc->wiphy_lock);
582 spin_lock_init(&sc->sc_resetlock); 583 spin_lock_init(&sc->sc_resetlock);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 3ff0e476c2b3..c6ec800d7a6b 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -182,6 +182,9 @@ static void ath_update_survey_stats(struct ath_softc *sc)
182 struct ath_cycle_counters *cc = &common->cc_survey; 182 struct ath_cycle_counters *cc = &common->cc_survey;
183 unsigned int div = common->clockrate * 1000; 183 unsigned int div = common->clockrate * 1000;
184 184
185 if (!ah->curchan)
186 return;
187
185 if (ah->power_mode == ATH9K_PM_AWAKE) 188 if (ah->power_mode == ATH9K_PM_AWAKE)
186 ath_hw_cycle_counters_update(common); 189 ath_hw_cycle_counters_update(common);
187 190
@@ -577,7 +580,7 @@ void ath_hw_check(struct work_struct *work)
577 580
578 msleep(1); 581 msleep(1);
579 } 582 }
580 ath_reset(sc, false); 583 ath_reset(sc, true);
581 584
582out: 585out:
583 ath9k_ps_restore(sc); 586 ath9k_ps_restore(sc);
@@ -595,7 +598,7 @@ void ath9k_tasklet(unsigned long data)
595 ath9k_ps_wakeup(sc); 598 ath9k_ps_wakeup(sc);
596 599
597 if (status & ATH9K_INT_FATAL) { 600 if (status & ATH9K_INT_FATAL) {
598 ath_reset(sc, false); 601 ath_reset(sc, true);
599 ath9k_ps_restore(sc); 602 ath9k_ps_restore(sc);
600 return; 603 return;
601 } 604 }
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index d077186da870..30ef2dfc1ed2 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -673,6 +673,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
673 u16 aggr_limit = 0, al = 0, bpad = 0, 673 u16 aggr_limit = 0, al = 0, bpad = 0,
674 al_delta, h_baw = tid->baw_size / 2; 674 al_delta, h_baw = tid->baw_size / 2;
675 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE; 675 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
676 struct ieee80211_tx_info *tx_info;
676 677
677 bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list); 678 bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
678 679
@@ -699,6 +700,11 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
699 break; 700 break;
700 } 701 }
701 702
703 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
704 if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
705 !(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
706 break;
707
702 /* do not exceed subframe limit */ 708 /* do not exceed subframe limit */
703 if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) { 709 if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
704 status = ATH_AGGR_LIMITED; 710 status = ATH_AGGR_LIMITED;
@@ -2157,7 +2163,7 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
2157 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET, 2163 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2158 "tx hung, resetting the chip\n"); 2164 "tx hung, resetting the chip\n");
2159 ath9k_ps_wakeup(sc); 2165 ath9k_ps_wakeup(sc);
2160 ath_reset(sc, false); 2166 ath_reset(sc, true);
2161 ath9k_ps_restore(sc); 2167 ath9k_ps_restore(sc);
2162 } 2168 }
2163 2169
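The new test in ath_tx_form_aggr() stops collecting subframes as soon as it meets a frame that is either a rate-control probe or not marked with an MCS rate. The rationale follows from how A-MPDU works: a probe frame has to be transmitted on its own so its tx status can be attributed to the probed rate, and legacy (non-MCS) rates are simply not valid inside an aggregate. The nframes guard means the check only fires once at least one frame has already been collected, so such a frame is still transmitted; it just is not appended to an aggregate being formed.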
diff --git a/drivers/net/wireless/ath/carl9170/cmd.h b/drivers/net/wireless/ath/carl9170/cmd.h
index f78728c38294..568174c71b94 100644
--- a/drivers/net/wireless/ath/carl9170/cmd.h
+++ b/drivers/net/wireless/ath/carl9170/cmd.h
@@ -116,8 +116,9 @@ __regwrite_out : \
116} while (0); 116} while (0);
117 117
118 118
119#define carl9170_async_get_buf() \ 119#define carl9170_async_regwrite_get_buf() \
120do { \ 120do { \
121 __nreg = 0; \
121 __cmd = carl9170_cmd_buf(__carl, CARL9170_CMD_WREG_ASYNC, \ 122 __cmd = carl9170_cmd_buf(__carl, CARL9170_CMD_WREG_ASYNC, \
122 CARL9170_MAX_CMD_PAYLOAD_LEN); \ 123 CARL9170_MAX_CMD_PAYLOAD_LEN); \
123 if (__cmd == NULL) { \ 124 if (__cmd == NULL) { \
@@ -128,38 +129,42 @@ do { \
128 129
129#define carl9170_async_regwrite_begin(carl) \ 130#define carl9170_async_regwrite_begin(carl) \
130do { \ 131do { \
131 int __nreg = 0, __err = 0; \
132 struct ar9170 *__carl = carl; \ 132 struct ar9170 *__carl = carl; \
133 struct carl9170_cmd *__cmd; \ 133 struct carl9170_cmd *__cmd; \
134 carl9170_async_get_buf(); \ 134 unsigned int __nreg; \
135 int __err = 0; \
136 carl9170_async_regwrite_get_buf(); \
137
138#define carl9170_async_regwrite_flush() \
139do { \
140 if (__cmd == NULL || __nreg == 0) \
141 break; \
142 \
143 if (IS_ACCEPTING_CMD(__carl) && __nreg) { \
144 __cmd->hdr.len = 8 * __nreg; \
145 __err = __carl9170_exec_cmd(__carl, __cmd, true); \
146 __cmd = NULL; \
147 break; \
148 } \
149 goto __async_regwrite_out; \
150} while (0)
135 151
136#define carl9170_async_regwrite(r, v) do { \ 152#define carl9170_async_regwrite(r, v) do { \
153 if (__cmd == NULL) \
154 carl9170_async_regwrite_get_buf(); \
137 __cmd->wreg.regs[__nreg].addr = cpu_to_le32(r); \ 155 __cmd->wreg.regs[__nreg].addr = cpu_to_le32(r); \
138 __cmd->wreg.regs[__nreg].val = cpu_to_le32(v); \ 156 __cmd->wreg.regs[__nreg].val = cpu_to_le32(v); \
139 __nreg++; \ 157 __nreg++; \
140 if ((__nreg >= PAYLOAD_MAX/2)) { \ 158 if ((__nreg >= PAYLOAD_MAX / 2)) \
141 if (IS_ACCEPTING_CMD(__carl)) { \ 159 carl9170_async_regwrite_flush(); \
142 __cmd->hdr.len = 8 * __nreg; \
143 __err = __carl9170_exec_cmd(__carl, __cmd, true);\
144 __cmd = NULL; \
145 carl9170_async_get_buf(); \
146 } else { \
147 goto __async_regwrite_out; \
148 } \
149 __nreg = 0; \
150 if (__err) \
151 goto __async_regwrite_out; \
152 } \
153} while (0) 160} while (0)
154 161
155#define carl9170_async_regwrite_finish() \ 162#define carl9170_async_regwrite_finish() do { \
156__async_regwrite_out : \ 163__async_regwrite_out : \
157 if (__err == 0 && __nreg) { \ 164 if (__cmd != NULL && __err == 0) \
158 __cmd->hdr.len = 8 * __nreg; \ 165 carl9170_async_regwrite_flush(); \
159 if (IS_ACCEPTING_CMD(__carl)) \ 166 kfree(__cmd); \
160 __err = __carl9170_exec_cmd(__carl, __cmd, true);\ 167} while (0) \
161 __nreg = 0; \
162 }
163 168
164#define carl9170_async_regwrite_result() \ 169#define carl9170_async_regwrite_result() \
165 __err; \ 170 __err; \
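The rework changes the buffer lifecycle of the async regwrite machinery: _begin() allocates the first payload buffer, a flush hands __cmd to the USB layer and NULLs it, carl9170_async_regwrite() re-allocates on demand after a flush, and _finish() kfree()s whatever is left, so no exit path leaks __cmd. For reference, the calling protocol the macros assume looks like the sketch below; the register addresses are placeholders, not real AR9170 registers:

static int example_write_two_regs(struct ar9170 *ar)
{
        int err;

        carl9170_async_regwrite_begin(ar);      /* opens the block, allocs buffer */
        carl9170_async_regwrite(0x1c3500, 0x1); /* queued into __cmd */
        carl9170_async_regwrite(0x1c3504, 0x2); /* auto-flush at PAYLOAD_MAX/2 */
        carl9170_async_regwrite_finish();       /* flush remainder, free buffer */
        err = carl9170_async_regwrite_result(); /* also closes the block */
        return err;
}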
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 3cc99f3f7ab5..980ae70ea424 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -639,8 +639,8 @@ init:
639 if (err) 639 if (err)
640 goto unlock; 640 goto unlock;
641 } else { 641 } else {
642 err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
643 rcu_read_unlock(); 642 rcu_read_unlock();
643 err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
644 644
645 if (err) 645 if (err)
646 goto unlock; 646 goto unlock;
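The reorder above moves rcu_read_unlock() ahead of carl9170_mod_virtual_mac(), which issues a firmware command and may sleep; sleeping inside an RCU read-side critical section is not allowed. The general shape, sketched (illustrative; once the RCU section ends, the vif's lifetime has to be guaranteed by some other means, here the locking already held by the caller):

    struct ieee80211_vif *vif;
    u8 addr[ETH_ALEN];
    int err;

    rcu_read_lock();
    vif = rcu_dereference(ar->vif_priv[vif_id].vif);
    memcpy(addr, vif->addr, ETH_ALEN);          /* copy what is needed */
    rcu_read_unlock();

    err = send_fw_command(ar, vif_id, addr);    /* may sleep: must run
                                                   outside the RCU section */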
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index c7f6193934ea..d8607f4c144d 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -591,16 +591,23 @@ int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd,
591 const bool free_buf) 591 const bool free_buf)
592{ 592{
593 struct urb *urb; 593 struct urb *urb;
594 int err = 0;
594 595
595 if (!IS_INITIALIZED(ar)) 596 if (!IS_INITIALIZED(ar)) {
596 return -EPERM; 597 err = -EPERM;
598 goto err_free;
599 }
597 600
598 if (WARN_ON(cmd->hdr.len > CARL9170_MAX_CMD_LEN - 4)) 601 if (WARN_ON(cmd->hdr.len > CARL9170_MAX_CMD_LEN - 4)) {
599 return -EINVAL; 602 err = -EINVAL;
603 goto err_free;
604 }
600 605
601 urb = usb_alloc_urb(0, GFP_ATOMIC); 606 urb = usb_alloc_urb(0, GFP_ATOMIC);
602 if (!urb) 607 if (!urb) {
603 return -ENOMEM; 608 err = -ENOMEM;
609 goto err_free;
610 }
604 611
605 usb_fill_int_urb(urb, ar->udev, usb_sndintpipe(ar->udev, 612 usb_fill_int_urb(urb, ar->udev, usb_sndintpipe(ar->udev,
606 AR9170_USB_EP_CMD), cmd, cmd->hdr.len + 4, 613 AR9170_USB_EP_CMD), cmd, cmd->hdr.len + 4,
@@ -613,6 +620,12 @@ int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd,
613 usb_free_urb(urb); 620 usb_free_urb(urb);
614 621
615 return carl9170_usb_submit_cmd_urb(ar); 622 return carl9170_usb_submit_cmd_urb(ar);
623
624err_free:
625 if (free_buf)
626 kfree(cmd);
627
628 return err;
616} 629}
617 630
618int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd, 631int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd,
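The change above makes the early exits honour the free_buf contract: when the caller hands over ownership of cmd, every failure must release it, and funnelling all failures through one err_free label keeps that rule in a single place. The idiom in isolation, with hypothetical names:

    int send_cmd(struct dev *d, struct cmd *cmd, bool free_buf)
    {
        int err = 0;

        if (!device_ready(d)) {
            err = -EPERM;
            goto err_free;
        }
        if (cmd->len > MAX_LEN) {
            err = -EINVAL;
            goto err_free;
        }

        return queue_cmd(d, cmd);   /* success: hardware owns cmd now */

    err_free:
        if (free_buf)
            kfree(cmd);             /* we own it; the caller expects it
                                       gone on any error */
        return err;
    }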
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index dfec5496055e..e0f2d122e124 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -2964,7 +2964,7 @@ static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
2964 (2 - i)); 2964 (2 - i));
2965 } 2965 }
2966 2966
2967 for (j = 0; i < 4; j++) { 2967 for (j = 0; j < 4; j++) {
2968 if (j < 3) { 2968 if (j < 3) {
2969 cur_lna = lna[j]; 2969 cur_lna = lna[j];
2970 cur_hpf1 = hpf1[j]; 2970 cur_hpf1 = hpf1[j];
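The one-character fix above is a classic stale-index bug: the condition tested i, left over from an earlier loop, while the body incremented j, so the loop either never ran or never terminated while j walked off the end of the arrays. Reduced to its essence (do_work() stands in for the body):

    int i, j;

    for (i = 0; i < 2; i++)
        ;                       /* leaves i == 2 */

    for (j = 0; i < 4; j++)     /* BUG: tests i, steps j -- the condition
                                   never changes, so j runs unbounded */
        do_work(j);

    for (j = 0; j < 4; j++)     /* fixed: test the variable being stepped */
        do_work(j);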
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index db57aea629d9..2b078a995729 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -1227,7 +1227,8 @@ static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1227 struct ieee80211_tx_info *info; 1227 struct ieee80211_tx_info *info;
1228 1228
1229 if (unlikely(!agg->wait_for_ba)) { 1229 if (unlikely(!agg->wait_for_ba)) {
1230 IWL_ERR(priv, "Received BA when not expected\n"); 1230 if (unlikely(ba_resp->bitmap))
1231 IWL_ERR(priv, "Received BA when not expected\n");
1231 return -EINVAL; 1232 return -EINVAL;
1232 } 1233 }
1233 1234
diff --git a/drivers/net/wireless/wl1251/Makefile b/drivers/net/wireless/wl1251/Makefile
index 4fe246824db3..58b4f935a3f6 100644
--- a/drivers/net/wireless/wl1251/Makefile
+++ b/drivers/net/wireless/wl1251/Makefile
@@ -1,6 +1,8 @@
1wl1251-objs = main.o event.o tx.o rx.o ps.o cmd.o \ 1wl1251-objs = main.o event.o tx.o rx.o ps.o cmd.o \
2 acx.o boot.o init.o debugfs.o io.o 2 acx.o boot.o init.o debugfs.o io.o
3wl1251_spi-objs += spi.o
4wl1251_sdio-objs += sdio.o
3 5
4obj-$(CONFIG_WL1251) += wl1251.o 6obj-$(CONFIG_WL1251) += wl1251.o
5obj-$(CONFIG_WL1251_SPI) += spi.o 7obj-$(CONFIG_WL1251_SPI) += wl1251_spi.o
6obj-$(CONFIG_WL1251_SDIO) += sdio.o 8obj-$(CONFIG_WL1251_SDIO) += wl1251_sdio.o
diff --git a/include/linux/connector.h b/include/linux/connector.h
index 3a779ffba60b..7e8ca75d2dad 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -88,12 +88,6 @@ struct cn_queue_dev {
88 unsigned char name[CN_CBQ_NAMELEN]; 88 unsigned char name[CN_CBQ_NAMELEN];
89 89
90 struct workqueue_struct *cn_queue; 90 struct workqueue_struct *cn_queue;
91 /* Sent to kevent to create cn_queue only when needed */
92 struct work_struct wq_creation;
93 /* Tell if the wq_creation job is pending/completed */
94 atomic_t wq_requested;
95 /* Wait for cn_queue to be created */
96 wait_queue_head_t wq_created;
97 91
98 struct list_head queue_list; 92 struct list_head queue_list;
99 spinlock_t queue_lock; 93 spinlock_t queue_lock;
@@ -141,8 +135,6 @@ int cn_netlink_send(struct cn_msg *, u32, gfp_t);
141int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(struct cn_msg *, struct netlink_skb_parms *)); 135int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
142void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id); 136void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
143 137
144int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work);
145
146struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *); 138struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *);
147void cn_queue_free_dev(struct cn_queue_dev *dev); 139void cn_queue_free_dev(struct cn_queue_dev *dev);
148 140
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index fcd3dda86322..072652d94d9f 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -585,15 +585,15 @@ static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
585 table->ents[hash & table->mask] = RPS_NO_CPU; 585 table->ents[hash & table->mask] = RPS_NO_CPU;
586} 586}
587 587
588extern struct rps_sock_flow_table *rps_sock_flow_table; 588extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
589 589
590/* This structure contains an instance of an RX queue. */ 590/* This structure contains an instance of an RX queue. */
591struct netdev_rx_queue { 591struct netdev_rx_queue {
592 struct rps_map *rps_map; 592 struct rps_map __rcu *rps_map;
593 struct rps_dev_flow_table *rps_flow_table; 593 struct rps_dev_flow_table __rcu *rps_flow_table;
594 struct kobject kobj; 594 struct kobject kobj;
595 struct netdev_rx_queue *first; 595 struct netdev_rx_queue *first;
596 atomic_t count; 596 atomic_t count;
597} ____cacheline_aligned_in_smp; 597} ____cacheline_aligned_in_smp;
598#endif /* CONFIG_RPS */ 598#endif /* CONFIG_RPS */
599 599
@@ -944,7 +944,7 @@ struct net_device {
944 /* Protocol specific pointers */ 944 /* Protocol specific pointers */
945 945
946#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 946#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
947 struct vlan_group *vlgrp; /* VLAN group */ 947 struct vlan_group __rcu *vlgrp; /* VLAN group */
948#endif 948#endif
949#ifdef CONFIG_NET_DSA 949#ifdef CONFIG_NET_DSA
950 void *dsa_ptr; /* dsa specific data */ 950 void *dsa_ptr; /* dsa specific data */
@@ -952,7 +952,7 @@ struct net_device {
952 void *atalk_ptr; /* AppleTalk link */ 952 void *atalk_ptr; /* AppleTalk link */
953 struct in_device __rcu *ip_ptr; /* IPv4 specific data */ 953 struct in_device __rcu *ip_ptr; /* IPv4 specific data */
954 void *dn_ptr; /* DECnet specific data */ 954 void *dn_ptr; /* DECnet specific data */
955 void *ip6_ptr; /* IPv6 specific data */ 955 struct inet6_dev __rcu *ip6_ptr; /* IPv6 specific data */
956 void *ec_ptr; /* Econet specific data */ 956 void *ec_ptr; /* Econet specific data */
957 void *ax25_ptr; /* AX.25 specific data */ 957 void *ax25_ptr; /* AX.25 specific data */
958 struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data, 958 struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data,
@@ -1072,7 +1072,7 @@ struct net_device {
1072 struct pcpu_dstats __percpu *dstats; /* dummy stats */ 1072 struct pcpu_dstats __percpu *dstats; /* dummy stats */
1073 }; 1073 };
1074 /* GARP */ 1074 /* GARP */
1075 struct garp_port *garp_port; 1075 struct garp_port __rcu *garp_port;
1076 1076
1077 /* class/net/name entry */ 1077 /* class/net/name entry */
1078 struct device dev; 1078 struct device dev;
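The annotations above mark pointers that are published with rcu_assign_pointer() and read under rcu_read_lock(); with CONFIG_SPARSE_RCU_POINTER enabled, sparse (make C=1) then warns on any access that bypasses the RCU accessors. The intended usage in miniature (illustrative struct; the deferred free could equally use call_rcu()):

    struct cfg {
        int mtu;
    };

    struct priv {
        struct cfg __rcu *cfg;  /* written under lock, read under RCU */
        spinlock_t lock;
    };

    static int read_mtu(struct priv *p)                 /* reader */
    {
        struct cfg *c;
        int mtu = 0;

        rcu_read_lock();
        c = rcu_dereference(p->cfg);
        if (c)
            mtu = c->mtu;
        rcu_read_unlock();
        return mtu;
    }

    static void set_cfg(struct priv *p, struct cfg *new)  /* updater */
    {
        struct cfg *old;

        spin_lock(&p->lock);
        old = rcu_dereference_protected(p->cfg, lockdep_is_held(&p->lock));
        rcu_assign_pointer(p->cfg, new);
        spin_unlock(&p->lock);

        synchronize_rcu();      /* wait for readers of 'old' to finish */
        kfree(old);
    }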
diff --git a/include/linux/phy.h b/include/linux/phy.h
index a6e047a04f79..7da5fa845959 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -472,11 +472,7 @@ static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val)
472int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id); 472int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id);
473struct phy_device* get_phy_device(struct mii_bus *bus, int addr); 473struct phy_device* get_phy_device(struct mii_bus *bus, int addr);
474int phy_device_register(struct phy_device *phy); 474int phy_device_register(struct phy_device *phy);
475int phy_clear_interrupt(struct phy_device *phydev);
476int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
477int phy_init_hw(struct phy_device *phydev); 475int phy_init_hw(struct phy_device *phydev);
478int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
479 u32 flags, phy_interface_t interface);
480struct phy_device * phy_attach(struct net_device *dev, 476struct phy_device * phy_attach(struct net_device *dev,
481 const char *bus_id, u32 flags, phy_interface_t interface); 477 const char *bus_id, u32 flags, phy_interface_t interface);
482struct phy_device *phy_find_first(struct mii_bus *bus); 478struct phy_device *phy_find_first(struct mii_bus *bus);
@@ -492,17 +488,12 @@ void phy_start(struct phy_device *phydev);
492void phy_stop(struct phy_device *phydev); 488void phy_stop(struct phy_device *phydev);
493int phy_start_aneg(struct phy_device *phydev); 489int phy_start_aneg(struct phy_device *phydev);
494 490
495void phy_sanitize_settings(struct phy_device *phydev);
496int phy_stop_interrupts(struct phy_device *phydev); 491int phy_stop_interrupts(struct phy_device *phydev);
497int phy_enable_interrupts(struct phy_device *phydev);
498int phy_disable_interrupts(struct phy_device *phydev);
499 492
500static inline int phy_read_status(struct phy_device *phydev) { 493static inline int phy_read_status(struct phy_device *phydev) {
501 return phydev->drv->read_status(phydev); 494 return phydev->drv->read_status(phydev);
502} 495}
503 496
504int genphy_config_advert(struct phy_device *phydev);
505int genphy_setup_forced(struct phy_device *phydev);
506int genphy_restart_aneg(struct phy_device *phydev); 497int genphy_restart_aneg(struct phy_device *phydev);
507int genphy_config_aneg(struct phy_device *phydev); 498int genphy_config_aneg(struct phy_device *phydev);
508int genphy_update_link(struct phy_device *phydev); 499int genphy_update_link(struct phy_device *phydev);
@@ -511,8 +502,6 @@ int genphy_suspend(struct phy_device *phydev);
511int genphy_resume(struct phy_device *phydev); 502int genphy_resume(struct phy_device *phydev);
512void phy_driver_unregister(struct phy_driver *drv); 503void phy_driver_unregister(struct phy_driver *drv);
513int phy_driver_register(struct phy_driver *new_driver); 504int phy_driver_register(struct phy_driver *new_driver);
514void phy_prepare_link(struct phy_device *phydev,
515 void (*adjust_link)(struct net_device *));
516void phy_state_machine(struct work_struct *work); 505void phy_state_machine(struct work_struct *work);
517void phy_start_machine(struct phy_device *phydev, 506void phy_start_machine(struct phy_device *phydev,
518 void (*handler)(struct net_device *)); 507 void (*handler)(struct net_device *));
@@ -523,7 +512,6 @@ int phy_mii_ioctl(struct phy_device *phydev,
523 struct ifreq *ifr, int cmd); 512 struct ifreq *ifr, int cmd);
524int phy_start_interrupts(struct phy_device *phydev); 513int phy_start_interrupts(struct phy_device *phydev);
525void phy_print_status(struct phy_device *phydev); 514void phy_print_status(struct phy_device *phydev);
526struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id);
527void phy_device_free(struct phy_device *phydev); 515void phy_device_free(struct phy_device *phydev);
528 516
529int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask, 517int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
diff --git a/include/net/caif/caif_shm.h b/include/net/caif/caif_shm.h
new file mode 100644
index 000000000000..5bcce55438cf
--- /dev/null
+++ b/include/net/caif/caif_shm.h
@@ -0,0 +1,26 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
4 * Author: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com
5 * License terms: GNU General Public License (GPL) version 2
6 */
7
8#ifndef CAIF_SHM_H_
9#define CAIF_SHM_H_
10
11struct shmdev_layer {
12 u32 shm_base_addr;
13 u32 shm_total_sz;
14 u32 shm_id;
15 u32 shm_loopback;
16 void *hmbx;
17 int (*pshmdev_mbxsend) (u32 shm_id, u32 mbx_msg);
18 int (*pshmdev_mbxsetup) (void *pshmdrv_cb,
19 struct shmdev_layer *pshm_dev, void *pshm_drv);
20 struct net_device *pshm_netdev;
21};
22
23extern int caif_shmcore_probe(struct shmdev_layer *pshm_dev);
24extern void caif_shmcore_remove(struct net_device *pshm_netdev);
25
26#endif
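The new header is the contract between a platform mailbox driver and the CAIF shared-memory core: the platform side supplies the window geometry and the mailbox callbacks, and the core creates pshm_netdev. A hypothetical platform probe wired to it (MY_SHM_* and my_mailbox_write() are invented for illustration; they are not part of this patch):

    static int my_mbx_send(u32 shm_id, u32 mbx_msg)
    {
        return my_mailbox_write(shm_id, mbx_msg);   /* ring the doorbell */
    }

    static int my_shm_probe(struct platform_device *pdev)
    {
        struct shmdev_layer *pshm_dev;

        pshm_dev = kzalloc(sizeof(*pshm_dev), GFP_KERNEL);
        if (!pshm_dev)
            return -ENOMEM;

        pshm_dev->shm_base_addr   = MY_SHM_BASE;  /* shared window */
        pshm_dev->shm_total_sz    = MY_SHM_SIZE;
        pshm_dev->shm_id          = 0;
        pshm_dev->shm_loopback    = 0;            /* real modem, not self */
        pshm_dev->pshmdev_mbxsend = my_mbx_send;

        return caif_shmcore_probe(pshm_dev);  /* core sets pshm_netdev */
    }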
diff --git a/include/net/dst.h b/include/net/dst.h
index a217c838ec0d..ffe9cb719c0e 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -95,7 +95,7 @@ struct dst_entry {
95 unsigned long lastuse; 95 unsigned long lastuse;
96 union { 96 union {
97 struct dst_entry *next; 97 struct dst_entry *next;
98 struct rtable *rt_next; 98 struct rtable __rcu *rt_next;
99 struct rt6_info *rt6_next; 99 struct rt6_info *rt6_next;
100 struct dn_route *dn_next; 100 struct dn_route *dn_next;
101 }; 101 };
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 106f3097d384..075f1e3a0fed 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -20,7 +20,7 @@ struct fib_rule {
20 u32 table; 20 u32 table;
21 u8 action; 21 u8 action;
22 u32 target; 22 u32 target;
23 struct fib_rule * ctarget; 23 struct fib_rule __rcu *ctarget;
24 char iifname[IFNAMSIZ]; 24 char iifname[IFNAMSIZ];
25 char oifname[IFNAMSIZ]; 25 char oifname[IFNAMSIZ];
26 struct rcu_head rcu; 26 struct rcu_head rcu;
diff --git a/include/net/garp.h b/include/net/garp.h
index 825f172caba9..f4c295984c45 100644
--- a/include/net/garp.h
+++ b/include/net/garp.h
@@ -107,7 +107,7 @@ struct garp_applicant {
107}; 107};
108 108
109struct garp_port { 109struct garp_port {
110 struct garp_applicant *applicants[GARP_APPLICATION_MAX + 1]; 110 struct garp_applicant __rcu *applicants[GARP_APPLICATION_MAX + 1];
111}; 111};
112 112
113extern int garp_register_application(struct garp_application *app); 113extern int garp_register_application(struct garp_application *app);
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 417d0c894f29..fe239bfe5f7f 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -15,7 +15,7 @@
15 15
16struct inet_peer { 16struct inet_peer {
17 /* group together avl_left,avl_right,v4daddr to speedup lookups */ 17 /* group together avl_left,avl_right,v4daddr to speedup lookups */
18 struct inet_peer *avl_left, *avl_right; 18 struct inet_peer __rcu *avl_left, *avl_right;
19 __be32 v4daddr; /* peer's address */ 19 __be32 v4daddr; /* peer's address */
20 __u32 avl_height; 20 __u32 avl_height;
21 struct list_head unused; 21 struct list_head unused;
diff --git a/include/net/ip.h b/include/net/ip.h
index dbee3fe260e1..86e2b182a0c0 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -59,7 +59,7 @@ struct ipcm_cookie {
59#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb)) 59#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
60 60
61struct ip_ra_chain { 61struct ip_ra_chain {
62 struct ip_ra_chain *next; 62 struct ip_ra_chain __rcu *next;
63 struct sock *sk; 63 struct sock *sk;
64 union { 64 union {
65 void (*destructor)(struct sock *); 65 void (*destructor)(struct sock *);
@@ -68,7 +68,7 @@ struct ip_ra_chain {
68 struct rcu_head rcu; 68 struct rcu_head rcu;
69}; 69};
70 70
71extern struct ip_ra_chain *ip_ra_chain; 71extern struct ip_ra_chain __rcu *ip_ra_chain;
72 72
73/* IP flags. */ 73/* IP flags. */
74#define IP_CE 0x8000 /* Flag: "Congestion" */ 74#define IP_CE 0x8000 /* Flag: "Congestion" */
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index fc94ec568a50..fc73e667b50e 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -13,7 +13,7 @@
13/* IPv6 tunnel */ 13/* IPv6 tunnel */
14 14
15struct ip6_tnl { 15struct ip6_tnl {
16 struct ip6_tnl *next; /* next tunnel in list */ 16 struct ip6_tnl __rcu *next; /* next tunnel in list */
17 struct net_device *dev; /* virtual device associated with tunnel */ 17 struct net_device *dev; /* virtual device associated with tunnel */
18 struct ip6_tnl_parm parms; /* tunnel configuration parameters */ 18 struct ip6_tnl_parm parms; /* tunnel configuration parameters */
19 struct flowi fl; /* flowi template for xmit */ 19 struct flowi fl; /* flowi template for xmit */
diff --git a/include/net/ipip.h b/include/net/ipip.h
index 58abbf966b0c..a32654d52730 100644
--- a/include/net/ipip.h
+++ b/include/net/ipip.h
@@ -16,7 +16,7 @@ struct ip_tunnel_6rd_parm {
16}; 16};
17 17
18struct ip_tunnel { 18struct ip_tunnel {
19 struct ip_tunnel *next; 19 struct ip_tunnel __rcu *next;
20 struct net_device *dev; 20 struct net_device *dev;
21 21
22 int err_count; /* Number of arrived ICMP errors */ 22 int err_count; /* Number of arrived ICMP errors */
@@ -34,12 +34,12 @@ struct ip_tunnel {
34#ifdef CONFIG_IPV6_SIT_6RD 34#ifdef CONFIG_IPV6_SIT_6RD
35 struct ip_tunnel_6rd_parm ip6rd; 35 struct ip_tunnel_6rd_parm ip6rd;
36#endif 36#endif
37 struct ip_tunnel_prl_entry *prl; /* potential router list */ 37 struct ip_tunnel_prl_entry __rcu *prl; /* potential router list */
38 unsigned int prl_count; /* # of entries in PRL */ 38 unsigned int prl_count; /* # of entries in PRL */
39}; 39};
40 40
41struct ip_tunnel_prl_entry { 41struct ip_tunnel_prl_entry {
42 struct ip_tunnel_prl_entry *next; 42 struct ip_tunnel_prl_entry __rcu *next;
43 __be32 addr; 43 __be32 addr;
44 u16 flags; 44 u16 flags;
45 struct rcu_head rcu_head; 45 struct rcu_head rcu_head;
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 65af9a07cf76..1bf812b21fb7 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -88,7 +88,7 @@ struct net {
88#ifdef CONFIG_WEXT_CORE 88#ifdef CONFIG_WEXT_CORE
89 struct sk_buff_head wext_nlevents; 89 struct sk_buff_head wext_nlevents;
90#endif 90#endif
91 struct net_generic *gen; 91 struct net_generic __rcu *gen;
92 92
93 /* Note : following structs are cache line aligned */ 93 /* Note : following structs are cache line aligned */
94#ifdef CONFIG_XFRM 94#ifdef CONFIG_XFRM
diff --git a/include/net/protocol.h b/include/net/protocol.h
index f1effdd3c265..dc07495bce4c 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -89,10 +89,10 @@ struct inet_protosw {
89#define INET_PROTOSW_PERMANENT 0x02 /* Permanent protocols are unremovable. */ 89#define INET_PROTOSW_PERMANENT 0x02 /* Permanent protocols are unremovable. */
90#define INET_PROTOSW_ICSK 0x04 /* Is this an inet_connection_sock? */ 90#define INET_PROTOSW_ICSK 0x04 /* Is this an inet_connection_sock? */
91 91
92extern const struct net_protocol *inet_protos[MAX_INET_PROTOS]; 92extern const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS];
93 93
94#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) 94#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
95extern const struct inet6_protocol *inet6_protos[MAX_INET_PROTOS]; 95extern const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS];
96#endif 96#endif
97 97
98extern int inet_add_protocol(const struct net_protocol *prot, unsigned char num); 98extern int inet_add_protocol(const struct net_protocol *prot, unsigned char num);
diff --git a/include/net/sock.h b/include/net/sock.h
index 73a4f9702a65..c7a736228ca2 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -301,7 +301,7 @@ struct sock {
301 const struct cred *sk_peer_cred; 301 const struct cred *sk_peer_cred;
302 long sk_rcvtimeo; 302 long sk_rcvtimeo;
303 long sk_sndtimeo; 303 long sk_sndtimeo;
304 struct sk_filter *sk_filter; 304 struct sk_filter __rcu *sk_filter;
305 void *sk_protinfo; 305 void *sk_protinfo;
306 struct timer_list sk_timer; 306 struct timer_list sk_timer;
307 ktime_t sk_stamp; 307 ktime_t sk_stamp;
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index f28d7c9b9f8d..bcfb6b24b019 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1264,7 +1264,7 @@ struct xfrm_tunnel {
1264 int (*handler)(struct sk_buff *skb); 1264 int (*handler)(struct sk_buff *skb);
1265 int (*err_handler)(struct sk_buff *skb, u32 info); 1265 int (*err_handler)(struct sk_buff *skb, u32 info);
1266 1266
1267 struct xfrm_tunnel *next; 1267 struct xfrm_tunnel __rcu *next;
1268 int priority; 1268 int priority;
1269}; 1269};
1270 1270
@@ -1272,7 +1272,7 @@ struct xfrm6_tunnel {
1272 int (*handler)(struct sk_buff *skb); 1272 int (*handler)(struct sk_buff *skb);
1273 int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt, 1273 int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
1274 u8 type, u8 code, int offset, __be32 info); 1274 u8 type, u8 code, int offset, __be32 info);
1275 struct xfrm6_tunnel *next; 1275 struct xfrm6_tunnel __rcu *next;
1276 int priority; 1276 int priority;
1277}; 1277};
1278 1278
diff --git a/net/802/garp.c b/net/802/garp.c
index 941f2a324d3a..c1df2dad8c6b 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -346,8 +346,8 @@ int garp_request_join(const struct net_device *dev,
346 const struct garp_application *appl, 346 const struct garp_application *appl,
347 const void *data, u8 len, u8 type) 347 const void *data, u8 len, u8 type)
348{ 348{
349 struct garp_port *port = dev->garp_port; 349 struct garp_port *port = rtnl_dereference(dev->garp_port);
350 struct garp_applicant *app = port->applicants[appl->type]; 350 struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]);
351 struct garp_attr *attr; 351 struct garp_attr *attr;
352 352
353 spin_lock_bh(&app->lock); 353 spin_lock_bh(&app->lock);
@@ -366,8 +366,8 @@ void garp_request_leave(const struct net_device *dev,
366 const struct garp_application *appl, 366 const struct garp_application *appl,
367 const void *data, u8 len, u8 type) 367 const void *data, u8 len, u8 type)
368{ 368{
369 struct garp_port *port = dev->garp_port; 369 struct garp_port *port = rtnl_dereference(dev->garp_port);
370 struct garp_applicant *app = port->applicants[appl->type]; 370 struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]);
371 struct garp_attr *attr; 371 struct garp_attr *attr;
372 372
373 spin_lock_bh(&app->lock); 373 spin_lock_bh(&app->lock);
@@ -546,11 +546,11 @@ static int garp_init_port(struct net_device *dev)
546 546
547static void garp_release_port(struct net_device *dev) 547static void garp_release_port(struct net_device *dev)
548{ 548{
549 struct garp_port *port = dev->garp_port; 549 struct garp_port *port = rtnl_dereference(dev->garp_port);
550 unsigned int i; 550 unsigned int i;
551 551
552 for (i = 0; i <= GARP_APPLICATION_MAX; i++) { 552 for (i = 0; i <= GARP_APPLICATION_MAX; i++) {
553 if (port->applicants[i]) 553 if (rtnl_dereference(port->applicants[i]))
554 return; 554 return;
555 } 555 }
556 rcu_assign_pointer(dev->garp_port, NULL); 556 rcu_assign_pointer(dev->garp_port, NULL);
@@ -565,7 +565,7 @@ int garp_init_applicant(struct net_device *dev, struct garp_application *appl)
565 565
566 ASSERT_RTNL(); 566 ASSERT_RTNL();
567 567
568 if (!dev->garp_port) { 568 if (!rtnl_dereference(dev->garp_port)) {
569 err = garp_init_port(dev); 569 err = garp_init_port(dev);
570 if (err < 0) 570 if (err < 0)
571 goto err1; 571 goto err1;
@@ -601,8 +601,8 @@ EXPORT_SYMBOL_GPL(garp_init_applicant);
601 601
602void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl) 602void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl)
603{ 603{
604 struct garp_port *port = dev->garp_port; 604 struct garp_port *port = rtnl_dereference(dev->garp_port);
605 struct garp_applicant *app = port->applicants[appl->type]; 605 struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]);
606 606
607 ASSERT_RTNL(); 607 ASSERT_RTNL();
608 608
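rtnl_dereference(p) is shorthand for rcu_dereference_protected(p, lockdep_rtnl_is_held()): it asserts that the caller holds RTNL, the only context that writes these pointers, and therefore needs no rcu_read_lock(). The update-side pattern the hunk relies on, sketched in the shape of garp_release_port():

    static void release_port(struct net_device *dev)
    {
        struct garp_port *port;

        ASSERT_RTNL();                            /* writer context */
        port = rtnl_dereference(dev->garp_port);
        rcu_assign_pointer(dev->garp_port, NULL);
        synchronize_rcu();                        /* flush RCU readers */
        kfree(port);
    }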
diff --git a/net/802/stp.c b/net/802/stp.c
index 53c8f77f0ccd..978c30b1b36b 100644
--- a/net/802/stp.c
+++ b/net/802/stp.c
@@ -21,8 +21,8 @@
21#define GARP_ADDR_MAX 0x2F 21#define GARP_ADDR_MAX 0x2F
22#define GARP_ADDR_RANGE (GARP_ADDR_MAX - GARP_ADDR_MIN) 22#define GARP_ADDR_RANGE (GARP_ADDR_MAX - GARP_ADDR_MIN)
23 23
24static const struct stp_proto *garp_protos[GARP_ADDR_RANGE + 1] __read_mostly; 24static const struct stp_proto __rcu *garp_protos[GARP_ADDR_RANGE + 1] __read_mostly;
25static const struct stp_proto *stp_proto __read_mostly; 25static const struct stp_proto __rcu *stp_proto __read_mostly;
26 26
27static struct llc_sap *sap __read_mostly; 27static struct llc_sap *sap __read_mostly;
28static unsigned int sap_registered; 28static unsigned int sap_registered;
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 05b867e43757..52077ca22072 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -112,7 +112,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
112 112
113 ASSERT_RTNL(); 113 ASSERT_RTNL();
114 114
115 grp = real_dev->vlgrp; 115 grp = rtnl_dereference(real_dev->vlgrp);
116 BUG_ON(!grp); 116 BUG_ON(!grp);
117 117
118 /* Take it out of our own structures, but be sure to interlock with 118 /* Take it out of our own structures, but be sure to interlock with
@@ -177,7 +177,7 @@ int register_vlan_dev(struct net_device *dev)
177 struct vlan_group *grp, *ngrp = NULL; 177 struct vlan_group *grp, *ngrp = NULL;
178 int err; 178 int err;
179 179
180 grp = real_dev->vlgrp; 180 grp = rtnl_dereference(real_dev->vlgrp);
181 if (!grp) { 181 if (!grp) {
182 ngrp = grp = vlan_group_alloc(real_dev); 182 ngrp = grp = vlan_group_alloc(real_dev);
183 if (!grp) 183 if (!grp)
@@ -385,7 +385,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
385 dev->netdev_ops->ndo_vlan_rx_add_vid(dev, 0); 385 dev->netdev_ops->ndo_vlan_rx_add_vid(dev, 0);
386 } 386 }
387 387
388 grp = dev->vlgrp; 388 grp = rtnl_dereference(dev->vlgrp);
389 if (!grp) 389 if (!grp)
390 goto out; 390 goto out;
391 391
diff --git a/net/core/dev.c b/net/core/dev.c
index 78b5a89b0f40..35dfb8318483 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1685,10 +1685,10 @@ EXPORT_SYMBOL(netif_device_attach);
1685 1685
1686static bool can_checksum_protocol(unsigned long features, __be16 protocol) 1686static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1687{ 1687{
1688 return ((features & NETIF_F_GEN_CSUM) || 1688 return ((features & NETIF_F_NO_CSUM) ||
1689 ((features & NETIF_F_IP_CSUM) && 1689 ((features & NETIF_F_V4_CSUM) &&
1690 protocol == htons(ETH_P_IP)) || 1690 protocol == htons(ETH_P_IP)) ||
1691 ((features & NETIF_F_IPV6_CSUM) && 1691 ((features & NETIF_F_V6_CSUM) &&
1692 protocol == htons(ETH_P_IPV6)) || 1692 protocol == htons(ETH_P_IPV6)) ||
1693 ((features & NETIF_F_FCOE_CRC) && 1693 ((features & NETIF_F_FCOE_CRC) &&
1694 protocol == htons(ETH_P_FCOE))); 1694 protocol == htons(ETH_P_FCOE)));
@@ -1696,22 +1696,18 @@ static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1696 1696
1697static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb) 1697static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1698{ 1698{
1699 __be16 protocol = skb->protocol;
1699 int features = dev->features; 1700 int features = dev->features;
1700 1701
1701 if (vlan_tx_tag_present(skb)) 1702 if (vlan_tx_tag_present(skb)) {
1702 features &= dev->vlan_features; 1703 features &= dev->vlan_features;
1703 1704 } else if (protocol == htons(ETH_P_8021Q)) {
1704 if (can_checksum_protocol(features, skb->protocol))
1705 return true;
1706
1707 if (skb->protocol == htons(ETH_P_8021Q)) {
1708 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; 1705 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1709 if (can_checksum_protocol(dev->features & dev->vlan_features, 1706 protocol = veh->h_vlan_encapsulated_proto;
1710 veh->h_vlan_encapsulated_proto)) 1707 features &= dev->vlan_features;
1711 return true;
1712 } 1708 }
1713 1709
1714 return false; 1710 return can_checksum_protocol(features, protocol);
1715} 1711}
1716 1712
1717/** 1713/**
@@ -2213,7 +2209,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2213} 2209}
2214 2210
2215static DEFINE_PER_CPU(int, xmit_recursion); 2211static DEFINE_PER_CPU(int, xmit_recursion);
2216#define RECURSION_LIMIT 3 2212#define RECURSION_LIMIT 10
2217 2213
2218/** 2214/**
2219 * dev_queue_xmit - transmit a buffer 2215 * dev_queue_xmit - transmit a buffer
@@ -2413,7 +2409,7 @@ EXPORT_SYMBOL(__skb_get_rxhash);
2413#ifdef CONFIG_RPS 2409#ifdef CONFIG_RPS
2414 2410
2415/* One global table that all flow-based protocols share. */ 2411/* One global table that all flow-based protocols share. */
2416struct rps_sock_flow_table *rps_sock_flow_table __read_mostly; 2412struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
2417EXPORT_SYMBOL(rps_sock_flow_table); 2413EXPORT_SYMBOL(rps_sock_flow_table);
2418 2414
2419/* 2415/*
@@ -2425,7 +2421,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2425 struct rps_dev_flow **rflowp) 2421 struct rps_dev_flow **rflowp)
2426{ 2422{
2427 struct netdev_rx_queue *rxqueue; 2423 struct netdev_rx_queue *rxqueue;
2428 struct rps_map *map = NULL; 2424 struct rps_map *map;
2429 struct rps_dev_flow_table *flow_table; 2425 struct rps_dev_flow_table *flow_table;
2430 struct rps_sock_flow_table *sock_flow_table; 2426 struct rps_sock_flow_table *sock_flow_table;
2431 int cpu = -1; 2427 int cpu = -1;
@@ -2444,15 +2440,15 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2444 } else 2440 } else
2445 rxqueue = dev->_rx; 2441 rxqueue = dev->_rx;
2446 2442
2447 if (rxqueue->rps_map) { 2443 map = rcu_dereference(rxqueue->rps_map);
2448 map = rcu_dereference(rxqueue->rps_map); 2444 if (map) {
2449 if (map && map->len == 1) { 2445 if (map->len == 1) {
2450 tcpu = map->cpus[0]; 2446 tcpu = map->cpus[0];
2451 if (cpu_online(tcpu)) 2447 if (cpu_online(tcpu))
2452 cpu = tcpu; 2448 cpu = tcpu;
2453 goto done; 2449 goto done;
2454 } 2450 }
2455 } else if (!rxqueue->rps_flow_table) { 2451 } else if (!rcu_dereference_raw(rxqueue->rps_flow_table)) {
2456 goto done; 2452 goto done;
2457 } 2453 }
2458 2454
@@ -5416,7 +5412,7 @@ void netdev_run_todo(void)
5416 /* paranoia */ 5412 /* paranoia */
5417 BUG_ON(netdev_refcnt_read(dev)); 5413 BUG_ON(netdev_refcnt_read(dev));
5418 WARN_ON(rcu_dereference_raw(dev->ip_ptr)); 5414 WARN_ON(rcu_dereference_raw(dev->ip_ptr));
5419 WARN_ON(dev->ip6_ptr); 5415 WARN_ON(rcu_dereference_raw(dev->ip6_ptr));
5420 WARN_ON(dev->dn_ptr); 5416 WARN_ON(dev->dn_ptr);
5421 5417
5422 if (dev->destructor) 5418 if (dev->destructor)
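Besides the checksum rework and the RPS accessor fixes, the dev.c hunks raise RECURSION_LIMIT from 3 to 10: each stacked software device (VLAN over bridge over bond, tunnels, and so on) re-enters the transmit path on the same CPU, and three levels proved too tight for legitimate setups. The guard itself is a per-CPU counter around the hard transmit; roughly:

    static DEFINE_PER_CPU(int, xmit_recursion);
    #define RECURSION_LIMIT 10

    /* inside the transmit path, simplified */
    if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
        goto recursion_alert;   /* device loop or absurdly deep stack:
                                   drop rather than overflow the stack */
    __this_cpu_inc(xmit_recursion);
    rc = dev_hard_start_xmit(skb, dev, txq);
    __this_cpu_dec(xmit_recursion);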
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 1bc3f253ba6c..82a4369ae150 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -351,12 +351,12 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
351 351
352 list_for_each_entry(r, &ops->rules_list, list) { 352 list_for_each_entry(r, &ops->rules_list, list) {
353 if (r->pref == rule->target) { 353 if (r->pref == rule->target) {
354 rule->ctarget = r; 354 RCU_INIT_POINTER(rule->ctarget, r);
355 break; 355 break;
356 } 356 }
357 } 357 }
358 358
359 if (rule->ctarget == NULL) 359 if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
360 unresolved = 1; 360 unresolved = 1;
361 } else if (rule->action == FR_ACT_GOTO) 361 } else if (rule->action == FR_ACT_GOTO)
362 goto errout_free; 362 goto errout_free;
@@ -373,6 +373,11 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
373 373
374 fib_rule_get(rule); 374 fib_rule_get(rule);
375 375
376 if (last)
377 list_add_rcu(&rule->list, &last->list);
378 else
379 list_add_rcu(&rule->list, &ops->rules_list);
380
376 if (ops->unresolved_rules) { 381 if (ops->unresolved_rules) {
377 /* 382 /*
378 * There are unresolved goto rules in the list, check if 383 * There are unresolved goto rules in the list, check if
@@ -381,7 +386,7 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
381 list_for_each_entry(r, &ops->rules_list, list) { 386 list_for_each_entry(r, &ops->rules_list, list) {
382 if (r->action == FR_ACT_GOTO && 387 if (r->action == FR_ACT_GOTO &&
383 r->target == rule->pref) { 388 r->target == rule->pref) {
384 BUG_ON(r->ctarget != NULL); 389 BUG_ON(rtnl_dereference(r->ctarget) != NULL);
385 rcu_assign_pointer(r->ctarget, rule); 390 rcu_assign_pointer(r->ctarget, rule);
386 if (--ops->unresolved_rules == 0) 391 if (--ops->unresolved_rules == 0)
387 break; 392 break;
@@ -395,11 +400,6 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
395 if (unresolved) 400 if (unresolved)
396 ops->unresolved_rules++; 401 ops->unresolved_rules++;
397 402
398 if (last)
399 list_add_rcu(&rule->list, &last->list);
400 else
401 list_add_rcu(&rule->list, &ops->rules_list);
402
403 notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid); 403 notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid);
404 flush_route_cache(ops); 404 flush_route_cache(ops);
405 rules_ops_put(ops); 405 rules_ops_put(ops);
@@ -487,7 +487,7 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
487 */ 487 */
488 if (ops->nr_goto_rules > 0) { 488 if (ops->nr_goto_rules > 0) {
489 list_for_each_entry(tmp, &ops->rules_list, list) { 489 list_for_each_entry(tmp, &ops->rules_list, list) {
490 if (tmp->ctarget == rule) { 490 if (rtnl_dereference(tmp->ctarget) == rule) {
491 rcu_assign_pointer(tmp->ctarget, NULL); 491 rcu_assign_pointer(tmp->ctarget, NULL);
492 ops->unresolved_rules++; 492 ops->unresolved_rules++;
493 } 493 }
@@ -545,7 +545,8 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
545 frh->action = rule->action; 545 frh->action = rule->action;
546 frh->flags = rule->flags; 546 frh->flags = rule->flags;
547 547
548 if (rule->action == FR_ACT_GOTO && rule->ctarget == NULL) 548 if (rule->action == FR_ACT_GOTO &&
549 rcu_dereference_raw(rule->ctarget) == NULL)
549 frh->flags |= FIB_RULE_UNRESOLVED; 550 frh->flags |= FIB_RULE_UNRESOLVED;
550 551
551 if (rule->iifname[0]) { 552 if (rule->iifname[0]) {
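The reordering above is a publish-before-link fix: the new rule goes onto ops->rules_list via list_add_rcu() before any other rule's ctarget is resolved onto it, so a rule is never reachable through ctarget without also being reachable through the list itself. The invariant, sketched:

    /* 1. publish: list_add_rcu() carries the barrier that makes the
          fully initialised rule visible to readers */
    list_add_rcu(&rule->list, &ops->rules_list);

    /* 2. only then let pending gotos point at it */
    list_for_each_entry(r, &ops->rules_list, list)
        if (r->action == FR_ACT_GOTO && r->target == rule->pref)
            rcu_assign_pointer(r->ctarget, rule);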
diff --git a/net/core/filter.c b/net/core/filter.c
index 7adf50352918..7beaec36b541 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -89,8 +89,8 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
89 rcu_read_lock_bh(); 89 rcu_read_lock_bh();
90 filter = rcu_dereference_bh(sk->sk_filter); 90 filter = rcu_dereference_bh(sk->sk_filter);
91 if (filter) { 91 if (filter) {
92 unsigned int pkt_len = sk_run_filter(skb, filter->insns, 92 unsigned int pkt_len = sk_run_filter(skb, filter->insns, filter->len);
93 filter->len); 93
94 err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM; 94 err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
95 } 95 }
96 rcu_read_unlock_bh(); 96 rcu_read_unlock_bh();
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index b143173e3eb2..a5ff5a89f376 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -598,7 +598,8 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
598 } 598 }
599 599
600 spin_lock(&rps_map_lock); 600 spin_lock(&rps_map_lock);
601 old_map = queue->rps_map; 601 old_map = rcu_dereference_protected(queue->rps_map,
602 lockdep_is_held(&rps_map_lock));
602 rcu_assign_pointer(queue->rps_map, map); 603 rcu_assign_pointer(queue->rps_map, map);
603 spin_unlock(&rps_map_lock); 604 spin_unlock(&rps_map_lock);
604 605
@@ -677,7 +678,8 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
677 table = NULL; 678 table = NULL;
678 679
679 spin_lock(&rps_dev_flow_lock); 680 spin_lock(&rps_dev_flow_lock);
680 old_table = queue->rps_flow_table; 681 old_table = rcu_dereference_protected(queue->rps_flow_table,
682 lockdep_is_held(&rps_dev_flow_lock));
681 rcu_assign_pointer(queue->rps_flow_table, table); 683 rcu_assign_pointer(queue->rps_flow_table, table);
682 spin_unlock(&rps_dev_flow_lock); 684 spin_unlock(&rps_dev_flow_lock);
683 685
@@ -705,13 +707,17 @@ static void rx_queue_release(struct kobject *kobj)
705{ 707{
706 struct netdev_rx_queue *queue = to_rx_queue(kobj); 708 struct netdev_rx_queue *queue = to_rx_queue(kobj);
707 struct netdev_rx_queue *first = queue->first; 709 struct netdev_rx_queue *first = queue->first;
710 struct rps_map *map;
711 struct rps_dev_flow_table *flow_table;
708 712
709 if (queue->rps_map)
710 call_rcu(&queue->rps_map->rcu, rps_map_release);
711 713
712 if (queue->rps_flow_table) 714 map = rcu_dereference_raw(queue->rps_map);
713 call_rcu(&queue->rps_flow_table->rcu, 715 if (map)
714 rps_dev_flow_table_release); 716 call_rcu(&map->rcu, rps_map_release);
717
718 flow_table = rcu_dereference_raw(queue->rps_flow_table);
719 if (flow_table)
720 call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
715 721
716 if (atomic_dec_and_test(&first->count)) 722 if (atomic_dec_and_test(&first->count))
717 kfree(first); 723 kfree(first);
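Both stores above follow the same update recipe: take the spinlock that serializes writers, fetch the old pointer with rcu_dereference_protected() (which documents the protection and lets lockdep check it), swap in the new table with rcu_assign_pointer(), and free the old one only after a grace period. In outline:

    spin_lock(&rps_map_lock);
    old_map = rcu_dereference_protected(queue->rps_map,
                                        lockdep_is_held(&rps_map_lock));
    rcu_assign_pointer(queue->rps_map, map);
    spin_unlock(&rps_map_lock);

    if (old_map)
        call_rcu(&old_map->rcu, rps_map_release);   /* deferred free */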
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index c988e685433a..3f860261c5ee 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -42,7 +42,9 @@ static int net_assign_generic(struct net *net, int id, void *data)
42 BUG_ON(!mutex_is_locked(&net_mutex)); 42 BUG_ON(!mutex_is_locked(&net_mutex));
43 BUG_ON(id == 0); 43 BUG_ON(id == 0);
44 44
45 ng = old_ng = net->gen; 45 old_ng = rcu_dereference_protected(net->gen,
46 lockdep_is_held(&net_mutex));
47 ng = old_ng;
46 if (old_ng->len >= id) 48 if (old_ng->len >= id)
47 goto assign; 49 goto assign;
48 50
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 2c0df0f95b3d..679b797d06b1 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -771,10 +771,10 @@ done:
771static unsigned long num_arg(const char __user * user_buffer, 771static unsigned long num_arg(const char __user * user_buffer,
772 unsigned long maxlen, unsigned long *num) 772 unsigned long maxlen, unsigned long *num)
773{ 773{
774 int i = 0; 774 int i;
775 *num = 0; 775 *num = 0;
776 776
777 for (; i < maxlen; i++) { 777 for (i = 0; i < maxlen; i++) {
778 char c; 778 char c;
779 if (get_user(c, &user_buffer[i])) 779 if (get_user(c, &user_buffer[i]))
780 return -EFAULT; 780 return -EFAULT;
@@ -789,9 +789,9 @@ static unsigned long num_arg(const char __user * user_buffer,
789 789
790static int strn_len(const char __user * user_buffer, unsigned int maxlen) 790static int strn_len(const char __user * user_buffer, unsigned int maxlen)
791{ 791{
792 int i = 0; 792 int i;
793 793
794 for (; i < maxlen; i++) { 794 for (i = 0; i < maxlen; i++) {
795 char c; 795 char c;
796 if (get_user(c, &user_buffer[i])) 796 if (get_user(c, &user_buffer[i]))
797 return -EFAULT; 797 return -EFAULT;
@@ -846,7 +846,7 @@ static ssize_t pktgen_if_write(struct file *file,
846{ 846{
847 struct seq_file *seq = file->private_data; 847 struct seq_file *seq = file->private_data;
848 struct pktgen_dev *pkt_dev = seq->private; 848 struct pktgen_dev *pkt_dev = seq->private;
849 int i = 0, max, len; 849 int i, max, len;
850 char name[16], valstr[32]; 850 char name[16], valstr[32];
851 unsigned long value = 0; 851 unsigned long value = 0;
852 char *pg_result = NULL; 852 char *pg_result = NULL;
@@ -860,13 +860,13 @@ static ssize_t pktgen_if_write(struct file *file,
860 return -EINVAL; 860 return -EINVAL;
861 } 861 }
862 862
863 max = count - i; 863 max = count;
864 tmp = count_trail_chars(&user_buffer[i], max); 864 tmp = count_trail_chars(user_buffer, max);
865 if (tmp < 0) { 865 if (tmp < 0) {
866 pr_warning("illegal format\n"); 866 pr_warning("illegal format\n");
867 return tmp; 867 return tmp;
868 } 868 }
869 i += tmp; 869 i = tmp;
870 870
871 /* Read variable name */ 871 /* Read variable name */
872 872
@@ -1764,7 +1764,7 @@ static ssize_t pktgen_thread_write(struct file *file,
1764{ 1764{
1765 struct seq_file *seq = file->private_data; 1765 struct seq_file *seq = file->private_data;
1766 struct pktgen_thread *t = seq->private; 1766 struct pktgen_thread *t = seq->private;
1767 int i = 0, max, len, ret; 1767 int i, max, len, ret;
1768 char name[40]; 1768 char name[40];
1769 char *pg_result; 1769 char *pg_result;
1770 1770
@@ -1773,12 +1773,12 @@ static ssize_t pktgen_thread_write(struct file *file,
1773 return -EINVAL; 1773 return -EINVAL;
1774 } 1774 }
1775 1775
1776 max = count - i; 1776 max = count;
1777 len = count_trail_chars(&user_buffer[i], max); 1777 len = count_trail_chars(user_buffer, max);
1778 if (len < 0) 1778 if (len < 0)
1779 return len; 1779 return len;
1780 1780
1781 i += len; 1781 i = len;
1782 1782
1783 /* Read variable name */ 1783 /* Read variable name */
1784 1784
@@ -1975,7 +1975,7 @@ static struct net_device *pktgen_dev_get_by_name(struct pktgen_dev *pkt_dev,
1975 const char *ifname) 1975 const char *ifname)
1976{ 1976{
1977 char b[IFNAMSIZ+5]; 1977 char b[IFNAMSIZ+5];
1978 int i = 0; 1978 int i;
1979 1979
1980 for (i = 0; ifname[i] != '@'; i++) { 1980 for (i = 0; ifname[i] != '@'; i++) {
1981 if (i == IFNAMSIZ) 1981 if (i == IFNAMSIZ)
@@ -2519,8 +2519,8 @@ static void free_SAs(struct pktgen_dev *pkt_dev)
2519{ 2519{
2520 if (pkt_dev->cflows) { 2520 if (pkt_dev->cflows) {
2521 /* let go of the SAs if we have them */ 2521 /* let go of the SAs if we have them */
2522 int i = 0; 2522 int i;
2523 for (; i < pkt_dev->cflows; i++) { 2523 for (i = 0; i < pkt_dev->cflows; i++) {
2524 struct xfrm_state *x = pkt_dev->flows[i].x; 2524 struct xfrm_state *x = pkt_dev->flows[i].x;
2525 if (x) { 2525 if (x) {
2526 xfrm_state_put(x); 2526 xfrm_state_put(x);
diff --git a/net/core/sock.c b/net/core/sock.c
index 11db43632df8..3eed5424e659 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1225,7 +1225,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
1225 sock_reset_flag(newsk, SOCK_DONE); 1225 sock_reset_flag(newsk, SOCK_DONE);
1226 skb_queue_head_init(&newsk->sk_error_queue); 1226 skb_queue_head_init(&newsk->sk_error_queue);
1227 1227
1228 filter = newsk->sk_filter; 1228 filter = rcu_dereference_protected(newsk->sk_filter, 1);
1229 if (filter != NULL) 1229 if (filter != NULL)
1230 sk_filter_charge(newsk, filter); 1230 sk_filter_charge(newsk, filter);
1231 1231
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 01eee5d984be..385b6095fdc4 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -34,7 +34,8 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
34 34
35 mutex_lock(&sock_flow_mutex); 35 mutex_lock(&sock_flow_mutex);
36 36
37 orig_sock_table = rps_sock_flow_table; 37 orig_sock_table = rcu_dereference_protected(rps_sock_flow_table,
38 lockdep_is_held(&sock_flow_mutex));
38 size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0; 39 size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0;
39 40
40 ret = proc_dointvec(&tmp, write, buffer, lenp, ppos); 41 ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index 43e1c594ce8f..b232375a0b75 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -120,11 +120,12 @@ static inline void fn_rebuild_zone(struct fn_zone *fz,
120 struct fib_node *f; 120 struct fib_node *f;
121 121
122 hlist_for_each_entry_safe(f, node, n, &old_ht[i], fn_hash) { 122 hlist_for_each_entry_safe(f, node, n, &old_ht[i], fn_hash) {
123 struct hlist_head __rcu *new_head; 123 struct hlist_head *new_head;
124 124
125 hlist_del_rcu(&f->fn_hash); 125 hlist_del_rcu(&f->fn_hash);
126 126
127 new_head = &fz->fz_hash[fn_hash(f->fn_key, fz)]; 127 new_head = rcu_dereference_protected(fz->fz_hash, 1) +
128 fn_hash(f->fn_key, fz);
128 hlist_add_head_rcu(&f->fn_hash, new_head); 129 hlist_add_head_rcu(&f->fn_hash, new_head);
129 } 130 }
130 } 131 }
@@ -179,8 +180,8 @@ static void fn_rehash_zone(struct fn_zone *fz)
179 memcpy(&nfz, fz, sizeof(nfz)); 180 memcpy(&nfz, fz, sizeof(nfz));
180 181
181 write_seqlock_bh(&fz->fz_lock); 182 write_seqlock_bh(&fz->fz_lock);
182 old_ht = fz->fz_hash; 183 old_ht = rcu_dereference_protected(fz->fz_hash, 1);
183 nfz.fz_hash = ht; 184 RCU_INIT_POINTER(nfz.fz_hash, ht);
184 nfz.fz_hashmask = new_hashmask; 185 nfz.fz_hashmask = new_hashmask;
185 nfz.fz_divisor = new_divisor; 186 nfz.fz_divisor = new_divisor;
186 fn_rebuild_zone(&nfz, old_ht, old_divisor); 187 fn_rebuild_zone(&nfz, old_ht, old_divisor);
@@ -236,7 +237,7 @@ fn_new_zone(struct fn_hash *table, int z)
236 seqlock_init(&fz->fz_lock); 237 seqlock_init(&fz->fz_lock);
237 fz->fz_divisor = z ? EMBEDDED_HASH_SIZE : 1; 238 fz->fz_divisor = z ? EMBEDDED_HASH_SIZE : 1;
238 fz->fz_hashmask = fz->fz_divisor - 1; 239 fz->fz_hashmask = fz->fz_divisor - 1;
239 fz->fz_hash = fz->fz_embedded_hash; 240 RCU_INIT_POINTER(fz->fz_hash, fz->fz_embedded_hash);
240 fz->fz_order = z; 241 fz->fz_order = z;
241 fz->fz_revorder = 32 - z; 242 fz->fz_revorder = 32 - z;
242 fz->fz_mask = inet_make_mask(z); 243 fz->fz_mask = inet_make_mask(z);
@@ -272,7 +273,7 @@ int fib_table_lookup(struct fib_table *tb,
272 for (fz = rcu_dereference(t->fn_zone_list); 273 for (fz = rcu_dereference(t->fn_zone_list);
273 fz != NULL; 274 fz != NULL;
274 fz = rcu_dereference(fz->fz_next)) { 275 fz = rcu_dereference(fz->fz_next)) {
275 struct hlist_head __rcu *head; 276 struct hlist_head *head;
276 struct hlist_node *node; 277 struct hlist_node *node;
277 struct fib_node *f; 278 struct fib_node *f;
278 __be32 k; 279 __be32 k;
@@ -282,7 +283,7 @@ int fib_table_lookup(struct fib_table *tb,
282 seq = read_seqbegin(&fz->fz_lock); 283 seq = read_seqbegin(&fz->fz_lock);
283 k = fz_key(flp->fl4_dst, fz); 284 k = fz_key(flp->fl4_dst, fz);
284 285
285 head = &fz->fz_hash[fn_hash(k, fz)]; 286 head = rcu_dereference(fz->fz_hash) + fn_hash(k, fz);
286 hlist_for_each_entry_rcu(f, node, head, fn_hash) { 287 hlist_for_each_entry_rcu(f, node, head, fn_hash) {
287 if (f->fn_key != k) 288 if (f->fn_key != k)
288 continue; 289 continue;
@@ -311,6 +312,7 @@ void fib_table_select_default(struct fib_table *tb,
311 struct fib_info *last_resort; 312 struct fib_info *last_resort;
312 struct fn_hash *t = (struct fn_hash *)tb->tb_data; 313 struct fn_hash *t = (struct fn_hash *)tb->tb_data;
313 struct fn_zone *fz = t->fn_zones[0]; 314 struct fn_zone *fz = t->fn_zones[0];
315 struct hlist_head *head;
314 316
315 if (fz == NULL) 317 if (fz == NULL)
316 return; 318 return;
@@ -320,7 +322,8 @@ void fib_table_select_default(struct fib_table *tb,
320 order = -1; 322 order = -1;
321 323
322 rcu_read_lock(); 324 rcu_read_lock();
323 hlist_for_each_entry_rcu(f, node, &fz->fz_hash[0], fn_hash) { 325 head = rcu_dereference(fz->fz_hash);
326 hlist_for_each_entry_rcu(f, node, head, fn_hash) {
324 struct fib_alias *fa; 327 struct fib_alias *fa;
325 328
326 list_for_each_entry_rcu(fa, &f->fn_alias, fa_list) { 329 list_for_each_entry_rcu(fa, &f->fn_alias, fa_list) {
@@ -374,7 +377,7 @@ out:
374/* Insert node F to FZ. */ 377/* Insert node F to FZ. */
375static inline void fib_insert_node(struct fn_zone *fz, struct fib_node *f) 378static inline void fib_insert_node(struct fn_zone *fz, struct fib_node *f)
376{ 379{
377 struct hlist_head *head = &fz->fz_hash[fn_hash(f->fn_key, fz)]; 380 struct hlist_head *head = rtnl_dereference(fz->fz_hash) + fn_hash(f->fn_key, fz);
378 381
379 hlist_add_head_rcu(&f->fn_hash, head); 382 hlist_add_head_rcu(&f->fn_hash, head);
380} 383}
@@ -382,7 +385,7 @@ static inline void fib_insert_node(struct fn_zone *fz, struct fib_node *f)
382/* Return the node in FZ matching KEY. */ 385/* Return the node in FZ matching KEY. */
383static struct fib_node *fib_find_node(struct fn_zone *fz, __be32 key) 386static struct fib_node *fib_find_node(struct fn_zone *fz, __be32 key)
384{ 387{
385 struct hlist_head *head = &fz->fz_hash[fn_hash(key, fz)]; 388 struct hlist_head *head = rtnl_dereference(fz->fz_hash) + fn_hash(key, fz);
386 struct hlist_node *node; 389 struct hlist_node *node;
387 struct fib_node *f; 390 struct fib_node *f;
388 391
@@ -662,7 +665,7 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
662 665
663static int fn_flush_list(struct fn_zone *fz, int idx) 666static int fn_flush_list(struct fn_zone *fz, int idx)
664{ 667{
665 struct hlist_head *head = &fz->fz_hash[idx]; 668 struct hlist_head *head = rtnl_dereference(fz->fz_hash) + idx;
666 struct hlist_node *node, *n; 669 struct hlist_node *node, *n;
667 struct fib_node *f; 670 struct fib_node *f;
668 int found = 0; 671 int found = 0;
@@ -761,14 +764,15 @@ fn_hash_dump_zone(struct sk_buff *skb, struct netlink_callback *cb,
761 struct fn_zone *fz) 764 struct fn_zone *fz)
762{ 765{
763 int h, s_h; 766 int h, s_h;
767 struct hlist_head *head = rcu_dereference(fz->fz_hash);
764 768
765 if (fz->fz_hash == NULL) 769 if (head == NULL)
766 return skb->len; 770 return skb->len;
767 s_h = cb->args[3]; 771 s_h = cb->args[3];
768 for (h = s_h; h < fz->fz_divisor; h++) { 772 for (h = s_h; h < fz->fz_divisor; h++) {
769 if (hlist_empty(&fz->fz_hash[h])) 773 if (hlist_empty(head + h))
770 continue; 774 continue;
771 if (fn_hash_dump_bucket(skb, cb, tb, fz, &fz->fz_hash[h]) < 0) { 775 if (fn_hash_dump_bucket(skb, cb, tb, fz, head + h) < 0) {
772 cb->args[3] = h; 776 cb->args[3] = h;
773 return -1; 777 return -1;
774 } 778 }
@@ -872,7 +876,7 @@ static struct fib_alias *fib_get_first(struct seq_file *seq)
872 if (!iter->zone->fz_nent) 876 if (!iter->zone->fz_nent)
873 continue; 877 continue;
874 878
875 iter->hash_head = iter->zone->fz_hash; 879 iter->hash_head = rcu_dereference(iter->zone->fz_hash);
876 maxslot = iter->zone->fz_divisor; 880 maxslot = iter->zone->fz_divisor;
877 881
878 for (iter->bucket = 0; iter->bucket < maxslot; 882 for (iter->bucket = 0; iter->bucket < maxslot;
@@ -957,7 +961,7 @@ static struct fib_alias *fib_get_next(struct seq_file *seq)
957 goto out; 961 goto out;
958 962
959 iter->bucket = 0; 963 iter->bucket = 0;
960 iter->hash_head = iter->zone->fz_hash; 964 iter->hash_head = rcu_dereference(iter->zone->fz_hash);
961 965
962 hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) { 966 hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) {
963 list_for_each_entry(fa, &fn->fn_alias, fa_list) { 967 list_for_each_entry(fa, &fn->fn_alias, fa_list) {
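Once the table base fz_hash is __rcu, writing &fz->fz_hash[idx] would dereference the annotated pointer directly and trip sparse; hence every access above fetches the base once through an accessor and then indexes with plain pointer arithmetic. Reader-side shape:

    struct hlist_head *head;
    struct hlist_node *node;
    struct fib_node *f;

    rcu_read_lock();
    head = rcu_dereference(fz->fz_hash) + fn_hash(k, fz); /* not &fz_hash[i] */
    hlist_for_each_entry_rcu(f, node, head, fn_hash) {
        /* ... match f->fn_key against k ... */
    }
    rcu_read_unlock();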
diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c
index caea6885fdbd..c6933f2ea310 100644
--- a/net/ipv4/gre.c
+++ b/net/ipv4/gre.c
@@ -22,7 +22,7 @@
22#include <net/gre.h> 22#include <net/gre.h>
23 23
24 24
25static const struct gre_protocol *gre_proto[GREPROTO_MAX] __read_mostly; 25static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly;
26static DEFINE_SPINLOCK(gre_proto_lock); 26static DEFINE_SPINLOCK(gre_proto_lock);
27 27
28int gre_add_protocol(const struct gre_protocol *proto, u8 version) 28int gre_add_protocol(const struct gre_protocol *proto, u8 version)
@@ -51,7 +51,8 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version)
51 goto err_out; 51 goto err_out;
52 52
53 spin_lock(&gre_proto_lock); 53 spin_lock(&gre_proto_lock);
54 if (gre_proto[version] != proto) 54 if (rcu_dereference_protected(gre_proto[version],
55 lockdep_is_held(&gre_proto_lock)) != proto)
55 goto err_out_unlock; 56 goto err_out_unlock;
56 rcu_assign_pointer(gre_proto[version], NULL); 57 rcu_assign_pointer(gre_proto[version], NULL);
57 spin_unlock(&gre_proto_lock); 58 spin_unlock(&gre_proto_lock);
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 9ffa24b9a804..9e94d7cf4f8a 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -72,18 +72,19 @@ static struct kmem_cache *peer_cachep __read_mostly;
72#define node_height(x) x->avl_height 72#define node_height(x) x->avl_height
73 73
74#define peer_avl_empty ((struct inet_peer *)&peer_fake_node) 74#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
75#define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node)
75static const struct inet_peer peer_fake_node = { 76static const struct inet_peer peer_fake_node = {
76 .avl_left = peer_avl_empty, 77 .avl_left = peer_avl_empty_rcu,
77 .avl_right = peer_avl_empty, 78 .avl_right = peer_avl_empty_rcu,
78 .avl_height = 0 79 .avl_height = 0
79}; 80};
80 81
81static struct { 82static struct {
82 struct inet_peer *root; 83 struct inet_peer __rcu *root;
83 spinlock_t lock; 84 spinlock_t lock;
84 int total; 85 int total;
85} peers = { 86} peers = {
86 .root = peer_avl_empty, 87 .root = peer_avl_empty_rcu,
87 .lock = __SPIN_LOCK_UNLOCKED(peers.lock), 88 .lock = __SPIN_LOCK_UNLOCKED(peers.lock),
88 .total = 0, 89 .total = 0,
89}; 90};
@@ -156,11 +157,14 @@ static void unlink_from_unused(struct inet_peer *p)
156 */ 157 */
157#define lookup(_daddr, _stack) \ 158#define lookup(_daddr, _stack) \
158({ \ 159({ \
159 struct inet_peer *u, **v; \ 160 struct inet_peer *u; \
161 struct inet_peer __rcu **v; \
160 \ 162 \
161 stackptr = _stack; \ 163 stackptr = _stack; \
162 *stackptr++ = &peers.root; \ 164 *stackptr++ = &peers.root; \
163 for (u = peers.root; u != peer_avl_empty; ) { \ 165 for (u = rcu_dereference_protected(peers.root, \
166 lockdep_is_held(&peers.lock)); \
167 u != peer_avl_empty; ) { \
164 if (_daddr == u->v4daddr) \ 168 if (_daddr == u->v4daddr) \
165 break; \ 169 break; \
166 if ((__force __u32)_daddr < (__force __u32)u->v4daddr) \ 170 if ((__force __u32)_daddr < (__force __u32)u->v4daddr) \
@@ -168,7 +172,8 @@ static void unlink_from_unused(struct inet_peer *p)
168 else \ 172 else \
169 v = &u->avl_right; \ 173 v = &u->avl_right; \
170 *stackptr++ = v; \ 174 *stackptr++ = v; \
171 u = *v; \ 175 u = rcu_dereference_protected(*v, \
176 lockdep_is_held(&peers.lock)); \
172 } \ 177 } \
173 u; \ 178 u; \
174}) 179})
@@ -209,13 +214,17 @@ static struct inet_peer *lookup_rcu_bh(__be32 daddr)
209/* Called with local BH disabled and the pool lock held. */ 214/* Called with local BH disabled and the pool lock held. */
210#define lookup_rightempty(start) \ 215#define lookup_rightempty(start) \
211({ \ 216({ \
212 struct inet_peer *u, **v; \ 217 struct inet_peer *u; \
218 struct inet_peer __rcu **v; \
213 *stackptr++ = &start->avl_left; \ 219 *stackptr++ = &start->avl_left; \
214 v = &start->avl_left; \ 220 v = &start->avl_left; \
215 for (u = *v; u->avl_right != peer_avl_empty; ) { \ 221 for (u = rcu_dereference_protected(*v, \
222 lockdep_is_held(&peers.lock)); \
223 u->avl_right != peer_avl_empty_rcu; ) { \
216 v = &u->avl_right; \ 224 v = &u->avl_right; \
217 *stackptr++ = v; \ 225 *stackptr++ = v; \
218 u = *v; \ 226 u = rcu_dereference_protected(*v, \
227 lockdep_is_held(&peers.lock)); \
219 } \ 228 } \
220 u; \ 229 u; \
221}) 230})
@@ -224,74 +233,86 @@ static struct inet_peer *lookup_rcu_bh(__be32 daddr)
  * Variable names are the proof of operation correctness.
  * Look into mm/map_avl.c for more detail description of the ideas.
  */
-static void peer_avl_rebalance(struct inet_peer **stack[],
-		struct inet_peer ***stackend)
+static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
+		struct inet_peer __rcu ***stackend)
 {
-	struct inet_peer **nodep, *node, *l, *r;
+	struct inet_peer __rcu **nodep;
+	struct inet_peer *node, *l, *r;
 	int lh, rh;
 
 	while (stackend > stack) {
 		nodep = *--stackend;
-		node = *nodep;
-		l = node->avl_left;
-		r = node->avl_right;
+		node = rcu_dereference_protected(*nodep,
+				lockdep_is_held(&peers.lock));
+		l = rcu_dereference_protected(node->avl_left,
+				lockdep_is_held(&peers.lock));
+		r = rcu_dereference_protected(node->avl_right,
+				lockdep_is_held(&peers.lock));
 		lh = node_height(l);
 		rh = node_height(r);
 		if (lh > rh + 1) { /* l: RH+2 */
 			struct inet_peer *ll, *lr, *lrl, *lrr;
 			int lrh;
-			ll = l->avl_left;
-			lr = l->avl_right;
+			ll = rcu_dereference_protected(l->avl_left,
+					lockdep_is_held(&peers.lock));
+			lr = rcu_dereference_protected(l->avl_right,
+					lockdep_is_held(&peers.lock));
 			lrh = node_height(lr);
 			if (lrh <= node_height(ll)) {	/* ll: RH+1 */
-				node->avl_left = lr;	/* lr: RH or RH+1 */
-				node->avl_right = r;	/* r: RH */
+				RCU_INIT_POINTER(node->avl_left, lr);	/* lr: RH or RH+1 */
+				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
 				node->avl_height = lrh + 1; /* RH+1 or RH+2 */
-				l->avl_left = ll;	/* ll: RH+1 */
-				l->avl_right = node;	/* node: RH+1 or RH+2 */
+				RCU_INIT_POINTER(l->avl_left, ll);	/* ll: RH+1 */
+				RCU_INIT_POINTER(l->avl_right, node);	/* node: RH+1 or RH+2 */
 				l->avl_height = node->avl_height + 1;
-				*nodep = l;
+				RCU_INIT_POINTER(*nodep, l);
 			} else { /* ll: RH, lr: RH+1 */
-				lrl = lr->avl_left;	/* lrl: RH or RH-1 */
-				lrr = lr->avl_right;	/* lrr: RH or RH-1 */
-				node->avl_left = lrr;	/* lrr: RH or RH-1 */
-				node->avl_right = r;	/* r: RH */
+				lrl = rcu_dereference_protected(lr->avl_left,
+					lockdep_is_held(&peers.lock));	/* lrl: RH or RH-1 */
+				lrr = rcu_dereference_protected(lr->avl_right,
+					lockdep_is_held(&peers.lock));	/* lrr: RH or RH-1 */
+				RCU_INIT_POINTER(node->avl_left, lrr);	/* lrr: RH or RH-1 */
+				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
 				node->avl_height = rh + 1; /* node: RH+1 */
-				l->avl_left = ll;	/* ll: RH */
-				l->avl_right = lrl;	/* lrl: RH or RH-1 */
+				RCU_INIT_POINTER(l->avl_left, ll);	/* ll: RH */
+				RCU_INIT_POINTER(l->avl_right, lrl);	/* lrl: RH or RH-1 */
 				l->avl_height = rh + 1;	/* l: RH+1 */
-				lr->avl_left = l;	/* l: RH+1 */
-				lr->avl_right = node;	/* node: RH+1 */
+				RCU_INIT_POINTER(lr->avl_left, l);	/* l: RH+1 */
+				RCU_INIT_POINTER(lr->avl_right, node);	/* node: RH+1 */
 				lr->avl_height = rh + 2;
-				*nodep = lr;
+				RCU_INIT_POINTER(*nodep, lr);
 			}
 		} else if (rh > lh + 1) { /* r: LH+2 */
 			struct inet_peer *rr, *rl, *rlr, *rll;
 			int rlh;
-			rr = r->avl_right;
-			rl = r->avl_left;
+			rr = rcu_dereference_protected(r->avl_right,
+					lockdep_is_held(&peers.lock));
+			rl = rcu_dereference_protected(r->avl_left,
+					lockdep_is_held(&peers.lock));
 			rlh = node_height(rl);
 			if (rlh <= node_height(rr)) {	/* rr: LH+1 */
-				node->avl_right = rl;	/* rl: LH or LH+1 */
-				node->avl_left = l;	/* l: LH */
+				RCU_INIT_POINTER(node->avl_right, rl);	/* rl: LH or LH+1 */
+				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
 				node->avl_height = rlh + 1; /* LH+1 or LH+2 */
-				r->avl_right = rr;	/* rr: LH+1 */
-				r->avl_left = node;	/* node: LH+1 or LH+2 */
+				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH+1 */
+				RCU_INIT_POINTER(r->avl_left, node);	/* node: LH+1 or LH+2 */
 				r->avl_height = node->avl_height + 1;
-				*nodep = r;
+				RCU_INIT_POINTER(*nodep, r);
 			} else { /* rr: RH, rl: RH+1 */
-				rlr = rl->avl_right;	/* rlr: LH or LH-1 */
-				rll = rl->avl_left;	/* rll: LH or LH-1 */
-				node->avl_right = rll;	/* rll: LH or LH-1 */
-				node->avl_left = l;	/* l: LH */
+				rlr = rcu_dereference_protected(rl->avl_right,
+					lockdep_is_held(&peers.lock));	/* rlr: LH or LH-1 */
+				rll = rcu_dereference_protected(rl->avl_left,
+					lockdep_is_held(&peers.lock));	/* rll: LH or LH-1 */
+				RCU_INIT_POINTER(node->avl_right, rll);	/* rll: LH or LH-1 */
+				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
 				node->avl_height = lh + 1; /* node: LH+1 */
-				r->avl_right = rr;	/* rr: LH */
-				r->avl_left = rlr;	/* rlr: LH or LH-1 */
+				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH */
+				RCU_INIT_POINTER(r->avl_left, rlr);	/* rlr: LH or LH-1 */
 				r->avl_height = lh + 1;	/* r: LH+1 */
-				rl->avl_right = r;	/* r: LH+1 */
-				rl->avl_left = node;	/* node: LH+1 */
+				RCU_INIT_POINTER(rl->avl_right, r);	/* r: LH+1 */
+				RCU_INIT_POINTER(rl->avl_left, node);	/* node: LH+1 */
 				rl->avl_height = lh + 2;
-				*nodep = rl;
+				RCU_INIT_POINTER(*nodep, rl);
 			}
 		} else {
 			node->avl_height = (lh > rh ? lh : rh) + 1;
@@ -303,10 +324,10 @@ static void peer_avl_rebalance(struct inet_peer **stack[],
 #define link_to_pool(n)						\
 do {								\
 	n->avl_height = 1;					\
-	n->avl_left = peer_avl_empty;				\
-	n->avl_right = peer_avl_empty;				\
-	smp_wmb(); /* lockless readers can catch us now */	\
-	**--stackptr = n;					\
+	n->avl_left = peer_avl_empty_rcu;			\
+	n->avl_right = peer_avl_empty_rcu;			\
+	/* lockless readers can catch us now */			\
+	rcu_assign_pointer(**--stackptr, n);			\
 	peer_avl_rebalance(stack, stackptr);			\
 } while (0)
 
@@ -330,24 +351,25 @@ static void unlink_from_pool(struct inet_peer *p)
 	 * We use refcnt=-1 to alert lockless readers this entry is deleted.
 	 */
 	if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
-		struct inet_peer **stack[PEER_MAXDEPTH];
-		struct inet_peer ***stackptr, ***delp;
+		struct inet_peer __rcu **stack[PEER_MAXDEPTH];
+		struct inet_peer __rcu ***stackptr, ***delp;
 		if (lookup(p->v4daddr, stack) != p)
 			BUG();
 		delp = stackptr - 1; /* *delp[0] == p */
-		if (p->avl_left == peer_avl_empty) {
+		if (p->avl_left == peer_avl_empty_rcu) {
 			*delp[0] = p->avl_right;
 			--stackptr;
 		} else {
 			/* look for a node to insert instead of p */
 			struct inet_peer *t;
 			t = lookup_rightempty(p);
-			BUG_ON(*stackptr[-1] != t);
+			BUG_ON(rcu_dereference_protected(*stackptr[-1],
+					lockdep_is_held(&peers.lock)) != t);
 			**--stackptr = t->avl_left;
 			/* t is removed, t->v4daddr > x->v4daddr for any
 			 * x in p->avl_left subtree.
 			 * Put t in the old place of p. */
-			*delp[0] = t;
+			RCU_INIT_POINTER(*delp[0], t);
 			t->avl_left = p->avl_left;
 			t->avl_right = p->avl_right;
 			t->avl_height = p->avl_height;
@@ -414,7 +436,7 @@ static int cleanup_once(unsigned long ttl)
 struct inet_peer *inet_getpeer(__be32 daddr, int create)
 {
 	struct inet_peer *p;
-	struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
+	struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
 
 	/* Look up for the address quickly, lockless.
 	 * Because of a concurrent writer, we might not find an existing entry.
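A note on the pattern above: the inetpeer changes are representative of the whole __rcu annotation series. Pointers written under peers.lock but traversed by lockless readers gain the __rcu marker, writers load them through rcu_dereference_protected() with a lockdep expression documenting which lock they hold, and publication goes through rcu_assign_pointer(), or RCU_INIT_POINTER() where no barrier is needed. A minimal kernel-style sketch of the writer side, with made-up names (struct node, head, head_lock) rather than anything from this patch:

	struct node {
		int key;
		struct node __rcu *next;
	};

	static struct node __rcu *head;
	static DEFINE_SPINLOCK(head_lock);

	static void insert_front(struct node *n)
	{
		spin_lock(&head_lock);
		/* writer-side load of an __rcu pointer, checked by sparse/lockdep */
		RCU_INIT_POINTER(n->next,
				 rcu_dereference_protected(head,
					lockdep_is_held(&head_lock)));
		rcu_assign_pointer(head, n);	/* publish with the needed barrier */
		spin_unlock(&head_lock);
	}

The barrier-free RCU_INIT_POINTER() is safe for n->next only because n is not yet reachable by readers; the final publication of n itself must use rcu_assign_pointer().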
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d0ffcbe369b7..01087e035b7d 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1072,6 +1072,7 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
 			break;
 		}
 		ipgre_tunnel_unlink(ign, t);
+		synchronize_net();
 		t->parms.iph.saddr = p.iph.saddr;
 		t->parms.iph.daddr = p.iph.daddr;
 		t->parms.i_key = p.i_key;
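The one-line ip_gre fix (and its ipip, sit and ip6_tunnel twins below) inserts synchronize_net() between unlinking the tunnel and rewriting its addresses, so a reader still walking the old hash chain under RCU cannot see the lookup keys change underneath it. A hedged sketch of the general shape, with hypothetical helpers my_tunnel_unlink()/my_tunnel_link() standing in for the per-protocol ones:

	static void tunnel_change_keys(struct my_tunnel *t, __be32 saddr, __be32 daddr)
	{
		my_tunnel_unlink(t);	/* drop out of the RCU-visible hash */
		synchronize_net();	/* wait out all current RCU readers */
		t->saddr = saddr;	/* keys can now change without racing */
		t->daddr = daddr;
		my_tunnel_link(t);	/* re-insert under the new keys */
	}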
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 64b70ad162e3..3948c86e59ca 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -238,7 +238,7 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
    but receiver should be enough clever f.e. to forward mtrace requests,
    sent to multicast group to reach destination designated router.
  */
-struct ip_ra_chain *ip_ra_chain;
+struct ip_ra_chain __rcu *ip_ra_chain;
 static DEFINE_SPINLOCK(ip_ra_lock);
 
 
@@ -253,7 +253,8 @@ static void ip_ra_destroy_rcu(struct rcu_head *head)
 int ip_ra_control(struct sock *sk, unsigned char on,
 		  void (*destructor)(struct sock *))
 {
-	struct ip_ra_chain *ra, *new_ra, **rap;
+	struct ip_ra_chain *ra, *new_ra;
+	struct ip_ra_chain __rcu **rap;
 
 	if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW)
 		return -EINVAL;
@@ -261,7 +262,10 @@ int ip_ra_control(struct sock *sk, unsigned char on,
 	new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
 
 	spin_lock_bh(&ip_ra_lock);
-	for (rap = &ip_ra_chain; (ra = *rap) != NULL; rap = &ra->next) {
+	for (rap = &ip_ra_chain;
+	     (ra = rcu_dereference_protected(*rap,
+			lockdep_is_held(&ip_ra_lock))) != NULL;
+	     rap = &ra->next) {
 		if (ra->sk == sk) {
 			if (on) {
 				spin_unlock_bh(&ip_ra_lock);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index e9b816e6cd73..cd300aaee78f 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -676,6 +676,7 @@ ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
 		}
 		t = netdev_priv(dev);
 		ipip_tunnel_unlink(ipn, t);
+		synchronize_net();
 		t->parms.iph.saddr = p.iph.saddr;
 		t->parms.iph.daddr = p.iph.daddr;
 		memcpy(dev->dev_addr, &p.iph.saddr, 4);
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index 65699c24411c..9ae5c01cd0b2 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -28,7 +28,7 @@
 #include <linux/spinlock.h>
 #include <net/protocol.h>
 
-const struct net_protocol *inet_protos[MAX_INET_PROTOS] __read_mostly;
+const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly;
 
 /*
  *	Add a protocol handler to the hash tables
@@ -38,7 +38,8 @@ int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol)
 {
 	int hash = protocol & (MAX_INET_PROTOS - 1);
 
-	return !cmpxchg(&inet_protos[hash], NULL, prot) ? 0 : -1;
+	return !cmpxchg((const struct net_protocol **)&inet_protos[hash],
+			NULL, prot) ? 0 : -1;
 }
 EXPORT_SYMBOL(inet_add_protocol);
 
@@ -50,7 +51,8 @@ int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol)
 {
 	int ret, hash = protocol & (MAX_INET_PROTOS - 1);
 
-	ret = (cmpxchg(&inet_protos[hash], prot, NULL) == prot) ? 0 : -1;
+	ret = (cmpxchg((const struct net_protocol **)&inet_protos[hash],
+		       prot, NULL) == prot) ? 0 : -1;
 
 	synchronize_net();
 
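Protocol registration stays lockless here: a single cmpxchg() either claims the empty slot or reports it taken, and the casts strip the new __rcu qualifier because cmpxchg() operates on plain pointers. The same slot-claiming idiom can be shown as a small self-contained userspace program (GCC __sync builtins; the handler table and names are invented for the example):

	#include <stdio.h>

	typedef void (*handler_fn)(void);

	static handler_fn slots[16];

	/* Returns 0 if we atomically claimed the slot, -1 if it was taken. */
	static int add_handler(unsigned int idx, handler_fn fn)
	{
		return __sync_val_compare_and_swap(&slots[idx],
						   (handler_fn)0, fn) == 0 ? 0 : -1;
	}

	static void ping(void) { puts("ping"); }

	int main(void)
	{
		printf("%d\n", add_handler(3, ping));	/* 0: slot claimed   */
		printf("%d\n", add_handler(3, ping));	/* -1: already taken */
		return 0;
	}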
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index d6cb2bfcd8e1..987bf9adb318 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -198,7 +198,7 @@ const __u8 ip_tos2prio[16] = {
  */
 
 struct rt_hash_bucket {
-	struct rtable	*chain;
+	struct rtable __rcu	*chain;
 };
 
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
@@ -280,7 +280,7 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq)
 	struct rtable *r = NULL;
 
 	for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
-		if (!rt_hash_table[st->bucket].chain)
+		if (!rcu_dereference_raw(rt_hash_table[st->bucket].chain))
 			continue;
 		rcu_read_lock_bh();
 		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
@@ -300,17 +300,17 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq,
 {
 	struct rt_cache_iter_state *st = seq->private;
 
-	r = r->dst.rt_next;
+	r = rcu_dereference_bh(r->dst.rt_next);
 	while (!r) {
 		rcu_read_unlock_bh();
 		do {
 			if (--st->bucket < 0)
 				return NULL;
-		} while (!rt_hash_table[st->bucket].chain);
+		} while (!rcu_dereference_raw(rt_hash_table[st->bucket].chain));
 		rcu_read_lock_bh();
-		r = rt_hash_table[st->bucket].chain;
+		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
 	}
-	return rcu_dereference_bh(r);
+	return r;
 }
 
 static struct rtable *rt_cache_get_next(struct seq_file *seq,
@@ -721,19 +721,23 @@ static void rt_do_flush(int process_context)
 	for (i = 0; i <= rt_hash_mask; i++) {
 		if (process_context && need_resched())
 			cond_resched();
-		rth = rt_hash_table[i].chain;
+		rth = rcu_dereference_raw(rt_hash_table[i].chain);
 		if (!rth)
 			continue;
 
 		spin_lock_bh(rt_hash_lock_addr(i));
 #ifdef CONFIG_NET_NS
 		{
-		struct rtable ** prev, * p;
+		struct rtable __rcu **prev;
+		struct rtable *p;
 
-		rth = rt_hash_table[i].chain;
+		rth = rcu_dereference_protected(rt_hash_table[i].chain,
+			lockdep_is_held(rt_hash_lock_addr(i)));
 
 		/* defer releasing the head of the list after spin_unlock */
-		for (tail = rth; tail; tail = tail->dst.rt_next)
+		for (tail = rth; tail;
+		     tail = rcu_dereference_protected(tail->dst.rt_next,
				lockdep_is_held(rt_hash_lock_addr(i))))
 			if (!rt_is_expired(tail))
 				break;
 		if (rth != tail)
@@ -741,8 +745,12 @@ static void rt_do_flush(int process_context)
 
 		/* call rt_free on entries after the tail requiring flush */
 		prev = &rt_hash_table[i].chain;
-		for (p = *prev; p; p = next) {
-			next = p->dst.rt_next;
+		for (p = rcu_dereference_protected(*prev,
+				lockdep_is_held(rt_hash_lock_addr(i)));
+		     p != NULL;
+		     p = next) {
+			next = rcu_dereference_protected(p->dst.rt_next,
+				lockdep_is_held(rt_hash_lock_addr(i)));
 			if (!rt_is_expired(p)) {
 				prev = &p->dst.rt_next;
 			} else {
@@ -752,14 +760,15 @@ static void rt_do_flush(int process_context)
 			}
 		}
 #else
-		rth = rt_hash_table[i].chain;
-		rt_hash_table[i].chain = NULL;
+		rth = rcu_dereference_protected(rt_hash_table[i].chain,
+			lockdep_is_held(rt_hash_lock_addr(i)));
+		rcu_assign_pointer(rt_hash_table[i].chain, NULL);
 		tail = NULL;
 #endif
 		spin_unlock_bh(rt_hash_lock_addr(i));
 
 		for (; rth != tail; rth = next) {
-			next = rth->dst.rt_next;
+			next = rcu_dereference_protected(rth->dst.rt_next, 1);
 			rt_free(rth);
 		}
 	}
@@ -790,7 +799,7 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
 	while (aux != rth) {
 		if (compare_hash_inputs(&aux->fl, &rth->fl))
 			return 0;
-		aux = aux->dst.rt_next;
+		aux = rcu_dereference_protected(aux->dst.rt_next, 1);
 	}
 	return ONE;
 }
@@ -799,7 +808,8 @@ static void rt_check_expire(void)
 {
 	static unsigned int rover;
 	unsigned int i = rover, goal;
-	struct rtable *rth, **rthp;
+	struct rtable *rth;
+	struct rtable __rcu **rthp;
 	unsigned long samples = 0;
 	unsigned long sum = 0, sum2 = 0;
 	unsigned long delta;
@@ -825,11 +835,12 @@ static void rt_check_expire(void)
 
 		samples++;
 
-		if (*rthp == NULL)
+		if (rcu_dereference_raw(*rthp) == NULL)
 			continue;
 		length = 0;
 		spin_lock_bh(rt_hash_lock_addr(i));
-		while ((rth = *rthp) != NULL) {
+		while ((rth = rcu_dereference_protected(*rthp,
					lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
 			prefetch(rth->dst.rt_next);
 			if (rt_is_expired(rth)) {
 				*rthp = rth->dst.rt_next;
@@ -941,7 +952,8 @@ static int rt_garbage_collect(struct dst_ops *ops)
 	static unsigned long last_gc;
 	static int rover;
 	static int equilibrium;
-	struct rtable *rth, **rthp;
+	struct rtable *rth;
+	struct rtable __rcu **rthp;
 	unsigned long now = jiffies;
 	int goal;
 	int entries = dst_entries_get_fast(&ipv4_dst_ops);
@@ -995,7 +1007,8 @@ static int rt_garbage_collect(struct dst_ops *ops)
 			k = (k + 1) & rt_hash_mask;
 			rthp = &rt_hash_table[k].chain;
 			spin_lock_bh(rt_hash_lock_addr(k));
-			while ((rth = *rthp) != NULL) {
+			while ((rth = rcu_dereference_protected(*rthp,
					lockdep_is_held(rt_hash_lock_addr(k)))) != NULL) {
 				if (!rt_is_expired(rth) &&
 					!rt_may_expire(rth, tmo, expire)) {
 					tmo >>= 1;
@@ -1071,7 +1084,7 @@ static int slow_chain_length(const struct rtable *head)
 
 	while (rth) {
 		length += has_noalias(head, rth);
-		rth = rth->dst.rt_next;
+		rth = rcu_dereference_protected(rth->dst.rt_next, 1);
 	}
 	return length >> FRACT_BITS;
 }
@@ -1079,9 +1092,9 @@ static int slow_chain_length(const struct rtable *head)
 static int rt_intern_hash(unsigned hash, struct rtable *rt,
 			  struct rtable **rp, struct sk_buff *skb, int ifindex)
 {
-	struct rtable	*rth, **rthp;
+	struct rtable	*rth, *cand;
+	struct rtable __rcu **rthp, **candp;
 	unsigned long	now;
-	struct rtable *cand, **candp;
 	u32 		min_score;
 	int chain_length;
 	int attempts = !in_softirq();
@@ -1128,7 +1141,8 @@ restart:
 	rthp = &rt_hash_table[hash].chain;
 
 	spin_lock_bh(rt_hash_lock_addr(hash));
-	while ((rth = *rthp) != NULL) {
+	while ((rth = rcu_dereference_protected(*rthp,
			lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
 		if (rt_is_expired(rth)) {
 			*rthp = rth->dst.rt_next;
 			rt_free(rth);
@@ -1324,12 +1338,14 @@ EXPORT_SYMBOL(__ip_select_ident);
 
 static void rt_del(unsigned hash, struct rtable *rt)
 {
-	struct rtable **rthp, *aux;
+	struct rtable __rcu **rthp;
+	struct rtable *aux;
 
 	rthp = &rt_hash_table[hash].chain;
 	spin_lock_bh(rt_hash_lock_addr(hash));
 	ip_rt_put(rt);
-	while ((aux = *rthp) != NULL) {
+	while ((aux = rcu_dereference_protected(*rthp,
			lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
 		if (aux == rt || rt_is_expired(aux)) {
 			*rthp = aux->dst.rt_next;
 			rt_free(aux);
@@ -1346,7 +1362,8 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 {
 	int i, k;
 	struct in_device *in_dev = __in_dev_get_rcu(dev);
-	struct rtable *rth, **rthp;
+	struct rtable *rth;
+	struct rtable __rcu **rthp;
 	__be32  skeys[2] = { saddr, 0 };
 	int  ikeys[2] = { dev->ifindex, 0 };
 	struct netevent_redirect netevent;
@@ -1379,7 +1396,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 			unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
 						rt_genid(net));
 
-			rthp=&rt_hash_table[hash].chain;
+			rthp = &rt_hash_table[hash].chain;
 
 			while ((rth = rcu_dereference(*rthp)) != NULL) {
 				struct rtable *rt;
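The route.c conversion is a good map of the three accessors used throughout this series: rcu_dereference_bh() for readers that run with BH disabled, rcu_dereference_protected(..., lockdep_is_held(...)) for writers holding the per-bucket spinlock, and rcu_dereference_raw() where only a NULL-or-not peek is made and no protection claim is intended. A kernel-style sketch of the reader side (struct entry and bucket are stand-ins, not names from the patch):

	struct entry {
		struct entry __rcu *next;
	};

	static struct entry __rcu *bucket;

	static void walk_bucket(void (*visit)(struct entry *))
	{
		struct entry *e;

		rcu_read_lock_bh();		/* this chain's readers run in BH */
		for (e = rcu_dereference_bh(bucket); e;
		     e = rcu_dereference_bh(e->next))
			visit(e);
		rcu_read_unlock_bh();
	}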
diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c
index 9a17bd2a0a37..ac3b3ee4b07c 100644
--- a/net/ipv4/tunnel4.c
+++ b/net/ipv4/tunnel4.c
@@ -14,27 +14,32 @@
 #include <net/protocol.h>
 #include <net/xfrm.h>
 
-static struct xfrm_tunnel *tunnel4_handlers __read_mostly;
-static struct xfrm_tunnel *tunnel64_handlers __read_mostly;
+static struct xfrm_tunnel __rcu *tunnel4_handlers __read_mostly;
+static struct xfrm_tunnel __rcu *tunnel64_handlers __read_mostly;
 static DEFINE_MUTEX(tunnel4_mutex);
 
-static inline struct xfrm_tunnel **fam_handlers(unsigned short family)
+static inline struct xfrm_tunnel __rcu **fam_handlers(unsigned short family)
 {
 	return (family == AF_INET) ? &tunnel4_handlers : &tunnel64_handlers;
 }
 
 int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family)
 {
-	struct xfrm_tunnel **pprev;
+	struct xfrm_tunnel __rcu **pprev;
+	struct xfrm_tunnel *t;
+
 	int ret = -EEXIST;
 	int priority = handler->priority;
 
 	mutex_lock(&tunnel4_mutex);
 
-	for (pprev = fam_handlers(family); *pprev; pprev = &(*pprev)->next) {
-		if ((*pprev)->priority > priority)
+	for (pprev = fam_handlers(family);
+	     (t = rcu_dereference_protected(*pprev,
+			lockdep_is_held(&tunnel4_mutex))) != NULL;
+	     pprev = &t->next) {
+		if (t->priority > priority)
 			break;
-		if ((*pprev)->priority == priority)
+		if (t->priority == priority)
 			goto err;
 	}
 
@@ -52,13 +57,17 @@ EXPORT_SYMBOL(xfrm4_tunnel_register);
 
 int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family)
 {
-	struct xfrm_tunnel **pprev;
+	struct xfrm_tunnel __rcu **pprev;
+	struct xfrm_tunnel *t;
 	int ret = -ENOENT;
 
 	mutex_lock(&tunnel4_mutex);
 
-	for (pprev = fam_handlers(family); *pprev; pprev = &(*pprev)->next) {
-		if (*pprev == handler) {
+	for (pprev = fam_handlers(family);
+	     (t = rcu_dereference_protected(*pprev,
+			lockdep_is_held(&tunnel4_mutex))) != NULL;
+	     pprev = &t->next) {
+		if (t == handler) {
 			*pprev = handler->next;
 			ret = 0;
 			break;
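Besides the annotations, the tunnel4 loops now load each link once into t instead of re-reading *pprev three times. The underlying indirect-pointer walk is the standard way to insert into a priority-sorted singly linked list without special-casing the head; a runnable userspace reduction (RCU accessors omitted, names invented):

	#include <stdio.h>

	struct handler {
		int priority;
		struct handler *next;
	};

	/* Insert h keeping the list sorted; -1 mirrors the kernel's -EEXIST. */
	static int insert_sorted(struct handler **pprev, struct handler *h)
	{
		struct handler *t;

		for (; (t = *pprev) != NULL; pprev = &t->next) {
			if (t->priority > h->priority)
				break;		/* found the insertion point */
			if (t->priority == h->priority)
				return -1;	/* duplicate priority */
		}
		h->next = t;
		*pprev = h;
		return 0;
	}

	int main(void)
	{
		struct handler *head = NULL;
		struct handler a = { .priority = 2 }, b = { .priority = 1 };

		insert_sorted(&head, &a);
		insert_sorted(&head, &b);
		for (struct handler *t = head; t; t = t->next)
			printf("%d\n", t->priority);	/* prints 1 then 2 */
		return 0;
	}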
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index b3f7e8cf18ac..28cb2d733a3c 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1413,7 +1413,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		}
 	}
 
-	if (sk->sk_filter) {
+	if (rcu_dereference_raw(sk->sk_filter)) {
 		if (udp_lib_checksum_complete(skb))
 			goto drop;
 	}
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index ec7a91d9e865..e048ec62d109 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -836,7 +836,7 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i
 {
 	struct inet6_dev *idev = ifp->idev;
 	struct in6_addr addr, *tmpaddr;
-	unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_cstamp, tmp_tstamp;
+	unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_cstamp, tmp_tstamp, age;
 	unsigned long regen_advance;
 	int tmp_plen;
 	int ret = 0;
@@ -886,12 +886,13 @@ retry:
 		goto out;
 	}
 	memcpy(&addr.s6_addr[8], idev->rndid, 8);
+	age = (jiffies - ifp->tstamp) / HZ;
 	tmp_valid_lft = min_t(__u32,
 			      ifp->valid_lft,
-			      idev->cnf.temp_valid_lft);
+			      idev->cnf.temp_valid_lft + age);
 	tmp_prefered_lft = min_t(__u32,
 				 ifp->prefered_lft,
-				 idev->cnf.temp_prefered_lft -
+				 idev->cnf.temp_prefered_lft + age -
 				 idev->cnf.max_desync_factor);
 	tmp_plen = ifp->prefix_len;
 	max_addresses = idev->cnf.max_addresses;
@@ -1426,8 +1427,10 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
 {
 	struct inet6_dev *idev = ifp->idev;
 
-	if (addrconf_dad_end(ifp))
+	if (addrconf_dad_end(ifp)) {
+		in6_ifa_put(ifp);
 		return;
+	}
 
 	if (net_ratelimit())
 		printk(KERN_INFO "%s: IPv6 duplicate address %pI6c detected!\n",
@@ -2021,10 +2024,11 @@ ok:
 				ipv6_ifa_notify(0, ift);
 			}
 
-			if (create && in6_dev->cnf.use_tempaddr > 0) {
+			if ((create || list_empty(&in6_dev->tempaddr_list)) && in6_dev->cnf.use_tempaddr > 0) {
 				/*
 				 * When a new public address is created as described in [ADDRCONF],
-				 * also create a new temporary address.
+				 * also create a new temporary address. Also create a temporary
+				 * address if it's enabled but no temporary address currently exists.
 				 */
 				read_unlock_bh(&in6_dev->lock);
 				ipv6_create_tempaddr(ifp, NULL);
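The ipv6_create_tempaddr() change adds the public address's age in seconds, (jiffies - ifp->tstamp) / HZ, to the configured temporary lifetimes before clamping against the parent's lifetimes, so the clamp is applied on the same clock as the parent address. How the arithmetic plays out can be checked with a small standalone program (all values invented, in seconds; this shows only the min/clamp step, not the surrounding lifetime bookkeeping in addrconf.c):

	#include <stdio.h>

	static unsigned int min_u32(unsigned int a, unsigned int b)
	{
		return a < b ? a : b;
	}

	int main(void)
	{
		unsigned int pub_valid = 7000, pub_prefered = 3600;   /* parent ifp */
		unsigned int cfg_valid = 6000, cfg_prefered = 3000;   /* idev->cnf  */
		unsigned int desync = 600;	/* max_desync_factor            */
		unsigned int age = 1200;	/* (jiffies - ifp->tstamp) / HZ */

		unsigned int tmp_valid = min_u32(pub_valid, cfg_valid + age);
		unsigned int tmp_prefered = min_u32(pub_prefered,
						    cfg_prefered + age - desync);

		printf("valid=%u prefered=%u\n", tmp_valid, tmp_prefered);
		return 0;
	}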
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index c2c0f89397b1..2a59610c2a58 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1284,6 +1284,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 		t = netdev_priv(dev);
 
 		ip6_tnl_unlink(ip6n, t);
+		synchronize_net();
 		err = ip6_tnl_change(t, &p);
 		ip6_tnl_link(ip6n, t);
 		netdev_state_change(dev);
@@ -1371,6 +1372,7 @@ static void ip6_tnl_dev_setup(struct net_device *dev)
 	dev->flags |= IFF_NOARP;
 	dev->addr_len = sizeof(struct in6_addr);
 	dev->features |= NETIF_F_NETNS_LOCAL;
+	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
 }
 
 
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 0553867a317f..d1770e061c08 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -343,6 +343,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 		break;
 
 	case IPV6_TRANSPARENT:
+		if (!capable(CAP_NET_ADMIN)) {
+			retv = -EPERM;
+			break;
+		}
 		if (optlen < sizeof(int))
 			goto e_inval;
 		/* we don't have a separate transparent bit for IPV6 we use the one in the IPv4 socket */
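IPV6_TRANSPARENT now gets the same privilege gate as IPv4's IP_TRANSPARENT: the capability check comes first, before optlen validation, so unprivileged callers are refused with -EPERM regardless of what they pass. A minimal kernel-style sketch of such a gated option handler; capable(), CAP_NET_ADMIN and inet_sk() are the real APIs, while the helper itself is invented:

	static int set_transparent(struct sock *sk, int optlen, int valbool)
	{
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;		/* privilege check first */
		if (optlen < (int)sizeof(int))
			return -EINVAL;
		inet_sk(sk)->transparent = valbool;
		return 0;
	}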
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 44d2eeac089b..448464844a25 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -5,10 +5,15 @@
5menu "IPv6: Netfilter Configuration" 5menu "IPv6: Netfilter Configuration"
6 depends on INET && IPV6 && NETFILTER 6 depends on INET && IPV6 && NETFILTER
7 7
8config NF_DEFRAG_IPV6
9 tristate
10 default n
11
8config NF_CONNTRACK_IPV6 12config NF_CONNTRACK_IPV6
9 tristate "IPv6 connection tracking support" 13 tristate "IPv6 connection tracking support"
10 depends on INET && IPV6 && NF_CONNTRACK 14 depends on INET && IPV6 && NF_CONNTRACK
11 default m if NETFILTER_ADVANCED=n 15 default m if NETFILTER_ADVANCED=n
16 select NF_DEFRAG_IPV6
12 ---help--- 17 ---help---
13 Connection tracking keeps a record of what packets have passed 18 Connection tracking keeps a record of what packets have passed
14 through your machine, in order to figure out how they are related 19 through your machine, in order to figure out how they are related
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index 3f8e4a3d83ce..0a432c9b0795 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -12,11 +12,14 @@ obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o
 
 # objects for l3 independent conntrack
 nf_conntrack_ipv6-objs := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o
-nf_defrag_ipv6-objs := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o
 
 # l3 independent conntrack
 obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o nf_defrag_ipv6.o
 
+# defrag
+nf_defrag_ipv6-objs := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o
+obj-$(CONFIG_NF_DEFRAG_IPV6) += nf_defrag_ipv6.o
+
 # matches
 obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
 obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 489d71b844ac..3a3f129a44cb 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -625,21 +625,24 @@ int nf_ct_frag6_init(void)
 	inet_frags_init_net(&nf_init_frags);
 	inet_frags_init(&nf_frags);
 
+#ifdef CONFIG_SYSCTL
 	nf_ct_frag6_sysctl_header = register_sysctl_paths(nf_net_netfilter_sysctl_path,
 							  nf_ct_frag6_sysctl_table);
 	if (!nf_ct_frag6_sysctl_header) {
 		inet_frags_fini(&nf_frags);
 		return -ENOMEM;
 	}
+#endif
 
 	return 0;
 }
 
 void nf_ct_frag6_cleanup(void)
 {
+#ifdef CONFIG_SYSCTL
 	unregister_sysctl_table(nf_ct_frag6_sysctl_header);
 	nf_ct_frag6_sysctl_header = NULL;
-
+#endif
 	inet_frags_fini(&nf_frags);
 
 	nf_init_frags.low_thresh = 0;
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c
index 9bb936ae2452..9a7978fdc02a 100644
--- a/net/ipv6/protocol.c
+++ b/net/ipv6/protocol.c
@@ -25,13 +25,14 @@
 #include <linux/spinlock.h>
 #include <net/protocol.h>
 
-const struct inet6_protocol *inet6_protos[MAX_INET_PROTOS] __read_mostly;
+const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS] __read_mostly;
 
 int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol)
 {
 	int hash = protocol & (MAX_INET_PROTOS - 1);
 
-	return !cmpxchg(&inet6_protos[hash], NULL, prot) ? 0 : -1;
+	return !cmpxchg((const struct inet6_protocol **)&inet6_protos[hash],
+			NULL, prot) ? 0 : -1;
 }
 EXPORT_SYMBOL(inet6_add_protocol);
 
@@ -43,7 +44,8 @@ int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol
 {
 	int ret, hash = protocol & (MAX_INET_PROTOS - 1);
 
-	ret = (cmpxchg(&inet6_protos[hash], prot, NULL) == prot) ? 0 : -1;
+	ret = (cmpxchg((const struct inet6_protocol **)&inet6_protos[hash],
+		       prot, NULL) == prot) ? 0 : -1;
 
 	synchronize_net();
 
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 45e6efb7f171..86c39526ba5e 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -373,7 +373,7 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
 
 static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
 {
-	if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
+	if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
 	    skb_checksum_complete(skb)) {
 		atomic_inc(&sk->sk_drops);
 		kfree_skb(skb);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 367a6cc584cc..d6bfaec3bbbf 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -963,6 +963,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
 		}
 		t = netdev_priv(dev);
 		ipip6_tunnel_unlink(sitn, t);
+		synchronize_net();
 		t->parms.iph.saddr = p.iph.saddr;
 		t->parms.iph.daddr = p.iph.daddr;
 		memcpy(dev->dev_addr, &p.iph.saddr, 4);
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index d9864725d0c6..4f3cec12aa85 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -30,23 +30,26 @@
 #include <net/protocol.h>
 #include <net/xfrm.h>
 
-static struct xfrm6_tunnel *tunnel6_handlers __read_mostly;
-static struct xfrm6_tunnel *tunnel46_handlers __read_mostly;
+static struct xfrm6_tunnel __rcu *tunnel6_handlers __read_mostly;
+static struct xfrm6_tunnel __rcu *tunnel46_handlers __read_mostly;
 static DEFINE_MUTEX(tunnel6_mutex);
 
 int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family)
 {
-	struct xfrm6_tunnel **pprev;
+	struct xfrm6_tunnel __rcu **pprev;
+	struct xfrm6_tunnel *t;
 	int ret = -EEXIST;
 	int priority = handler->priority;
 
 	mutex_lock(&tunnel6_mutex);
 
 	for (pprev = (family == AF_INET6) ? &tunnel6_handlers : &tunnel46_handlers;
-	     *pprev; pprev = &(*pprev)->next) {
-		if ((*pprev)->priority > priority)
+	     (t = rcu_dereference_protected(*pprev,
+			lockdep_is_held(&tunnel6_mutex))) != NULL;
+	     pprev = &t->next) {
+		if (t->priority > priority)
 			break;
-		if ((*pprev)->priority == priority)
+		if (t->priority == priority)
 			goto err;
 	}
 
@@ -65,14 +68,17 @@ EXPORT_SYMBOL(xfrm6_tunnel_register);
 
 int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family)
 {
-	struct xfrm6_tunnel **pprev;
+	struct xfrm6_tunnel __rcu **pprev;
+	struct xfrm6_tunnel *t;
 	int ret = -ENOENT;
 
 	mutex_lock(&tunnel6_mutex);
 
 	for (pprev = (family == AF_INET6) ? &tunnel6_handlers : &tunnel46_handlers;
-	     *pprev; pprev = &(*pprev)->next) {
-		if (*pprev == handler) {
+	     (t = rcu_dereference_protected(*pprev,
+			lockdep_is_held(&tunnel6_mutex))) != NULL;
+	     pprev = &t->next) {
+		if (t == handler) {
 			*pprev = handler->next;
 			ret = 0;
 			break;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index c84dad432114..91def93bec85 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -527,7 +527,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
 		}
 	}
 
-	if (sk->sk_filter) {
+	if (rcu_dereference_raw(sk->sk_filter)) {
 		if (udp_lib_checksum_complete(skb))
 			goto drop;
 	}
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 1712af1c7b3f..c64ce0a0bb03 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -111,6 +111,10 @@ struct l2tp_net {
 	spinlock_t l2tp_session_hlist_lock;
 };
 
+static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
+static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
+static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
+
 static inline struct l2tp_net *l2tp_pernet(struct net *net)
 {
 	BUG_ON(!net);
@@ -118,6 +122,34 @@ static inline struct l2tp_net *l2tp_pernet(struct net *net)
 	return net_generic(net, l2tp_net_id);
 }
 
+
+/* Tunnel reference counts. Incremented per session that is added to
+ * the tunnel.
+ */
+static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
+{
+	atomic_inc(&tunnel->ref_count);
+}
+
+static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
+{
+	if (atomic_dec_and_test(&tunnel->ref_count))
+		l2tp_tunnel_free(tunnel);
+}
+#ifdef L2TP_REFCNT_DEBUG
+#define l2tp_tunnel_inc_refcount(_t) do { \
+		printk(KERN_DEBUG "l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
+		l2tp_tunnel_inc_refcount_1(_t); \
+	} while (0)
+#define l2tp_tunnel_dec_refcount(_t) do { \
+		printk(KERN_DEBUG "l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
+		l2tp_tunnel_dec_refcount_1(_t); \
+	} while (0)
+#else
+#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
+#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
+#endif
+
 /* Session hash global list for L2TPv3.
  * The session_id SHOULD be random according to RFC3931, but several
  * L2TP implementations use incrementing session_ids. So we do a real
@@ -699,8 +731,8 @@ EXPORT_SYMBOL(l2tp_recv_common);
  * Returns 1 if the packet was not a good data packet and could not be
  * forwarded.  All such packets are passed up to userspace to deal with.
  */
-int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
-		       int (*payload_hook)(struct sk_buff *skb))
+static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
+			      int (*payload_hook)(struct sk_buff *skb))
 {
 	struct l2tp_session *session = NULL;
 	unsigned char *ptr, *optr;
@@ -812,7 +844,6 @@ error:
 
 	return 1;
 }
-EXPORT_SYMBOL_GPL(l2tp_udp_recv_core);
 
 /* UDP encapsulation receive handler. See net/ipv4/udp.c.
  * Return codes:
@@ -922,7 +953,8 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
 	return bufp - optr;
 }
 
-int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t data_len)
+static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
+			  size_t data_len)
 {
 	struct l2tp_tunnel *tunnel = session->tunnel;
 	unsigned int len = skb->len;
@@ -970,7 +1002,6 @@ int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t dat
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(l2tp_xmit_core);
 
 /* Automatically called when the skb is freed.
 */
@@ -1089,7 +1120,7 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
 * The tunnel context is deleted only when all session sockets have been
 * closed.
 */
-void l2tp_tunnel_destruct(struct sock *sk)
+static void l2tp_tunnel_destruct(struct sock *sk)
 {
 	struct l2tp_tunnel *tunnel;
 
@@ -1128,11 +1159,10 @@ void l2tp_tunnel_destruct(struct sock *sk)
 end:
 	return;
 }
-EXPORT_SYMBOL(l2tp_tunnel_destruct);
 
 /* When the tunnel is closed, all the attached sessions need to go too.
 */
-void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
+static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
 {
 	int hash;
 	struct hlist_node *walk;
@@ -1193,12 +1223,11 @@ again:
 	}
 	write_unlock_bh(&tunnel->hlist_lock);
 }
-EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
 
 /* Really kill the tunnel.
 * Come here only when all sessions have been cleared from the tunnel.
 */
-void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
+static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
 {
 	struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
 
@@ -1217,7 +1246,6 @@ void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
 	atomic_dec(&l2tp_tunnel_count);
 	kfree(tunnel);
 }
-EXPORT_SYMBOL_GPL(l2tp_tunnel_free);
 
 /* Create a socket for the tunnel, if one isn't set up by
 * userspace. This is used for static tunnels where there is no
@@ -1512,7 +1540,7 @@ EXPORT_SYMBOL_GPL(l2tp_session_delete);
 /* We come here whenever a session's send_seq, cookie_len or
 * l2specific_len parameters are set.
 */
-void l2tp_session_set_header_len(struct l2tp_session *session, int version)
+static void l2tp_session_set_header_len(struct l2tp_session *session, int version)
 {
 	if (version == L2TP_HDR_VER_2) {
 		session->hdr_len = 6;
@@ -1525,7 +1553,6 @@ void l2tp_session_set_header_len(struct l2tp_session *session, int version)
 	}
 
 }
-EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
 
 struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
 {
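With the helpers and debug macros moved into l2tp_core.c, l2tp_tunnel_free() can become static and the refcounting contract is enforced in one file: whoever drops the count to zero frees the tunnel. The same dec-and-test pattern in a runnable userspace form (C11 atomics; struct tunnel here is a stand-in, not the kernel structure):

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct tunnel {
		atomic_int ref_count;
	};

	static void tunnel_get(struct tunnel *t)
	{
		atomic_fetch_add(&t->ref_count, 1);
	}

	static void tunnel_put(struct tunnel *t)
	{
		/* fetch_sub returns the old value: old == 1 means we hit zero */
		if (atomic_fetch_sub(&t->ref_count, 1) == 1) {
			puts("last reference dropped, freeing");
			free(t);
		}
	}

	int main(void)
	{
		struct tunnel *t = malloc(sizeof(*t));

		if (!t)
			return 1;
		atomic_init(&t->ref_count, 1);	/* creator's reference      */
		tunnel_get(t);			/* a session takes a ref    */
		tunnel_put(t);			/* the session goes away    */
		tunnel_put(t);			/* creator: count hits zero */
		return 0;
	}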
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index f0f318edd3f1..a16a48e79fab 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -231,48 +231,15 @@ extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_i
 extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
 extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg);
 extern int l2tp_session_delete(struct l2tp_session *session);
-extern void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
 extern void l2tp_session_free(struct l2tp_session *session);
 extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb));
-extern int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, int (*payload_hook)(struct sk_buff *skb));
 extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
 
-extern int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t data_len);
 extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len);
-extern void l2tp_tunnel_destruct(struct sock *sk);
-extern void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
-extern void l2tp_session_set_header_len(struct l2tp_session *session, int version);
 
 extern int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops);
 extern void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
 
-/* Tunnel reference counts. Incremented per session that is added to
- * the tunnel.
- */
-static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
-{
-	atomic_inc(&tunnel->ref_count);
-}
-
-static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
-{
-	if (atomic_dec_and_test(&tunnel->ref_count))
-		l2tp_tunnel_free(tunnel);
-}
-#ifdef L2TP_REFCNT_DEBUG
-#define l2tp_tunnel_inc_refcount(_t) do { \
-		printk(KERN_DEBUG "l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
-		l2tp_tunnel_inc_refcount_1(_t); \
-	} while (0)
-#define l2tp_tunnel_dec_refcount(_t) do { \
-		printk(KERN_DEBUG "l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
-		l2tp_tunnel_dec_refcount_1(_t); \
-	} while (0)
-#else
-#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
-#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
-#endif
-
 /* Session reference counts. Incremented when code obtains a reference
  * to a session.
  */
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 1c770c0644d1..0bf6a59545ab 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -576,7 +576,7 @@ out:
 	return copied;
 }
 
-struct proto l2tp_ip_prot = {
+static struct proto l2tp_ip_prot = {
 	.name		   = "L2TP/IP",
 	.owner		   = THIS_MODULE,
 	.init		   = l2tp_ip_open,
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index ff60c022f51d..239c4836a946 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -456,6 +456,7 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
 	if (!sta)
 		return NULL;
 
+	sta->last_rx = jiffies;
 	set_sta_flags(sta, WLAN_STA_AUTHORIZED);
 
 	/* make sure mandatory rates are always added */
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 22bc42b18991..6b322fa681f5 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -748,7 +748,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 	hw->queues = IEEE80211_MAX_QUEUES;
 
 	local->workqueue =
-		create_singlethread_workqueue(wiphy_name(local->hw.wiphy));
+		alloc_ordered_workqueue(wiphy_name(local->hw.wiphy), 0);
 	if (!local->workqueue) {
 		result = -ENOMEM;
 		goto fail_workqueue;
@@ -962,12 +962,6 @@ static void __exit ieee80211_exit(void)
 	rc80211_minstrel_ht_exit();
 	rc80211_minstrel_exit();
 
-	/*
-	 * For key todo, it'll be empty by now but the work
-	 * might still be scheduled.
-	 */
-	flush_scheduled_work();
-
 	if (mesh_allocated)
 		ieee80211s_stop();
 
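create_singlethread_workqueue() is replaced by its cmwq-era equivalent, alloc_ordered_workqueue(name, 0), which still guarantees that at most one work item executes at a time and in queueing order; with the driver flushing its own queue on destroy, the global flush_scheduled_work() in the exit path becomes unnecessary and is dropped. A hedged kernel-style sketch of the pairing (the mydrv names are invented):

	static struct workqueue_struct *mydrv_wq;

	static int mydrv_start(void)
	{
		/* ordered: one item at a time, strictly in queueing order */
		mydrv_wq = alloc_ordered_workqueue("mydrv", 0);
		if (!mydrv_wq)
			return -ENOMEM;
		return 0;
	}

	static void mydrv_stop(void)
	{
		destroy_workqueue(mydrv_wq);	/* flushes pending work itself */
	}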
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 809cf230d251..33f76993da08 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -329,6 +329,9 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
 	 * if needed.
 	 */
 	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+		/* Skip invalid rates */
+		if (info->control.rates[i].idx < 0)
+			break;
 		/* Rate masking supports only legacy rates for now */
 		if (info->control.rates[i].flags & IEEE80211_TX_RC_MCS)
 			continue;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 43288259f4a1..1534f2b44caf 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -525,6 +525,7 @@ config NETFILTER_XT_TARGET_TPROXY
 	depends on NETFILTER_XTABLES
 	depends on NETFILTER_ADVANCED
 	select NF_DEFRAG_IPV4
+	select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
 	help
 	  This option adds a `TPROXY' target, which is somewhat similar to
 	  REDIRECT.  It can only be used in the mangle table and is useful
@@ -927,6 +928,7 @@ config NETFILTER_XT_MATCH_SOCKET
 	depends on NETFILTER_ADVANCED
 	depends on !NF_CONNTRACK || NF_CONNTRACK
 	select NF_DEFRAG_IPV4
+	select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
 	help
 	  This option adds a `socket' match, which can be used to match
 	  packets for which a TCP or UDP socket lookup finds a valid socket.
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 19c482caf30b..640678f47a2a 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -21,7 +21,9 @@
21#include <linux/netfilter_ipv4/ip_tables.h> 21#include <linux/netfilter_ipv4/ip_tables.h>
22 22
23#include <net/netfilter/ipv4/nf_defrag_ipv4.h> 23#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
24#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 24
25#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
26#define XT_TPROXY_HAVE_IPV6 1
25#include <net/if_inet6.h> 27#include <net/if_inet6.h>
26#include <net/addrconf.h> 28#include <net/addrconf.h>
27#include <linux/netfilter_ipv6/ip6_tables.h> 29#include <linux/netfilter_ipv6/ip6_tables.h>
@@ -172,7 +174,7 @@ tproxy_tg4_v1(struct sk_buff *skb, const struct xt_action_param *par)
172 return tproxy_tg4(skb, tgi->laddr.ip, tgi->lport, tgi->mark_mask, tgi->mark_value); 174 return tproxy_tg4(skb, tgi->laddr.ip, tgi->lport, tgi->mark_mask, tgi->mark_value);
173} 175}
174 176
175#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 177#ifdef XT_TPROXY_HAVE_IPV6
176 178
177static inline const struct in6_addr * 179static inline const struct in6_addr *
178tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr, 180tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr,
@@ -372,7 +374,7 @@ static struct xt_target tproxy_tg_reg[] __read_mostly = {
372 .hooks = 1 << NF_INET_PRE_ROUTING, 374 .hooks = 1 << NF_INET_PRE_ROUTING,
373 .me = THIS_MODULE, 375 .me = THIS_MODULE,
374 }, 376 },
375#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 377#ifdef XT_TPROXY_HAVE_IPV6
376 { 378 {
377 .name = "TPROXY", 379 .name = "TPROXY",
378 .family = NFPROTO_IPV6, 380 .family = NFPROTO_IPV6,
@@ -391,7 +393,7 @@ static struct xt_target tproxy_tg_reg[] __read_mostly = {
 static int __init tproxy_tg_init(void)
 {
 	nf_defrag_ipv4_enable();
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_TPROXY_HAVE_IPV6
 	nf_defrag_ipv6_enable();
 #endif
 
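The change replaces repeated `defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)` tests, which were too broad (IPv6 itself may be enabled while IPv6 netfilter is not), with a single XT_TPROXY_HAVE_IPV6 macro derived from the actual dependency; the xt_socket.c change below mirrors it with XT_SOCKET_HAVE_IPV6. A condensed sketch of the idiom (example_init and MY_HAVE_IPV6 are illustrative names):

	#include <linux/init.h>

	#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
	#define MY_HAVE_IPV6 1
	#endif

	static int __init example_init(void)
	{
	#ifdef MY_HAVE_IPV6
		/* IPv6-only setup, e.g. nf_defrag_ipv6_enable() */
	#endif
		return 0;
	}

Keeping the condition in one place means every later #ifdef tests the same macro, so the dependency cannot silently drift between call sites.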
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 2dbd4c857735..d94a858dc52a 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -14,7 +14,6 @@
 #include <linux/skbuff.h>
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv6/ip6_tables.h>
 #include <net/tcp.h>
 #include <net/udp.h>
 #include <net/icmp.h>
@@ -22,7 +21,12 @@
 #include <net/inet_sock.h>
 #include <net/netfilter/nf_tproxy_core.h>
 #include <net/netfilter/ipv4/nf_defrag_ipv4.h>
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#define XT_SOCKET_HAVE_IPV6 1
+#include <linux/netfilter_ipv6/ip6_tables.h>
 #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
+#endif
 
 #include <linux/netfilter/xt_socket.h>
 
@@ -186,7 +190,7 @@ socket_mt4_v1(const struct sk_buff *skb, struct xt_action_param *par)
 	return socket_match(skb, par, par->matchinfo);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_SOCKET_HAVE_IPV6
 
 static int
 extract_icmp6_fields(const struct sk_buff *skb,
@@ -331,7 +335,7 @@ static struct xt_match socket_mt_reg[] __read_mostly = {
 				  (1 << NF_INET_LOCAL_IN),
 		.me		= THIS_MODULE,
 	},
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_SOCKET_HAVE_IPV6
 	{
 		.name		= "socket",
 		.revision	= 1,
@@ -348,7 +352,7 @@ static struct xt_match socket_mt_reg[] __read_mostly = {
 static int __init socket_mt_init(void)
 {
 	nf_defrag_ipv4_enable();
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_SOCKET_HAVE_IPV6
 	nf_defrag_ipv6_enable();
 #endif
 
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index cd96ed3ccee4..478181d53c55 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -83,9 +83,9 @@ struct netlink_sock {
 	struct module		*module;
 };
 
-struct listeners_rcu_head {
-	struct rcu_head rcu_head;
-	void *ptr;
+struct listeners {
+	struct rcu_head		rcu;
+	unsigned long		masks[0];
 };
 
 #define NETLINK_KERNEL_SOCKET	0x1
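The old code allocated the bitmap with a spare struct listeners_rcu_head tacked on behind it and located that header by pointer arithmetic at free time; the new struct listeners puts the rcu_head first and sizes the bitmap as a trailing array in the same allocation (the kernel of this era writes masks[0]; the C99 flexible array member masks[] is the equivalent). A userspace-flavored sketch of the layout, with stub types and illustrative names:

	#include <stdlib.h>

	struct rcu_head_stub {			/* stands in for struct rcu_head */
		void (*func)(struct rcu_head_stub *);
	};

	struct listeners_sketch {
		struct rcu_head_stub rcu;	/* reclaim linkage, same block */
		unsigned long masks[];		/* bitmap sized at allocation */
	};

	static struct listeners_sketch *alloc_listeners(size_t nlongs)
	{
		/* one allocation covers both header and bitmap, as with
		 * kzalloc(sizeof(*listeners) + NLGRPSZ(groups)) above */
		return calloc(1, sizeof(struct listeners_sketch) +
				 nlongs * sizeof(unsigned long));
	}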
@@ -119,7 +119,7 @@ struct nl_pid_hash {
 struct netlink_table {
 	struct nl_pid_hash	hash;
 	struct hlist_head	mc_list;
-	unsigned long		*listeners;
+	struct listeners __rcu	*listeners;
 	unsigned int		nl_nonroot;
 	unsigned int		groups;
 	struct mutex		*cb_mutex;
@@ -338,7 +338,7 @@ netlink_update_listeners(struct sock *sk)
 			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
 				mask |= nlk_sk(sk)->groups[i];
 		}
-		tbl->listeners[i] = mask;
+		tbl->listeners->masks[i] = mask;
 	}
 	/* this function is only called with the netlink table "grabbed", which
 	 * makes sure updates are visible before bind or setsockopt return. */
@@ -936,7 +936,7 @@ EXPORT_SYMBOL(netlink_unicast);
 int netlink_has_listeners(struct sock *sk, unsigned int group)
 {
 	int res = 0;
-	unsigned long *listeners;
+	struct listeners *listeners;
 
 	BUG_ON(!netlink_is_kernel(sk));
 
@@ -944,7 +944,7 @@ int netlink_has_listeners(struct sock *sk, unsigned int group)
 	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
 
 	if (group - 1 < nl_table[sk->sk_protocol].groups)
-		res = test_bit(group - 1, listeners);
+		res = test_bit(group - 1, listeners->masks);
 
 	rcu_read_unlock();
 
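With listeners now published as an __rcu pointer, this reader samples it under rcu_read_lock() and may keep using the sampled copy even while an updater swaps in a larger bitmap. A kernel-style sketch of the reader side (sketch_listeners and sketch_has_listener are illustrative names, not netlink symbols):

	#include <linux/rcupdate.h>
	#include <linux/bitops.h>
	#include <linux/types.h>

	struct listeners_sketch {
		struct rcu_head rcu;
		unsigned long masks[];
	};

	static struct listeners_sketch __rcu *sketch_listeners;

	static bool sketch_has_listener(unsigned int group)
	{
		struct listeners_sketch *l;
		bool res;

		rcu_read_lock();
		l = rcu_dereference(sketch_listeners);	/* stable snapshot */
		res = l && test_bit(group, l->masks);
		rcu_read_unlock();		/* grace period may now elapse */
		return res;
	}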
@@ -1498,7 +1498,7 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
 	struct socket *sock;
 	struct sock *sk;
 	struct netlink_sock *nlk;
-	unsigned long *listeners = NULL;
+	struct listeners *listeners = NULL;
 
 	BUG_ON(!nl_table);
 
@@ -1523,8 +1523,7 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
 	if (groups < 32)
 		groups = 32;
 
-	listeners = kzalloc(NLGRPSZ(groups) + sizeof(struct listeners_rcu_head),
-			    GFP_KERNEL);
+	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
 	if (!listeners)
 		goto out_sock_release;
 
@@ -1541,7 +1540,7 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
 	netlink_table_grab();
 	if (!nl_table[unit].registered) {
 		nl_table[unit].groups = groups;
-		nl_table[unit].listeners = listeners;
+		rcu_assign_pointer(nl_table[unit].listeners, listeners);
 		nl_table[unit].cb_mutex = cb_mutex;
 		nl_table[unit].module = module;
 		nl_table[unit].registered = 1;
@@ -1572,43 +1571,28 @@ netlink_kernel_release(struct sock *sk)
 EXPORT_SYMBOL(netlink_kernel_release);
 
 
-static void netlink_free_old_listeners(struct rcu_head *rcu_head)
+static void listeners_free_rcu(struct rcu_head *head)
 {
-	struct listeners_rcu_head *lrh;
-
-	lrh = container_of(rcu_head, struct listeners_rcu_head, rcu_head);
-	kfree(lrh->ptr);
+	kfree(container_of(head, struct listeners, rcu));
 }
 
 int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
 {
-	unsigned long *listeners, *old = NULL;
-	struct listeners_rcu_head *old_rcu_head;
+	struct listeners *new, *old;
 	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
 
 	if (groups < 32)
 		groups = 32;
 
 	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
-		listeners = kzalloc(NLGRPSZ(groups) +
-				    sizeof(struct listeners_rcu_head),
-				    GFP_ATOMIC);
-		if (!listeners)
+		new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
+		if (!new)
 			return -ENOMEM;
-		old = tbl->listeners;
-		memcpy(listeners, old, NLGRPSZ(tbl->groups));
-		rcu_assign_pointer(tbl->listeners, listeners);
-		/*
-		 * Free the old memory after an RCU grace period so we
-		 * don't leak it. We use call_rcu() here in order to be
-		 * able to call this function from atomic contexts. The
-		 * allocation of this memory will have reserved enough
-		 * space for struct listeners_rcu_head at the end.
-		 */
-		old_rcu_head = (void *)(tbl->listeners +
-					NLGRPLONGS(tbl->groups));
-		old_rcu_head->ptr = old;
-		call_rcu(&old_rcu_head->rcu_head, netlink_free_old_listeners);
+		old = rcu_dereference_raw(tbl->listeners);
+		memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
+		rcu_assign_pointer(tbl->listeners, new);
+
+		call_rcu(&old->rcu, listeners_free_rcu);
 	}
 	tbl->groups = groups;
 
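The rewrite keeps the update protocol while dropping the bookkeeping: copy the old masks into a fresh, larger allocation, publish the new block with rcu_assign_pointer(), and hand the old block to call_rcu() so it is freed only after all readers that might still hold it have left their read-side critical sections (which is also why this is safe from atomic context). A kernel-style sketch of that sequence, with illustrative names and assuming the caller serializes updates:

	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/rcupdate.h>

	struct listeners_sketch {		/* as in the reader sketch */
		struct rcu_head rcu;
		unsigned long masks[];
	};

	static struct listeners_sketch __rcu *sketch_listeners;

	static void sketch_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct listeners_sketch, rcu));
	}

	static int sketch_grow(size_t old_sz, size_t new_sz)
	{
		struct listeners_sketch *new, *old;

		new = kzalloc(sizeof(*new) + new_sz, GFP_ATOMIC);
		if (!new)
			return -ENOMEM;
		/* updates are serialized by the caller, so raw deref is OK */
		old = rcu_dereference_raw(sketch_listeners);
		memcpy(new->masks, old->masks, old_sz);
		rcu_assign_pointer(sketch_listeners, new);	/* publish */
		call_rcu(&old->rcu, sketch_free_rcu);		/* defer free */
		return 0;
	}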
@@ -2104,18 +2088,17 @@ static void __net_exit netlink_net_exit(struct net *net)
 
 static void __init netlink_add_usersock_entry(void)
 {
-	unsigned long *listeners;
+	struct listeners *listeners;
 	int groups = 32;
 
-	listeners = kzalloc(NLGRPSZ(groups) + sizeof(struct listeners_rcu_head),
-			    GFP_KERNEL);
+	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
 	if (!listeners)
-		panic("netlink_add_usersock_entry: Cannot allocate listneres\n");
+		panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
 
 	netlink_table_grab();
 
 	nl_table[NETLINK_USERSOCK].groups = groups;
-	nl_table[NETLINK_USERSOCK].listeners = listeners;
+	rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
 	nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
 	nl_table[NETLINK_USERSOCK].registered = 1;
 
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index d14bbf960c18..4b9f8912526c 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1167,7 +1167,7 @@ static int ignore_request(struct wiphy *wiphy,
 				return 0;
 			return -EALREADY;
 		}
-		return REG_INTERSECT;
+		return 0;
 	case NL80211_REGDOM_SET_BY_DRIVER:
 		if (last_request->initiator == NL80211_REGDOM_SET_BY_CORE) {
 			if (regdom_changes(pending_request->alpha2))