diff options
61 files changed, 1662 insertions, 341 deletions
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 53326fed6c81..ab26bbc2a1d3 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c | |||
@@ -58,8 +58,8 @@ | |||
58 | #include "bnx2_fw.h" | 58 | #include "bnx2_fw.h" |
59 | 59 | ||
60 | #define DRV_MODULE_NAME "bnx2" | 60 | #define DRV_MODULE_NAME "bnx2" |
61 | #define DRV_MODULE_VERSION "2.0.8" | 61 | #define DRV_MODULE_VERSION "2.0.9" |
62 | #define DRV_MODULE_RELDATE "Feb 15, 2010" | 62 | #define DRV_MODULE_RELDATE "April 27, 2010" |
63 | #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw" | 63 | #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw" |
64 | #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw" | 64 | #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw" |
65 | #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j9.fw" | 65 | #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j9.fw" |
@@ -651,9 +651,10 @@ bnx2_napi_enable(struct bnx2 *bp) | |||
651 | } | 651 | } |
652 | 652 | ||
653 | static void | 653 | static void |
654 | bnx2_netif_stop(struct bnx2 *bp) | 654 | bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic) |
655 | { | 655 | { |
656 | bnx2_cnic_stop(bp); | 656 | if (stop_cnic) |
657 | bnx2_cnic_stop(bp); | ||
657 | if (netif_running(bp->dev)) { | 658 | if (netif_running(bp->dev)) { |
658 | int i; | 659 | int i; |
659 | 660 | ||
@@ -671,14 +672,15 @@ bnx2_netif_stop(struct bnx2 *bp) | |||
671 | } | 672 | } |
672 | 673 | ||
673 | static void | 674 | static void |
674 | bnx2_netif_start(struct bnx2 *bp) | 675 | bnx2_netif_start(struct bnx2 *bp, bool start_cnic) |
675 | { | 676 | { |
676 | if (atomic_dec_and_test(&bp->intr_sem)) { | 677 | if (atomic_dec_and_test(&bp->intr_sem)) { |
677 | if (netif_running(bp->dev)) { | 678 | if (netif_running(bp->dev)) { |
678 | netif_tx_wake_all_queues(bp->dev); | 679 | netif_tx_wake_all_queues(bp->dev); |
679 | bnx2_napi_enable(bp); | 680 | bnx2_napi_enable(bp); |
680 | bnx2_enable_int(bp); | 681 | bnx2_enable_int(bp); |
681 | bnx2_cnic_start(bp); | 682 | if (start_cnic) |
683 | bnx2_cnic_start(bp); | ||
682 | } | 684 | } |
683 | } | 685 | } |
684 | } | 686 | } |
@@ -4758,8 +4760,12 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) | |||
4758 | rc = bnx2_alloc_bad_rbuf(bp); | 4760 | rc = bnx2_alloc_bad_rbuf(bp); |
4759 | } | 4761 | } |
4760 | 4762 | ||
4761 | if (bp->flags & BNX2_FLAG_USING_MSIX) | 4763 | if (bp->flags & BNX2_FLAG_USING_MSIX) { |
4762 | bnx2_setup_msix_tbl(bp); | 4764 | bnx2_setup_msix_tbl(bp); |
4765 | /* Prevent MSIX table reads and write from timing out */ | ||
4766 | REG_WR(bp, BNX2_MISC_ECO_HW_CTL, | ||
4767 | BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN); | ||
4768 | } | ||
4763 | 4769 | ||
4764 | return rc; | 4770 | return rc; |
4765 | } | 4771 | } |
@@ -6272,12 +6278,12 @@ bnx2_reset_task(struct work_struct *work) | |||
6272 | return; | 6278 | return; |
6273 | } | 6279 | } |
6274 | 6280 | ||
6275 | bnx2_netif_stop(bp); | 6281 | bnx2_netif_stop(bp, true); |
6276 | 6282 | ||
6277 | bnx2_init_nic(bp, 1); | 6283 | bnx2_init_nic(bp, 1); |
6278 | 6284 | ||
6279 | atomic_set(&bp->intr_sem, 1); | 6285 | atomic_set(&bp->intr_sem, 1); |
6280 | bnx2_netif_start(bp); | 6286 | bnx2_netif_start(bp, true); |
6281 | rtnl_unlock(); | 6287 | rtnl_unlock(); |
6282 | } | 6288 | } |
6283 | 6289 | ||
@@ -6319,7 +6325,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp) | |||
6319 | struct bnx2 *bp = netdev_priv(dev); | 6325 | struct bnx2 *bp = netdev_priv(dev); |
6320 | 6326 | ||
6321 | if (netif_running(dev)) | 6327 | if (netif_running(dev)) |
6322 | bnx2_netif_stop(bp); | 6328 | bnx2_netif_stop(bp, false); |
6323 | 6329 | ||
6324 | bp->vlgrp = vlgrp; | 6330 | bp->vlgrp = vlgrp; |
6325 | 6331 | ||
@@ -6330,7 +6336,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp) | |||
6330 | if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN) | 6336 | if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN) |
6331 | bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1); | 6337 | bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1); |
6332 | 6338 | ||
6333 | bnx2_netif_start(bp); | 6339 | bnx2_netif_start(bp, false); |
6334 | } | 6340 | } |
6335 | #endif | 6341 | #endif |
6336 | 6342 | ||
@@ -7050,9 +7056,9 @@ bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) | |||
7050 | bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS; | 7056 | bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS; |
7051 | 7057 | ||
7052 | if (netif_running(bp->dev)) { | 7058 | if (netif_running(bp->dev)) { |
7053 | bnx2_netif_stop(bp); | 7059 | bnx2_netif_stop(bp, true); |
7054 | bnx2_init_nic(bp, 0); | 7060 | bnx2_init_nic(bp, 0); |
7055 | bnx2_netif_start(bp); | 7061 | bnx2_netif_start(bp, true); |
7056 | } | 7062 | } |
7057 | 7063 | ||
7058 | return 0; | 7064 | return 0; |
@@ -7082,7 +7088,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx) | |||
7082 | /* Reset will erase chipset stats; save them */ | 7088 | /* Reset will erase chipset stats; save them */ |
7083 | bnx2_save_stats(bp); | 7089 | bnx2_save_stats(bp); |
7084 | 7090 | ||
7085 | bnx2_netif_stop(bp); | 7091 | bnx2_netif_stop(bp, true); |
7086 | bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); | 7092 | bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); |
7087 | bnx2_free_skbs(bp); | 7093 | bnx2_free_skbs(bp); |
7088 | bnx2_free_mem(bp); | 7094 | bnx2_free_mem(bp); |
@@ -7110,7 +7116,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx) | |||
7110 | bnx2_setup_cnic_irq_info(bp); | 7116 | bnx2_setup_cnic_irq_info(bp); |
7111 | mutex_unlock(&bp->cnic_lock); | 7117 | mutex_unlock(&bp->cnic_lock); |
7112 | #endif | 7118 | #endif |
7113 | bnx2_netif_start(bp); | 7119 | bnx2_netif_start(bp, true); |
7114 | } | 7120 | } |
7115 | return 0; | 7121 | return 0; |
7116 | } | 7122 | } |
@@ -7363,7 +7369,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf) | |||
7363 | if (etest->flags & ETH_TEST_FL_OFFLINE) { | 7369 | if (etest->flags & ETH_TEST_FL_OFFLINE) { |
7364 | int i; | 7370 | int i; |
7365 | 7371 | ||
7366 | bnx2_netif_stop(bp); | 7372 | bnx2_netif_stop(bp, true); |
7367 | bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG); | 7373 | bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG); |
7368 | bnx2_free_skbs(bp); | 7374 | bnx2_free_skbs(bp); |
7369 | 7375 | ||
@@ -7382,7 +7388,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf) | |||
7382 | bnx2_shutdown_chip(bp); | 7388 | bnx2_shutdown_chip(bp); |
7383 | else { | 7389 | else { |
7384 | bnx2_init_nic(bp, 1); | 7390 | bnx2_init_nic(bp, 1); |
7385 | bnx2_netif_start(bp); | 7391 | bnx2_netif_start(bp, true); |
7386 | } | 7392 | } |
7387 | 7393 | ||
7388 | /* wait for link up */ | 7394 | /* wait for link up */ |
@@ -8376,7 +8382,7 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state) | |||
8376 | return 0; | 8382 | return 0; |
8377 | 8383 | ||
8378 | flush_scheduled_work(); | 8384 | flush_scheduled_work(); |
8379 | bnx2_netif_stop(bp); | 8385 | bnx2_netif_stop(bp, true); |
8380 | netif_device_detach(dev); | 8386 | netif_device_detach(dev); |
8381 | del_timer_sync(&bp->timer); | 8387 | del_timer_sync(&bp->timer); |
8382 | bnx2_shutdown_chip(bp); | 8388 | bnx2_shutdown_chip(bp); |
@@ -8398,7 +8404,7 @@ bnx2_resume(struct pci_dev *pdev) | |||
8398 | bnx2_set_power_state(bp, PCI_D0); | 8404 | bnx2_set_power_state(bp, PCI_D0); |
8399 | netif_device_attach(dev); | 8405 | netif_device_attach(dev); |
8400 | bnx2_init_nic(bp, 1); | 8406 | bnx2_init_nic(bp, 1); |
8401 | bnx2_netif_start(bp); | 8407 | bnx2_netif_start(bp, true); |
8402 | return 0; | 8408 | return 0; |
8403 | } | 8409 | } |
8404 | 8410 | ||
@@ -8425,7 +8431,7 @@ static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev, | |||
8425 | } | 8431 | } |
8426 | 8432 | ||
8427 | if (netif_running(dev)) { | 8433 | if (netif_running(dev)) { |
8428 | bnx2_netif_stop(bp); | 8434 | bnx2_netif_stop(bp, true); |
8429 | del_timer_sync(&bp->timer); | 8435 | del_timer_sync(&bp->timer); |
8430 | bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET); | 8436 | bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET); |
8431 | } | 8437 | } |
@@ -8482,7 +8488,7 @@ static void bnx2_io_resume(struct pci_dev *pdev) | |||
8482 | 8488 | ||
8483 | rtnl_lock(); | 8489 | rtnl_lock(); |
8484 | if (netif_running(dev)) | 8490 | if (netif_running(dev)) |
8485 | bnx2_netif_start(bp); | 8491 | bnx2_netif_start(bp, true); |
8486 | 8492 | ||
8487 | netif_device_attach(dev); | 8493 | netif_device_attach(dev); |
8488 | rtnl_unlock(); | 8494 | rtnl_unlock(); |
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 904bd6bf3199..d13760dc27f8 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -5020,6 +5020,9 @@ static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) | |||
5020 | reg16 &= ~state; | 5020 | reg16 &= ~state; |
5021 | pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); | 5021 | pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); |
5022 | 5022 | ||
5023 | if (!pdev->bus->self) | ||
5024 | return; | ||
5025 | |||
5023 | pos = pci_pcie_cap(pdev->bus->self); | 5026 | pos = pci_pcie_cap(pdev->bus->self); |
5024 | pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, ®16); | 5027 | pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, ®16); |
5025 | reg16 &= ~state; | 5028 | reg16 &= ~state; |
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 0cef967499d3..5267c27e3174 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -1567,9 +1567,9 @@ static void gfar_halt_nodisable(struct net_device *dev) | |||
1567 | tempval |= (DMACTRL_GRS | DMACTRL_GTS); | 1567 | tempval |= (DMACTRL_GRS | DMACTRL_GTS); |
1568 | gfar_write(®s->dmactrl, tempval); | 1568 | gfar_write(®s->dmactrl, tempval); |
1569 | 1569 | ||
1570 | while (!(gfar_read(®s->ievent) & | 1570 | spin_event_timeout(((gfar_read(®s->ievent) & |
1571 | (IEVENT_GRSC | IEVENT_GTSC))) | 1571 | (IEVENT_GRSC | IEVENT_GTSC)) == |
1572 | cpu_relax(); | 1572 | (IEVENT_GRSC | IEVENT_GTSC)), -1, 0); |
1573 | } | 1573 | } |
1574 | } | 1574 | } |
1575 | 1575 | ||
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index ec6bcc0660c6..79c35ae3718c 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h | |||
@@ -104,10 +104,6 @@ | |||
104 | #define MAX_EMULATION_MAC_ADDRS 16 | 104 | #define MAX_EMULATION_MAC_ADDRS 16 |
105 | #define VMDQ_P(p) ((p) + adapter->num_vfs) | 105 | #define VMDQ_P(p) ((p) + adapter->num_vfs) |
106 | 106 | ||
107 | #define IXGBE_SUBDEV_ID_82598AF_MEZZ 0x0049 | ||
108 | #define IXGBE_SUBDEV_ID_82598AF_MENLO_Q_MEZZ 0x004a | ||
109 | #define IXGBE_SUBDEV_ID_82598AF_MENLO_E_MEZZ 0x004b | ||
110 | |||
111 | struct vf_data_storage { | 107 | struct vf_data_storage { |
112 | unsigned char vf_mac_addresses[ETH_ALEN]; | 108 | unsigned char vf_mac_addresses[ETH_ALEN]; |
113 | u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES]; | 109 | u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES]; |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index ff59f88dc7a1..2ae5a5159ce4 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -4314,9 +4314,6 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) | |||
4314 | int err = 0; | 4314 | int err = 0; |
4315 | int vector, v_budget; | 4315 | int vector, v_budget; |
4316 | 4316 | ||
4317 | if (!(adapter->flags & IXGBE_FLAG_MSIX_CAPABLE)) | ||
4318 | goto try_msi; | ||
4319 | |||
4320 | /* | 4317 | /* |
4321 | * It's easy to be greedy for MSI-X vectors, but it really | 4318 | * It's easy to be greedy for MSI-X vectors, but it really |
4322 | * doesn't do us much good if we have a lot more vectors | 4319 | * doesn't do us much good if we have a lot more vectors |
@@ -4348,7 +4345,7 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) | |||
4348 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) | 4345 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) |
4349 | goto out; | 4346 | goto out; |
4350 | } | 4347 | } |
4351 | try_msi: | 4348 | |
4352 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; | 4349 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; |
4353 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; | 4350 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; |
4354 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; | 4351 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; |
@@ -4629,18 +4626,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
4629 | adapter->ring_feature[RING_F_RSS].indices = rss; | 4626 | adapter->ring_feature[RING_F_RSS].indices = rss; |
4630 | adapter->flags |= IXGBE_FLAG_RSS_ENABLED; | 4627 | adapter->flags |= IXGBE_FLAG_RSS_ENABLED; |
4631 | adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES; | 4628 | adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES; |
4632 | adapter->flags |= IXGBE_FLAG_MSIX_CAPABLE; | ||
4633 | if (adapter->hw.device_id == IXGBE_DEV_ID_82598AF_DUAL_PORT) { | ||
4634 | switch (adapter->hw.subsystem_device_id) { | ||
4635 | case IXGBE_SUBDEV_ID_82598AF_MEZZ: | ||
4636 | case IXGBE_SUBDEV_ID_82598AF_MENLO_Q_MEZZ: | ||
4637 | case IXGBE_SUBDEV_ID_82598AF_MENLO_E_MEZZ: | ||
4638 | adapter->flags &= ~IXGBE_FLAG_MSIX_CAPABLE; | ||
4639 | break; | ||
4640 | default: | ||
4641 | break; | ||
4642 | } | ||
4643 | } | ||
4644 | if (hw->mac.type == ixgbe_mac_82598EB) { | 4629 | if (hw->mac.type == ixgbe_mac_82598EB) { |
4645 | if (hw->device_id == IXGBE_DEV_ID_82598AT) | 4630 | if (hw->device_id == IXGBE_DEV_ID_82598AT) |
4646 | adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; | 4631 | adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; |
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index d97e1fd234ba..1c4110df343e 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c | |||
@@ -37,6 +37,7 @@ | |||
37 | struct macvtap_queue { | 37 | struct macvtap_queue { |
38 | struct sock sk; | 38 | struct sock sk; |
39 | struct socket sock; | 39 | struct socket sock; |
40 | struct socket_wq wq; | ||
40 | struct macvlan_dev *vlan; | 41 | struct macvlan_dev *vlan; |
41 | struct file *file; | 42 | struct file *file; |
42 | unsigned int flags; | 43 | unsigned int flags; |
@@ -242,12 +243,15 @@ static struct rtnl_link_ops macvtap_link_ops __read_mostly = { | |||
242 | 243 | ||
243 | static void macvtap_sock_write_space(struct sock *sk) | 244 | static void macvtap_sock_write_space(struct sock *sk) |
244 | { | 245 | { |
246 | wait_queue_head_t *wqueue; | ||
247 | |||
245 | if (!sock_writeable(sk) || | 248 | if (!sock_writeable(sk) || |
246 | !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags)) | 249 | !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags)) |
247 | return; | 250 | return; |
248 | 251 | ||
249 | if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) | 252 | wqueue = sk_sleep(sk); |
250 | wake_up_interruptible_poll(sk_sleep(sk), POLLOUT | POLLWRNORM | POLLWRBAND); | 253 | if (wqueue && waitqueue_active(wqueue)) |
254 | wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND); | ||
251 | } | 255 | } |
252 | 256 | ||
253 | static int macvtap_open(struct inode *inode, struct file *file) | 257 | static int macvtap_open(struct inode *inode, struct file *file) |
@@ -272,7 +276,8 @@ static int macvtap_open(struct inode *inode, struct file *file) | |||
272 | if (!q) | 276 | if (!q) |
273 | goto out; | 277 | goto out; |
274 | 278 | ||
275 | init_waitqueue_head(&q->sock.wait); | 279 | q->sock.wq = &q->wq; |
280 | init_waitqueue_head(&q->wq.wait); | ||
276 | q->sock.type = SOCK_RAW; | 281 | q->sock.type = SOCK_RAW; |
277 | q->sock.state = SS_CONNECTED; | 282 | q->sock.state = SS_CONNECTED; |
278 | q->sock.file = file; | 283 | q->sock.file = file; |
@@ -308,7 +313,7 @@ static unsigned int macvtap_poll(struct file *file, poll_table * wait) | |||
308 | goto out; | 313 | goto out; |
309 | 314 | ||
310 | mask = 0; | 315 | mask = 0; |
311 | poll_wait(file, &q->sock.wait, wait); | 316 | poll_wait(file, &q->wq.wait, wait); |
312 | 317 | ||
313 | if (!skb_queue_empty(&q->sk.sk_receive_queue)) | 318 | if (!skb_queue_empty(&q->sk.sk_receive_queue)) |
314 | mask |= POLLIN | POLLRDNORM; | 319 | mask |= POLLIN | POLLRDNORM; |
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c index 408f3d7b1545..949ac1a12537 100644 --- a/drivers/net/pcmcia/smc91c92_cs.c +++ b/drivers/net/pcmcia/smc91c92_cs.c | |||
@@ -1804,23 +1804,30 @@ static void media_check(u_long arg) | |||
1804 | SMC_SELECT_BANK(1); | 1804 | SMC_SELECT_BANK(1); |
1805 | media |= (inw(ioaddr + CONFIG) & CFG_AUI_SELECT) ? 2 : 1; | 1805 | media |= (inw(ioaddr + CONFIG) & CFG_AUI_SELECT) ? 2 : 1; |
1806 | 1806 | ||
1807 | SMC_SELECT_BANK(saved_bank); | ||
1808 | spin_unlock_irqrestore(&smc->lock, flags); | ||
1809 | |||
1807 | /* Check for pending interrupt with watchdog flag set: with | 1810 | /* Check for pending interrupt with watchdog flag set: with |
1808 | this, we can limp along even if the interrupt is blocked */ | 1811 | this, we can limp along even if the interrupt is blocked */ |
1809 | if (smc->watchdog++ && ((i>>8) & i)) { | 1812 | if (smc->watchdog++ && ((i>>8) & i)) { |
1810 | if (!smc->fast_poll) | 1813 | if (!smc->fast_poll) |
1811 | printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name); | 1814 | printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name); |
1815 | local_irq_save(flags); | ||
1812 | smc_interrupt(dev->irq, dev); | 1816 | smc_interrupt(dev->irq, dev); |
1817 | local_irq_restore(flags); | ||
1813 | smc->fast_poll = HZ; | 1818 | smc->fast_poll = HZ; |
1814 | } | 1819 | } |
1815 | if (smc->fast_poll) { | 1820 | if (smc->fast_poll) { |
1816 | smc->fast_poll--; | 1821 | smc->fast_poll--; |
1817 | smc->media.expires = jiffies + HZ/100; | 1822 | smc->media.expires = jiffies + HZ/100; |
1818 | add_timer(&smc->media); | 1823 | add_timer(&smc->media); |
1819 | SMC_SELECT_BANK(saved_bank); | ||
1820 | spin_unlock_irqrestore(&smc->lock, flags); | ||
1821 | return; | 1824 | return; |
1822 | } | 1825 | } |
1823 | 1826 | ||
1827 | spin_lock_irqsave(&smc->lock, flags); | ||
1828 | |||
1829 | saved_bank = inw(ioaddr + BANK_SELECT); | ||
1830 | |||
1824 | if (smc->cfg & CFG_MII_SELECT) { | 1831 | if (smc->cfg & CFG_MII_SELECT) { |
1825 | if (smc->mii_if.phy_id < 0) | 1832 | if (smc->mii_if.phy_id < 0) |
1826 | goto reschedule; | 1833 | goto reschedule; |
@@ -1978,15 +1985,16 @@ static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
1978 | unsigned int ioaddr = dev->base_addr; | 1985 | unsigned int ioaddr = dev->base_addr; |
1979 | u16 saved_bank = inw(ioaddr + BANK_SELECT); | 1986 | u16 saved_bank = inw(ioaddr + BANK_SELECT); |
1980 | int ret; | 1987 | int ret; |
1988 | unsigned long flags; | ||
1981 | 1989 | ||
1982 | spin_lock_irq(&smc->lock); | 1990 | spin_lock_irqsave(&smc->lock, flags); |
1983 | SMC_SELECT_BANK(3); | 1991 | SMC_SELECT_BANK(3); |
1984 | if (smc->cfg & CFG_MII_SELECT) | 1992 | if (smc->cfg & CFG_MII_SELECT) |
1985 | ret = mii_ethtool_gset(&smc->mii_if, ecmd); | 1993 | ret = mii_ethtool_gset(&smc->mii_if, ecmd); |
1986 | else | 1994 | else |
1987 | ret = smc_netdev_get_ecmd(dev, ecmd); | 1995 | ret = smc_netdev_get_ecmd(dev, ecmd); |
1988 | SMC_SELECT_BANK(saved_bank); | 1996 | SMC_SELECT_BANK(saved_bank); |
1989 | spin_unlock_irq(&smc->lock); | 1997 | spin_unlock_irqrestore(&smc->lock, flags); |
1990 | return ret; | 1998 | return ret; |
1991 | } | 1999 | } |
1992 | 2000 | ||
@@ -1996,15 +2004,16 @@ static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
1996 | unsigned int ioaddr = dev->base_addr; | 2004 | unsigned int ioaddr = dev->base_addr; |
1997 | u16 saved_bank = inw(ioaddr + BANK_SELECT); | 2005 | u16 saved_bank = inw(ioaddr + BANK_SELECT); |
1998 | int ret; | 2006 | int ret; |
2007 | unsigned long flags; | ||
1999 | 2008 | ||
2000 | spin_lock_irq(&smc->lock); | 2009 | spin_lock_irqsave(&smc->lock, flags); |
2001 | SMC_SELECT_BANK(3); | 2010 | SMC_SELECT_BANK(3); |
2002 | if (smc->cfg & CFG_MII_SELECT) | 2011 | if (smc->cfg & CFG_MII_SELECT) |
2003 | ret = mii_ethtool_sset(&smc->mii_if, ecmd); | 2012 | ret = mii_ethtool_sset(&smc->mii_if, ecmd); |
2004 | else | 2013 | else |
2005 | ret = smc_netdev_set_ecmd(dev, ecmd); | 2014 | ret = smc_netdev_set_ecmd(dev, ecmd); |
2006 | SMC_SELECT_BANK(saved_bank); | 2015 | SMC_SELECT_BANK(saved_bank); |
2007 | spin_unlock_irq(&smc->lock); | 2016 | spin_unlock_irqrestore(&smc->lock, flags); |
2008 | return ret; | 2017 | return ret; |
2009 | } | 2018 | } |
2010 | 2019 | ||
@@ -2014,12 +2023,13 @@ static u32 smc_get_link(struct net_device *dev) | |||
2014 | unsigned int ioaddr = dev->base_addr; | 2023 | unsigned int ioaddr = dev->base_addr; |
2015 | u16 saved_bank = inw(ioaddr + BANK_SELECT); | 2024 | u16 saved_bank = inw(ioaddr + BANK_SELECT); |
2016 | u32 ret; | 2025 | u32 ret; |
2026 | unsigned long flags; | ||
2017 | 2027 | ||
2018 | spin_lock_irq(&smc->lock); | 2028 | spin_lock_irqsave(&smc->lock, flags); |
2019 | SMC_SELECT_BANK(3); | 2029 | SMC_SELECT_BANK(3); |
2020 | ret = smc_link_ok(dev); | 2030 | ret = smc_link_ok(dev); |
2021 | SMC_SELECT_BANK(saved_bank); | 2031 | SMC_SELECT_BANK(saved_bank); |
2022 | spin_unlock_irq(&smc->lock); | 2032 | spin_unlock_irqrestore(&smc->lock, flags); |
2023 | return ret; | 2033 | return ret; |
2024 | } | 2034 | } |
2025 | 2035 | ||
@@ -2056,16 +2066,17 @@ static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) | |||
2056 | int rc = 0; | 2066 | int rc = 0; |
2057 | u16 saved_bank; | 2067 | u16 saved_bank; |
2058 | unsigned int ioaddr = dev->base_addr; | 2068 | unsigned int ioaddr = dev->base_addr; |
2069 | unsigned long flags; | ||
2059 | 2070 | ||
2060 | if (!netif_running(dev)) | 2071 | if (!netif_running(dev)) |
2061 | return -EINVAL; | 2072 | return -EINVAL; |
2062 | 2073 | ||
2063 | spin_lock_irq(&smc->lock); | 2074 | spin_lock_irqsave(&smc->lock, flags); |
2064 | saved_bank = inw(ioaddr + BANK_SELECT); | 2075 | saved_bank = inw(ioaddr + BANK_SELECT); |
2065 | SMC_SELECT_BANK(3); | 2076 | SMC_SELECT_BANK(3); |
2066 | rc = generic_mii_ioctl(&smc->mii_if, mii, cmd, NULL); | 2077 | rc = generic_mii_ioctl(&smc->mii_if, mii, cmd, NULL); |
2067 | SMC_SELECT_BANK(saved_bank); | 2078 | SMC_SELECT_BANK(saved_bank); |
2068 | spin_unlock_irq(&smc->lock); | 2079 | spin_unlock_irqrestore(&smc->lock, flags); |
2069 | return rc; | 2080 | return rc; |
2070 | } | 2081 | } |
2071 | 2082 | ||
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index fc5938ba3d78..a527e37728cd 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig | |||
@@ -88,6 +88,11 @@ config LSI_ET1011C_PHY | |||
88 | ---help--- | 88 | ---help--- |
89 | Supports the LSI ET1011C PHY. | 89 | Supports the LSI ET1011C PHY. |
90 | 90 | ||
91 | config MICREL_PHY | ||
92 | tristate "Driver for Micrel PHYs" | ||
93 | ---help--- | ||
94 | Supports the KSZ9021, VSC8201, KS8001 PHYs. | ||
95 | |||
91 | config FIXED_PHY | 96 | config FIXED_PHY |
92 | bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" | 97 | bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" |
93 | depends on PHYLIB=y | 98 | depends on PHYLIB=y |
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 1342585af381..13bebab65d02 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile | |||
@@ -20,4 +20,5 @@ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o | |||
20 | obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o | 20 | obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o |
21 | obj-$(CONFIG_NATIONAL_PHY) += national.o | 21 | obj-$(CONFIG_NATIONAL_PHY) += national.o |
22 | obj-$(CONFIG_STE10XP) += ste10Xp.o | 22 | obj-$(CONFIG_STE10XP) += ste10Xp.o |
23 | obj-$(CONFIG_MICREL_PHY) += micrel.o | ||
23 | obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o | 24 | obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o |
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c new file mode 100644 index 000000000000..68dd107bad24 --- /dev/null +++ b/drivers/net/phy/micrel.c | |||
@@ -0,0 +1,113 @@ | |||
1 | /* | ||
2 | * drivers/net/phy/micrel.c | ||
3 | * | ||
4 | * Driver for Micrel PHYs | ||
5 | * | ||
6 | * Author: David J. Choi | ||
7 | * | ||
8 | * Copyright (c) 2010 Micrel, Inc. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License as published by the | ||
12 | * Free Software Foundation; either version 2 of the License, or (at your | ||
13 | * option) any later version. | ||
14 | * | ||
15 | * Support : ksz9021 , vsc8201, ks8001 | ||
16 | */ | ||
17 | |||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/phy.h> | ||
21 | |||
22 | #define PHY_ID_KSZ9021 0x00221611 | ||
23 | #define PHY_ID_VSC8201 0x000FC413 | ||
24 | #define PHY_ID_KS8001 0x0022161A | ||
25 | |||
26 | |||
27 | static int kszphy_config_init(struct phy_device *phydev) | ||
28 | { | ||
29 | return 0; | ||
30 | } | ||
31 | |||
32 | |||
33 | static struct phy_driver ks8001_driver = { | ||
34 | .phy_id = PHY_ID_KS8001, | ||
35 | .phy_id_mask = 0x00fffff0, | ||
36 | .features = PHY_BASIC_FEATURES, | ||
37 | .flags = PHY_POLL, | ||
38 | .config_init = kszphy_config_init, | ||
39 | .config_aneg = genphy_config_aneg, | ||
40 | .read_status = genphy_read_status, | ||
41 | .driver = { .owner = THIS_MODULE,}, | ||
42 | }; | ||
43 | |||
44 | static struct phy_driver vsc8201_driver = { | ||
45 | .phy_id = PHY_ID_VSC8201, | ||
46 | .name = "Micrel VSC8201", | ||
47 | .phy_id_mask = 0x00fffff0, | ||
48 | .features = PHY_BASIC_FEATURES, | ||
49 | .flags = PHY_POLL, | ||
50 | .config_init = kszphy_config_init, | ||
51 | .config_aneg = genphy_config_aneg, | ||
52 | .read_status = genphy_read_status, | ||
53 | .driver = { .owner = THIS_MODULE,}, | ||
54 | }; | ||
55 | |||
56 | static struct phy_driver ksz9021_driver = { | ||
57 | .phy_id = PHY_ID_KSZ9021, | ||
58 | .phy_id_mask = 0x000fff10, | ||
59 | .name = "Micrel KSZ9021 Gigabit PHY", | ||
60 | .features = PHY_GBIT_FEATURES | SUPPORTED_Pause, | ||
61 | .flags = PHY_POLL, | ||
62 | .config_init = kszphy_config_init, | ||
63 | .config_aneg = genphy_config_aneg, | ||
64 | .read_status = genphy_read_status, | ||
65 | .driver = { .owner = THIS_MODULE, }, | ||
66 | }; | ||
67 | |||
68 | static int __init ksphy_init(void) | ||
69 | { | ||
70 | int ret; | ||
71 | |||
72 | ret = phy_driver_register(&ks8001_driver); | ||
73 | if (ret) | ||
74 | goto err1; | ||
75 | ret = phy_driver_register(&vsc8201_driver); | ||
76 | if (ret) | ||
77 | goto err2; | ||
78 | |||
79 | ret = phy_driver_register(&ksz9021_driver); | ||
80 | if (ret) | ||
81 | goto err3; | ||
82 | return 0; | ||
83 | |||
84 | err3: | ||
85 | phy_driver_unregister(&vsc8201_driver); | ||
86 | err2: | ||
87 | phy_driver_unregister(&ks8001_driver); | ||
88 | err1: | ||
89 | return ret; | ||
90 | } | ||
91 | |||
92 | static void __exit ksphy_exit(void) | ||
93 | { | ||
94 | phy_driver_unregister(&ks8001_driver); | ||
95 | phy_driver_unregister(&vsc8201_driver); | ||
96 | phy_driver_unregister(&ksz9021_driver); | ||
97 | } | ||
98 | |||
99 | module_init(ksphy_init); | ||
100 | module_exit(ksphy_exit); | ||
101 | |||
102 | MODULE_DESCRIPTION("Micrel PHY driver"); | ||
103 | MODULE_AUTHOR("David J. Choi"); | ||
104 | MODULE_LICENSE("GPL"); | ||
105 | |||
106 | static struct mdio_device_id micrel_tbl[] = { | ||
107 | { PHY_ID_KSZ9021, 0x000fff10 }, | ||
108 | { PHY_ID_VSC8201, 0x00fffff0 }, | ||
109 | { PHY_ID_KS8001, 0x00fffff0 }, | ||
110 | { } | ||
111 | }; | ||
112 | |||
113 | MODULE_DEVICE_TABLE(mdio, micrel_tbl); | ||
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 35f195329fdd..5441688daba7 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c | |||
@@ -405,6 +405,7 @@ static ssize_t ppp_read(struct file *file, char __user *buf, | |||
405 | DECLARE_WAITQUEUE(wait, current); | 405 | DECLARE_WAITQUEUE(wait, current); |
406 | ssize_t ret; | 406 | ssize_t ret; |
407 | struct sk_buff *skb = NULL; | 407 | struct sk_buff *skb = NULL; |
408 | struct iovec iov; | ||
408 | 409 | ||
409 | ret = count; | 410 | ret = count; |
410 | 411 | ||
@@ -448,7 +449,9 @@ static ssize_t ppp_read(struct file *file, char __user *buf, | |||
448 | if (skb->len > count) | 449 | if (skb->len > count) |
449 | goto outf; | 450 | goto outf; |
450 | ret = -EFAULT; | 451 | ret = -EFAULT; |
451 | if (copy_to_user(buf, skb->data, skb->len)) | 452 | iov.iov_base = buf; |
453 | iov.iov_len = count; | ||
454 | if (skb_copy_datagram_iovec(skb, 0, &iov, skb->len)) | ||
452 | goto outf; | 455 | goto outf; |
453 | ret = skb->len; | 456 | ret = skb->len; |
454 | 457 | ||
@@ -1567,13 +1570,22 @@ ppp_input(struct ppp_channel *chan, struct sk_buff *skb) | |||
1567 | struct channel *pch = chan->ppp; | 1570 | struct channel *pch = chan->ppp; |
1568 | int proto; | 1571 | int proto; |
1569 | 1572 | ||
1570 | if (!pch || skb->len == 0) { | 1573 | if (!pch) { |
1571 | kfree_skb(skb); | 1574 | kfree_skb(skb); |
1572 | return; | 1575 | return; |
1573 | } | 1576 | } |
1574 | 1577 | ||
1575 | proto = PPP_PROTO(skb); | ||
1576 | read_lock_bh(&pch->upl); | 1578 | read_lock_bh(&pch->upl); |
1579 | if (!pskb_may_pull(skb, 2)) { | ||
1580 | kfree_skb(skb); | ||
1581 | if (pch->ppp) { | ||
1582 | ++pch->ppp->dev->stats.rx_length_errors; | ||
1583 | ppp_receive_error(pch->ppp); | ||
1584 | } | ||
1585 | goto done; | ||
1586 | } | ||
1587 | |||
1588 | proto = PPP_PROTO(skb); | ||
1577 | if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) { | 1589 | if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) { |
1578 | /* put it on the channel queue */ | 1590 | /* put it on the channel queue */ |
1579 | skb_queue_tail(&pch->file.rq, skb); | 1591 | skb_queue_tail(&pch->file.rq, skb); |
@@ -1585,6 +1597,8 @@ ppp_input(struct ppp_channel *chan, struct sk_buff *skb) | |||
1585 | } else { | 1597 | } else { |
1586 | ppp_do_recv(pch->ppp, skb, pch); | 1598 | ppp_do_recv(pch->ppp, skb, pch); |
1587 | } | 1599 | } |
1600 | |||
1601 | done: | ||
1588 | read_unlock_bh(&pch->upl); | 1602 | read_unlock_bh(&pch->upl); |
1589 | } | 1603 | } |
1590 | 1604 | ||
@@ -1617,7 +1631,8 @@ ppp_input_error(struct ppp_channel *chan, int code) | |||
1617 | static void | 1631 | static void |
1618 | ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) | 1632 | ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) |
1619 | { | 1633 | { |
1620 | if (pskb_may_pull(skb, 2)) { | 1634 | /* note: a 0-length skb is used as an error indication */ |
1635 | if (skb->len > 0) { | ||
1621 | #ifdef CONFIG_PPP_MULTILINK | 1636 | #ifdef CONFIG_PPP_MULTILINK |
1622 | /* XXX do channel-level decompression here */ | 1637 | /* XXX do channel-level decompression here */ |
1623 | if (PPP_PROTO(skb) == PPP_MP) | 1638 | if (PPP_PROTO(skb) == PPP_MP) |
@@ -1625,15 +1640,10 @@ ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) | |||
1625 | else | 1640 | else |
1626 | #endif /* CONFIG_PPP_MULTILINK */ | 1641 | #endif /* CONFIG_PPP_MULTILINK */ |
1627 | ppp_receive_nonmp_frame(ppp, skb); | 1642 | ppp_receive_nonmp_frame(ppp, skb); |
1628 | return; | 1643 | } else { |
1644 | kfree_skb(skb); | ||
1645 | ppp_receive_error(ppp); | ||
1629 | } | 1646 | } |
1630 | |||
1631 | if (skb->len > 0) | ||
1632 | /* note: a 0-length skb is used as an error indication */ | ||
1633 | ++ppp->dev->stats.rx_length_errors; | ||
1634 | |||
1635 | kfree_skb(skb); | ||
1636 | ppp_receive_error(ppp); | ||
1637 | } | 1647 | } |
1638 | 1648 | ||
1639 | static void | 1649 | static void |
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c index ba4770a6c2e4..fec3c29b2ea8 100644 --- a/drivers/net/sb1250-mac.c +++ b/drivers/net/sb1250-mac.c | |||
@@ -2256,17 +2256,36 @@ static int sbmac_init(struct platform_device *pldev, long long base) | |||
2256 | 2256 | ||
2257 | sc->mii_bus = mdiobus_alloc(); | 2257 | sc->mii_bus = mdiobus_alloc(); |
2258 | if (sc->mii_bus == NULL) { | 2258 | if (sc->mii_bus == NULL) { |
2259 | sbmac_uninitctx(sc); | 2259 | err = -ENOMEM; |
2260 | return -ENOMEM; | 2260 | goto uninit_ctx; |
2261 | } | 2261 | } |
2262 | 2262 | ||
2263 | sc->mii_bus->name = sbmac_mdio_string; | ||
2264 | snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%x", idx); | ||
2265 | sc->mii_bus->priv = sc; | ||
2266 | sc->mii_bus->read = sbmac_mii_read; | ||
2267 | sc->mii_bus->write = sbmac_mii_write; | ||
2268 | sc->mii_bus->irq = sc->phy_irq; | ||
2269 | for (i = 0; i < PHY_MAX_ADDR; ++i) | ||
2270 | sc->mii_bus->irq[i] = SBMAC_PHY_INT; | ||
2271 | |||
2272 | sc->mii_bus->parent = &pldev->dev; | ||
2273 | /* | ||
2274 | * Probe PHY address | ||
2275 | */ | ||
2276 | err = mdiobus_register(sc->mii_bus); | ||
2277 | if (err) { | ||
2278 | printk(KERN_ERR "%s: unable to register MDIO bus\n", | ||
2279 | dev->name); | ||
2280 | goto free_mdio; | ||
2281 | } | ||
2282 | dev_set_drvdata(&pldev->dev, sc->mii_bus); | ||
2283 | |||
2263 | err = register_netdev(dev); | 2284 | err = register_netdev(dev); |
2264 | if (err) { | 2285 | if (err) { |
2265 | printk(KERN_ERR "%s.%d: unable to register netdev\n", | 2286 | printk(KERN_ERR "%s.%d: unable to register netdev\n", |
2266 | sbmac_string, idx); | 2287 | sbmac_string, idx); |
2267 | mdiobus_free(sc->mii_bus); | 2288 | goto unreg_mdio; |
2268 | sbmac_uninitctx(sc); | ||
2269 | return err; | ||
2270 | } | 2289 | } |
2271 | 2290 | ||
2272 | pr_info("%s.%d: registered as %s\n", sbmac_string, idx, dev->name); | 2291 | pr_info("%s.%d: registered as %s\n", sbmac_string, idx, dev->name); |
@@ -2282,19 +2301,15 @@ static int sbmac_init(struct platform_device *pldev, long long base) | |||
2282 | pr_info("%s: SiByte Ethernet at 0x%08Lx, address: %pM\n", | 2301 | pr_info("%s: SiByte Ethernet at 0x%08Lx, address: %pM\n", |
2283 | dev->name, base, eaddr); | 2302 | dev->name, base, eaddr); |
2284 | 2303 | ||
2285 | sc->mii_bus->name = sbmac_mdio_string; | ||
2286 | snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%x", idx); | ||
2287 | sc->mii_bus->priv = sc; | ||
2288 | sc->mii_bus->read = sbmac_mii_read; | ||
2289 | sc->mii_bus->write = sbmac_mii_write; | ||
2290 | sc->mii_bus->irq = sc->phy_irq; | ||
2291 | for (i = 0; i < PHY_MAX_ADDR; ++i) | ||
2292 | sc->mii_bus->irq[i] = SBMAC_PHY_INT; | ||
2293 | |||
2294 | sc->mii_bus->parent = &pldev->dev; | ||
2295 | dev_set_drvdata(&pldev->dev, sc->mii_bus); | ||
2296 | |||
2297 | return 0; | 2304 | return 0; |
2305 | unreg_mdio: | ||
2306 | mdiobus_unregister(sc->mii_bus); | ||
2307 | dev_set_drvdata(&pldev->dev, NULL); | ||
2308 | free_mdio: | ||
2309 | mdiobus_free(sc->mii_bus); | ||
2310 | uninit_ctx: | ||
2311 | sbmac_uninitctx(sc); | ||
2312 | return err; | ||
2298 | } | 2313 | } |
2299 | 2314 | ||
2300 | 2315 | ||
@@ -2320,16 +2335,6 @@ static int sbmac_open(struct net_device *dev) | |||
2320 | goto out_err; | 2335 | goto out_err; |
2321 | } | 2336 | } |
2322 | 2337 | ||
2323 | /* | ||
2324 | * Probe PHY address | ||
2325 | */ | ||
2326 | err = mdiobus_register(sc->mii_bus); | ||
2327 | if (err) { | ||
2328 | printk(KERN_ERR "%s: unable to register MDIO bus\n", | ||
2329 | dev->name); | ||
2330 | goto out_unirq; | ||
2331 | } | ||
2332 | |||
2333 | sc->sbm_speed = sbmac_speed_none; | 2338 | sc->sbm_speed = sbmac_speed_none; |
2334 | sc->sbm_duplex = sbmac_duplex_none; | 2339 | sc->sbm_duplex = sbmac_duplex_none; |
2335 | sc->sbm_fc = sbmac_fc_none; | 2340 | sc->sbm_fc = sbmac_fc_none; |
@@ -2360,11 +2365,7 @@ static int sbmac_open(struct net_device *dev) | |||
2360 | return 0; | 2365 | return 0; |
2361 | 2366 | ||
2362 | out_unregister: | 2367 | out_unregister: |
2363 | mdiobus_unregister(sc->mii_bus); | ||
2364 | |||
2365 | out_unirq: | ||
2366 | free_irq(dev->irq, dev); | 2368 | free_irq(dev->irq, dev); |
2367 | |||
2368 | out_err: | 2369 | out_err: |
2369 | return err; | 2370 | return err; |
2370 | } | 2371 | } |
@@ -2553,9 +2554,6 @@ static int sbmac_close(struct net_device *dev) | |||
2553 | 2554 | ||
2554 | phy_disconnect(sc->phy_dev); | 2555 | phy_disconnect(sc->phy_dev); |
2555 | sc->phy_dev = NULL; | 2556 | sc->phy_dev = NULL; |
2556 | |||
2557 | mdiobus_unregister(sc->mii_bus); | ||
2558 | |||
2559 | free_irq(dev->irq, dev); | 2557 | free_irq(dev->irq, dev); |
2560 | 2558 | ||
2561 | sbdma_emptyring(&(sc->sbm_txdma)); | 2559 | sbdma_emptyring(&(sc->sbm_txdma)); |
@@ -2662,6 +2660,7 @@ static int __exit sbmac_remove(struct platform_device *pldev) | |||
2662 | 2660 | ||
2663 | unregister_netdev(dev); | 2661 | unregister_netdev(dev); |
2664 | sbmac_uninitctx(sc); | 2662 | sbmac_uninitctx(sc); |
2663 | mdiobus_unregister(sc->mii_bus); | ||
2665 | mdiobus_free(sc->mii_bus); | 2664 | mdiobus_free(sc->mii_bus); |
2666 | iounmap(sc->sbm_base); | 2665 | iounmap(sc->sbm_base); |
2667 | free_netdev(dev); | 2666 | free_netdev(dev); |
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index bc75ef683c9f..156460527231 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c | |||
@@ -1870,6 +1870,7 @@ out: | |||
1870 | } | 1870 | } |
1871 | 1871 | ||
1872 | if (disabled) { | 1872 | if (disabled) { |
1873 | dev_close(efx->net_dev); | ||
1873 | EFX_ERR(efx, "has been disabled\n"); | 1874 | EFX_ERR(efx, "has been disabled\n"); |
1874 | efx->state = STATE_DISABLED; | 1875 | efx->state = STATE_DISABLED; |
1875 | } else { | 1876 | } else { |
@@ -1893,8 +1894,7 @@ static void efx_reset_work(struct work_struct *data) | |||
1893 | } | 1894 | } |
1894 | 1895 | ||
1895 | rtnl_lock(); | 1896 | rtnl_lock(); |
1896 | if (efx_reset(efx, efx->reset_pending)) | 1897 | (void)efx_reset(efx, efx->reset_pending); |
1897 | dev_close(efx->net_dev); | ||
1898 | rtnl_unlock(); | 1898 | rtnl_unlock(); |
1899 | } | 1899 | } |
1900 | 1900 | ||
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c index f7df24dce38a..655b697b45b2 100644 --- a/drivers/net/sfc/falcon.c +++ b/drivers/net/sfc/falcon.c | |||
@@ -1326,7 +1326,9 @@ static int falcon_probe_nvconfig(struct efx_nic *efx) | |||
1326 | 1326 | ||
1327 | EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad); | 1327 | EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad); |
1328 | 1328 | ||
1329 | falcon_probe_board(efx, board_rev); | 1329 | rc = falcon_probe_board(efx, board_rev); |
1330 | if (rc) | ||
1331 | goto fail2; | ||
1330 | 1332 | ||
1331 | kfree(nvconfig); | 1333 | kfree(nvconfig); |
1332 | return 0; | 1334 | return 0; |
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c index 5712fddd72f2..c7a933a3292e 100644 --- a/drivers/net/sfc/falcon_boards.c +++ b/drivers/net/sfc/falcon_boards.c | |||
@@ -728,15 +728,7 @@ static const struct falcon_board_type board_types[] = { | |||
728 | }, | 728 | }, |
729 | }; | 729 | }; |
730 | 730 | ||
731 | static const struct falcon_board_type falcon_dummy_board = { | 731 | int falcon_probe_board(struct efx_nic *efx, u16 revision_info) |
732 | .init = efx_port_dummy_op_int, | ||
733 | .init_phy = efx_port_dummy_op_void, | ||
734 | .fini = efx_port_dummy_op_void, | ||
735 | .set_id_led = efx_port_dummy_op_set_id_led, | ||
736 | .monitor = efx_port_dummy_op_int, | ||
737 | }; | ||
738 | |||
739 | void falcon_probe_board(struct efx_nic *efx, u16 revision_info) | ||
740 | { | 732 | { |
741 | struct falcon_board *board = falcon_board(efx); | 733 | struct falcon_board *board = falcon_board(efx); |
742 | u8 type_id = FALCON_BOARD_TYPE(revision_info); | 734 | u8 type_id = FALCON_BOARD_TYPE(revision_info); |
@@ -754,8 +746,9 @@ void falcon_probe_board(struct efx_nic *efx, u16 revision_info) | |||
754 | (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC) | 746 | (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC) |
755 | ? board->type->ref_model : board->type->gen_type, | 747 | ? board->type->ref_model : board->type->gen_type, |
756 | 'A' + board->major, board->minor); | 748 | 'A' + board->major, board->minor); |
749 | return 0; | ||
757 | } else { | 750 | } else { |
758 | EFX_ERR(efx, "unknown board type %d\n", type_id); | 751 | EFX_ERR(efx, "unknown board type %d\n", type_id); |
759 | board->type = &falcon_dummy_board; | 752 | return -ENODEV; |
760 | } | 753 | } |
761 | } | 754 | } |
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h index 5825f37b51bd..bbc2c0c2f843 100644 --- a/drivers/net/sfc/nic.h +++ b/drivers/net/sfc/nic.h | |||
@@ -158,7 +158,7 @@ extern struct efx_nic_type siena_a0_nic_type; | |||
158 | ************************************************************************** | 158 | ************************************************************************** |
159 | */ | 159 | */ |
160 | 160 | ||
161 | extern void falcon_probe_board(struct efx_nic *efx, u16 revision_info); | 161 | extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info); |
162 | 162 | ||
163 | /* TX data path */ | 163 | /* TX data path */ |
164 | extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue); | 164 | extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue); |
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c index 7bf93faff3ab..727b4228e081 100644 --- a/drivers/net/sfc/siena.c +++ b/drivers/net/sfc/siena.c | |||
@@ -475,8 +475,17 @@ static int siena_try_update_nic_stats(struct efx_nic *efx) | |||
475 | 475 | ||
476 | static void siena_update_nic_stats(struct efx_nic *efx) | 476 | static void siena_update_nic_stats(struct efx_nic *efx) |
477 | { | 477 | { |
478 | while (siena_try_update_nic_stats(efx) == -EAGAIN) | 478 | int retry; |
479 | cpu_relax(); | 479 | |
480 | /* If we're unlucky enough to read statistics wduring the DMA, wait | ||
481 | * up to 10ms for it to finish (typically takes <500us) */ | ||
482 | for (retry = 0; retry < 100; ++retry) { | ||
483 | if (siena_try_update_nic_stats(efx) == 0) | ||
484 | return; | ||
485 | udelay(100); | ||
486 | } | ||
487 | |||
488 | /* Use the old values instead */ | ||
480 | } | 489 | } |
481 | 490 | ||
482 | static void siena_start_nic_stats(struct efx_nic *efx) | 491 | static void siena_start_nic_stats(struct efx_nic *efx) |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 20a17938c62b..e525a6cf5587 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -109,7 +109,7 @@ struct tun_struct { | |||
109 | 109 | ||
110 | struct tap_filter txflt; | 110 | struct tap_filter txflt; |
111 | struct socket socket; | 111 | struct socket socket; |
112 | 112 | struct socket_wq wq; | |
113 | #ifdef TUN_DEBUG | 113 | #ifdef TUN_DEBUG |
114 | int debug; | 114 | int debug; |
115 | #endif | 115 | #endif |
@@ -323,7 +323,7 @@ static void tun_net_uninit(struct net_device *dev) | |||
323 | /* Inform the methods they need to stop using the dev. | 323 | /* Inform the methods they need to stop using the dev. |
324 | */ | 324 | */ |
325 | if (tfile) { | 325 | if (tfile) { |
326 | wake_up_all(&tun->socket.wait); | 326 | wake_up_all(&tun->wq.wait); |
327 | if (atomic_dec_and_test(&tfile->count)) | 327 | if (atomic_dec_and_test(&tfile->count)) |
328 | __tun_detach(tun); | 328 | __tun_detach(tun); |
329 | } | 329 | } |
@@ -398,7 +398,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) | |||
398 | /* Notify and wake up reader process */ | 398 | /* Notify and wake up reader process */ |
399 | if (tun->flags & TUN_FASYNC) | 399 | if (tun->flags & TUN_FASYNC) |
400 | kill_fasync(&tun->fasync, SIGIO, POLL_IN); | 400 | kill_fasync(&tun->fasync, SIGIO, POLL_IN); |
401 | wake_up_interruptible_poll(&tun->socket.wait, POLLIN | | 401 | wake_up_interruptible_poll(&tun->wq.wait, POLLIN | |
402 | POLLRDNORM | POLLRDBAND); | 402 | POLLRDNORM | POLLRDBAND); |
403 | return NETDEV_TX_OK; | 403 | return NETDEV_TX_OK; |
404 | 404 | ||
@@ -498,7 +498,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait) | |||
498 | 498 | ||
499 | DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name); | 499 | DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name); |
500 | 500 | ||
501 | poll_wait(file, &tun->socket.wait, wait); | 501 | poll_wait(file, &tun->wq.wait, wait); |
502 | 502 | ||
503 | if (!skb_queue_empty(&sk->sk_receive_queue)) | 503 | if (!skb_queue_empty(&sk->sk_receive_queue)) |
504 | mask |= POLLIN | POLLRDNORM; | 504 | mask |= POLLIN | POLLRDNORM; |
@@ -773,7 +773,7 @@ static ssize_t tun_do_read(struct tun_struct *tun, | |||
773 | 773 | ||
774 | DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name); | 774 | DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name); |
775 | 775 | ||
776 | add_wait_queue(&tun->socket.wait, &wait); | 776 | add_wait_queue(&tun->wq.wait, &wait); |
777 | while (len) { | 777 | while (len) { |
778 | current->state = TASK_INTERRUPTIBLE; | 778 | current->state = TASK_INTERRUPTIBLE; |
779 | 779 | ||
@@ -804,7 +804,7 @@ static ssize_t tun_do_read(struct tun_struct *tun, | |||
804 | } | 804 | } |
805 | 805 | ||
806 | current->state = TASK_RUNNING; | 806 | current->state = TASK_RUNNING; |
807 | remove_wait_queue(&tun->socket.wait, &wait); | 807 | remove_wait_queue(&tun->wq.wait, &wait); |
808 | 808 | ||
809 | return ret; | 809 | return ret; |
810 | } | 810 | } |
@@ -861,6 +861,7 @@ static struct rtnl_link_ops tun_link_ops __read_mostly = { | |||
861 | static void tun_sock_write_space(struct sock *sk) | 861 | static void tun_sock_write_space(struct sock *sk) |
862 | { | 862 | { |
863 | struct tun_struct *tun; | 863 | struct tun_struct *tun; |
864 | wait_queue_head_t *wqueue; | ||
864 | 865 | ||
865 | if (!sock_writeable(sk)) | 866 | if (!sock_writeable(sk)) |
866 | return; | 867 | return; |
@@ -868,8 +869,9 @@ static void tun_sock_write_space(struct sock *sk) | |||
868 | if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags)) | 869 | if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags)) |
869 | return; | 870 | return; |
870 | 871 | ||
871 | if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) | 872 | wqueue = sk_sleep(sk); |
872 | wake_up_interruptible_sync_poll(sk_sleep(sk), POLLOUT | | 873 | if (wqueue && waitqueue_active(wqueue)) |
874 | wake_up_interruptible_sync_poll(wqueue, POLLOUT | | ||
873 | POLLWRNORM | POLLWRBAND); | 875 | POLLWRNORM | POLLWRBAND); |
874 | 876 | ||
875 | tun = tun_sk(sk)->tun; | 877 | tun = tun_sk(sk)->tun; |
@@ -1039,7 +1041,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) | |||
1039 | if (!sk) | 1041 | if (!sk) |
1040 | goto err_free_dev; | 1042 | goto err_free_dev; |
1041 | 1043 | ||
1042 | init_waitqueue_head(&tun->socket.wait); | 1044 | tun->socket.wq = &tun->wq; |
1045 | init_waitqueue_head(&tun->wq.wait); | ||
1043 | tun->socket.ops = &tun_socket_ops; | 1046 | tun->socket.ops = &tun_socket_ops; |
1044 | sock_init_data(&tun->socket, sk); | 1047 | sock_init_data(&tun->socket, sk); |
1045 | sk->sk_write_space = tun_sock_write_space; | 1048 | sk->sk_write_space = tun_sock_write_space; |
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 63be4caec70e..d7b7018a1de1 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig | |||
@@ -397,4 +397,13 @@ config USB_IPHETH | |||
397 | 397 | ||
398 | For more information: http://giagio.com/wiki/moin.cgi/iPhoneEthernetDriver | 398 | For more information: http://giagio.com/wiki/moin.cgi/iPhoneEthernetDriver |
399 | 399 | ||
400 | config USB_SIERRA_NET | ||
401 | tristate "USB-to-WWAN Driver for Sierra Wireless modems" | ||
402 | depends on USB_USBNET | ||
403 | help | ||
404 | Choose this option if you have a Sierra Wireless USB-to-WWAN device. | ||
405 | |||
406 | To compile this driver as a module, choose M here: the | ||
407 | module will be called sierra_net. | ||
408 | |||
400 | endmenu | 409 | endmenu |
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile index edb09c0ddf8e..b13a279663ba 100644 --- a/drivers/net/usb/Makefile +++ b/drivers/net/usb/Makefile | |||
@@ -24,4 +24,5 @@ obj-$(CONFIG_USB_USBNET) += usbnet.o | |||
24 | obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o | 24 | obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o |
25 | obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o | 25 | obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o |
26 | obj-$(CONFIG_USB_IPHETH) += ipheth.o | 26 | obj-$(CONFIG_USB_IPHETH) += ipheth.o |
27 | obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o | ||
27 | 28 | ||
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 811b2dc423d1..b3fe0de40469 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c | |||
@@ -466,6 +466,7 @@ static const struct driver_info mbm_info = { | |||
466 | .bind = cdc_bind, | 466 | .bind = cdc_bind, |
467 | .unbind = usbnet_cdc_unbind, | 467 | .unbind = usbnet_cdc_unbind, |
468 | .status = cdc_status, | 468 | .status = cdc_status, |
469 | .manage_power = cdc_manage_power, | ||
469 | }; | 470 | }; |
470 | 471 | ||
471 | /*-------------------------------------------------------------------------*/ | 472 | /*-------------------------------------------------------------------------*/ |
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 291add255246..47634b617107 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c | |||
@@ -240,7 +240,7 @@ static int dm_write_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 valu | |||
240 | goto out; | 240 | goto out; |
241 | 241 | ||
242 | dm_write_reg(dev, DM_SHARED_ADDR, phy ? (reg | 0x40) : reg); | 242 | dm_write_reg(dev, DM_SHARED_ADDR, phy ? (reg | 0x40) : reg); |
243 | dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0x1c : 0x14); | 243 | dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0x1a : 0x12); |
244 | 244 | ||
245 | for (i = 0; i < DM_TIMEOUT; i++) { | 245 | for (i = 0; i < DM_TIMEOUT; i++) { |
246 | u8 tmp; | 246 | u8 tmp; |
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index fd1033130a81..418825d26f90 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c | |||
@@ -122,25 +122,25 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone) | |||
122 | 122 | ||
123 | tx_urb = usb_alloc_urb(0, GFP_KERNEL); | 123 | tx_urb = usb_alloc_urb(0, GFP_KERNEL); |
124 | if (tx_urb == NULL) | 124 | if (tx_urb == NULL) |
125 | goto error; | 125 | goto error_nomem; |
126 | 126 | ||
127 | rx_urb = usb_alloc_urb(0, GFP_KERNEL); | 127 | rx_urb = usb_alloc_urb(0, GFP_KERNEL); |
128 | if (rx_urb == NULL) | 128 | if (rx_urb == NULL) |
129 | goto error; | 129 | goto free_tx_urb; |
130 | 130 | ||
131 | tx_buf = usb_buffer_alloc(iphone->udev, | 131 | tx_buf = usb_buffer_alloc(iphone->udev, |
132 | IPHETH_BUF_SIZE, | 132 | IPHETH_BUF_SIZE, |
133 | GFP_KERNEL, | 133 | GFP_KERNEL, |
134 | &tx_urb->transfer_dma); | 134 | &tx_urb->transfer_dma); |
135 | if (tx_buf == NULL) | 135 | if (tx_buf == NULL) |
136 | goto error; | 136 | goto free_rx_urb; |
137 | 137 | ||
138 | rx_buf = usb_buffer_alloc(iphone->udev, | 138 | rx_buf = usb_buffer_alloc(iphone->udev, |
139 | IPHETH_BUF_SIZE, | 139 | IPHETH_BUF_SIZE, |
140 | GFP_KERNEL, | 140 | GFP_KERNEL, |
141 | &rx_urb->transfer_dma); | 141 | &rx_urb->transfer_dma); |
142 | if (rx_buf == NULL) | 142 | if (rx_buf == NULL) |
143 | goto error; | 143 | goto free_tx_buf; |
144 | 144 | ||
145 | 145 | ||
146 | iphone->tx_urb = tx_urb; | 146 | iphone->tx_urb = tx_urb; |
@@ -149,13 +149,14 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone) | |||
149 | iphone->rx_buf = rx_buf; | 149 | iphone->rx_buf = rx_buf; |
150 | return 0; | 150 | return 0; |
151 | 151 | ||
152 | error: | 152 | free_tx_buf: |
153 | usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, rx_buf, | ||
154 | rx_urb->transfer_dma); | ||
155 | usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, tx_buf, | 153 | usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, tx_buf, |
156 | tx_urb->transfer_dma); | 154 | tx_urb->transfer_dma); |
155 | free_rx_urb: | ||
157 | usb_free_urb(rx_urb); | 156 | usb_free_urb(rx_urb); |
157 | free_tx_urb: | ||
158 | usb_free_urb(tx_urb); | 158 | usb_free_urb(tx_urb); |
159 | error_nomem: | ||
159 | return -ENOMEM; | 160 | return -ENOMEM; |
160 | } | 161 | } |
161 | 162 | ||
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index 52671ea043a7..c4c334d9770f 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c | |||
@@ -145,6 +145,7 @@ static struct usb_device_id usb_klsi_table[] = { | |||
145 | { USB_DEVICE(0x0707, 0x0100) }, /* SMC 2202USB */ | 145 | { USB_DEVICE(0x0707, 0x0100) }, /* SMC 2202USB */ |
146 | { USB_DEVICE(0x07aa, 0x0001) }, /* Correga K.K. */ | 146 | { USB_DEVICE(0x07aa, 0x0001) }, /* Correga K.K. */ |
147 | { USB_DEVICE(0x07b8, 0x4000) }, /* D-Link DU-E10 */ | 147 | { USB_DEVICE(0x07b8, 0x4000) }, /* D-Link DU-E10 */ |
148 | { USB_DEVICE(0x07c9, 0xb010) }, /* Allied Telesyn AT-USB10 USB Ethernet Adapter */ | ||
148 | { USB_DEVICE(0x0846, 0x1001) }, /* NetGear EA-101 */ | 149 | { USB_DEVICE(0x0846, 0x1001) }, /* NetGear EA-101 */ |
149 | { USB_DEVICE(0x0846, 0x1002) }, /* NetGear EA-101 */ | 150 | { USB_DEVICE(0x0846, 0x1002) }, /* NetGear EA-101 */ |
150 | { USB_DEVICE(0x085a, 0x0008) }, /* PortGear Ethernet Adapter */ | 151 | { USB_DEVICE(0x085a, 0x0008) }, /* PortGear Ethernet Adapter */ |
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c new file mode 100644 index 000000000000..f1942d69a0d5 --- /dev/null +++ b/drivers/net/usb/sierra_net.c | |||
@@ -0,0 +1,1004 @@ | |||
1 | /* | ||
2 | * USB-to-WWAN Driver for Sierra Wireless modems | ||
3 | * | ||
4 | * Copyright (C) 2008, 2009, 2010 Paxton Smith, Matthew Safar, Rory Filer | ||
5 | * <linux@sierrawireless.com> | ||
6 | * | ||
7 | * Portions of this based on the cdc_ether driver by David Brownell (2003-2005) | ||
8 | * and Ole Andre Vadla Ravnas (ActiveSync) (2006). | ||
9 | * | ||
10 | * IMPORTANT DISCLAIMER: This driver is not commercially supported by | ||
11 | * Sierra Wireless. Use at your own risk. | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify | ||
14 | * it under the terms of the GNU General Public License as published by | ||
15 | * the Free Software Foundation; either version 2 of the License, or | ||
16 | * (at your option) any later version. | ||
17 | * | ||
18 | * This program is distributed in the hope that it will be useful, | ||
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
21 | * GNU General Public License for more details. | ||
22 | * | ||
23 | * You should have received a copy of the GNU General Public License | ||
24 | * along with this program; if not, write to the Free Software | ||
25 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
26 | */ | ||
27 | |||
28 | #define DRIVER_VERSION "v.2.0" | ||
29 | #define DRIVER_AUTHOR "Paxton Smith, Matthew Safar, Rory Filer" | ||
30 | #define DRIVER_DESC "USB-to-WWAN Driver for Sierra Wireless modems" | ||
31 | static const char driver_name[] = "sierra_net"; | ||
32 | |||
33 | /* if defined debug messages enabled */ | ||
34 | /*#define DEBUG*/ | ||
35 | |||
36 | #include <linux/module.h> | ||
37 | #include <linux/etherdevice.h> | ||
38 | #include <linux/ethtool.h> | ||
39 | #include <linux/mii.h> | ||
40 | #include <linux/sched.h> | ||
41 | #include <linux/timer.h> | ||
42 | #include <linux/usb.h> | ||
43 | #include <linux/usb/cdc.h> | ||
44 | #include <net/ip.h> | ||
45 | #include <net/udp.h> | ||
46 | #include <asm/unaligned.h> | ||
47 | #include <linux/usb/usbnet.h> | ||
48 | |||
49 | #define SWI_USB_REQUEST_GET_FW_ATTR 0x06 | ||
50 | #define SWI_GET_FW_ATTR_MASK 0x08 | ||
51 | |||
52 | /* atomic counter partially included in MAC address to make sure 2 devices | ||
53 | * do not end up with the same MAC - concept breaks in case of > 255 ifaces | ||
54 | */ | ||
55 | static atomic_t iface_counter = ATOMIC_INIT(0); | ||
56 | |||
57 | /* | ||
58 | * SYNC Timer Delay definition used to set the expiry time | ||
59 | */ | ||
60 | #define SIERRA_NET_SYNCDELAY (2*HZ) | ||
61 | |||
62 | /* Max. MTU supported. The modem buffers are limited to 1500 */ | ||
63 | #define SIERRA_NET_MAX_SUPPORTED_MTU 1500 | ||
64 | |||
65 | /* The SIERRA_NET_USBCTL_BUF_LEN defines a buffer size allocated for control | ||
66 | * message reception ... and thus the max. received packet. | ||
67 | * (May be the cause for parse_hip returning -EINVAL) | ||
68 | */ | ||
69 | #define SIERRA_NET_USBCTL_BUF_LEN 1024 | ||
70 | |||
71 | /* list of interface numbers - used for constructing interface lists */ | ||
72 | struct sierra_net_iface_info { | ||
73 | const u32 infolen; /* number of interface numbers on list */ | ||
74 | const u8 *ifaceinfo; /* pointer to the array holding the numbers */ | ||
75 | }; | ||
76 | |||
77 | struct sierra_net_info_data { | ||
78 | u16 rx_urb_size; | ||
79 | struct sierra_net_iface_info whitelist; | ||
80 | }; | ||
81 | |||
82 | /* Private data structure */ | ||
83 | struct sierra_net_data { | ||
84 | |||
85 | u8 ethr_hdr_tmpl[ETH_HLEN]; /* ethernet header template for rx'd pkts */ | ||
86 | |||
87 | u16 link_up; /* air link up or down */ | ||
88 | u8 tx_hdr_template[4]; /* part of HIP hdr for tx'd packets */ | ||
89 | |||
90 | u8 sync_msg[4]; /* SYNC message */ | ||
91 | u8 shdwn_msg[4]; /* Shutdown message */ | ||
92 | |||
93 | /* Backpointer to the container */ | ||
94 | struct usbnet *usbnet; | ||
95 | |||
96 | u8 ifnum; /* interface number */ | ||
97 | |||
98 | /* Bit masks, must be a power of 2 */ | ||
99 | #define SIERRA_NET_EVENT_RESP_AVAIL 0x01 | ||
100 | #define SIERRA_NET_TIMER_EXPIRY 0x02 | ||
101 | unsigned long kevent_flags; | ||
102 | struct work_struct sierra_net_kevent; | ||
103 | struct timer_list sync_timer; /* For retrying SYNC sequence */ | ||
104 | }; | ||
105 | |||
106 | struct param { | ||
107 | int is_present; | ||
108 | union { | ||
109 | void *ptr; | ||
110 | u32 dword; | ||
111 | u16 word; | ||
112 | u8 byte; | ||
113 | }; | ||
114 | }; | ||
115 | |||
116 | /* HIP message type */ | ||
117 | #define SIERRA_NET_HIP_EXTENDEDID 0x7F | ||
118 | #define SIERRA_NET_HIP_HSYNC_ID 0x60 /* Modem -> host */ | ||
119 | #define SIERRA_NET_HIP_RESTART_ID 0x62 /* Modem -> host */ | ||
120 | #define SIERRA_NET_HIP_MSYNC_ID 0x20 /* Host -> modem */ | ||
121 | #define SIERRA_NET_HIP_SHUTD_ID 0x26 /* Host -> modem */ | ||
122 | |||
123 | #define SIERRA_NET_HIP_EXT_IP_IN_ID 0x0202 | ||
124 | #define SIERRA_NET_HIP_EXT_IP_OUT_ID 0x0002 | ||
125 | |||
126 | /* 3G UMTS Link Sense Indication definitions */ | ||
127 | #define SIERRA_NET_HIP_LSI_UMTSID 0x78 | ||
128 | |||
129 | /* Reverse Channel Grant Indication HIP message */ | ||
130 | #define SIERRA_NET_HIP_RCGI 0x64 | ||
131 | |||
132 | /* LSI Protocol types */ | ||
133 | #define SIERRA_NET_PROTOCOL_UMTS 0x01 | ||
134 | /* LSI Coverage */ | ||
135 | #define SIERRA_NET_COVERAGE_NONE 0x00 | ||
136 | #define SIERRA_NET_COVERAGE_NOPACKET 0x01 | ||
137 | |||
138 | /* LSI Session */ | ||
139 | #define SIERRA_NET_SESSION_IDLE 0x00 | ||
140 | /* LSI Link types */ | ||
141 | #define SIERRA_NET_AS_LINK_TYPE_IPv4 0x00 | ||
142 | |||
143 | struct lsi_umts { | ||
144 | u8 protocol; | ||
145 | u8 unused1; | ||
146 | __be16 length; | ||
147 | /* eventually use a union for the rest - assume umts for now */ | ||
148 | u8 coverage; | ||
149 | u8 unused2[41]; | ||
150 | u8 session_state; | ||
151 | u8 unused3[33]; | ||
152 | u8 link_type; | ||
153 | u8 pdp_addr_len; /* NW-supplied PDP address len */ | ||
154 | u8 pdp_addr[16]; /* NW-supplied PDP address (bigendian)) */ | ||
155 | u8 unused4[23]; | ||
156 | u8 dns1_addr_len; /* NW-supplied 1st DNS address len (bigendian) */ | ||
157 | u8 dns1_addr[16]; /* NW-supplied 1st DNS address */ | ||
158 | u8 dns2_addr_len; /* NW-supplied 2nd DNS address len */ | ||
159 | u8 dns2_addr[16]; /* NW-supplied 2nd DNS address (bigendian)*/ | ||
160 | u8 wins1_addr_len; /* NW-supplied 1st Wins address len */ | ||
161 | u8 wins1_addr[16]; /* NW-supplied 1st Wins address (bigendian)*/ | ||
162 | u8 wins2_addr_len; /* NW-supplied 2nd Wins address len */ | ||
163 | u8 wins2_addr[16]; /* NW-supplied 2nd Wins address (bigendian) */ | ||
164 | u8 unused5[4]; | ||
165 | u8 gw_addr_len; /* NW-supplied GW address len */ | ||
166 | u8 gw_addr[16]; /* NW-supplied GW address (bigendian) */ | ||
167 | u8 reserved[8]; | ||
168 | } __attribute__ ((packed)); | ||
169 | |||
170 | #define SIERRA_NET_LSI_COMMON_LEN 4 | ||
171 | #define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts)) | ||
172 | #define SIERRA_NET_LSI_UMTS_STATUS_LEN \ | ||
173 | (SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN) | ||
174 | |||
175 | /* Forward definitions */ | ||
176 | static void sierra_sync_timer(unsigned long syncdata); | ||
177 | static int sierra_net_change_mtu(struct net_device *net, int new_mtu); | ||
178 | |||
179 | /* Our own net device operations structure */ | ||
180 | static const struct net_device_ops sierra_net_device_ops = { | ||
181 | .ndo_open = usbnet_open, | ||
182 | .ndo_stop = usbnet_stop, | ||
183 | .ndo_start_xmit = usbnet_start_xmit, | ||
184 | .ndo_tx_timeout = usbnet_tx_timeout, | ||
185 | .ndo_change_mtu = sierra_net_change_mtu, | ||
186 | .ndo_set_mac_address = eth_mac_addr, | ||
187 | .ndo_validate_addr = eth_validate_addr, | ||
188 | }; | ||
189 | |||
190 | /* get private data associated with passed in usbnet device */ | ||
191 | static inline struct sierra_net_data *sierra_net_get_private(struct usbnet *dev) | ||
192 | { | ||
193 | return (struct sierra_net_data *)dev->data[0]; | ||
194 | } | ||
195 | |||
196 | /* set private data associated with passed in usbnet device */ | ||
197 | static inline void sierra_net_set_private(struct usbnet *dev, | ||
198 | struct sierra_net_data *priv) | ||
199 | { | ||
200 | dev->data[0] = (unsigned long)priv; | ||
201 | } | ||
202 | |||
203 | /* is packet IPv4 */ | ||
204 | static inline int is_ip(struct sk_buff *skb) | ||
205 | { | ||
206 | return (skb->protocol == cpu_to_be16(ETH_P_IP)); | ||
207 | } | ||
208 | |||
209 | /* | ||
210 | * check passed in packet and make sure that: | ||
211 | * - it is linear (no scatter/gather) | ||
212 | * - it is ethernet (mac_header properly set) | ||
213 | */ | ||
214 | static int check_ethip_packet(struct sk_buff *skb, struct usbnet *dev) | ||
215 | { | ||
216 | skb_reset_mac_header(skb); /* ethernet header */ | ||
217 | |||
218 | if (skb_is_nonlinear(skb)) { | ||
219 | netdev_err(dev->net, "Non linear buffer-dropping\n"); | ||
220 | return 0; | ||
221 | } | ||
222 | |||
223 | if (!pskb_may_pull(skb, ETH_HLEN)) | ||
224 | return 0; | ||
225 | skb->protocol = eth_hdr(skb)->h_proto; | ||
226 | |||
227 | return 1; | ||
228 | } | ||
229 | |||
230 | static const u8 *save16bit(struct param *p, const u8 *datap) | ||
231 | { | ||
232 | p->is_present = 1; | ||
233 | p->word = get_unaligned_be16(datap); | ||
234 | return datap + sizeof(p->word); | ||
235 | } | ||
236 | |||
237 | static const u8 *save8bit(struct param *p, const u8 *datap) | ||
238 | { | ||
239 | p->is_present = 1; | ||
240 | p->byte = *datap; | ||
241 | return datap + sizeof(p->byte); | ||
242 | } | ||
243 | |||
244 | /*----------------------------------------------------------------------------* | ||
245 | * BEGIN HIP * | ||
246 | *----------------------------------------------------------------------------*/ | ||
247 | /* HIP header */ | ||
248 | #define SIERRA_NET_HIP_HDR_LEN 4 | ||
249 | /* Extended HIP header */ | ||
250 | #define SIERRA_NET_HIP_EXT_HDR_LEN 6 | ||
251 | |||
252 | struct hip_hdr { | ||
253 | int hdrlen; | ||
254 | struct param payload_len; | ||
255 | struct param msgid; | ||
256 | struct param msgspecific; | ||
257 | struct param extmsgid; | ||
258 | }; | ||
259 | |||
260 | static int parse_hip(const u8 *buf, const u32 buflen, struct hip_hdr *hh) | ||
261 | { | ||
262 | const u8 *curp = buf; | ||
263 | int padded; | ||
264 | |||
265 | if (buflen < SIERRA_NET_HIP_HDR_LEN) | ||
266 | return -EPROTO; | ||
267 | |||
268 | curp = save16bit(&hh->payload_len, curp); | ||
269 | curp = save8bit(&hh->msgid, curp); | ||
270 | curp = save8bit(&hh->msgspecific, curp); | ||
271 | |||
272 | padded = hh->msgid.byte & 0x80; | ||
273 | hh->msgid.byte &= 0x7F; /* 7 bits */ | ||
274 | |||
275 | hh->extmsgid.is_present = (hh->msgid.byte == SIERRA_NET_HIP_EXTENDEDID); | ||
276 | if (hh->extmsgid.is_present) { | ||
277 | if (buflen < SIERRA_NET_HIP_EXT_HDR_LEN) | ||
278 | return -EPROTO; | ||
279 | |||
280 | hh->payload_len.word &= 0x3FFF; /* 14 bits */ | ||
281 | |||
282 | curp = save16bit(&hh->extmsgid, curp); | ||
283 | hh->extmsgid.word &= 0x03FF; /* 10 bits */ | ||
284 | |||
285 | hh->hdrlen = SIERRA_NET_HIP_EXT_HDR_LEN; | ||
286 | } else { | ||
287 | hh->payload_len.word &= 0x07FF; /* 11 bits */ | ||
288 | hh->hdrlen = SIERRA_NET_HIP_HDR_LEN; | ||
289 | } | ||
290 | |||
291 | if (padded) { | ||
292 | hh->hdrlen++; | ||
293 | hh->payload_len.word--; | ||
294 | } | ||
295 | |||
296 | /* if real packet shorter than the claimed length */ | ||
297 | if (buflen < (hh->hdrlen + hh->payload_len.word)) | ||
298 | return -EINVAL; | ||
299 | |||
300 | return 0; | ||
301 | } | ||
302 | |||
303 | static void build_hip(u8 *buf, const u16 payloadlen, | ||
304 | struct sierra_net_data *priv) | ||
305 | { | ||
306 | /* the following doesn't have the full functionality. We | ||
307 | * currently build only one kind of header, so it is faster this way | ||
308 | */ | ||
309 | put_unaligned_be16(payloadlen, buf); | ||
310 | memcpy(buf+2, priv->tx_hdr_template, sizeof(priv->tx_hdr_template)); | ||
311 | } | ||
312 | /*----------------------------------------------------------------------------* | ||
313 | * END HIP * | ||
314 | *----------------------------------------------------------------------------*/ | ||
315 | |||
316 | static int sierra_net_send_cmd(struct usbnet *dev, | ||
317 | u8 *cmd, int cmdlen, const char * cmd_name) | ||
318 | { | ||
319 | struct sierra_net_data *priv = sierra_net_get_private(dev); | ||
320 | int status; | ||
321 | |||
322 | status = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), | ||
323 | USB_CDC_SEND_ENCAPSULATED_COMMAND, | ||
324 | USB_DIR_OUT|USB_TYPE_CLASS|USB_RECIP_INTERFACE, 0, | ||
325 | priv->ifnum, cmd, cmdlen, USB_CTRL_SET_TIMEOUT); | ||
326 | |||
327 | if (status != cmdlen && status != -ENODEV) | ||
328 | netdev_err(dev->net, "Submit %s failed %d\n", cmd_name, status); | ||
329 | |||
330 | return status; | ||
331 | } | ||
332 | |||
333 | static int sierra_net_send_sync(struct usbnet *dev) | ||
334 | { | ||
335 | int status; | ||
336 | struct sierra_net_data *priv = sierra_net_get_private(dev); | ||
337 | |||
338 | dev_dbg(&dev->udev->dev, "%s", __func__); | ||
339 | |||
340 | status = sierra_net_send_cmd(dev, priv->sync_msg, | ||
341 | sizeof(priv->sync_msg), "SYNC"); | ||
342 | |||
343 | return status; | ||
344 | } | ||
345 | |||
346 | static void sierra_net_set_ctx_index(struct sierra_net_data *priv, u8 ctx_ix) | ||
347 | { | ||
348 | dev_dbg(&(priv->usbnet->udev->dev), "%s %d", __func__, ctx_ix); | ||
349 | priv->tx_hdr_template[0] = 0x3F; | ||
350 | priv->tx_hdr_template[1] = ctx_ix; | ||
351 | *((u16 *)&priv->tx_hdr_template[2]) = | ||
352 | cpu_to_be16(SIERRA_NET_HIP_EXT_IP_OUT_ID); | ||
353 | } | ||
354 | |||
355 | static inline int sierra_net_is_valid_addrlen(u8 len) | ||
356 | { | ||
357 | return (len == sizeof(struct in_addr)); | ||
358 | } | ||
359 | |||
360 | static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen) | ||
361 | { | ||
362 | struct lsi_umts *lsi = (struct lsi_umts *)data; | ||
363 | |||
364 | if (datalen < sizeof(struct lsi_umts)) { | ||
365 | netdev_err(dev->net, "%s: Data length %d, exp %Zu\n", | ||
366 | __func__, datalen, | ||
367 | sizeof(struct lsi_umts)); | ||
368 | return -1; | ||
369 | } | ||
370 | |||
371 | if (lsi->length != cpu_to_be16(SIERRA_NET_LSI_UMTS_STATUS_LEN)) { | ||
372 | netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n", | ||
373 | __func__, be16_to_cpu(lsi->length), | ||
374 | (u32)SIERRA_NET_LSI_UMTS_STATUS_LEN); | ||
375 | return -1; | ||
376 | } | ||
377 | |||
378 | /* Validate the protocol - only support UMTS for now */ | ||
379 | if (lsi->protocol != SIERRA_NET_PROTOCOL_UMTS) { | ||
380 | netdev_err(dev->net, "Protocol unsupported, 0x%02x\n", | ||
381 | lsi->protocol); | ||
382 | return -1; | ||
383 | } | ||
384 | |||
385 | /* Validate the link type */ | ||
386 | if (lsi->link_type != SIERRA_NET_AS_LINK_TYPE_IPv4) { | ||
387 | netdev_err(dev->net, "Link type unsupported: 0x%02x\n", | ||
388 | lsi->link_type); | ||
389 | return -1; | ||
390 | } | ||
391 | |||
392 | /* Validate the coverage */ | ||
393 | if (lsi->coverage == SIERRA_NET_COVERAGE_NONE | ||
394 | || lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) { | ||
395 | netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage); | ||
396 | return 0; | ||
397 | } | ||
398 | |||
399 | /* Validate the session state */ | ||
400 | if (lsi->session_state == SIERRA_NET_SESSION_IDLE) { | ||
401 | netdev_err(dev->net, "Session idle, 0x%02x\n", | ||
402 | lsi->session_state); | ||
403 | return 0; | ||
404 | } | ||
405 | |||
406 | /* Set link_sense true */ | ||
407 | return 1; | ||
408 | } | ||
409 | |||
410 | static void sierra_net_handle_lsi(struct usbnet *dev, char *data, | ||
411 | struct hip_hdr *hh) | ||
412 | { | ||
413 | struct sierra_net_data *priv = sierra_net_get_private(dev); | ||
414 | int link_up; | ||
415 | |||
416 | link_up = sierra_net_parse_lsi(dev, data + hh->hdrlen, | ||
417 | hh->payload_len.word); | ||
418 | if (link_up < 0) { | ||
419 | netdev_err(dev->net, "Invalid LSI\n"); | ||
420 | return; | ||
421 | } | ||
422 | if (link_up) { | ||
423 | sierra_net_set_ctx_index(priv, hh->msgspecific.byte); | ||
424 | priv->link_up = 1; | ||
425 | netif_carrier_on(dev->net); | ||
426 | } else { | ||
427 | priv->link_up = 0; | ||
428 | netif_carrier_off(dev->net); | ||
429 | } | ||
430 | } | ||
431 | |||
432 | static void sierra_net_dosync(struct usbnet *dev) | ||
433 | { | ||
434 | int status; | ||
435 | struct sierra_net_data *priv = sierra_net_get_private(dev); | ||
436 | |||
437 | dev_dbg(&dev->udev->dev, "%s", __func__); | ||
438 | |||
439 | /* tell modem we are ready */ | ||
440 | status = sierra_net_send_sync(dev); | ||
441 | if (status < 0) | ||
442 | netdev_err(dev->net, | ||
443 | "Send SYNC failed, status %d\n", status); | ||
444 | status = sierra_net_send_sync(dev); | ||
445 | if (status < 0) | ||
446 | netdev_err(dev->net, | ||
447 | "Send SYNC failed, status %d\n", status); | ||
448 | |||
449 | /* Now, start a timer and make sure we get the Restart Indication */ | ||
450 | priv->sync_timer.function = sierra_sync_timer; | ||
451 | priv->sync_timer.data = (unsigned long) dev; | ||
452 | priv->sync_timer.expires = jiffies + SIERRA_NET_SYNCDELAY; | ||
453 | add_timer(&priv->sync_timer); | ||
454 | } | ||
455 | |||
456 | static void sierra_net_kevent(struct work_struct *work) | ||
457 | { | ||
458 | struct sierra_net_data *priv = | ||
459 | container_of(work, struct sierra_net_data, sierra_net_kevent); | ||
460 | struct usbnet *dev = priv->usbnet; | ||
461 | int len; | ||
462 | int err; | ||
463 | u8 *buf; | ||
464 | u8 ifnum; | ||
465 | |||
466 | if (test_bit(SIERRA_NET_EVENT_RESP_AVAIL, &priv->kevent_flags)) { | ||
467 | clear_bit(SIERRA_NET_EVENT_RESP_AVAIL, &priv->kevent_flags); | ||
468 | |||
469 | /* Query the modem for the LSI message */ | ||
470 | buf = kzalloc(SIERRA_NET_USBCTL_BUF_LEN, GFP_KERNEL); | ||
471 | if (!buf) { | ||
472 | netdev_err(dev->net, | ||
473 | "failed to allocate buf for LS msg\n"); | ||
474 | return; | ||
475 | } | ||
476 | ifnum = priv->ifnum; | ||
477 | len = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), | ||
478 | USB_CDC_GET_ENCAPSULATED_RESPONSE, | ||
479 | USB_DIR_IN|USB_TYPE_CLASS|USB_RECIP_INTERFACE, | ||
480 | 0, ifnum, buf, SIERRA_NET_USBCTL_BUF_LEN, | ||
481 | USB_CTRL_SET_TIMEOUT); | ||
482 | |||
483 | if (len < 0) { | ||
484 | netdev_err(dev->net, | ||
485 | "usb_control_msg failed, status %d\n", len); | ||
486 | } else { | ||
487 | struct hip_hdr hh; | ||
488 | |||
489 | dev_dbg(&dev->udev->dev, "%s: Received status message," | ||
490 | " %04x bytes", __func__, len); | ||
491 | |||
492 | err = parse_hip(buf, len, &hh); | ||
493 | if (err) { | ||
494 | netdev_err(dev->net, "%s: Bad packet," | ||
495 | " parse result %d\n", __func__, err); | ||
496 | kfree(buf); | ||
497 | return; | ||
498 | } | ||
499 | |||
500 | /* Validate packet length */ | ||
501 | if (len != hh.hdrlen + hh.payload_len.word) { | ||
502 | netdev_err(dev->net, "%s: Bad packet, received" | ||
503 | " %d, expected %d\n", __func__, len, | ||
504 | hh.hdrlen + hh.payload_len.word); | ||
505 | kfree(buf); | ||
506 | return; | ||
507 | } | ||
508 | |||
509 | /* Switch on received message types */ | ||
510 | switch (hh.msgid.byte) { | ||
511 | case SIERRA_NET_HIP_LSI_UMTSID: | ||
512 | dev_dbg(&dev->udev->dev, "LSI for ctx:%d", | ||
513 | hh.msgspecific.byte); | ||
514 | sierra_net_handle_lsi(dev, buf, &hh); | ||
515 | break; | ||
516 | case SIERRA_NET_HIP_RESTART_ID: | ||
517 | dev_dbg(&dev->udev->dev, "Restart reported: %d," | ||
518 | " stopping sync timer", | ||
519 | hh.msgspecific.byte); | ||
520 | /* Got sync resp - stop timer & clear mask */ | ||
521 | del_timer_sync(&priv->sync_timer); | ||
522 | clear_bit(SIERRA_NET_TIMER_EXPIRY, | ||
523 | &priv->kevent_flags); | ||
524 | break; | ||
525 | case SIERRA_NET_HIP_HSYNC_ID: | ||
526 | dev_dbg(&dev->udev->dev, "SYNC received"); | ||
527 | err = sierra_net_send_sync(dev); | ||
528 | if (err < 0) | ||
529 | netdev_err(dev->net, | ||
530 | "Send SYNC failed %d\n", err); | ||
531 | break; | ||
532 | case SIERRA_NET_HIP_EXTENDEDID: | ||
533 | netdev_err(dev->net, "Unrecognized HIP msg, " | ||
534 | "extmsgid 0x%04x\n", hh.extmsgid.word); | ||
535 | break; | ||
536 | case SIERRA_NET_HIP_RCGI: | ||
537 | /* Ignored */ | ||
538 | break; | ||
539 | default: | ||
540 | netdev_err(dev->net, "Unrecognized HIP msg, " | ||
541 | "msgid 0x%02x\n", hh.msgid.byte); | ||
542 | break; | ||
543 | } | ||
544 | } | ||
545 | kfree(buf); | ||
546 | } | ||
547 | /* The sync timer bit might be set */ | ||
548 | if (test_bit(SIERRA_NET_TIMER_EXPIRY, &priv->kevent_flags)) { | ||
549 | clear_bit(SIERRA_NET_TIMER_EXPIRY, &priv->kevent_flags); | ||
550 | dev_dbg(&dev->udev->dev, "Deferred sync timer expiry"); | ||
551 | sierra_net_dosync(priv->usbnet); | ||
552 | } | ||
553 | |||
554 | if (priv->kevent_flags) | ||
555 | dev_dbg(&dev->udev->dev, "sierra_net_kevent done, " | ||
556 | "kevent_flags = 0x%lx", priv->kevent_flags); | ||
557 | } | ||
558 | |||
559 | static void sierra_net_defer_kevent(struct usbnet *dev, int work) | ||
560 | { | ||
561 | struct sierra_net_data *priv = sierra_net_get_private(dev); | ||
562 | |||
563 | set_bit(work, &priv->kevent_flags); | ||
564 | schedule_work(&priv->sierra_net_kevent); | ||
565 | } | ||
566 | |||
567 | /* | ||
568 | * Sync Retransmit Timer Handler. On expiry, kick the work queue | ||
569 | */ | ||
570 | void sierra_sync_timer(unsigned long syncdata) | ||
571 | { | ||
572 | struct usbnet *dev = (struct usbnet *)syncdata; | ||
573 | |||
574 | dev_dbg(&dev->udev->dev, "%s", __func__); | ||
575 | /* Kick the tasklet */ | ||
576 | sierra_net_defer_kevent(dev, SIERRA_NET_TIMER_EXPIRY); | ||
577 | } | ||
578 | |||
579 | static void sierra_net_status(struct usbnet *dev, struct urb *urb) | ||
580 | { | ||
581 | struct usb_cdc_notification *event; | ||
582 | |||
583 | dev_dbg(&dev->udev->dev, "%s", __func__); | ||
584 | |||
585 | if (urb->actual_length < sizeof *event) | ||
586 | return; | ||
587 | |||
588 | /* Add cases to handle other standard notifications. */ | ||
589 | event = urb->transfer_buffer; | ||
590 | switch (event->bNotificationType) { | ||
591 | case USB_CDC_NOTIFY_NETWORK_CONNECTION: | ||
592 | case USB_CDC_NOTIFY_SPEED_CHANGE: | ||
593 | /* USB 305 sends those */ | ||
594 | break; | ||
595 | case USB_CDC_NOTIFY_RESPONSE_AVAILABLE: | ||
596 | sierra_net_defer_kevent(dev, SIERRA_NET_EVENT_RESP_AVAIL); | ||
597 | break; | ||
598 | default: | ||
599 | netdev_err(dev->net, ": unexpected notification %02x!\n", | ||
600 | event->bNotificationType); | ||
601 | break; | ||
602 | } | ||
603 | } | ||
604 | |||
605 | static void sierra_net_get_drvinfo(struct net_device *net, | ||
606 | struct ethtool_drvinfo *info) | ||
607 | { | ||
608 | /* Inherit standard device info */ | ||
609 | usbnet_get_drvinfo(net, info); | ||
610 | strncpy(info->driver, driver_name, sizeof info->driver); | ||
611 | strncpy(info->version, DRIVER_VERSION, sizeof info->version); | ||
612 | } | ||
613 | |||
614 | static u32 sierra_net_get_link(struct net_device *net) | ||
615 | { | ||
616 | struct usbnet *dev = netdev_priv(net); | ||
617 | /* Report link is down whenever the interface is down */ | ||
618 | return sierra_net_get_private(dev)->link_up && netif_running(net); | ||
619 | } | ||
620 | |||
621 | static struct ethtool_ops sierra_net_ethtool_ops = { | ||
622 | .get_drvinfo = sierra_net_get_drvinfo, | ||
623 | .get_link = sierra_net_get_link, | ||
624 | .get_msglevel = usbnet_get_msglevel, | ||
625 | .set_msglevel = usbnet_set_msglevel, | ||
626 | .get_settings = usbnet_get_settings, | ||
627 | .set_settings = usbnet_set_settings, | ||
628 | .nway_reset = usbnet_nway_reset, | ||
629 | }; | ||
630 | |||
631 | /* MTU can not be more than 1500 bytes, enforce it. */ | ||
632 | static int sierra_net_change_mtu(struct net_device *net, int new_mtu) | ||
633 | { | ||
634 | if (new_mtu > SIERRA_NET_MAX_SUPPORTED_MTU) | ||
635 | return -EINVAL; | ||
636 | |||
637 | return usbnet_change_mtu(net, new_mtu); | ||
638 | } | ||
639 | |||
640 | static int is_whitelisted(const u8 ifnum, | ||
641 | const struct sierra_net_iface_info *whitelist) | ||
642 | { | ||
643 | if (whitelist) { | ||
644 | const u8 *list = whitelist->ifaceinfo; | ||
645 | int i; | ||
646 | |||
647 | for (i = 0; i < whitelist->infolen; i++) { | ||
648 | if (list[i] == ifnum) | ||
649 | return 1; | ||
650 | } | ||
651 | } | ||
652 | return 0; | ||
653 | } | ||
654 | |||
655 | static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap) | ||
656 | { | ||
657 | int result = 0; | ||
658 | u16 *attrdata; | ||
659 | |||
660 | attrdata = kmalloc(sizeof(*attrdata), GFP_KERNEL); | ||
661 | if (!attrdata) | ||
662 | return -ENOMEM; | ||
663 | |||
664 | result = usb_control_msg( | ||
665 | dev->udev, | ||
666 | usb_rcvctrlpipe(dev->udev, 0), | ||
667 | /* _u8 vendor specific request */ | ||
668 | SWI_USB_REQUEST_GET_FW_ATTR, | ||
669 | USB_DIR_IN | USB_TYPE_VENDOR, /* __u8 request type */ | ||
670 | 0x0000, /* __u16 value not used */ | ||
671 | 0x0000, /* __u16 index not used */ | ||
672 | attrdata, /* char *data */ | ||
673 | sizeof(*attrdata), /* __u16 size */ | ||
674 | USB_CTRL_SET_TIMEOUT); /* int timeout */ | ||
675 | |||
676 | if (result < 0) { | ||
677 | kfree(attrdata); | ||
678 | return -EIO; | ||
679 | } | ||
680 | |||
681 | *datap = *attrdata; | ||
682 | |||
683 | kfree(attrdata); | ||
684 | return result; | ||
685 | } | ||
686 | |||
687 | /* | ||
688 | * collects the bulk endpoints, the status endpoint. | ||
689 | */ | ||
690 | static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) | ||
691 | { | ||
692 | u8 ifacenum; | ||
693 | u8 numendpoints; | ||
694 | u16 fwattr = 0; | ||
695 | int status; | ||
696 | struct ethhdr *eth; | ||
697 | struct sierra_net_data *priv; | ||
698 | static const u8 sync_tmplate[sizeof(priv->sync_msg)] = { | ||
699 | 0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00}; | ||
700 | static const u8 shdwn_tmplate[sizeof(priv->shdwn_msg)] = { | ||
701 | 0x00, 0x00, SIERRA_NET_HIP_SHUTD_ID, 0x00}; | ||
702 | |||
703 | struct sierra_net_info_data *data = | ||
704 | (struct sierra_net_info_data *)dev->driver_info->data; | ||
705 | |||
706 | dev_dbg(&dev->udev->dev, "%s", __func__); | ||
707 | |||
708 | ifacenum = intf->cur_altsetting->desc.bInterfaceNumber; | ||
709 | /* We only accept certain interfaces */ | ||
710 | if (!is_whitelisted(ifacenum, &data->whitelist)) { | ||
711 | dev_dbg(&dev->udev->dev, "Ignoring interface: %d", ifacenum); | ||
712 | return -ENODEV; | ||
713 | } | ||
714 | numendpoints = intf->cur_altsetting->desc.bNumEndpoints; | ||
715 | /* We have three endpoints, bulk in and out, and a status */ | ||
716 | if (numendpoints != 3) { | ||
717 | dev_err(&dev->udev->dev, "Expected 3 endpoints, found: %d", | ||
718 | numendpoints); | ||
719 | return -ENODEV; | ||
720 | } | ||
721 | /* Status endpoint set in usbnet_get_endpoints() */ | ||
722 | dev->status = NULL; | ||
723 | status = usbnet_get_endpoints(dev, intf); | ||
724 | if (status < 0) { | ||
725 | dev_err(&dev->udev->dev, "Error in usbnet_get_endpoints (%d)", | ||
726 | status); | ||
727 | return -ENODEV; | ||
728 | } | ||
729 | /* Initialize sierra private data */ | ||
730 | priv = kzalloc(sizeof *priv, GFP_KERNEL); | ||
731 | if (!priv) { | ||
732 | dev_err(&dev->udev->dev, "No memory"); | ||
733 | return -ENOMEM; | ||
734 | } | ||
735 | |||
736 | priv->usbnet = dev; | ||
737 | priv->ifnum = ifacenum; | ||
738 | dev->net->netdev_ops = &sierra_net_device_ops; | ||
739 | |||
740 | /* change MAC addr to include, ifacenum, and to be unique */ | ||
741 | dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter); | ||
742 | dev->net->dev_addr[ETH_ALEN-1] = ifacenum; | ||
743 | |||
744 | /* we will have to manufacture ethernet headers, prepare template */ | ||
745 | eth = (struct ethhdr *)priv->ethr_hdr_tmpl; | ||
746 | memcpy(ð->h_dest, dev->net->dev_addr, ETH_ALEN); | ||
747 | eth->h_proto = cpu_to_be16(ETH_P_IP); | ||
748 | |||
749 | /* prepare shutdown message template */ | ||
750 | memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg)); | ||
751 | /* set context index initially to 0 - prepares tx hdr template */ | ||
752 | sierra_net_set_ctx_index(priv, 0); | ||
753 | |||
754 | /* decrease the rx_urb_size and max_tx_size to 4k on USB 1.1 */ | ||
755 | dev->rx_urb_size = data->rx_urb_size; | ||
756 | if (dev->udev->speed != USB_SPEED_HIGH) | ||
757 | dev->rx_urb_size = min_t(size_t, 4096, data->rx_urb_size); | ||
758 | |||
759 | dev->net->hard_header_len += SIERRA_NET_HIP_EXT_HDR_LEN; | ||
760 | dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; | ||
761 | |||
762 | /* Set up the netdev */ | ||
763 | dev->net->flags |= IFF_NOARP; | ||
764 | dev->net->ethtool_ops = &sierra_net_ethtool_ops; | ||
765 | netif_carrier_off(dev->net); | ||
766 | |||
767 | sierra_net_set_private(dev, priv); | ||
768 | |||
769 | priv->kevent_flags = 0; | ||
770 | |||
771 | /* Use the shared workqueue */ | ||
772 | INIT_WORK(&priv->sierra_net_kevent, sierra_net_kevent); | ||
773 | |||
774 | /* Only need to do this once */ | ||
775 | init_timer(&priv->sync_timer); | ||
776 | |||
777 | /* verify fw attributes */ | ||
778 | status = sierra_net_get_fw_attr(dev, &fwattr); | ||
779 | dev_dbg(&dev->udev->dev, "Fw attr: %x\n", fwattr); | ||
780 | |||
781 | /* test whether firmware supports DHCP */ | ||
782 | if (!(status == sizeof(fwattr) && (fwattr & SWI_GET_FW_ATTR_MASK))) { | ||
783 | /* found incompatible firmware version */ | ||
784 | dev_err(&dev->udev->dev, "Incompatible driver and firmware" | ||
785 | " versions\n"); | ||
786 | kfree(priv); | ||
787 | return -ENODEV; | ||
788 | } | ||
789 | /* prepare sync message from template */ | ||
790 | memcpy(priv->sync_msg, sync_tmplate, sizeof(priv->sync_msg)); | ||
791 | |||
792 | /* initiate the sync sequence */ | ||
793 | sierra_net_dosync(dev); | ||
794 | |||
795 | return 0; | ||
796 | } | ||
797 | |||
798 | static void sierra_net_unbind(struct usbnet *dev, struct usb_interface *intf) | ||
799 | { | ||
800 | int status; | ||
801 | struct sierra_net_data *priv = sierra_net_get_private(dev); | ||
802 | |||
803 | dev_dbg(&dev->udev->dev, "%s", __func__); | ||
804 | |||
805 | /* Kill the timer then flush the work queue */ | ||
806 | del_timer_sync(&priv->sync_timer); | ||
807 | |||
808 | flush_scheduled_work(); | ||
809 | |||
810 | /* tell modem we are going away */ | ||
811 | status = sierra_net_send_cmd(dev, priv->shdwn_msg, | ||
812 | sizeof(priv->shdwn_msg), "Shutdown"); | ||
813 | if (status < 0) | ||
814 | netdev_err(dev->net, | ||
815 | "usb_control_msg failed, status %d\n", status); | ||
816 | |||
817 | sierra_net_set_private(dev, NULL); | ||
818 | |||
819 | kfree(priv); | ||
820 | } | ||
821 | |||
822 | static struct sk_buff *sierra_net_skb_clone(struct usbnet *dev, | ||
823 | struct sk_buff *skb, int len) | ||
824 | { | ||
825 | struct sk_buff *new_skb; | ||
826 | |||
827 | /* clone skb */ | ||
828 | new_skb = skb_clone(skb, GFP_ATOMIC); | ||
829 | |||
830 | /* remove len bytes from original */ | ||
831 | skb_pull(skb, len); | ||
832 | |||
833 | /* trim next packet to it's length */ | ||
834 | if (new_skb) { | ||
835 | skb_trim(new_skb, len); | ||
836 | } else { | ||
837 | if (netif_msg_rx_err(dev)) | ||
838 | netdev_err(dev->net, "failed to get skb\n"); | ||
839 | dev->net->stats.rx_dropped++; | ||
840 | } | ||
841 | |||
842 | return new_skb; | ||
843 | } | ||
844 | |||
845 | /* ---------------------------- Receive data path ----------------------*/ | ||
846 | static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | ||
847 | { | ||
848 | int err; | ||
849 | struct hip_hdr hh; | ||
850 | struct sk_buff *new_skb; | ||
851 | |||
852 | dev_dbg(&dev->udev->dev, "%s", __func__); | ||
853 | |||
854 | /* could contain multiple packets */ | ||
855 | while (likely(skb->len)) { | ||
856 | err = parse_hip(skb->data, skb->len, &hh); | ||
857 | if (err) { | ||
858 | if (netif_msg_rx_err(dev)) | ||
859 | netdev_err(dev->net, "Invalid HIP header %d\n", | ||
860 | err); | ||
861 | /* dev->net->stats.rx_errors incremented by caller */ | ||
862 | dev->net->stats.rx_length_errors++; | ||
863 | return 0; | ||
864 | } | ||
865 | |||
866 | /* Validate Extended HIP header */ | ||
867 | if (!hh.extmsgid.is_present | ||
868 | || hh.extmsgid.word != SIERRA_NET_HIP_EXT_IP_IN_ID) { | ||
869 | if (netif_msg_rx_err(dev)) | ||
870 | netdev_err(dev->net, "HIP/ETH: Invalid pkt\n"); | ||
871 | |||
872 | dev->net->stats.rx_frame_errors++; | ||
873 | /* dev->net->stats.rx_errors incremented by caller */; | ||
874 | return 0; | ||
875 | } | ||
876 | |||
877 | skb_pull(skb, hh.hdrlen); | ||
878 | |||
879 | /* We are going to accept this packet, prepare it */ | ||
880 | memcpy(skb->data, sierra_net_get_private(dev)->ethr_hdr_tmpl, | ||
881 | ETH_HLEN); | ||
882 | |||
883 | /* Last packet in batch handled by usbnet */ | ||
884 | if (hh.payload_len.word == skb->len) | ||
885 | return 1; | ||
886 | |||
887 | new_skb = sierra_net_skb_clone(dev, skb, hh.payload_len.word); | ||
888 | if (new_skb) | ||
889 | usbnet_skb_return(dev, new_skb); | ||
890 | |||
891 | } /* while */ | ||
892 | |||
893 | return 0; | ||
894 | } | ||
895 | |||
896 | /* ---------------------------- Transmit data path ----------------------*/ | ||
897 | struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | ||
898 | gfp_t flags) | ||
899 | { | ||
900 | struct sierra_net_data *priv = sierra_net_get_private(dev); | ||
901 | u16 len; | ||
902 | bool need_tail; | ||
903 | |||
904 | dev_dbg(&dev->udev->dev, "%s", __func__); | ||
905 | if (priv->link_up && check_ethip_packet(skb, dev) && is_ip(skb)) { | ||
906 | /* enough head room as is? */ | ||
907 | if (SIERRA_NET_HIP_EXT_HDR_LEN <= skb_headroom(skb)) { | ||
908 | /* Save the Eth/IP length and set up HIP hdr */ | ||
909 | len = skb->len; | ||
910 | skb_push(skb, SIERRA_NET_HIP_EXT_HDR_LEN); | ||
911 | /* Handle ZLP issue */ | ||
912 | need_tail = ((len + SIERRA_NET_HIP_EXT_HDR_LEN) | ||
913 | % dev->maxpacket == 0); | ||
914 | if (need_tail) { | ||
915 | if (unlikely(skb_tailroom(skb) == 0)) { | ||
916 | netdev_err(dev->net, "tx_fixup:" | ||
917 | "no room for packet\n"); | ||
918 | dev_kfree_skb_any(skb); | ||
919 | return NULL; | ||
920 | } else { | ||
921 | skb->data[skb->len] = 0; | ||
922 | __skb_put(skb, 1); | ||
923 | len = len + 1; | ||
924 | } | ||
925 | } | ||
926 | build_hip(skb->data, len, priv); | ||
927 | return skb; | ||
928 | } else { | ||
929 | /* | ||
930 | * compensate in the future if necessary | ||
931 | */ | ||
932 | netdev_err(dev->net, "tx_fixup: no room for HIP\n"); | ||
933 | } /* headroom */ | ||
934 | } | ||
935 | |||
936 | if (!priv->link_up) | ||
937 | dev->net->stats.tx_carrier_errors++; | ||
938 | |||
939 | /* tx_dropped incremented by usbnet */ | ||
940 | |||
941 | /* filter the packet out, release it */ | ||
942 | dev_kfree_skb_any(skb); | ||
943 | return NULL; | ||
944 | } | ||
945 | |||
946 | static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 }; | ||
947 | static const struct sierra_net_info_data sierra_net_info_data_68A3 = { | ||
948 | .rx_urb_size = 8 * 1024, | ||
949 | .whitelist = { | ||
950 | .infolen = ARRAY_SIZE(sierra_net_ifnum_list), | ||
951 | .ifaceinfo = sierra_net_ifnum_list | ||
952 | } | ||
953 | }; | ||
954 | |||
955 | static const struct driver_info sierra_net_info_68A3 = { | ||
956 | .description = "Sierra Wireless USB-to-WWAN Modem", | ||
957 | .flags = FLAG_WWAN | FLAG_SEND_ZLP, | ||
958 | .bind = sierra_net_bind, | ||
959 | .unbind = sierra_net_unbind, | ||
960 | .status = sierra_net_status, | ||
961 | .rx_fixup = sierra_net_rx_fixup, | ||
962 | .tx_fixup = sierra_net_tx_fixup, | ||
963 | .data = (unsigned long)&sierra_net_info_data_68A3, | ||
964 | }; | ||
965 | |||
966 | static const struct usb_device_id products[] = { | ||
967 | {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */ | ||
968 | .driver_info = (unsigned long) &sierra_net_info_68A3}, | ||
969 | |||
970 | {}, /* last item */ | ||
971 | }; | ||
972 | MODULE_DEVICE_TABLE(usb, products); | ||
973 | |||
974 | /* We are based on usbnet, so let it handle the USB driver specifics */ | ||
975 | static struct usb_driver sierra_net_driver = { | ||
976 | .name = "sierra_net", | ||
977 | .id_table = products, | ||
978 | .probe = usbnet_probe, | ||
979 | .disconnect = usbnet_disconnect, | ||
980 | .suspend = usbnet_suspend, | ||
981 | .resume = usbnet_resume, | ||
982 | .no_dynamic_id = 1, | ||
983 | }; | ||
984 | |||
985 | static int __init sierra_net_init(void) | ||
986 | { | ||
987 | BUILD_BUG_ON(FIELD_SIZEOF(struct usbnet, data) | ||
988 | < sizeof(struct cdc_state)); | ||
989 | |||
990 | return usb_register(&sierra_net_driver); | ||
991 | } | ||
992 | |||
993 | static void __exit sierra_net_exit(void) | ||
994 | { | ||
995 | usb_deregister(&sierra_net_driver); | ||
996 | } | ||
997 | |||
998 | module_exit(sierra_net_exit); | ||
999 | module_init(sierra_net_init); | ||
1000 | |||
1001 | MODULE_AUTHOR(DRIVER_AUTHOR); | ||
1002 | MODULE_DESCRIPTION(DRIVER_DESC); | ||
1003 | MODULE_VERSION(DRIVER_VERSION); | ||
1004 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c index 679da7e7522e..ca42ccb23d76 100644 --- a/drivers/net/wireless/p54/p54pci.c +++ b/drivers/net/wireless/p54/p54pci.c | |||
@@ -246,7 +246,7 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index, | |||
246 | u32 idx, i; | 246 | u32 idx, i; |
247 | 247 | ||
248 | i = (*index) % ring_limit; | 248 | i = (*index) % ring_limit; |
249 | (*index) = idx = le32_to_cpu(ring_control->device_idx[1]); | 249 | (*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]); |
250 | idx %= ring_limit; | 250 | idx %= ring_limit; |
251 | 251 | ||
252 | while (i != idx) { | 252 | while (i != idx) { |
diff --git a/include/linux/net.h b/include/linux/net.h index 4157b5d42bd6..2b4deeeb8646 100644 --- a/include/linux/net.h +++ b/include/linux/net.h | |||
@@ -59,6 +59,7 @@ typedef enum { | |||
59 | #include <linux/wait.h> | 59 | #include <linux/wait.h> |
60 | #include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */ | 60 | #include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */ |
61 | #include <linux/kmemcheck.h> | 61 | #include <linux/kmemcheck.h> |
62 | #include <linux/rcupdate.h> | ||
62 | 63 | ||
63 | struct poll_table_struct; | 64 | struct poll_table_struct; |
64 | struct pipe_inode_info; | 65 | struct pipe_inode_info; |
@@ -116,6 +117,12 @@ enum sock_shutdown_cmd { | |||
116 | SHUT_RDWR = 2, | 117 | SHUT_RDWR = 2, |
117 | }; | 118 | }; |
118 | 119 | ||
120 | struct socket_wq { | ||
121 | wait_queue_head_t wait; | ||
122 | struct fasync_struct *fasync_list; | ||
123 | struct rcu_head rcu; | ||
124 | } ____cacheline_aligned_in_smp; | ||
125 | |||
119 | /** | 126 | /** |
120 | * struct socket - general BSD socket | 127 | * struct socket - general BSD socket |
121 | * @state: socket state (%SS_CONNECTED, etc) | 128 | * @state: socket state (%SS_CONNECTED, etc) |
@@ -135,11 +142,8 @@ struct socket { | |||
135 | kmemcheck_bitfield_end(type); | 142 | kmemcheck_bitfield_end(type); |
136 | 143 | ||
137 | unsigned long flags; | 144 | unsigned long flags; |
138 | /* | 145 | |
139 | * Please keep fasync_list & wait fields in the same cache line | 146 | struct socket_wq *wq; |
140 | */ | ||
141 | struct fasync_struct *fasync_list; | ||
142 | wait_queue_head_t wait; | ||
143 | 147 | ||
144 | struct file *file; | 148 | struct file *file; |
145 | struct sock *sk; | 149 | struct sock *sk; |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 40d4c20d034b..98112fbddefd 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -218,16 +218,6 @@ struct neighbour; | |||
218 | struct neigh_parms; | 218 | struct neigh_parms; |
219 | struct sk_buff; | 219 | struct sk_buff; |
220 | 220 | ||
221 | struct netif_rx_stats { | ||
222 | unsigned total; | ||
223 | unsigned dropped; | ||
224 | unsigned time_squeeze; | ||
225 | unsigned cpu_collision; | ||
226 | unsigned received_rps; | ||
227 | }; | ||
228 | |||
229 | DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat); | ||
230 | |||
231 | struct netdev_hw_addr { | 221 | struct netdev_hw_addr { |
232 | struct list_head list; | 222 | struct list_head list; |
233 | unsigned char addr[MAX_ADDR_LEN]; | 223 | unsigned char addr[MAX_ADDR_LEN]; |
@@ -888,7 +878,7 @@ struct net_device { | |||
888 | unsigned char operstate; /* RFC2863 operstate */ | 878 | unsigned char operstate; /* RFC2863 operstate */ |
889 | unsigned char link_mode; /* mapping policy to operstate */ | 879 | unsigned char link_mode; /* mapping policy to operstate */ |
890 | 880 | ||
891 | unsigned mtu; /* interface MTU value */ | 881 | unsigned int mtu; /* interface MTU value */ |
892 | unsigned short type; /* interface hardware type */ | 882 | unsigned short type; /* interface hardware type */ |
893 | unsigned short hard_header_len; /* hardware hdr length */ | 883 | unsigned short hard_header_len; /* hardware hdr length */ |
894 | 884 | ||
@@ -1390,6 +1380,12 @@ struct softnet_data { | |||
1390 | struct sk_buff *completion_queue; | 1380 | struct sk_buff *completion_queue; |
1391 | struct sk_buff_head process_queue; | 1381 | struct sk_buff_head process_queue; |
1392 | 1382 | ||
1383 | /* stats */ | ||
1384 | unsigned int processed; | ||
1385 | unsigned int time_squeeze; | ||
1386 | unsigned int cpu_collision; | ||
1387 | unsigned int received_rps; | ||
1388 | |||
1393 | #ifdef CONFIG_RPS | 1389 | #ifdef CONFIG_RPS |
1394 | struct softnet_data *rps_ipi_list; | 1390 | struct softnet_data *rps_ipi_list; |
1395 | 1391 | ||
@@ -1399,6 +1395,7 @@ struct softnet_data { | |||
1399 | unsigned int cpu; | 1395 | unsigned int cpu; |
1400 | unsigned int input_queue_head; | 1396 | unsigned int input_queue_head; |
1401 | #endif | 1397 | #endif |
1398 | unsigned dropped; | ||
1402 | struct sk_buff_head input_pkt_queue; | 1399 | struct sk_buff_head input_pkt_queue; |
1403 | struct napi_struct backlog; | 1400 | struct napi_struct backlog; |
1404 | }; | 1401 | }; |
diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 004908b104d5..4ec3b38ce9c5 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h | |||
@@ -429,6 +429,23 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, | |||
429 | pos = rcu_dereference_raw(pos->next)) | 429 | pos = rcu_dereference_raw(pos->next)) |
430 | 430 | ||
431 | /** | 431 | /** |
432 | * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type | ||
433 | * @tpos: the type * to use as a loop cursor. | ||
434 | * @pos: the &struct hlist_node to use as a loop cursor. | ||
435 | * @head: the head for your list. | ||
436 | * @member: the name of the hlist_node within the struct. | ||
437 | * | ||
438 | * This list-traversal primitive may safely run concurrently with | ||
439 | * the _rcu list-mutation primitives such as hlist_add_head_rcu() | ||
440 | * as long as the traversal is guarded by rcu_read_lock(). | ||
441 | */ | ||
442 | #define hlist_for_each_entry_rcu_bh(tpos, pos, head, member) \ | ||
443 | for (pos = rcu_dereference_bh((head)->first); \ | ||
444 | pos && ({ prefetch(pos->next); 1; }) && \ | ||
445 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ | ||
446 | pos = rcu_dereference_bh(pos->next)) | ||
447 | |||
448 | /** | ||
432 | * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point | 449 | * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point |
433 | * @tpos: the type * to use as a loop cursor. | 450 | * @tpos: the type * to use as a loop cursor. |
434 | * @pos: the &struct hlist_node to use as a loop cursor. | 451 | * @pos: the &struct hlist_node to use as a loop cursor. |
@@ -440,6 +457,18 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, | |||
440 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ | 457 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ |
441 | pos = rcu_dereference(pos->next)) | 458 | pos = rcu_dereference(pos->next)) |
442 | 459 | ||
460 | /** | ||
461 | * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point | ||
462 | * @tpos: the type * to use as a loop cursor. | ||
463 | * @pos: the &struct hlist_node to use as a loop cursor. | ||
464 | * @member: the name of the hlist_node within the struct. | ||
465 | */ | ||
466 | #define hlist_for_each_entry_continue_rcu_bh(tpos, pos, member) \ | ||
467 | for (pos = rcu_dereference_bh((pos)->next); \ | ||
468 | pos && ({ prefetch(pos->next); 1; }) && \ | ||
469 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ | ||
470 | pos = rcu_dereference_bh(pos->next)) | ||
471 | |||
443 | 472 | ||
444 | #endif /* __KERNEL__ */ | 473 | #endif /* __KERNEL__ */ |
445 | #endif | 474 | #endif |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 82f5116a89e4..746a652b9f6f 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -1128,6 +1128,11 @@ static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len) | |||
1128 | return skb->data += len; | 1128 | return skb->data += len; |
1129 | } | 1129 | } |
1130 | 1130 | ||
1131 | static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len) | ||
1132 | { | ||
1133 | return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len); | ||
1134 | } | ||
1135 | |||
1131 | extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta); | 1136 | extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta); |
1132 | 1137 | ||
1133 | static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len) | 1138 | static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len) |
diff --git a/include/net/af_unix.h b/include/net/af_unix.h index 1614d78c60ed..20725e213aee 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h | |||
@@ -30,7 +30,7 @@ struct unix_skb_parms { | |||
30 | #endif | 30 | #endif |
31 | }; | 31 | }; |
32 | 32 | ||
33 | #define UNIXCB(skb) (*(struct unix_skb_parms*)&((skb)->cb)) | 33 | #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb)) |
34 | #define UNIXCREDS(skb) (&UNIXCB((skb)).creds) | 34 | #define UNIXCREDS(skb) (&UNIXCB((skb)).creds) |
35 | #define UNIXSID(skb) (&UNIXCB((skb)).secid) | 35 | #define UNIXSID(skb) (&UNIXCB((skb)).secid) |
36 | 36 | ||
@@ -45,21 +45,23 @@ struct unix_skb_parms { | |||
45 | struct unix_sock { | 45 | struct unix_sock { |
46 | /* WARNING: sk has to be the first member */ | 46 | /* WARNING: sk has to be the first member */ |
47 | struct sock sk; | 47 | struct sock sk; |
48 | struct unix_address *addr; | 48 | struct unix_address *addr; |
49 | struct dentry *dentry; | 49 | struct dentry *dentry; |
50 | struct vfsmount *mnt; | 50 | struct vfsmount *mnt; |
51 | struct mutex readlock; | 51 | struct mutex readlock; |
52 | struct sock *peer; | 52 | struct sock *peer; |
53 | struct sock *other; | 53 | struct sock *other; |
54 | struct list_head link; | 54 | struct list_head link; |
55 | atomic_long_t inflight; | 55 | atomic_long_t inflight; |
56 | spinlock_t lock; | 56 | spinlock_t lock; |
57 | unsigned int gc_candidate : 1; | 57 | unsigned int gc_candidate : 1; |
58 | unsigned int gc_maybe_cycle : 1; | 58 | unsigned int gc_maybe_cycle : 1; |
59 | wait_queue_head_t peer_wait; | 59 | struct socket_wq peer_wq; |
60 | }; | 60 | }; |
61 | #define unix_sk(__sk) ((struct unix_sock *)__sk) | 61 | #define unix_sk(__sk) ((struct unix_sock *)__sk) |
62 | 62 | ||
63 | #define peer_wait peer_wq.wait | ||
64 | |||
63 | #ifdef CONFIG_SYSCTL | 65 | #ifdef CONFIG_SYSCTL |
64 | extern int unix_sysctl_register(struct net *net); | 66 | extern int unix_sysctl_register(struct net *net); |
65 | extern void unix_sysctl_unregister(struct net *net); | 67 | extern void unix_sysctl_unregister(struct net *net); |
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h index 8be5135ff7aa..2c55a7ea20af 100644 --- a/include/net/sctp/command.h +++ b/include/net/sctp/command.h | |||
@@ -107,6 +107,7 @@ typedef enum { | |||
107 | SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */ | 107 | SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */ |
108 | SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */ | 108 | SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */ |
109 | SCTP_CMD_SEND_MSG, /* Send the whole use message */ | 109 | SCTP_CMD_SEND_MSG, /* Send the whole use message */ |
110 | SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */ | ||
110 | SCTP_CMD_LAST | 111 | SCTP_CMD_LAST |
111 | } sctp_verb_t; | 112 | } sctp_verb_t; |
112 | 113 | ||
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 289241d31cc1..65946bc43d00 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h | |||
@@ -128,6 +128,7 @@ extern int sctp_register_pf(struct sctp_pf *, sa_family_t); | |||
128 | int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb); | 128 | int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb); |
129 | int sctp_inet_listen(struct socket *sock, int backlog); | 129 | int sctp_inet_listen(struct socket *sock, int backlog); |
130 | void sctp_write_space(struct sock *sk); | 130 | void sctp_write_space(struct sock *sk); |
131 | void sctp_data_ready(struct sock *sk, int len); | ||
131 | unsigned int sctp_poll(struct file *file, struct socket *sock, | 132 | unsigned int sctp_poll(struct file *file, struct socket *sock, |
132 | poll_table *wait); | 133 | poll_table *wait); |
133 | void sctp_sock_rfree(struct sk_buff *skb); | 134 | void sctp_sock_rfree(struct sk_buff *skb); |
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 9d44aef365da..43257b903c82 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h | |||
@@ -775,6 +775,7 @@ int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len, | |||
775 | struct iovec *data); | 775 | struct iovec *data); |
776 | void sctp_chunk_free(struct sctp_chunk *); | 776 | void sctp_chunk_free(struct sctp_chunk *); |
777 | void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data); | 777 | void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data); |
778 | void *sctp_addto_chunk_fixed(struct sctp_chunk *, int len, const void *data); | ||
778 | struct sctp_chunk *sctp_chunkify(struct sk_buff *, | 779 | struct sctp_chunk *sctp_chunkify(struct sk_buff *, |
779 | const struct sctp_association *, | 780 | const struct sctp_association *, |
780 | struct sock *); | 781 | struct sock *); |
diff --git a/include/net/sock.h b/include/net/sock.h index e1777db5b9ab..328e03f47dd1 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -74,7 +74,7 @@ | |||
74 | printk(KERN_DEBUG msg); } while (0) | 74 | printk(KERN_DEBUG msg); } while (0) |
75 | #else | 75 | #else |
76 | /* Validate arguments and do nothing */ | 76 | /* Validate arguments and do nothing */ |
77 | static void inline int __attribute__ ((format (printf, 2, 3))) | 77 | static inline void __attribute__ ((format (printf, 2, 3))) |
78 | SOCK_DEBUG(struct sock *sk, const char *msg, ...) | 78 | SOCK_DEBUG(struct sock *sk, const char *msg, ...) |
79 | { | 79 | { |
80 | } | 80 | } |
@@ -159,7 +159,7 @@ struct sock_common { | |||
159 | * @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings | 159 | * @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings |
160 | * @sk_lock: synchronizer | 160 | * @sk_lock: synchronizer |
161 | * @sk_rcvbuf: size of receive buffer in bytes | 161 | * @sk_rcvbuf: size of receive buffer in bytes |
162 | * @sk_sleep: sock wait queue | 162 | * @sk_wq: sock wait queue and async head |
163 | * @sk_dst_cache: destination cache | 163 | * @sk_dst_cache: destination cache |
164 | * @sk_dst_lock: destination cache lock | 164 | * @sk_dst_lock: destination cache lock |
165 | * @sk_policy: flow policy | 165 | * @sk_policy: flow policy |
@@ -257,7 +257,7 @@ struct sock { | |||
257 | struct sk_buff *tail; | 257 | struct sk_buff *tail; |
258 | int len; | 258 | int len; |
259 | } sk_backlog; | 259 | } sk_backlog; |
260 | wait_queue_head_t *sk_sleep; | 260 | struct socket_wq *sk_wq; |
261 | struct dst_entry *sk_dst_cache; | 261 | struct dst_entry *sk_dst_cache; |
262 | #ifdef CONFIG_XFRM | 262 | #ifdef CONFIG_XFRM |
263 | struct xfrm_policy *sk_policy[2]; | 263 | struct xfrm_policy *sk_policy[2]; |
@@ -1219,7 +1219,7 @@ static inline void sk_set_socket(struct sock *sk, struct socket *sock) | |||
1219 | 1219 | ||
1220 | static inline wait_queue_head_t *sk_sleep(struct sock *sk) | 1220 | static inline wait_queue_head_t *sk_sleep(struct sock *sk) |
1221 | { | 1221 | { |
1222 | return sk->sk_sleep; | 1222 | return &sk->sk_wq->wait; |
1223 | } | 1223 | } |
1224 | /* Detach socket from process context. | 1224 | /* Detach socket from process context. |
1225 | * Announce socket dead, detach it from wait queue and inode. | 1225 | * Announce socket dead, detach it from wait queue and inode. |
@@ -1233,14 +1233,14 @@ static inline void sock_orphan(struct sock *sk) | |||
1233 | write_lock_bh(&sk->sk_callback_lock); | 1233 | write_lock_bh(&sk->sk_callback_lock); |
1234 | sock_set_flag(sk, SOCK_DEAD); | 1234 | sock_set_flag(sk, SOCK_DEAD); |
1235 | sk_set_socket(sk, NULL); | 1235 | sk_set_socket(sk, NULL); |
1236 | sk->sk_sleep = NULL; | 1236 | sk->sk_wq = NULL; |
1237 | write_unlock_bh(&sk->sk_callback_lock); | 1237 | write_unlock_bh(&sk->sk_callback_lock); |
1238 | } | 1238 | } |
1239 | 1239 | ||
1240 | static inline void sock_graft(struct sock *sk, struct socket *parent) | 1240 | static inline void sock_graft(struct sock *sk, struct socket *parent) |
1241 | { | 1241 | { |
1242 | write_lock_bh(&sk->sk_callback_lock); | 1242 | write_lock_bh(&sk->sk_callback_lock); |
1243 | sk->sk_sleep = &parent->wait; | 1243 | rcu_assign_pointer(sk->sk_wq, parent->wq); |
1244 | parent->sk = sk; | 1244 | parent->sk = sk; |
1245 | sk_set_socket(sk, parent); | 1245 | sk_set_socket(sk, parent); |
1246 | security_sock_graft(sk, parent); | 1246 | security_sock_graft(sk, parent); |
@@ -1392,12 +1392,12 @@ static inline int sk_has_allocations(const struct sock *sk) | |||
1392 | } | 1392 | } |
1393 | 1393 | ||
1394 | /** | 1394 | /** |
1395 | * sk_has_sleeper - check if there are any waiting processes | 1395 | * wq_has_sleeper - check if there are any waiting processes |
1396 | * @sk: socket | 1396 | * @sk: struct socket_wq |
1397 | * | 1397 | * |
1398 | * Returns true if socket has waiting processes | 1398 | * Returns true if socket_wq has waiting processes |
1399 | * | 1399 | * |
1400 | * The purpose of the sk_has_sleeper and sock_poll_wait is to wrap the memory | 1400 | * The purpose of the wq_has_sleeper and sock_poll_wait is to wrap the memory |
1401 | * barrier call. They were added due to the race found within the tcp code. | 1401 | * barrier call. They were added due to the race found within the tcp code. |
1402 | * | 1402 | * |
1403 | * Consider following tcp code paths: | 1403 | * Consider following tcp code paths: |
@@ -1410,9 +1410,10 @@ static inline int sk_has_allocations(const struct sock *sk) | |||
1410 | * ... ... | 1410 | * ... ... |
1411 | * tp->rcv_nxt check sock_def_readable | 1411 | * tp->rcv_nxt check sock_def_readable |
1412 | * ... { | 1412 | * ... { |
1413 | * schedule ... | 1413 | * schedule rcu_read_lock(); |
1414 | * if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) | 1414 | * wq = rcu_dereference(sk->sk_wq); |
1415 | * wake_up_interruptible(sk_sleep(sk)) | 1415 | * if (wq && waitqueue_active(&wq->wait)) |
1416 | * wake_up_interruptible(&wq->wait) | ||
1416 | * ... | 1417 | * ... |
1417 | * } | 1418 | * } |
1418 | * | 1419 | * |
@@ -1421,19 +1422,18 @@ static inline int sk_has_allocations(const struct sock *sk) | |||
1421 | * could then endup calling schedule and sleep forever if there are no more | 1422 | * could then endup calling schedule and sleep forever if there are no more |
1422 | * data on the socket. | 1423 | * data on the socket. |
1423 | * | 1424 | * |
1424 | * The sk_has_sleeper is always called right after a call to read_lock, so we | ||
1425 | * can use smp_mb__after_lock barrier. | ||
1426 | */ | 1425 | */ |
1427 | static inline int sk_has_sleeper(struct sock *sk) | 1426 | static inline bool wq_has_sleeper(struct socket_wq *wq) |
1428 | { | 1427 | { |
1428 | |||
1429 | /* | 1429 | /* |
1430 | * We need to be sure we are in sync with the | 1430 | * We need to be sure we are in sync with the |
1431 | * add_wait_queue modifications to the wait queue. | 1431 | * add_wait_queue modifications to the wait queue. |
1432 | * | 1432 | * |
1433 | * This memory barrier is paired in the sock_poll_wait. | 1433 | * This memory barrier is paired in the sock_poll_wait. |
1434 | */ | 1434 | */ |
1435 | smp_mb__after_lock(); | 1435 | smp_mb(); |
1436 | return sk_sleep(sk) && waitqueue_active(sk_sleep(sk)); | 1436 | return wq && waitqueue_active(&wq->wait); |
1437 | } | 1437 | } |
1438 | 1438 | ||
1439 | /** | 1439 | /** |
@@ -1442,7 +1442,7 @@ static inline int sk_has_sleeper(struct sock *sk) | |||
1442 | * @wait_address: socket wait queue | 1442 | * @wait_address: socket wait queue |
1443 | * @p: poll_table | 1443 | * @p: poll_table |
1444 | * | 1444 | * |
1445 | * See the comments in the sk_has_sleeper function. | 1445 | * See the comments in the wq_has_sleeper function. |
1446 | */ | 1446 | */ |
1447 | static inline void sock_poll_wait(struct file *filp, | 1447 | static inline void sock_poll_wait(struct file *filp, |
1448 | wait_queue_head_t *wait_address, poll_table *p) | 1448 | wait_queue_head_t *wait_address, poll_table *p) |
@@ -1453,7 +1453,7 @@ static inline void sock_poll_wait(struct file *filp, | |||
1453 | * We need to be sure we are in sync with the | 1453 | * We need to be sure we are in sync with the |
1454 | * socket flags modification. | 1454 | * socket flags modification. |
1455 | * | 1455 | * |
1456 | * This memory barrier is paired in the sk_has_sleeper. | 1456 | * This memory barrier is paired in the wq_has_sleeper. |
1457 | */ | 1457 | */ |
1458 | smp_mb(); | 1458 | smp_mb(); |
1459 | } | 1459 | } |
diff --git a/net/atm/common.c b/net/atm/common.c index e3e10e6f8628..b43feb1a3995 100644 --- a/net/atm/common.c +++ b/net/atm/common.c | |||
@@ -90,10 +90,13 @@ static void vcc_sock_destruct(struct sock *sk) | |||
90 | 90 | ||
91 | static void vcc_def_wakeup(struct sock *sk) | 91 | static void vcc_def_wakeup(struct sock *sk) |
92 | { | 92 | { |
93 | read_lock(&sk->sk_callback_lock); | 93 | struct socket_wq *wq; |
94 | if (sk_has_sleeper(sk)) | 94 | |
95 | wake_up(sk_sleep(sk)); | 95 | rcu_read_lock(); |
96 | read_unlock(&sk->sk_callback_lock); | 96 | wq = rcu_dereference(sk->sk_wq); |
97 | if (wq_has_sleeper(wq)) | ||
98 | wake_up(&wq->wait); | ||
99 | rcu_read_unlock(); | ||
97 | } | 100 | } |
98 | 101 | ||
99 | static inline int vcc_writable(struct sock *sk) | 102 | static inline int vcc_writable(struct sock *sk) |
@@ -106,16 +109,19 @@ static inline int vcc_writable(struct sock *sk) | |||
106 | 109 | ||
107 | static void vcc_write_space(struct sock *sk) | 110 | static void vcc_write_space(struct sock *sk) |
108 | { | 111 | { |
109 | read_lock(&sk->sk_callback_lock); | 112 | struct socket_wq *wq; |
113 | |||
114 | rcu_read_lock(); | ||
110 | 115 | ||
111 | if (vcc_writable(sk)) { | 116 | if (vcc_writable(sk)) { |
112 | if (sk_has_sleeper(sk)) | 117 | wq = rcu_dereference(sk->sk_wq); |
113 | wake_up_interruptible(sk_sleep(sk)); | 118 | if (wq_has_sleeper(wq)) |
119 | wake_up_interruptible(&wq->wait); | ||
114 | 120 | ||
115 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); | 121 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); |
116 | } | 122 | } |
117 | 123 | ||
118 | read_unlock(&sk->sk_callback_lock); | 124 | rcu_read_unlock(); |
119 | } | 125 | } |
120 | 126 | ||
121 | static struct proto vcc_proto = { | 127 | static struct proto vcc_proto = { |
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index c1e60eed5a97..864c76f4a678 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c | |||
@@ -1626,7 +1626,10 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms | |||
1626 | /* Connectionless channel */ | 1626 | /* Connectionless channel */ |
1627 | if (sk->sk_type == SOCK_DGRAM) { | 1627 | if (sk->sk_type == SOCK_DGRAM) { |
1628 | skb = l2cap_create_connless_pdu(sk, msg, len); | 1628 | skb = l2cap_create_connless_pdu(sk, msg, len); |
1629 | err = l2cap_do_send(sk, skb); | 1629 | if (IS_ERR(skb)) |
1630 | err = PTR_ERR(skb); | ||
1631 | else | ||
1632 | err = l2cap_do_send(sk, skb); | ||
1630 | goto done; | 1633 | goto done; |
1631 | } | 1634 | } |
1632 | 1635 | ||
diff --git a/net/core/dev.c b/net/core/dev.c index 100dcbd29739..36d53be4fca6 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -2205,8 +2205,6 @@ int netdev_max_backlog __read_mostly = 1000; | |||
2205 | int netdev_budget __read_mostly = 300; | 2205 | int netdev_budget __read_mostly = 300; |
2206 | int weight_p __read_mostly = 64; /* old backlog weight */ | 2206 | int weight_p __read_mostly = 64; /* old backlog weight */ |
2207 | 2207 | ||
2208 | DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, }; | ||
2209 | |||
2210 | #ifdef CONFIG_RPS | 2208 | #ifdef CONFIG_RPS |
2211 | 2209 | ||
2212 | /* One global table that all flow-based protocols share. */ | 2210 | /* One global table that all flow-based protocols share. */ |
@@ -2366,7 +2364,7 @@ static void rps_trigger_softirq(void *data) | |||
2366 | struct softnet_data *sd = data; | 2364 | struct softnet_data *sd = data; |
2367 | 2365 | ||
2368 | __napi_schedule(&sd->backlog); | 2366 | __napi_schedule(&sd->backlog); |
2369 | __get_cpu_var(netdev_rx_stat).received_rps++; | 2367 | sd->received_rps++; |
2370 | } | 2368 | } |
2371 | 2369 | ||
2372 | #endif /* CONFIG_RPS */ | 2370 | #endif /* CONFIG_RPS */ |
@@ -2405,7 +2403,6 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu, | |||
2405 | sd = &per_cpu(softnet_data, cpu); | 2403 | sd = &per_cpu(softnet_data, cpu); |
2406 | 2404 | ||
2407 | local_irq_save(flags); | 2405 | local_irq_save(flags); |
2408 | __get_cpu_var(netdev_rx_stat).total++; | ||
2409 | 2406 | ||
2410 | rps_lock(sd); | 2407 | rps_lock(sd); |
2411 | if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) { | 2408 | if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) { |
@@ -2429,9 +2426,9 @@ enqueue: | |||
2429 | goto enqueue; | 2426 | goto enqueue; |
2430 | } | 2427 | } |
2431 | 2428 | ||
2429 | sd->dropped++; | ||
2432 | rps_unlock(sd); | 2430 | rps_unlock(sd); |
2433 | 2431 | ||
2434 | __get_cpu_var(netdev_rx_stat).dropped++; | ||
2435 | local_irq_restore(flags); | 2432 | local_irq_restore(flags); |
2436 | 2433 | ||
2437 | kfree_skb(skb); | 2434 | kfree_skb(skb); |
@@ -2806,7 +2803,7 @@ static int __netif_receive_skb(struct sk_buff *skb) | |||
2806 | skb->dev = master; | 2803 | skb->dev = master; |
2807 | } | 2804 | } |
2808 | 2805 | ||
2809 | __get_cpu_var(netdev_rx_stat).total++; | 2806 | __get_cpu_var(softnet_data).processed++; |
2810 | 2807 | ||
2811 | skb_reset_network_header(skb); | 2808 | skb_reset_network_header(skb); |
2812 | skb_reset_transport_header(skb); | 2809 | skb_reset_transport_header(skb); |
@@ -3490,7 +3487,7 @@ out: | |||
3490 | return; | 3487 | return; |
3491 | 3488 | ||
3492 | softnet_break: | 3489 | softnet_break: |
3493 | __get_cpu_var(netdev_rx_stat).time_squeeze++; | 3490 | sd->time_squeeze++; |
3494 | __raise_softirq_irqoff(NET_RX_SOFTIRQ); | 3491 | __raise_softirq_irqoff(NET_RX_SOFTIRQ); |
3495 | goto out; | 3492 | goto out; |
3496 | } | 3493 | } |
@@ -3691,17 +3688,17 @@ static int dev_seq_show(struct seq_file *seq, void *v) | |||
3691 | return 0; | 3688 | return 0; |
3692 | } | 3689 | } |
3693 | 3690 | ||
3694 | static struct netif_rx_stats *softnet_get_online(loff_t *pos) | 3691 | static struct softnet_data *softnet_get_online(loff_t *pos) |
3695 | { | 3692 | { |
3696 | struct netif_rx_stats *rc = NULL; | 3693 | struct softnet_data *sd = NULL; |
3697 | 3694 | ||
3698 | while (*pos < nr_cpu_ids) | 3695 | while (*pos < nr_cpu_ids) |
3699 | if (cpu_online(*pos)) { | 3696 | if (cpu_online(*pos)) { |
3700 | rc = &per_cpu(netdev_rx_stat, *pos); | 3697 | sd = &per_cpu(softnet_data, *pos); |
3701 | break; | 3698 | break; |
3702 | } else | 3699 | } else |
3703 | ++*pos; | 3700 | ++*pos; |
3704 | return rc; | 3701 | return sd; |
3705 | } | 3702 | } |
3706 | 3703 | ||
3707 | static void *softnet_seq_start(struct seq_file *seq, loff_t *pos) | 3704 | static void *softnet_seq_start(struct seq_file *seq, loff_t *pos) |
@@ -3721,12 +3718,12 @@ static void softnet_seq_stop(struct seq_file *seq, void *v) | |||
3721 | 3718 | ||
3722 | static int softnet_seq_show(struct seq_file *seq, void *v) | 3719 | static int softnet_seq_show(struct seq_file *seq, void *v) |
3723 | { | 3720 | { |
3724 | struct netif_rx_stats *s = v; | 3721 | struct softnet_data *sd = v; |
3725 | 3722 | ||
3726 | seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n", | 3723 | seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n", |
3727 | s->total, s->dropped, s->time_squeeze, 0, | 3724 | sd->processed, sd->dropped, sd->time_squeeze, 0, |
3728 | 0, 0, 0, 0, /* was fastroute */ | 3725 | 0, 0, 0, 0, /* was fastroute */ |
3729 | s->cpu_collision, s->received_rps); | 3726 | sd->cpu_collision, sd->received_rps); |
3730 | return 0; | 3727 | return 0; |
3731 | } | 3728 | } |
3732 | 3729 | ||
@@ -5869,6 +5866,7 @@ static int __init net_dev_init(void) | |||
5869 | for_each_possible_cpu(i) { | 5866 | for_each_possible_cpu(i) { |
5870 | struct softnet_data *sd = &per_cpu(softnet_data, i); | 5867 | struct softnet_data *sd = &per_cpu(softnet_data, i); |
5871 | 5868 | ||
5869 | memset(sd, 0, sizeof(*sd)); | ||
5872 | skb_queue_head_init(&sd->input_pkt_queue); | 5870 | skb_queue_head_init(&sd->input_pkt_queue); |
5873 | skb_queue_head_init(&sd->process_queue); | 5871 | skb_queue_head_init(&sd->process_queue); |
5874 | sd->completion_queue = NULL; | 5872 | sd->completion_queue = NULL; |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 4218ff49bf13..8b9c109166a7 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -1051,7 +1051,7 @@ EXPORT_SYMBOL(skb_push); | |||
1051 | */ | 1051 | */ |
1052 | unsigned char *skb_pull(struct sk_buff *skb, unsigned int len) | 1052 | unsigned char *skb_pull(struct sk_buff *skb, unsigned int len) |
1053 | { | 1053 | { |
1054 | return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len); | 1054 | return skb_pull_inline(skb, len); |
1055 | } | 1055 | } |
1056 | EXPORT_SYMBOL(skb_pull); | 1056 | EXPORT_SYMBOL(skb_pull); |
1057 | 1057 | ||
diff --git a/net/core/sock.c b/net/core/sock.c index 51041759517e..94c4affdda9b 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -1211,7 +1211,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority) | |||
1211 | */ | 1211 | */ |
1212 | sk_refcnt_debug_inc(newsk); | 1212 | sk_refcnt_debug_inc(newsk); |
1213 | sk_set_socket(newsk, NULL); | 1213 | sk_set_socket(newsk, NULL); |
1214 | newsk->sk_sleep = NULL; | 1214 | newsk->sk_wq = NULL; |
1215 | 1215 | ||
1216 | if (newsk->sk_prot->sockets_allocated) | 1216 | if (newsk->sk_prot->sockets_allocated) |
1217 | percpu_counter_inc(newsk->sk_prot->sockets_allocated); | 1217 | percpu_counter_inc(newsk->sk_prot->sockets_allocated); |
@@ -1800,41 +1800,53 @@ EXPORT_SYMBOL(sock_no_sendpage); | |||
1800 | 1800 | ||
1801 | static void sock_def_wakeup(struct sock *sk) | 1801 | static void sock_def_wakeup(struct sock *sk) |
1802 | { | 1802 | { |
1803 | read_lock(&sk->sk_callback_lock); | 1803 | struct socket_wq *wq; |
1804 | if (sk_has_sleeper(sk)) | 1804 | |
1805 | wake_up_interruptible_all(sk_sleep(sk)); | 1805 | rcu_read_lock(); |
1806 | read_unlock(&sk->sk_callback_lock); | 1806 | wq = rcu_dereference(sk->sk_wq); |
1807 | if (wq_has_sleeper(wq)) | ||
1808 | wake_up_interruptible_all(&wq->wait); | ||
1809 | rcu_read_unlock(); | ||
1807 | } | 1810 | } |
1808 | 1811 | ||
1809 | static void sock_def_error_report(struct sock *sk) | 1812 | static void sock_def_error_report(struct sock *sk) |
1810 | { | 1813 | { |
1811 | read_lock(&sk->sk_callback_lock); | 1814 | struct socket_wq *wq; |
1812 | if (sk_has_sleeper(sk)) | 1815 | |
1813 | wake_up_interruptible_poll(sk_sleep(sk), POLLERR); | 1816 | rcu_read_lock(); |
1817 | wq = rcu_dereference(sk->sk_wq); | ||
1818 | if (wq_has_sleeper(wq)) | ||
1819 | wake_up_interruptible_poll(&wq->wait, POLLERR); | ||
1814 | sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR); | 1820 | sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR); |
1815 | read_unlock(&sk->sk_callback_lock); | 1821 | rcu_read_unlock(); |
1816 | } | 1822 | } |
1817 | 1823 | ||
1818 | static void sock_def_readable(struct sock *sk, int len) | 1824 | static void sock_def_readable(struct sock *sk, int len) |
1819 | { | 1825 | { |
1820 | read_lock(&sk->sk_callback_lock); | 1826 | struct socket_wq *wq; |
1821 | if (sk_has_sleeper(sk)) | 1827 | |
1822 | wake_up_interruptible_sync_poll(sk_sleep(sk), POLLIN | | 1828 | rcu_read_lock(); |
1829 | wq = rcu_dereference(sk->sk_wq); | ||
1830 | if (wq_has_sleeper(wq)) | ||
1831 | wake_up_interruptible_sync_poll(&wq->wait, POLLIN | | ||
1823 | POLLRDNORM | POLLRDBAND); | 1832 | POLLRDNORM | POLLRDBAND); |
1824 | sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); | 1833 | sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); |
1825 | read_unlock(&sk->sk_callback_lock); | 1834 | rcu_read_unlock(); |
1826 | } | 1835 | } |
1827 | 1836 | ||
1828 | static void sock_def_write_space(struct sock *sk) | 1837 | static void sock_def_write_space(struct sock *sk) |
1829 | { | 1838 | { |
1830 | read_lock(&sk->sk_callback_lock); | 1839 | struct socket_wq *wq; |
1840 | |||
1841 | rcu_read_lock(); | ||
1831 | 1842 | ||
1832 | /* Do not wake up a writer until he can make "significant" | 1843 | /* Do not wake up a writer until he can make "significant" |
1833 | * progress. --DaveM | 1844 | * progress. --DaveM |
1834 | */ | 1845 | */ |
1835 | if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) { | 1846 | if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) { |
1836 | if (sk_has_sleeper(sk)) | 1847 | wq = rcu_dereference(sk->sk_wq); |
1837 | wake_up_interruptible_sync_poll(sk_sleep(sk), POLLOUT | | 1848 | if (wq_has_sleeper(wq)) |
1849 | wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | | ||
1838 | POLLWRNORM | POLLWRBAND); | 1850 | POLLWRNORM | POLLWRBAND); |
1839 | 1851 | ||
1840 | /* Should agree with poll, otherwise some programs break */ | 1852 | /* Should agree with poll, otherwise some programs break */ |
@@ -1842,7 +1854,7 @@ static void sock_def_write_space(struct sock *sk) | |||
1842 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); | 1854 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); |
1843 | } | 1855 | } |
1844 | 1856 | ||
1845 | read_unlock(&sk->sk_callback_lock); | 1857 | rcu_read_unlock(); |
1846 | } | 1858 | } |
1847 | 1859 | ||
1848 | static void sock_def_destruct(struct sock *sk) | 1860 | static void sock_def_destruct(struct sock *sk) |
@@ -1896,10 +1908,10 @@ void sock_init_data(struct socket *sock, struct sock *sk) | |||
1896 | 1908 | ||
1897 | if (sock) { | 1909 | if (sock) { |
1898 | sk->sk_type = sock->type; | 1910 | sk->sk_type = sock->type; |
1899 | sk->sk_sleep = &sock->wait; | 1911 | sk->sk_wq = sock->wq; |
1900 | sock->sk = sk; | 1912 | sock->sk = sk; |
1901 | } else | 1913 | } else |
1902 | sk->sk_sleep = NULL; | 1914 | sk->sk_wq = NULL; |
1903 | 1915 | ||
1904 | spin_lock_init(&sk->sk_dst_lock); | 1916 | spin_lock_init(&sk->sk_dst_lock); |
1905 | rwlock_init(&sk->sk_callback_lock); | 1917 | rwlock_init(&sk->sk_callback_lock); |
diff --git a/net/core/stream.c b/net/core/stream.c index 7b3c3f30b107..cc196f42b8d8 100644 --- a/net/core/stream.c +++ b/net/core/stream.c | |||
@@ -28,15 +28,19 @@ | |||
28 | void sk_stream_write_space(struct sock *sk) | 28 | void sk_stream_write_space(struct sock *sk) |
29 | { | 29 | { |
30 | struct socket *sock = sk->sk_socket; | 30 | struct socket *sock = sk->sk_socket; |
31 | struct socket_wq *wq; | ||
31 | 32 | ||
32 | if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) { | 33 | if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) { |
33 | clear_bit(SOCK_NOSPACE, &sock->flags); | 34 | clear_bit(SOCK_NOSPACE, &sock->flags); |
34 | 35 | ||
35 | if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) | 36 | rcu_read_lock(); |
36 | wake_up_interruptible_poll(sk_sleep(sk), POLLOUT | | 37 | wq = rcu_dereference(sk->sk_wq); |
38 | if (wq_has_sleeper(wq)) | ||
39 | wake_up_interruptible_poll(&wq->wait, POLLOUT | | ||
37 | POLLWRNORM | POLLWRBAND); | 40 | POLLWRNORM | POLLWRBAND); |
38 | if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) | 41 | if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) |
39 | sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT); | 42 | sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT); |
43 | rcu_read_unlock(); | ||
40 | } | 44 | } |
41 | } | 45 | } |
42 | 46 | ||
diff --git a/net/dccp/output.c b/net/dccp/output.c index 2d3dcb39851f..aadbdb58758b 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c | |||
@@ -195,15 +195,17 @@ EXPORT_SYMBOL_GPL(dccp_sync_mss); | |||
195 | 195 | ||
196 | void dccp_write_space(struct sock *sk) | 196 | void dccp_write_space(struct sock *sk) |
197 | { | 197 | { |
198 | read_lock(&sk->sk_callback_lock); | 198 | struct socket_wq *wq; |
199 | 199 | ||
200 | if (sk_has_sleeper(sk)) | 200 | rcu_read_lock(); |
201 | wake_up_interruptible(sk_sleep(sk)); | 201 | wq = rcu_dereference(sk->sk_wq); |
202 | if (wq_has_sleeper(wq)) | ||
203 | wake_up_interruptible(&wq->wait); | ||
202 | /* Should agree with poll, otherwise some programs break */ | 204 | /* Should agree with poll, otherwise some programs break */ |
203 | if (sock_writeable(sk)) | 205 | if (sock_writeable(sk)) |
204 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); | 206 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); |
205 | 207 | ||
206 | read_unlock(&sk->sk_callback_lock); | 208 | rcu_read_unlock(); |
207 | } | 209 | } |
208 | 210 | ||
209 | /** | 211 | /** |
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index 0c0d272a9888..61ec0329316c 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c | |||
@@ -162,7 +162,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev) | |||
162 | 162 | ||
163 | skb->dev = dev; | 163 | skb->dev = dev; |
164 | skb_reset_mac_header(skb); | 164 | skb_reset_mac_header(skb); |
165 | skb_pull(skb, ETH_HLEN); | 165 | skb_pull_inline(skb, ETH_HLEN); |
166 | eth = eth_hdr(skb); | 166 | eth = eth_hdr(skb); |
167 | 167 | ||
168 | if (unlikely(is_multicast_ether_addr(eth->h_dest))) { | 168 | if (unlikely(is_multicast_ether_addr(eth->h_dest))) { |
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 78cbc39f56c4..e0a3e3537b14 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
@@ -70,17 +70,13 @@ int inet_csk_bind_conflict(const struct sock *sk, | |||
70 | (!sk->sk_bound_dev_if || | 70 | (!sk->sk_bound_dev_if || |
71 | !sk2->sk_bound_dev_if || | 71 | !sk2->sk_bound_dev_if || |
72 | sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { | 72 | sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { |
73 | const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2); | ||
74 | |||
75 | if (!reuse || !sk2->sk_reuse || | 73 | if (!reuse || !sk2->sk_reuse || |
76 | sk2->sk_state == TCP_LISTEN) { | 74 | sk2->sk_state == TCP_LISTEN) { |
75 | const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2); | ||
77 | if (!sk2_rcv_saddr || !sk_rcv_saddr || | 76 | if (!sk2_rcv_saddr || !sk_rcv_saddr || |
78 | sk2_rcv_saddr == sk_rcv_saddr) | 77 | sk2_rcv_saddr == sk_rcv_saddr) |
79 | break; | 78 | break; |
80 | } else if (reuse && sk2->sk_reuse && | 79 | } |
81 | sk2_rcv_saddr && | ||
82 | sk2_rcv_saddr == sk_rcv_saddr) | ||
83 | break; | ||
84 | } | 80 | } |
85 | } | 81 | } |
86 | return node != NULL; | 82 | return node != NULL; |
@@ -124,11 +120,9 @@ again: | |||
124 | smallest_size = tb->num_owners; | 120 | smallest_size = tb->num_owners; |
125 | smallest_rover = rover; | 121 | smallest_rover = rover; |
126 | if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) { | 122 | if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) { |
127 | if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) { | 123 | spin_unlock(&head->lock); |
128 | spin_unlock(&head->lock); | 124 | snum = smallest_rover; |
129 | snum = smallest_rover; | 125 | goto have_snum; |
130 | goto have_snum; | ||
131 | } | ||
132 | } | 126 | } |
133 | } | 127 | } |
134 | goto next; | 128 | goto next; |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 34d2d649e396..3984f52181f4 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -1346,7 +1346,7 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add | |||
1346 | struct hlist_node *node; | 1346 | struct hlist_node *node; |
1347 | 1347 | ||
1348 | rcu_read_lock_bh(); | 1348 | rcu_read_lock_bh(); |
1349 | hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) { | 1349 | hlist_for_each_entry_rcu_bh(ifp, node, &inet6_addr_lst[hash], addr_lst) { |
1350 | if (!net_eq(dev_net(ifp->idev->dev), net)) | 1350 | if (!net_eq(dev_net(ifp->idev->dev), net)) |
1351 | continue; | 1351 | continue; |
1352 | if (ipv6_addr_equal(&ifp->addr, addr)) { | 1352 | if (ipv6_addr_equal(&ifp->addr, addr)) { |
@@ -2959,7 +2959,7 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq) | |||
2959 | 2959 | ||
2960 | for (state->bucket = 0; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) { | 2960 | for (state->bucket = 0; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) { |
2961 | struct hlist_node *n; | 2961 | struct hlist_node *n; |
2962 | hlist_for_each_entry_rcu(ifa, n, &inet6_addr_lst[state->bucket], | 2962 | hlist_for_each_entry_rcu_bh(ifa, n, &inet6_addr_lst[state->bucket], |
2963 | addr_lst) | 2963 | addr_lst) |
2964 | if (net_eq(dev_net(ifa->idev->dev), net)) | 2964 | if (net_eq(dev_net(ifa->idev->dev), net)) |
2965 | return ifa; | 2965 | return ifa; |
@@ -2974,12 +2974,12 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq, | |||
2974 | struct net *net = seq_file_net(seq); | 2974 | struct net *net = seq_file_net(seq); |
2975 | struct hlist_node *n = &ifa->addr_lst; | 2975 | struct hlist_node *n = &ifa->addr_lst; |
2976 | 2976 | ||
2977 | hlist_for_each_entry_continue_rcu(ifa, n, addr_lst) | 2977 | hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst) |
2978 | if (net_eq(dev_net(ifa->idev->dev), net)) | 2978 | if (net_eq(dev_net(ifa->idev->dev), net)) |
2979 | return ifa; | 2979 | return ifa; |
2980 | 2980 | ||
2981 | while (++state->bucket < IN6_ADDR_HSIZE) { | 2981 | while (++state->bucket < IN6_ADDR_HSIZE) { |
2982 | hlist_for_each_entry(ifa, n, | 2982 | hlist_for_each_entry_rcu_bh(ifa, n, |
2983 | &inet6_addr_lst[state->bucket], addr_lst) { | 2983 | &inet6_addr_lst[state->bucket], addr_lst) { |
2984 | if (net_eq(dev_net(ifa->idev->dev), net)) | 2984 | if (net_eq(dev_net(ifa->idev->dev), net)) |
2985 | return ifa; | 2985 | return ifa; |
@@ -3000,7 +3000,7 @@ static struct inet6_ifaddr *if6_get_idx(struct seq_file *seq, loff_t pos) | |||
3000 | } | 3000 | } |
3001 | 3001 | ||
3002 | static void *if6_seq_start(struct seq_file *seq, loff_t *pos) | 3002 | static void *if6_seq_start(struct seq_file *seq, loff_t *pos) |
3003 | __acquires(rcu) | 3003 | __acquires(rcu_bh) |
3004 | { | 3004 | { |
3005 | rcu_read_lock_bh(); | 3005 | rcu_read_lock_bh(); |
3006 | return if6_get_idx(seq, *pos); | 3006 | return if6_get_idx(seq, *pos); |
@@ -3016,7 +3016,7 @@ static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
3016 | } | 3016 | } |
3017 | 3017 | ||
3018 | static void if6_seq_stop(struct seq_file *seq, void *v) | 3018 | static void if6_seq_stop(struct seq_file *seq, void *v) |
3019 | __releases(rcu) | 3019 | __releases(rcu_bh) |
3020 | { | 3020 | { |
3021 | rcu_read_unlock_bh(); | 3021 | rcu_read_unlock_bh(); |
3022 | } | 3022 | } |
@@ -3093,7 +3093,7 @@ int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr) | |||
3093 | unsigned int hash = ipv6_addr_hash(addr); | 3093 | unsigned int hash = ipv6_addr_hash(addr); |
3094 | 3094 | ||
3095 | rcu_read_lock_bh(); | 3095 | rcu_read_lock_bh(); |
3096 | hlist_for_each_entry_rcu(ifp, n, &inet6_addr_lst[hash], addr_lst) { | 3096 | hlist_for_each_entry_rcu_bh(ifp, n, &inet6_addr_lst[hash], addr_lst) { |
3097 | if (!net_eq(dev_net(ifp->idev->dev), net)) | 3097 | if (!net_eq(dev_net(ifp->idev->dev), net)) |
3098 | continue; | 3098 | continue; |
3099 | if (ipv6_addr_equal(&ifp->addr, addr) && | 3099 | if (ipv6_addr_equal(&ifp->addr, addr) && |
@@ -3127,7 +3127,7 @@ static void addrconf_verify(unsigned long foo) | |||
3127 | 3127 | ||
3128 | for (i = 0; i < IN6_ADDR_HSIZE; i++) { | 3128 | for (i = 0; i < IN6_ADDR_HSIZE; i++) { |
3129 | restart: | 3129 | restart: |
3130 | hlist_for_each_entry_rcu(ifp, node, | 3130 | hlist_for_each_entry_rcu_bh(ifp, node, |
3131 | &inet6_addr_lst[i], addr_lst) { | 3131 | &inet6_addr_lst[i], addr_lst) { |
3132 | unsigned long age; | 3132 | unsigned long age; |
3133 | 3133 | ||
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index 9ca1efc923a1..0c5e3c3b7fd5 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c | |||
@@ -42,16 +42,11 @@ int inet6_csk_bind_conflict(const struct sock *sk, | |||
42 | if (sk != sk2 && | 42 | if (sk != sk2 && |
43 | (!sk->sk_bound_dev_if || | 43 | (!sk->sk_bound_dev_if || |
44 | !sk2->sk_bound_dev_if || | 44 | !sk2->sk_bound_dev_if || |
45 | sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { | 45 | sk->sk_bound_dev_if == sk2->sk_bound_dev_if) && |
46 | if ((!sk->sk_reuse || !sk2->sk_reuse || | 46 | (!sk->sk_reuse || !sk2->sk_reuse || |
47 | sk2->sk_state == TCP_LISTEN) && | 47 | sk2->sk_state == TCP_LISTEN) && |
48 | ipv6_rcv_saddr_equal(sk, sk2)) | 48 | ipv6_rcv_saddr_equal(sk, sk2)) |
49 | break; | 49 | break; |
50 | else if (sk->sk_reuse && sk2->sk_reuse && | ||
51 | !ipv6_addr_any(inet6_rcv_saddr(sk)) && | ||
52 | ipv6_rcv_saddr_equal(sk, sk2)) | ||
53 | break; | ||
54 | } | ||
55 | } | 50 | } |
56 | 51 | ||
57 | return node != NULL; | 52 | return node != NULL; |
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 9636b7d27b48..8be324fe08b9 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c | |||
@@ -305,11 +305,14 @@ static inline int iucv_below_msglim(struct sock *sk) | |||
305 | */ | 305 | */ |
306 | static void iucv_sock_wake_msglim(struct sock *sk) | 306 | static void iucv_sock_wake_msglim(struct sock *sk) |
307 | { | 307 | { |
308 | read_lock(&sk->sk_callback_lock); | 308 | struct socket_wq *wq; |
309 | if (sk_has_sleeper(sk)) | 309 | |
310 | wake_up_interruptible_all(sk_sleep(sk)); | 310 | rcu_read_lock(); |
311 | wq = rcu_dereference(sk->sk_wq); | ||
312 | if (wq_has_sleeper(wq)) | ||
313 | wake_up_interruptible_all(&wq->wait); | ||
311 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); | 314 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); |
312 | read_unlock(&sk->sk_callback_lock); | 315 | rcu_read_unlock(); |
313 | } | 316 | } |
314 | 317 | ||
315 | /* Timers */ | 318 | /* Timers */ |
diff --git a/net/phonet/pep.c b/net/phonet/pep.c index e2a95762abd3..af4d38bc3b22 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c | |||
@@ -664,12 +664,12 @@ static int pep_wait_connreq(struct sock *sk, int noblock) | |||
664 | if (signal_pending(tsk)) | 664 | if (signal_pending(tsk)) |
665 | return sock_intr_errno(timeo); | 665 | return sock_intr_errno(timeo); |
666 | 666 | ||
667 | prepare_to_wait_exclusive(&sk->sk_socket->wait, &wait, | 667 | prepare_to_wait_exclusive(sk_sleep(sk), &wait, |
668 | TASK_INTERRUPTIBLE); | 668 | TASK_INTERRUPTIBLE); |
669 | release_sock(sk); | 669 | release_sock(sk); |
670 | timeo = schedule_timeout(timeo); | 670 | timeo = schedule_timeout(timeo); |
671 | lock_sock(sk); | 671 | lock_sock(sk); |
672 | finish_wait(&sk->sk_socket->wait, &wait); | 672 | finish_wait(sk_sleep(sk), &wait); |
673 | } | 673 | } |
674 | 674 | ||
675 | return 0; | 675 | return 0; |
@@ -910,10 +910,10 @@ disabled: | |||
910 | goto out; | 910 | goto out; |
911 | } | 911 | } |
912 | 912 | ||
913 | prepare_to_wait(&sk->sk_socket->wait, &wait, | 913 | prepare_to_wait(sk_sleep(sk), &wait, |
914 | TASK_INTERRUPTIBLE); | 914 | TASK_INTERRUPTIBLE); |
915 | done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits)); | 915 | done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits)); |
916 | finish_wait(&sk->sk_socket->wait, &wait); | 916 | finish_wait(sk_sleep(sk), &wait); |
917 | 917 | ||
918 | if (sk->sk_state != TCP_ESTABLISHED) | 918 | if (sk->sk_state != TCP_ESTABLISHED) |
919 | goto disabled; | 919 | goto disabled; |
diff --git a/net/phonet/socket.c b/net/phonet/socket.c index c785bfd0744f..6e9848bf0370 100644 --- a/net/phonet/socket.c +++ b/net/phonet/socket.c | |||
@@ -265,7 +265,7 @@ static unsigned int pn_socket_poll(struct file *file, struct socket *sock, | |||
265 | struct pep_sock *pn = pep_sk(sk); | 265 | struct pep_sock *pn = pep_sk(sk); |
266 | unsigned int mask = 0; | 266 | unsigned int mask = 0; |
267 | 267 | ||
268 | poll_wait(file, &sock->wait, wait); | 268 | poll_wait(file, sk_sleep(sk), wait); |
269 | 269 | ||
270 | switch (sk->sk_state) { | 270 | switch (sk->sk_state) { |
271 | case TCP_LISTEN: | 271 | case TCP_LISTEN: |
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index c432d76f415e..0b9bb2085ce4 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c | |||
@@ -62,13 +62,15 @@ static inline int rxrpc_writable(struct sock *sk) | |||
62 | static void rxrpc_write_space(struct sock *sk) | 62 | static void rxrpc_write_space(struct sock *sk) |
63 | { | 63 | { |
64 | _enter("%p", sk); | 64 | _enter("%p", sk); |
65 | read_lock(&sk->sk_callback_lock); | 65 | rcu_read_lock(); |
66 | if (rxrpc_writable(sk)) { | 66 | if (rxrpc_writable(sk)) { |
67 | if (sk_has_sleeper(sk)) | 67 | struct socket_wq *wq = rcu_dereference(sk->sk_wq); |
68 | wake_up_interruptible(sk_sleep(sk)); | 68 | |
69 | if (wq_has_sleeper(wq)) | ||
70 | wake_up_interruptible(&wq->wait); | ||
69 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); | 71 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); |
70 | } | 72 | } |
71 | read_unlock(&sk->sk_callback_lock); | 73 | rcu_read_unlock(); |
72 | } | 74 | } |
73 | 75 | ||
74 | /* | 76 | /* |
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index aeddabfb8e4e..a969b111bd76 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -94,7 +94,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb, | |||
94 | * Another cpu is holding lock, requeue & delay xmits for | 94 | * Another cpu is holding lock, requeue & delay xmits for |
95 | * some time. | 95 | * some time. |
96 | */ | 96 | */ |
97 | __get_cpu_var(netdev_rx_stat).cpu_collision++; | 97 | __get_cpu_var(softnet_data).cpu_collision++; |
98 | ret = dev_requeue_skb(skb, q); | 98 | ret = dev_requeue_skb(skb, q); |
99 | } | 99 | } |
100 | 100 | ||
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 65f9a7cdf466..3912420cedcc 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
@@ -1192,8 +1192,10 @@ void sctp_assoc_update(struct sctp_association *asoc, | |||
1192 | /* Remove any peer addresses not present in the new association. */ | 1192 | /* Remove any peer addresses not present in the new association. */ |
1193 | list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { | 1193 | list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { |
1194 | trans = list_entry(pos, struct sctp_transport, transports); | 1194 | trans = list_entry(pos, struct sctp_transport, transports); |
1195 | if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) | 1195 | if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) { |
1196 | sctp_assoc_del_peer(asoc, &trans->ipaddr); | 1196 | sctp_assoc_rm_peer(asoc, trans); |
1197 | continue; | ||
1198 | } | ||
1197 | 1199 | ||
1198 | if (asoc->state >= SCTP_STATE_ESTABLISHED) | 1200 | if (asoc->state >= SCTP_STATE_ESTABLISHED) |
1199 | sctp_transport_reset(trans); | 1201 | sctp_transport_reset(trans); |
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 2f8763bae9ed..e10acc01c75f 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c | |||
@@ -142,6 +142,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, | |||
142 | /* Use SCTP specific send buffer space queues. */ | 142 | /* Use SCTP specific send buffer space queues. */ |
143 | ep->sndbuf_policy = sctp_sndbuf_policy; | 143 | ep->sndbuf_policy = sctp_sndbuf_policy; |
144 | 144 | ||
145 | sk->sk_data_ready = sctp_data_ready; | ||
145 | sk->sk_write_space = sctp_write_space; | 146 | sk->sk_write_space = sctp_write_space; |
146 | sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); | 147 | sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); |
147 | 148 | ||
diff --git a/net/sctp/probe.c b/net/sctp/probe.c index 8f025d5831aa..db3a42b8b349 100644 --- a/net/sctp/probe.c +++ b/net/sctp/probe.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/socket.h> | 27 | #include <linux/socket.h> |
28 | #include <linux/sctp.h> | 28 | #include <linux/sctp.h> |
29 | #include <linux/proc_fs.h> | 29 | #include <linux/proc_fs.h> |
30 | #include <linux/vmalloc.h> | ||
30 | #include <linux/module.h> | 31 | #include <linux/module.h> |
31 | #include <linux/kfifo.h> | 32 | #include <linux/kfifo.h> |
32 | #include <linux/time.h> | 33 | #include <linux/time.h> |
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 24effdf471eb..d8261f3d7715 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -108,7 +108,7 @@ static const struct sctp_paramhdr prsctp_param = { | |||
108 | cpu_to_be16(sizeof(struct sctp_paramhdr)), | 108 | cpu_to_be16(sizeof(struct sctp_paramhdr)), |
109 | }; | 109 | }; |
110 | 110 | ||
111 | /* A helper to initialize to initialize an op error inside a | 111 | /* A helper to initialize an op error inside a |
112 | * provided chunk, as most cause codes will be embedded inside an | 112 | * provided chunk, as most cause codes will be embedded inside an |
113 | * abort chunk. | 113 | * abort chunk. |
114 | */ | 114 | */ |
@@ -125,6 +125,29 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code, | |||
125 | chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err); | 125 | chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err); |
126 | } | 126 | } |
127 | 127 | ||
128 | /* A helper to initialize an op error inside a | ||
129 | * provided chunk, as most cause codes will be embedded inside an | ||
130 | * abort chunk. Differs from sctp_init_cause in that it won't oops | ||
131 | * if there isn't enough space in the op error chunk | ||
132 | */ | ||
133 | int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code, | ||
134 | size_t paylen) | ||
135 | { | ||
136 | sctp_errhdr_t err; | ||
137 | __u16 len; | ||
138 | |||
139 | /* Cause code constants are now defined in network order. */ | ||
140 | err.cause = cause_code; | ||
141 | len = sizeof(sctp_errhdr_t) + paylen; | ||
142 | err.length = htons(len); | ||
143 | |||
144 | if (skb_tailroom(chunk->skb) > len) | ||
145 | return -ENOSPC; | ||
146 | chunk->subh.err_hdr = sctp_addto_chunk_fixed(chunk, | ||
147 | sizeof(sctp_errhdr_t), | ||
148 | &err); | ||
149 | return 0; | ||
150 | } | ||
128 | /* 3.3.2 Initiation (INIT) (1) | 151 | /* 3.3.2 Initiation (INIT) (1) |
129 | * | 152 | * |
130 | * This chunk is used to initiate a SCTP association between two | 153 | * This chunk is used to initiate a SCTP association between two |
@@ -208,7 +231,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, | |||
208 | sp = sctp_sk(asoc->base.sk); | 231 | sp = sctp_sk(asoc->base.sk); |
209 | num_types = sp->pf->supported_addrs(sp, types); | 232 | num_types = sp->pf->supported_addrs(sp, types); |
210 | 233 | ||
211 | chunksize = sizeof(init) + addrs_len + SCTP_SAT_LEN(num_types); | 234 | chunksize = sizeof(init) + addrs_len; |
235 | chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types)); | ||
212 | chunksize += sizeof(ecap_param); | 236 | chunksize += sizeof(ecap_param); |
213 | 237 | ||
214 | if (sctp_prsctp_enable) | 238 | if (sctp_prsctp_enable) |
@@ -238,14 +262,14 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, | |||
238 | /* Add HMACS parameter length if any were defined */ | 262 | /* Add HMACS parameter length if any were defined */ |
239 | auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; | 263 | auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; |
240 | if (auth_hmacs->length) | 264 | if (auth_hmacs->length) |
241 | chunksize += ntohs(auth_hmacs->length); | 265 | chunksize += WORD_ROUND(ntohs(auth_hmacs->length)); |
242 | else | 266 | else |
243 | auth_hmacs = NULL; | 267 | auth_hmacs = NULL; |
244 | 268 | ||
245 | /* Add CHUNKS parameter length */ | 269 | /* Add CHUNKS parameter length */ |
246 | auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; | 270 | auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; |
247 | if (auth_chunks->length) | 271 | if (auth_chunks->length) |
248 | chunksize += ntohs(auth_chunks->length); | 272 | chunksize += WORD_ROUND(ntohs(auth_chunks->length)); |
249 | else | 273 | else |
250 | auth_chunks = NULL; | 274 | auth_chunks = NULL; |
251 | 275 | ||
@@ -255,7 +279,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, | |||
255 | 279 | ||
256 | /* If we have any extensions to report, account for that */ | 280 | /* If we have any extensions to report, account for that */ |
257 | if (num_ext) | 281 | if (num_ext) |
258 | chunksize += sizeof(sctp_supported_ext_param_t) + num_ext; | 282 | chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) + |
283 | num_ext); | ||
259 | 284 | ||
260 | /* RFC 2960 3.3.2 Initiation (INIT) (1) | 285 | /* RFC 2960 3.3.2 Initiation (INIT) (1) |
261 | * | 286 | * |
@@ -397,13 +422,13 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, | |||
397 | 422 | ||
398 | auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; | 423 | auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; |
399 | if (auth_hmacs->length) | 424 | if (auth_hmacs->length) |
400 | chunksize += ntohs(auth_hmacs->length); | 425 | chunksize += WORD_ROUND(ntohs(auth_hmacs->length)); |
401 | else | 426 | else |
402 | auth_hmacs = NULL; | 427 | auth_hmacs = NULL; |
403 | 428 | ||
404 | auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; | 429 | auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; |
405 | if (auth_chunks->length) | 430 | if (auth_chunks->length) |
406 | chunksize += ntohs(auth_chunks->length); | 431 | chunksize += WORD_ROUND(ntohs(auth_chunks->length)); |
407 | else | 432 | else |
408 | auth_chunks = NULL; | 433 | auth_chunks = NULL; |
409 | 434 | ||
@@ -412,7 +437,8 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, | |||
412 | } | 437 | } |
413 | 438 | ||
414 | if (num_ext) | 439 | if (num_ext) |
415 | chunksize += sizeof(sctp_supported_ext_param_t) + num_ext; | 440 | chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) + |
441 | num_ext); | ||
416 | 442 | ||
417 | /* Now allocate and fill out the chunk. */ | 443 | /* Now allocate and fill out the chunk. */ |
418 | retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize); | 444 | retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize); |
@@ -1124,6 +1150,24 @@ nodata: | |||
1124 | return retval; | 1150 | return retval; |
1125 | } | 1151 | } |
1126 | 1152 | ||
1153 | /* Create an Operation Error chunk of a fixed size, | ||
1154 | * specifically, max(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT) | ||
1155 | * This is a helper function to allocate an error chunk for | ||
1156 | * for those invalid parameter codes in which we may not want | ||
1157 | * to report all the errors, if the incomming chunk is large | ||
1158 | */ | ||
1159 | static inline struct sctp_chunk *sctp_make_op_error_fixed( | ||
1160 | const struct sctp_association *asoc, | ||
1161 | const struct sctp_chunk *chunk) | ||
1162 | { | ||
1163 | size_t size = asoc ? asoc->pathmtu : 0; | ||
1164 | |||
1165 | if (!size) | ||
1166 | size = SCTP_DEFAULT_MAXSEGMENT; | ||
1167 | |||
1168 | return sctp_make_op_error_space(asoc, chunk, size); | ||
1169 | } | ||
1170 | |||
1127 | /* Create an Operation Error chunk. */ | 1171 | /* Create an Operation Error chunk. */ |
1128 | struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc, | 1172 | struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc, |
1129 | const struct sctp_chunk *chunk, | 1173 | const struct sctp_chunk *chunk, |
@@ -1365,6 +1409,18 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data) | |||
1365 | return target; | 1409 | return target; |
1366 | } | 1410 | } |
1367 | 1411 | ||
1412 | /* Append bytes to the end of a chunk. Returns NULL if there isn't sufficient | ||
1413 | * space in the chunk | ||
1414 | */ | ||
1415 | void *sctp_addto_chunk_fixed(struct sctp_chunk *chunk, | ||
1416 | int len, const void *data) | ||
1417 | { | ||
1418 | if (skb_tailroom(chunk->skb) > len) | ||
1419 | return sctp_addto_chunk(chunk, len, data); | ||
1420 | else | ||
1421 | return NULL; | ||
1422 | } | ||
1423 | |||
1368 | /* Append bytes from user space to the end of a chunk. Will panic if | 1424 | /* Append bytes from user space to the end of a chunk. Will panic if |
1369 | * chunk is not big enough. | 1425 | * chunk is not big enough. |
1370 | * Returns a kernel err value. | 1426 | * Returns a kernel err value. |
@@ -1968,13 +2024,12 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc, | |||
1968 | * returning multiple unknown parameters. | 2024 | * returning multiple unknown parameters. |
1969 | */ | 2025 | */ |
1970 | if (NULL == *errp) | 2026 | if (NULL == *errp) |
1971 | *errp = sctp_make_op_error_space(asoc, chunk, | 2027 | *errp = sctp_make_op_error_fixed(asoc, chunk); |
1972 | ntohs(chunk->chunk_hdr->length)); | ||
1973 | 2028 | ||
1974 | if (*errp) { | 2029 | if (*errp) { |
1975 | sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM, | 2030 | sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM, |
1976 | WORD_ROUND(ntohs(param.p->length))); | 2031 | WORD_ROUND(ntohs(param.p->length))); |
1977 | sctp_addto_chunk(*errp, | 2032 | sctp_addto_chunk_fixed(*errp, |
1978 | WORD_ROUND(ntohs(param.p->length)), | 2033 | WORD_ROUND(ntohs(param.p->length)), |
1979 | param.v); | 2034 | param.v); |
1980 | } else { | 2035 | } else { |
@@ -3309,21 +3364,6 @@ int sctp_process_asconf_ack(struct sctp_association *asoc, | |||
3309 | sctp_chunk_free(asconf); | 3364 | sctp_chunk_free(asconf); |
3310 | asoc->addip_last_asconf = NULL; | 3365 | asoc->addip_last_asconf = NULL; |
3311 | 3366 | ||
3312 | /* Send the next asconf chunk from the addip chunk queue. */ | ||
3313 | if (!list_empty(&asoc->addip_chunk_list)) { | ||
3314 | struct list_head *entry = asoc->addip_chunk_list.next; | ||
3315 | asconf = list_entry(entry, struct sctp_chunk, list); | ||
3316 | |||
3317 | list_del_init(entry); | ||
3318 | |||
3319 | /* Hold the chunk until an ASCONF_ACK is received. */ | ||
3320 | sctp_chunk_hold(asconf); | ||
3321 | if (sctp_primitive_ASCONF(asoc, asconf)) | ||
3322 | sctp_chunk_free(asconf); | ||
3323 | else | ||
3324 | asoc->addip_last_asconf = asconf; | ||
3325 | } | ||
3326 | |||
3327 | return retval; | 3367 | return retval; |
3328 | } | 3368 | } |
3329 | 3369 | ||
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 49fb9acece63..3b7230ef77c2 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c | |||
@@ -966,6 +966,29 @@ static int sctp_cmd_send_msg(struct sctp_association *asoc, | |||
966 | } | 966 | } |
967 | 967 | ||
968 | 968 | ||
969 | /* Sent the next ASCONF packet currently stored in the association. | ||
970 | * This happens after the ASCONF_ACK was succeffully processed. | ||
971 | */ | ||
972 | static void sctp_cmd_send_asconf(struct sctp_association *asoc) | ||
973 | { | ||
974 | /* Send the next asconf chunk from the addip chunk | ||
975 | * queue. | ||
976 | */ | ||
977 | if (!list_empty(&asoc->addip_chunk_list)) { | ||
978 | struct list_head *entry = asoc->addip_chunk_list.next; | ||
979 | struct sctp_chunk *asconf = list_entry(entry, | ||
980 | struct sctp_chunk, list); | ||
981 | list_del_init(entry); | ||
982 | |||
983 | /* Hold the chunk until an ASCONF_ACK is received. */ | ||
984 | sctp_chunk_hold(asconf); | ||
985 | if (sctp_primitive_ASCONF(asoc, asconf)) | ||
986 | sctp_chunk_free(asconf); | ||
987 | else | ||
988 | asoc->addip_last_asconf = asconf; | ||
989 | } | ||
990 | } | ||
991 | |||
969 | 992 | ||
970 | /* These three macros allow us to pull the debugging code out of the | 993 | /* These three macros allow us to pull the debugging code out of the |
971 | * main flow of sctp_do_sm() to keep attention focused on the real | 994 | * main flow of sctp_do_sm() to keep attention focused on the real |
@@ -1621,6 +1644,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, | |||
1621 | } | 1644 | } |
1622 | error = sctp_cmd_send_msg(asoc, cmd->obj.msg); | 1645 | error = sctp_cmd_send_msg(asoc, cmd->obj.msg); |
1623 | break; | 1646 | break; |
1647 | case SCTP_CMD_SEND_NEXT_ASCONF: | ||
1648 | sctp_cmd_send_asconf(asoc); | ||
1649 | break; | ||
1624 | default: | 1650 | default: |
1625 | printk(KERN_WARNING "Impossible command: %u, %p\n", | 1651 | printk(KERN_WARNING "Impossible command: %u, %p\n", |
1626 | cmd->verb, cmd->obj.ptr); | 1652 | cmd->verb, cmd->obj.ptr); |
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index abf601a1b847..24b2cd555637 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -3676,8 +3676,14 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep, | |||
3676 | SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); | 3676 | SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); |
3677 | 3677 | ||
3678 | if (!sctp_process_asconf_ack((struct sctp_association *)asoc, | 3678 | if (!sctp_process_asconf_ack((struct sctp_association *)asoc, |
3679 | asconf_ack)) | 3679 | asconf_ack)) { |
3680 | /* Successfully processed ASCONF_ACK. We can | ||
3681 | * release the next asconf if we have one. | ||
3682 | */ | ||
3683 | sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF, | ||
3684 | SCTP_NULL()); | ||
3680 | return SCTP_DISPOSITION_CONSUME; | 3685 | return SCTP_DISPOSITION_CONSUME; |
3686 | } | ||
3681 | 3687 | ||
3682 | abort = sctp_make_abort(asoc, asconf_ack, | 3688 | abort = sctp_make_abort(asoc, asconf_ack, |
3683 | sizeof(sctp_errhdr_t)); | 3689 | sizeof(sctp_errhdr_t)); |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 1282a0ed855e..ba1add0b13c3 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -3719,9 +3719,9 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk) | |||
3719 | sp->hmac = NULL; | 3719 | sp->hmac = NULL; |
3720 | 3720 | ||
3721 | SCTP_DBG_OBJCNT_INC(sock); | 3721 | SCTP_DBG_OBJCNT_INC(sock); |
3722 | percpu_counter_inc(&sctp_sockets_allocated); | ||
3723 | 3722 | ||
3724 | local_bh_disable(); | 3723 | local_bh_disable(); |
3724 | percpu_counter_inc(&sctp_sockets_allocated); | ||
3725 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); | 3725 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); |
3726 | local_bh_enable(); | 3726 | local_bh_enable(); |
3727 | 3727 | ||
@@ -3738,8 +3738,8 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk) | |||
3738 | /* Release our hold on the endpoint. */ | 3738 | /* Release our hold on the endpoint. */ |
3739 | ep = sctp_sk(sk)->ep; | 3739 | ep = sctp_sk(sk)->ep; |
3740 | sctp_endpoint_free(ep); | 3740 | sctp_endpoint_free(ep); |
3741 | percpu_counter_dec(&sctp_sockets_allocated); | ||
3742 | local_bh_disable(); | 3741 | local_bh_disable(); |
3742 | percpu_counter_dec(&sctp_sockets_allocated); | ||
3743 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); | 3743 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); |
3744 | local_bh_enable(); | 3744 | local_bh_enable(); |
3745 | } | 3745 | } |
@@ -6065,7 +6065,7 @@ static void __sctp_write_space(struct sctp_association *asoc) | |||
6065 | * here by modeling from the current TCP/UDP code. | 6065 | * here by modeling from the current TCP/UDP code. |
6066 | * We have not tested with it yet. | 6066 | * We have not tested with it yet. |
6067 | */ | 6067 | */ |
6068 | if (sock->fasync_list && | 6068 | if (sock->wq->fasync_list && |
6069 | !(sk->sk_shutdown & SEND_SHUTDOWN)) | 6069 | !(sk->sk_shutdown & SEND_SHUTDOWN)) |
6070 | sock_wake_async(sock, | 6070 | sock_wake_async(sock, |
6071 | SOCK_WAKE_SPACE, POLL_OUT); | 6071 | SOCK_WAKE_SPACE, POLL_OUT); |
@@ -6185,6 +6185,19 @@ do_nonblock: | |||
6185 | goto out; | 6185 | goto out; |
6186 | } | 6186 | } |
6187 | 6187 | ||
6188 | void sctp_data_ready(struct sock *sk, int len) | ||
6189 | { | ||
6190 | struct socket_wq *wq; | ||
6191 | |||
6192 | rcu_read_lock(); | ||
6193 | wq = rcu_dereference(sk->sk_wq); | ||
6194 | if (wq_has_sleeper(wq)) | ||
6195 | wake_up_interruptible_sync_poll(&wq->wait, POLLIN | | ||
6196 | POLLRDNORM | POLLRDBAND); | ||
6197 | sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); | ||
6198 | rcu_read_unlock(); | ||
6199 | } | ||
6200 | |||
6188 | /* If socket sndbuf has changed, wake up all per association waiters. */ | 6201 | /* If socket sndbuf has changed, wake up all per association waiters. */ |
6189 | void sctp_write_space(struct sock *sk) | 6202 | void sctp_write_space(struct sock *sk) |
6190 | { | 6203 | { |
diff --git a/net/socket.c b/net/socket.c index cb7c1f6c0d6e..dae8c6b84a09 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -252,9 +252,14 @@ static struct inode *sock_alloc_inode(struct super_block *sb) | |||
252 | ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL); | 252 | ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL); |
253 | if (!ei) | 253 | if (!ei) |
254 | return NULL; | 254 | return NULL; |
255 | init_waitqueue_head(&ei->socket.wait); | 255 | ei->socket.wq = kmalloc(sizeof(struct socket_wq), GFP_KERNEL); |
256 | if (!ei->socket.wq) { | ||
257 | kmem_cache_free(sock_inode_cachep, ei); | ||
258 | return NULL; | ||
259 | } | ||
260 | init_waitqueue_head(&ei->socket.wq->wait); | ||
261 | ei->socket.wq->fasync_list = NULL; | ||
256 | 262 | ||
257 | ei->socket.fasync_list = NULL; | ||
258 | ei->socket.state = SS_UNCONNECTED; | 263 | ei->socket.state = SS_UNCONNECTED; |
259 | ei->socket.flags = 0; | 264 | ei->socket.flags = 0; |
260 | ei->socket.ops = NULL; | 265 | ei->socket.ops = NULL; |
@@ -264,10 +269,21 @@ static struct inode *sock_alloc_inode(struct super_block *sb) | |||
264 | return &ei->vfs_inode; | 269 | return &ei->vfs_inode; |
265 | } | 270 | } |
266 | 271 | ||
272 | |||
273 | static void wq_free_rcu(struct rcu_head *head) | ||
274 | { | ||
275 | struct socket_wq *wq = container_of(head, struct socket_wq, rcu); | ||
276 | |||
277 | kfree(wq); | ||
278 | } | ||
279 | |||
267 | static void sock_destroy_inode(struct inode *inode) | 280 | static void sock_destroy_inode(struct inode *inode) |
268 | { | 281 | { |
269 | kmem_cache_free(sock_inode_cachep, | 282 | struct socket_alloc *ei; |
270 | container_of(inode, struct socket_alloc, vfs_inode)); | 283 | |
284 | ei = container_of(inode, struct socket_alloc, vfs_inode); | ||
285 | call_rcu(&ei->socket.wq->rcu, wq_free_rcu); | ||
286 | kmem_cache_free(sock_inode_cachep, ei); | ||
271 | } | 287 | } |
272 | 288 | ||
273 | static void init_once(void *foo) | 289 | static void init_once(void *foo) |
@@ -513,7 +529,7 @@ void sock_release(struct socket *sock) | |||
513 | module_put(owner); | 529 | module_put(owner); |
514 | } | 530 | } |
515 | 531 | ||
516 | if (sock->fasync_list) | 532 | if (sock->wq->fasync_list) |
517 | printk(KERN_ERR "sock_release: fasync list not empty!\n"); | 533 | printk(KERN_ERR "sock_release: fasync list not empty!\n"); |
518 | 534 | ||
519 | percpu_sub(sockets_in_use, 1); | 535 | percpu_sub(sockets_in_use, 1); |
@@ -1080,9 +1096,9 @@ static int sock_fasync(int fd, struct file *filp, int on) | |||
1080 | 1096 | ||
1081 | lock_sock(sk); | 1097 | lock_sock(sk); |
1082 | 1098 | ||
1083 | fasync_helper(fd, filp, on, &sock->fasync_list); | 1099 | fasync_helper(fd, filp, on, &sock->wq->fasync_list); |
1084 | 1100 | ||
1085 | if (!sock->fasync_list) | 1101 | if (!sock->wq->fasync_list) |
1086 | sock_reset_flag(sk, SOCK_FASYNC); | 1102 | sock_reset_flag(sk, SOCK_FASYNC); |
1087 | else | 1103 | else |
1088 | sock_set_flag(sk, SOCK_FASYNC); | 1104 | sock_set_flag(sk, SOCK_FASYNC); |
@@ -1091,12 +1107,20 @@ static int sock_fasync(int fd, struct file *filp, int on) | |||
1091 | return 0; | 1107 | return 0; |
1092 | } | 1108 | } |
1093 | 1109 | ||
1094 | /* This function may be called only under socket lock or callback_lock */ | 1110 | /* This function may be called only under socket lock or callback_lock or rcu_lock */ |
1095 | 1111 | ||
1096 | int sock_wake_async(struct socket *sock, int how, int band) | 1112 | int sock_wake_async(struct socket *sock, int how, int band) |
1097 | { | 1113 | { |
1098 | if (!sock || !sock->fasync_list) | 1114 | struct socket_wq *wq; |
1115 | |||
1116 | if (!sock) | ||
1099 | return -1; | 1117 | return -1; |
1118 | rcu_read_lock(); | ||
1119 | wq = rcu_dereference(sock->wq); | ||
1120 | if (!wq || !wq->fasync_list) { | ||
1121 | rcu_read_unlock(); | ||
1122 | return -1; | ||
1123 | } | ||
1100 | switch (how) { | 1124 | switch (how) { |
1101 | case SOCK_WAKE_WAITD: | 1125 | case SOCK_WAKE_WAITD: |
1102 | if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags)) | 1126 | if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags)) |
@@ -1108,11 +1132,12 @@ int sock_wake_async(struct socket *sock, int how, int band) | |||
1108 | /* fall through */ | 1132 | /* fall through */ |
1109 | case SOCK_WAKE_IO: | 1133 | case SOCK_WAKE_IO: |
1110 | call_kill: | 1134 | call_kill: |
1111 | kill_fasync(&sock->fasync_list, SIGIO, band); | 1135 | kill_fasync(&wq->fasync_list, SIGIO, band); |
1112 | break; | 1136 | break; |
1113 | case SOCK_WAKE_URG: | 1137 | case SOCK_WAKE_URG: |
1114 | kill_fasync(&sock->fasync_list, SIGURG, band); | 1138 | kill_fasync(&wq->fasync_list, SIGURG, band); |
1115 | } | 1139 | } |
1140 | rcu_read_unlock(); | ||
1116 | return 0; | 1141 | return 0; |
1117 | } | 1142 | } |
1118 | 1143 | ||
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 87c0360eaa25..fef2cc5e9d2b 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -313,13 +313,16 @@ static inline int unix_writable(struct sock *sk) | |||
313 | 313 | ||
314 | static void unix_write_space(struct sock *sk) | 314 | static void unix_write_space(struct sock *sk) |
315 | { | 315 | { |
316 | read_lock(&sk->sk_callback_lock); | 316 | struct socket_wq *wq; |
317 | |||
318 | rcu_read_lock(); | ||
317 | if (unix_writable(sk)) { | 319 | if (unix_writable(sk)) { |
318 | if (sk_has_sleeper(sk)) | 320 | wq = rcu_dereference(sk->sk_wq); |
319 | wake_up_interruptible_sync(sk_sleep(sk)); | 321 | if (wq_has_sleeper(wq)) |
322 | wake_up_interruptible_sync(&wq->wait); | ||
320 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); | 323 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); |
321 | } | 324 | } |
322 | read_unlock(&sk->sk_callback_lock); | 325 | rcu_read_unlock(); |
323 | } | 326 | } |
324 | 327 | ||
325 | /* When dgram socket disconnects (or changes its peer), we clear its receive | 328 | /* When dgram socket disconnects (or changes its peer), we clear its receive |
@@ -406,9 +409,7 @@ static int unix_release_sock(struct sock *sk, int embrion) | |||
406 | skpair->sk_err = ECONNRESET; | 409 | skpair->sk_err = ECONNRESET; |
407 | unix_state_unlock(skpair); | 410 | unix_state_unlock(skpair); |
408 | skpair->sk_state_change(skpair); | 411 | skpair->sk_state_change(skpair); |
409 | read_lock(&skpair->sk_callback_lock); | ||
410 | sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP); | 412 | sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP); |
411 | read_unlock(&skpair->sk_callback_lock); | ||
412 | } | 413 | } |
413 | sock_put(skpair); /* It may now die */ | 414 | sock_put(skpair); /* It may now die */ |
414 | unix_peer(sk) = NULL; | 415 | unix_peer(sk) = NULL; |
@@ -1142,7 +1143,7 @@ restart: | |||
1142 | newsk->sk_peercred.pid = task_tgid_vnr(current); | 1143 | newsk->sk_peercred.pid = task_tgid_vnr(current); |
1143 | current_euid_egid(&newsk->sk_peercred.uid, &newsk->sk_peercred.gid); | 1144 | current_euid_egid(&newsk->sk_peercred.uid, &newsk->sk_peercred.gid); |
1144 | newu = unix_sk(newsk); | 1145 | newu = unix_sk(newsk); |
1145 | newsk->sk_sleep = &newu->peer_wait; | 1146 | newsk->sk_wq = &newu->peer_wq; |
1146 | otheru = unix_sk(other); | 1147 | otheru = unix_sk(other); |
1147 | 1148 | ||
1148 | /* copy address information from listening to new sock*/ | 1149 | /* copy address information from listening to new sock*/ |
@@ -1931,12 +1932,10 @@ static int unix_shutdown(struct socket *sock, int mode) | |||
1931 | other->sk_shutdown |= peer_mode; | 1932 | other->sk_shutdown |= peer_mode; |
1932 | unix_state_unlock(other); | 1933 | unix_state_unlock(other); |
1933 | other->sk_state_change(other); | 1934 | other->sk_state_change(other); |
1934 | read_lock(&other->sk_callback_lock); | ||
1935 | if (peer_mode == SHUTDOWN_MASK) | 1935 | if (peer_mode == SHUTDOWN_MASK) |
1936 | sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP); | 1936 | sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP); |
1937 | else if (peer_mode & RCV_SHUTDOWN) | 1937 | else if (peer_mode & RCV_SHUTDOWN) |
1938 | sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN); | 1938 | sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN); |
1939 | read_unlock(&other->sk_callback_lock); | ||
1940 | } | 1939 | } |
1941 | if (other) | 1940 | if (other) |
1942 | sock_put(other); | 1941 | sock_put(other); |
diff --git a/net/unix/garbage.c b/net/unix/garbage.c index 14c22c3768da..c8df6fda0b1f 100644 --- a/net/unix/garbage.c +++ b/net/unix/garbage.c | |||
@@ -153,15 +153,6 @@ void unix_notinflight(struct file *fp) | |||
153 | } | 153 | } |
154 | } | 154 | } |
155 | 155 | ||
156 | static inline struct sk_buff *sock_queue_head(struct sock *sk) | ||
157 | { | ||
158 | return (struct sk_buff *)&sk->sk_receive_queue; | ||
159 | } | ||
160 | |||
161 | #define receive_queue_for_each_skb(sk, next, skb) \ | ||
162 | for (skb = sock_queue_head(sk)->next, next = skb->next; \ | ||
163 | skb != sock_queue_head(sk); skb = next, next = skb->next) | ||
164 | |||
165 | static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *), | 156 | static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *), |
166 | struct sk_buff_head *hitlist) | 157 | struct sk_buff_head *hitlist) |
167 | { | 158 | { |
@@ -169,7 +160,7 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *), | |||
169 | struct sk_buff *next; | 160 | struct sk_buff *next; |
170 | 161 | ||
171 | spin_lock(&x->sk_receive_queue.lock); | 162 | spin_lock(&x->sk_receive_queue.lock); |
172 | receive_queue_for_each_skb(x, next, skb) { | 163 | skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { |
173 | /* | 164 | /* |
174 | * Do we have file descriptors ? | 165 | * Do we have file descriptors ? |
175 | */ | 166 | */ |
@@ -225,7 +216,7 @@ static void scan_children(struct sock *x, void (*func)(struct unix_sock *), | |||
225 | * and perform a scan on them as well. | 216 | * and perform a scan on them as well. |
226 | */ | 217 | */ |
227 | spin_lock(&x->sk_receive_queue.lock); | 218 | spin_lock(&x->sk_receive_queue.lock); |
228 | receive_queue_for_each_skb(x, next, skb) { | 219 | skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { |
229 | u = unix_sk(skb->sk); | 220 | u = unix_sk(skb->sk); |
230 | 221 | ||
231 | /* | 222 | /* |