Diffstat (limited to 'drivers/net')
73 files changed, 960 insertions, 559 deletions
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c index 39214e512452..7ca0eded2561 100644 --- a/drivers/net/ariadne.c +++ b/drivers/net/ariadne.c | |||
@@ -425,11 +425,6 @@ static irqreturn_t ariadne_interrupt(int irq, void *data) | |||
425 | int csr0, boguscnt; | 425 | int csr0, boguscnt; |
426 | int handled = 0; | 426 | int handled = 0; |
427 | 427 | ||
428 | if (dev == NULL) { | ||
429 | printk(KERN_WARNING "ariadne_interrupt(): irq for unknown device.\n"); | ||
430 | return IRQ_NONE; | ||
431 | } | ||
432 | |||
433 | lance->RAP = CSR0; /* PCnet-ISA Controller Status */ | 428 | lance->RAP = CSR0; /* PCnet-ISA Controller Status */ |
434 | 429 | ||
435 | if (!(lance->RDP & INTR)) /* Check if any interrupt has been */ | 430 | if (!(lance->RDP & INTR)) /* Check if any interrupt has been */ |
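The hunk above drops a dev == NULL guard from the interrupt handler: the cookie passed to request_irq() is handed back verbatim as the handler's void *data, so the pointer can never be NULL there. A minimal sketch of that pattern, with illustrative names and flags (not the actual ariadne code):

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>

    static irqreturn_t example_interrupt(int irq, void *dev_id)
    {
            /* dev_id is exactly the pointer registered below, never NULL */
            struct net_device *dev = dev_id;

            /* ... read controller status, service RX/TX rings ... */
            return IRQ_HANDLED;
    }

    static int example_open(struct net_device *dev)
    {
            /* dev is valid here, so the handler never needs a NULL check */
            return request_irq(dev->irq, example_interrupt, IRQF_SHARED,
                               dev->name, dev);
    }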
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index 0c7811faf72c..a179cc6d79f2 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c | |||
@@ -1786,6 +1786,10 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter, | |||
1786 | spin_lock_bh(&adapter->mcc_lock); | 1786 | spin_lock_bh(&adapter->mcc_lock); |
1787 | 1787 | ||
1788 | wrb = wrb_from_mccq(adapter); | 1788 | wrb = wrb_from_mccq(adapter); |
1789 | if (!wrb) { | ||
1790 | status = -EBUSY; | ||
1791 | goto err; | ||
1792 | } | ||
1789 | req = nonemb_cmd->va; | 1793 | req = nonemb_cmd->va; |
1790 | sge = nonembedded_sgl(wrb); | 1794 | sge = nonembedded_sgl(wrb); |
1791 | 1795 | ||
@@ -1801,6 +1805,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter, | |||
1801 | 1805 | ||
1802 | status = be_mcc_notify_wait(adapter); | 1806 | status = be_mcc_notify_wait(adapter); |
1803 | 1807 | ||
1808 | err: | ||
1804 | spin_unlock_bh(&adapter->mcc_lock); | 1809 | spin_unlock_bh(&adapter->mcc_lock); |
1805 | return status; | 1810 | return status; |
1806 | } | 1811 | } |
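The added branch handles wrb_from_mccq() returning NULL while the MCC spinlock is already held; the new err label routes both the failure and success paths through the single unlock. A hedged sketch of that lock/allocate/goto-err shape, with placeholder types and helpers:

    /* Placeholder illustration of the error path added above. */
    static int example_cmd(struct example_adapter *adapter)
    {
            struct example_wrb *wrb;
            int status = 0;

            spin_lock_bh(&adapter->mcc_lock);

            wrb = example_wrb_from_mccq(adapter);   /* may return NULL */
            if (!wrb) {
                    status = -EBUSY;                /* queue full, back off */
                    goto err;
            }

            /* ... build the request in wrb and post it ... */
            status = example_notify_wait(adapter);

    err:
            spin_unlock_bh(&adapter->mcc_lock);     /* single unlock point */
            return status;
    }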
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h index 653c62475cb6..8849699c66c4 100644 --- a/drivers/net/bnx2x/bnx2x.h +++ b/drivers/net/bnx2x/bnx2x.h | |||
@@ -22,7 +22,7 @@ | |||
22 | * (you will need to reboot afterwards) */ | 22 | * (you will need to reboot afterwards) */ |
23 | /* #define BNX2X_STOP_ON_ERROR */ | 23 | /* #define BNX2X_STOP_ON_ERROR */ |
24 | 24 | ||
25 | #define DRV_MODULE_VERSION "1.62.00-5" | 25 | #define DRV_MODULE_VERSION "1.62.00-6" |
26 | #define DRV_MODULE_RELDATE "2011/01/30" | 26 | #define DRV_MODULE_RELDATE "2011/01/30" |
27 | #define BNX2X_BC_VER 0x040200 | 27 | #define BNX2X_BC_VER 0x040200 |
28 | 28 | ||
@@ -1211,6 +1211,7 @@ struct bnx2x { | |||
1211 | /* DCBX Negotation results */ | 1211 | /* DCBX Negotation results */ |
1212 | struct dcbx_features dcbx_local_feat; | 1212 | struct dcbx_features dcbx_local_feat; |
1213 | u32 dcbx_error; | 1213 | u32 dcbx_error; |
1214 | u32 pending_max; | ||
1214 | }; | 1215 | }; |
1215 | 1216 | ||
1216 | /** | 1217 | /** |
@@ -1613,19 +1614,23 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | |||
1613 | #define BNX2X_BTR 4 | 1614 | #define BNX2X_BTR 4 |
1614 | #define MAX_SPQ_PENDING 8 | 1615 | #define MAX_SPQ_PENDING 8 |
1615 | 1616 | ||
1616 | 1617 | /* CMNG constants, as derived from system spec calculations */ | |
1617 | /* CMNG constants | 1618 | /* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */ |
1618 | derived from lab experiments, and not from system spec calculations !!! */ | 1619 | #define DEF_MIN_RATE 100 |
1619 | #define DEF_MIN_RATE 100 | 1620 | /* resolution of the rate shaping timer - 400 usec */ |
1620 | /* resolution of the rate shaping timer - 100 usec */ | 1621 | #define RS_PERIODIC_TIMEOUT_USEC 400 |
1621 | #define RS_PERIODIC_TIMEOUT_USEC 100 | ||
1622 | /* resolution of fairness algorithm in usecs - | ||
1623 | coefficient for calculating the actual t fair */ | ||
1624 | #define T_FAIR_COEF 10000000 | ||
1625 | /* number of bytes in single QM arbitration cycle - | 1622 | /* number of bytes in single QM arbitration cycle - |
1626 | coefficient for calculating the fairness timer */ | 1623 | * coefficient for calculating the fairness timer */ |
1627 | #define QM_ARB_BYTES 40000 | 1624 | #define QM_ARB_BYTES 160000 |
1628 | #define FAIR_MEM 2 | 1625 | /* resolution of Min algorithm 1:100 */ |
1626 | #define MIN_RES 100 | ||
1627 | /* how many bytes above threshold for the minimal credit of Min algorithm*/ | ||
1628 | #define MIN_ABOVE_THRESH 32768 | ||
1629 | /* Fairness algorithm integration time coefficient - | ||
1630 | * for calculating the actual Tfair */ | ||
1631 | #define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES) | ||
1632 | /* Memory of fairness algorithm . 2 cycles */ | ||
1633 | #define FAIR_MEM 2 | ||
1629 | 1634 | ||
1630 | 1635 | ||
1631 | #define ATTN_NIG_FOR_FUNC (1L << 8) | 1636 | #define ATTN_NIG_FOR_FUNC (1L << 8) |
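With the reworked constants, T_FAIR_COEF is derived from the other CMNG values rather than hard-coded. Plugging in the definitions above as a sanity check:

    T_FAIR_COEF = (MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES
                = (32768 + 160000) * 8 * 100
                = 154214400

so the fairness integration coefficient now scales with the QM arbitration-cycle size and the 1:100 Min-algorithm resolution instead of the old fixed 10000000.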
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c index 710ce5d04c53..a71b32940533 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.c +++ b/drivers/net/bnx2x/bnx2x_cmn.c | |||
@@ -259,10 +259,44 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, | |||
259 | #endif | 259 | #endif |
260 | } | 260 | } |
261 | 261 | ||
262 | /* Timestamp option length allowed for TPA aggregation: | ||
263 | * | ||
264 | * nop nop kind length echo val | ||
265 | */ | ||
266 | #define TPA_TSTAMP_OPT_LEN 12 | ||
267 | /** | ||
268 | * Calculate the approximate value of the MSS for this | ||
269 | * aggregation using its first packet. | ||
270 | * | ||
271 | * @param bp | ||
272 | * @param parsing_flags Parsing flags from the START CQE | ||
273 | * @param len_on_bd Total length of the first packet for the | ||
274 | * aggregation. | ||
275 | */ | ||
276 | static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags, | ||
277 | u16 len_on_bd) | ||
278 | { | ||
279 | /* A TPA aggregation won't have IP options or TCP options | ||
280 | * other than timestamp. | ||
281 | */ | ||
282 | u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr); | ||
283 | |||
284 | |||
285 | /* Check if there was a TCP timestamp; if there was, it will | ||
286 | * always be 12 bytes long: nop nop kind length echo val. | ||
287 | * | ||
288 | * Otherwise FW would close the aggregation. | ||
289 | */ | ||
290 | if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG) | ||
291 | hdrs_len += TPA_TSTAMP_OPT_LEN; | ||
292 | |||
293 | return len_on_bd - hdrs_len; | ||
294 | } | ||
295 | |||
262 | static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, | 296 | static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, |
263 | struct sk_buff *skb, | 297 | struct sk_buff *skb, |
264 | struct eth_fast_path_rx_cqe *fp_cqe, | 298 | struct eth_fast_path_rx_cqe *fp_cqe, |
265 | u16 cqe_idx) | 299 | u16 cqe_idx, u16 parsing_flags) |
266 | { | 300 | { |
267 | struct sw_rx_page *rx_pg, old_rx_pg; | 301 | struct sw_rx_page *rx_pg, old_rx_pg; |
268 | u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd); | 302 | u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd); |
@@ -275,8 +309,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
275 | 309 | ||
276 | /* This is needed in order to enable forwarding support */ | 310 | /* This is needed in order to enable forwarding support */ |
277 | if (frag_size) | 311 | if (frag_size) |
278 | skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE, | 312 | skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags, |
279 | max(frag_size, (u32)len_on_bd)); | 313 | len_on_bd); |
280 | 314 | ||
281 | #ifdef BNX2X_STOP_ON_ERROR | 315 | #ifdef BNX2X_STOP_ON_ERROR |
282 | if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { | 316 | if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { |
@@ -344,6 +378,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
344 | if (likely(new_skb)) { | 378 | if (likely(new_skb)) { |
345 | /* fix ip xsum and give it to the stack */ | 379 | /* fix ip xsum and give it to the stack */ |
346 | /* (no need to map the new skb) */ | 380 | /* (no need to map the new skb) */ |
381 | u16 parsing_flags = | ||
382 | le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags); | ||
347 | 383 | ||
348 | prefetch(skb); | 384 | prefetch(skb); |
349 | prefetch(((char *)(skb)) + L1_CACHE_BYTES); | 385 | prefetch(((char *)(skb)) + L1_CACHE_BYTES); |
@@ -373,9 +409,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
373 | } | 409 | } |
374 | 410 | ||
375 | if (!bnx2x_fill_frag_skb(bp, fp, skb, | 411 | if (!bnx2x_fill_frag_skb(bp, fp, skb, |
376 | &cqe->fast_path_cqe, cqe_idx)) { | 412 | &cqe->fast_path_cqe, cqe_idx, |
377 | if ((le16_to_cpu(cqe->fast_path_cqe. | 413 | parsing_flags)) { |
378 | pars_flags.flags) & PARSING_FLAGS_VLAN)) | 414 | if (parsing_flags & PARSING_FLAGS_VLAN) |
379 | __vlan_hwaccel_put_tag(skb, | 415 | __vlan_hwaccel_put_tag(skb, |
380 | le16_to_cpu(cqe->fast_path_cqe. | 416 | le16_to_cpu(cqe->fast_path_cqe. |
381 | vlan_tag)); | 417 | vlan_tag)); |
@@ -703,19 +739,20 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp) | |||
703 | { | 739 | { |
704 | u16 line_speed = bp->link_vars.line_speed; | 740 | u16 line_speed = bp->link_vars.line_speed; |
705 | if (IS_MF(bp)) { | 741 | if (IS_MF(bp)) { |
706 | u16 maxCfg = (bp->mf_config[BP_VN(bp)] & | 742 | u16 maxCfg = bnx2x_extract_max_cfg(bp, |
707 | FUNC_MF_CFG_MAX_BW_MASK) >> | 743 | bp->mf_config[BP_VN(bp)]); |
708 | FUNC_MF_CFG_MAX_BW_SHIFT; | 744 | |
709 | /* Calculate the current MAX line speed limit for the DCC | 745 | /* Calculate the current MAX line speed limit for the MF |
710 | * capable devices | 746 | * devices |
711 | */ | 747 | */ |
712 | if (IS_MF_SD(bp)) { | 748 | if (IS_MF_SI(bp)) |
749 | line_speed = (line_speed * maxCfg) / 100; | ||
750 | else { /* SD mode */ | ||
713 | u16 vn_max_rate = maxCfg * 100; | 751 | u16 vn_max_rate = maxCfg * 100; |
714 | 752 | ||
715 | if (vn_max_rate < line_speed) | 753 | if (vn_max_rate < line_speed) |
716 | line_speed = vn_max_rate; | 754 | line_speed = vn_max_rate; |
717 | } else /* IS_MF_SI(bp)) */ | 755 | } |
718 | line_speed = (line_speed * maxCfg) / 100; | ||
719 | } | 756 | } |
720 | 757 | ||
721 | return line_speed; | 758 | return line_speed; |
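The rewritten branch distinguishes the two multi-function modes: in switch-independent (SI) mode maxCfg is a percentage of the actual line speed, while in switch-dependent (SD) mode it is an absolute limit in 100 Mb/s units. A small numeric illustration (values invented for the example):

    /* SI mode: maxCfg = 25 (percent) on a 10000 Mb/s link */
    line_speed = (10000 * 25) / 100;        /* -> 2500 Mb/s */

    /* SD mode: maxCfg = 25 (units of 100 Mb/s) on the same link */
    vn_max_rate = 25 * 100;                 /* -> 2500 Mb/s cap */
    if (vn_max_rate < line_speed)
            line_speed = vn_max_rate;       /* -> 2500 Mb/s */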
@@ -959,6 +996,23 @@ void bnx2x_free_skbs(struct bnx2x *bp) | |||
959 | bnx2x_free_rx_skbs(bp); | 996 | bnx2x_free_rx_skbs(bp); |
960 | } | 997 | } |
961 | 998 | ||
999 | void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value) | ||
1000 | { | ||
1001 | /* load old values */ | ||
1002 | u32 mf_cfg = bp->mf_config[BP_VN(bp)]; | ||
1003 | |||
1004 | if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) { | ||
1005 | /* leave all but MAX value */ | ||
1006 | mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK; | ||
1007 | |||
1008 | /* set new MAX value */ | ||
1009 | mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT) | ||
1010 | & FUNC_MF_CFG_MAX_BW_MASK; | ||
1011 | |||
1012 | bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg); | ||
1013 | } | ||
1014 | } | ||
1015 | |||
962 | static void bnx2x_free_msix_irqs(struct bnx2x *bp) | 1016 | static void bnx2x_free_msix_irqs(struct bnx2x *bp) |
963 | { | 1017 | { |
964 | int i, offset = 1; | 1018 | int i, offset = 1; |
@@ -1427,6 +1481,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
1427 | 1481 | ||
1428 | bnx2x_set_eth_mac(bp, 1); | 1482 | bnx2x_set_eth_mac(bp, 1); |
1429 | 1483 | ||
1484 | if (bp->pending_max) { | ||
1485 | bnx2x_update_max_mf_config(bp, bp->pending_max); | ||
1486 | bp->pending_max = 0; | ||
1487 | } | ||
1488 | |||
1430 | if (bp->port.pmf) | 1489 | if (bp->port.pmf) |
1431 | bnx2x_initial_phy_init(bp, load_mode); | 1490 | bnx2x_initial_phy_init(bp, load_mode); |
1432 | 1491 | ||
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h index 03eb4d68e6bb..85ea7f26b51f 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.h +++ b/drivers/net/bnx2x/bnx2x_cmn.h | |||
@@ -341,6 +341,15 @@ void bnx2x_dcbx_init(struct bnx2x *bp); | |||
341 | */ | 341 | */ |
342 | int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state); | 342 | int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state); |
343 | 343 | ||
344 | /** | ||
345 | * Updates MAX part of MF configuration in HW | ||
346 | * (if required) | ||
347 | * | ||
348 | * @param bp | ||
349 | * @param value | ||
350 | */ | ||
351 | void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value); | ||
352 | |||
344 | /* dev_close main block */ | 353 | /* dev_close main block */ |
345 | int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode); | 354 | int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode); |
346 | 355 | ||
@@ -1044,4 +1053,24 @@ static inline void storm_memset_cmng(struct bnx2x *bp, | |||
1044 | void bnx2x_acquire_phy_lock(struct bnx2x *bp); | 1053 | void bnx2x_acquire_phy_lock(struct bnx2x *bp); |
1045 | void bnx2x_release_phy_lock(struct bnx2x *bp); | 1054 | void bnx2x_release_phy_lock(struct bnx2x *bp); |
1046 | 1055 | ||
1056 | /** | ||
1057 | * Extracts MAX BW part from MF configuration. | ||
1058 | * | ||
1059 | * @param bp | ||
1060 | * @param mf_cfg | ||
1061 | * | ||
1062 | * @return u16 | ||
1063 | */ | ||
1064 | static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg) | ||
1065 | { | ||
1066 | u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> | ||
1067 | FUNC_MF_CFG_MAX_BW_SHIFT; | ||
1068 | if (!max_cfg) { | ||
1069 | BNX2X_ERR("Illegal configuration detected for Max BW - " | ||
1070 | "using 100 instead\n"); | ||
1071 | max_cfg = 100; | ||
1072 | } | ||
1073 | return max_cfg; | ||
1074 | } | ||
1075 | |||
1047 | #endif /* BNX2X_CMN_H */ | 1076 | #endif /* BNX2X_CMN_H */ |
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c index 5b44a8b48509..7e92f9d0dcfd 100644 --- a/drivers/net/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/bnx2x/bnx2x_ethtool.c | |||
@@ -238,7 +238,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
238 | speed |= (cmd->speed_hi << 16); | 238 | speed |= (cmd->speed_hi << 16); |
239 | 239 | ||
240 | if (IS_MF_SI(bp)) { | 240 | if (IS_MF_SI(bp)) { |
241 | u32 param = 0; | 241 | u32 part; |
242 | u32 line_speed = bp->link_vars.line_speed; | 242 | u32 line_speed = bp->link_vars.line_speed; |
243 | 243 | ||
244 | /* use 10G if no link detected */ | 244 | /* use 10G if no link detected */ |
@@ -251,23 +251,22 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
251 | REQ_BC_VER_4_SET_MF_BW); | 251 | REQ_BC_VER_4_SET_MF_BW); |
252 | return -EINVAL; | 252 | return -EINVAL; |
253 | } | 253 | } |
254 | if (line_speed < speed) { | 254 | |
255 | BNX2X_DEV_INFO("New speed should be less or equal " | 255 | part = (speed * 100) / line_speed; |
256 | "to actual line speed\n"); | 256 | |
257 | if (line_speed < speed || !part) { | ||
258 | BNX2X_DEV_INFO("Speed setting should be in a range " | ||
259 | "from 1%% to 100%% " | ||
260 | "of actual line speed\n"); | ||
257 | return -EINVAL; | 261 | return -EINVAL; |
258 | } | 262 | } |
259 | /* load old values */ | ||
260 | param = bp->mf_config[BP_VN(bp)]; | ||
261 | |||
262 | /* leave only MIN value */ | ||
263 | param &= FUNC_MF_CFG_MIN_BW_MASK; | ||
264 | 263 | ||
265 | /* set new MAX value */ | 264 | if (bp->state != BNX2X_STATE_OPEN) |
266 | param |= (((speed * 100) / line_speed) | 265 | /* store value for following "load" */ |
267 | << FUNC_MF_CFG_MAX_BW_SHIFT) | 266 | bp->pending_max = part; |
268 | & FUNC_MF_CFG_MAX_BW_MASK; | 267 | else |
268 | bnx2x_update_max_mf_config(bp, part); | ||
269 | 269 | ||
270 | bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, param); | ||
271 | return 0; | 270 | return 0; |
272 | } | 271 | } |
273 | 272 | ||
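The ethtool path now converts the requested speed into a percentage of the current line speed and either applies it immediately or parks it in pending_max for the next nic_load. A worked example of the new validation and dispatch (numbers are illustrative):

    /* Request 2500 Mb/s on a 10000 Mb/s link (SI mode) */
    part = (2500 * 100) / 10000;            /* = 25 percent */

    /* Rejected: speed above line rate, or a ratio that rounds to zero,
     * e.g. speed = 50 on a 10000 Mb/s link -> part = 0 -> -EINVAL */

    if (bp->state != BNX2X_STATE_OPEN)
            bp->pending_max = part;         /* applied later in bnx2x_nic_load() */
    else
            bnx2x_update_max_mf_config(bp, part);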
@@ -1781,9 +1780,7 @@ static int bnx2x_test_nvram(struct bnx2x *bp) | |||
1781 | { 0x100, 0x350 }, /* manuf_info */ | 1780 | { 0x100, 0x350 }, /* manuf_info */ |
1782 | { 0x450, 0xf0 }, /* feature_info */ | 1781 | { 0x450, 0xf0 }, /* feature_info */ |
1783 | { 0x640, 0x64 }, /* upgrade_key_info */ | 1782 | { 0x640, 0x64 }, /* upgrade_key_info */ |
1784 | { 0x6a4, 0x64 }, | ||
1785 | { 0x708, 0x70 }, /* manuf_key_info */ | 1783 | { 0x708, 0x70 }, /* manuf_key_info */ |
1786 | { 0x778, 0x70 }, | ||
1787 | { 0, 0 } | 1784 | { 0, 0 } |
1788 | }; | 1785 | }; |
1789 | __be32 buf[0x350 / 4]; | 1786 | __be32 buf[0x350 / 4]; |
@@ -1933,11 +1930,11 @@ static void bnx2x_self_test(struct net_device *dev, | |||
1933 | buf[4] = 1; | 1930 | buf[4] = 1; |
1934 | etest->flags |= ETH_TEST_FL_FAILED; | 1931 | etest->flags |= ETH_TEST_FL_FAILED; |
1935 | } | 1932 | } |
1936 | if (bp->port.pmf) | 1933 | |
1937 | if (bnx2x_link_test(bp, is_serdes) != 0) { | 1934 | if (bnx2x_link_test(bp, is_serdes) != 0) { |
1938 | buf[5] = 1; | 1935 | buf[5] = 1; |
1939 | etest->flags |= ETH_TEST_FL_FAILED; | 1936 | etest->flags |= ETH_TEST_FL_FAILED; |
1940 | } | 1937 | } |
1941 | 1938 | ||
1942 | #ifdef BNX2X_EXTRA_DEBUG | 1939 | #ifdef BNX2X_EXTRA_DEBUG |
1943 | bnx2x_panic_dump(bp); | 1940 | bnx2x_panic_dump(bp); |
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h index 5a268e9a0895..fa6dbe3f2058 100644 --- a/drivers/net/bnx2x/bnx2x_init.h +++ b/drivers/net/bnx2x/bnx2x_init.h | |||
@@ -241,7 +241,7 @@ static const struct { | |||
241 | /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't | 241 | /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't |
242 | * want to handle "system kill" flow at the moment. | 242 | * want to handle "system kill" flow at the moment. |
243 | */ | 243 | */ |
244 | BLOCK_PRTY_INFO(PXP, 0x3ffffff, 0x3ffffff, 0x3ffffff, 0x3ffffff), | 244 | BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff), |
245 | BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff), | 245 | BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff), |
246 | BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff), | 246 | BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff), |
247 | BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0), | 247 | BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0), |
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index f40740e68ea5..aa032339e321 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c | |||
@@ -1974,13 +1974,22 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) | |||
1974 | vn_max_rate = 0; | 1974 | vn_max_rate = 0; |
1975 | 1975 | ||
1976 | } else { | 1976 | } else { |
1977 | u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg); | ||
1978 | |||
1977 | vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> | 1979 | vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> |
1978 | FUNC_MF_CFG_MIN_BW_SHIFT) * 100; | 1980 | FUNC_MF_CFG_MIN_BW_SHIFT) * 100; |
1979 | /* If min rate is zero - set it to 1 */ | 1981 | /* If fairness is enabled (not all min rates are zeroes) and |
1982 | if current min rate is zero - set it to 1. | ||
1983 | This is a requirement of the algorithm. */ | ||
1980 | if (bp->vn_weight_sum && (vn_min_rate == 0)) | 1984 | if (bp->vn_weight_sum && (vn_min_rate == 0)) |
1981 | vn_min_rate = DEF_MIN_RATE; | 1985 | vn_min_rate = DEF_MIN_RATE; |
1982 | vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> | 1986 | |
1983 | FUNC_MF_CFG_MAX_BW_SHIFT) * 100; | 1987 | if (IS_MF_SI(bp)) |
1988 | /* maxCfg in percents of linkspeed */ | ||
1989 | vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100; | ||
1990 | else | ||
1991 | /* maxCfg is absolute in 100Mb units */ | ||
1992 | vn_max_rate = maxCfg * 100; | ||
1984 | } | 1993 | } |
1985 | 1994 | ||
1986 | DP(NETIF_MSG_IFUP, | 1995 | DP(NETIF_MSG_IFUP, |
@@ -2006,7 +2015,8 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) | |||
2006 | m_fair_vn.vn_credit_delta = | 2015 | m_fair_vn.vn_credit_delta = |
2007 | max_t(u32, (vn_min_rate * (T_FAIR_COEF / | 2016 | max_t(u32, (vn_min_rate * (T_FAIR_COEF / |
2008 | (8 * bp->vn_weight_sum))), | 2017 | (8 * bp->vn_weight_sum))), |
2009 | (bp->cmng.fair_vars.fair_threshold * 2)); | 2018 | (bp->cmng.fair_vars.fair_threshold + |
2019 | MIN_ABOVE_THRESH)); | ||
2010 | DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n", | 2020 | DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n", |
2011 | m_fair_vn.vn_credit_delta); | 2021 | m_fair_vn.vn_credit_delta); |
2012 | } | 2022 | } |
@@ -2082,8 +2092,9 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) | |||
2082 | bnx2x_calc_vn_weight_sum(bp); | 2092 | bnx2x_calc_vn_weight_sum(bp); |
2083 | 2093 | ||
2084 | /* calculate and set min-max rate for each vn */ | 2094 | /* calculate and set min-max rate for each vn */ |
2085 | for (vn = VN_0; vn < E1HVN_MAX; vn++) | 2095 | if (bp->port.pmf) |
2086 | bnx2x_init_vn_minmax(bp, vn); | 2096 | for (vn = VN_0; vn < E1HVN_MAX; vn++) |
2097 | bnx2x_init_vn_minmax(bp, vn); | ||
2087 | 2098 | ||
2088 | /* always enable rate shaping and fairness */ | 2099 | /* always enable rate shaping and fairness */ |
2089 | bp->cmng.flags.cmng_enables |= | 2100 | bp->cmng.flags.cmng_enables |= |
@@ -2152,13 +2163,6 @@ static void bnx2x_link_attn(struct bnx2x *bp) | |||
2152 | bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); | 2163 | bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); |
2153 | } | 2164 | } |
2154 | 2165 | ||
2155 | /* indicate link status only if link status actually changed */ | ||
2156 | if (prev_link_status != bp->link_vars.link_status) | ||
2157 | bnx2x_link_report(bp); | ||
2158 | |||
2159 | if (IS_MF(bp)) | ||
2160 | bnx2x_link_sync_notify(bp); | ||
2161 | |||
2162 | if (bp->link_vars.link_up && bp->link_vars.line_speed) { | 2166 | if (bp->link_vars.link_up && bp->link_vars.line_speed) { |
2163 | int cmng_fns = bnx2x_get_cmng_fns_mode(bp); | 2167 | int cmng_fns = bnx2x_get_cmng_fns_mode(bp); |
2164 | 2168 | ||
@@ -2170,6 +2174,13 @@ static void bnx2x_link_attn(struct bnx2x *bp) | |||
2170 | DP(NETIF_MSG_IFUP, | 2174 | DP(NETIF_MSG_IFUP, |
2171 | "single function mode without fairness\n"); | 2175 | "single function mode without fairness\n"); |
2172 | } | 2176 | } |
2177 | |||
2178 | if (IS_MF(bp)) | ||
2179 | bnx2x_link_sync_notify(bp); | ||
2180 | |||
2181 | /* indicate link status only if link status actually changed */ | ||
2182 | if (prev_link_status != bp->link_vars.link_status) | ||
2183 | bnx2x_link_report(bp); | ||
2173 | } | 2184 | } |
2174 | 2185 | ||
2175 | void bnx2x__link_status_update(struct bnx2x *bp) | 2186 | void bnx2x__link_status_update(struct bnx2x *bp) |
@@ -4276,9 +4287,12 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp) | |||
4276 | def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | | 4287 | def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | |
4277 | BNX2X_ACCEPT_MULTICAST; | 4288 | BNX2X_ACCEPT_MULTICAST; |
4278 | #ifdef BCM_CNIC | 4289 | #ifdef BCM_CNIC |
4279 | cl_id = bnx2x_fcoe(bp, cl_id); | 4290 | if (!NO_FCOE(bp)) { |
4280 | bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST | | 4291 | cl_id = bnx2x_fcoe(bp, cl_id); |
4281 | BNX2X_ACCEPT_MULTICAST); | 4292 | bnx2x_rxq_set_mac_filters(bp, cl_id, |
4293 | BNX2X_ACCEPT_UNICAST | | ||
4294 | BNX2X_ACCEPT_MULTICAST); | ||
4295 | } | ||
4282 | #endif | 4296 | #endif |
4283 | break; | 4297 | break; |
4284 | 4298 | ||
@@ -4286,18 +4300,29 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp) | |||
4286 | def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | | 4300 | def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | |
4287 | BNX2X_ACCEPT_ALL_MULTICAST; | 4301 | BNX2X_ACCEPT_ALL_MULTICAST; |
4288 | #ifdef BCM_CNIC | 4302 | #ifdef BCM_CNIC |
4289 | cl_id = bnx2x_fcoe(bp, cl_id); | 4303 | /* |
4290 | bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST | | 4304 | * Prevent duplication of multicast packets by configuring FCoE |
4291 | BNX2X_ACCEPT_MULTICAST); | 4305 | * L2 Client to receive only matched unicast frames. |
4306 | */ | ||
4307 | if (!NO_FCOE(bp)) { | ||
4308 | cl_id = bnx2x_fcoe(bp, cl_id); | ||
4309 | bnx2x_rxq_set_mac_filters(bp, cl_id, | ||
4310 | BNX2X_ACCEPT_UNICAST); | ||
4311 | } | ||
4292 | #endif | 4312 | #endif |
4293 | break; | 4313 | break; |
4294 | 4314 | ||
4295 | case BNX2X_RX_MODE_PROMISC: | 4315 | case BNX2X_RX_MODE_PROMISC: |
4296 | def_q_filters |= BNX2X_PROMISCUOUS_MODE; | 4316 | def_q_filters |= BNX2X_PROMISCUOUS_MODE; |
4297 | #ifdef BCM_CNIC | 4317 | #ifdef BCM_CNIC |
4298 | cl_id = bnx2x_fcoe(bp, cl_id); | 4318 | /* |
4299 | bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST | | 4319 | * Prevent packet duplication by configuring DROP_ALL for FCoE |
4300 | BNX2X_ACCEPT_MULTICAST); | 4320 | * L2 Client. |
4321 | */ | ||
4322 | if (!NO_FCOE(bp)) { | ||
4323 | cl_id = bnx2x_fcoe(bp, cl_id); | ||
4324 | bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE); | ||
4325 | } | ||
4301 | #endif | 4326 | #endif |
4302 | /* pass management unicast packets as well */ | 4327 | /* pass management unicast packets as well */ |
4303 | llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST; | 4328 | llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST; |
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c index bda60d590fa8..3445ded6674f 100644 --- a/drivers/net/bnx2x/bnx2x_stats.c +++ b/drivers/net/bnx2x/bnx2x_stats.c | |||
@@ -1239,14 +1239,14 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) | |||
1239 | if (unlikely(bp->panic)) | 1239 | if (unlikely(bp->panic)) |
1240 | return; | 1240 | return; |
1241 | 1241 | ||
1242 | bnx2x_stats_stm[bp->stats_state][event].action(bp); | ||
1243 | |||
1242 | /* Protect a state change flow */ | 1244 | /* Protect a state change flow */ |
1243 | spin_lock_bh(&bp->stats_lock); | 1245 | spin_lock_bh(&bp->stats_lock); |
1244 | state = bp->stats_state; | 1246 | state = bp->stats_state; |
1245 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; | 1247 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; |
1246 | spin_unlock_bh(&bp->stats_lock); | 1248 | spin_unlock_bh(&bp->stats_lock); |
1247 | 1249 | ||
1248 | bnx2x_stats_stm[state][event].action(bp); | ||
1249 | |||
1250 | if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) | 1250 | if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) |
1251 | DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", | 1251 | DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", |
1252 | state, event, bp->stats_state); | 1252 | state, event, bp->stats_state); |
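The hunk above moves the state-machine action ahead of the locked state transition: the action for the current state runs first, and only then is stats_state advanced under stats_lock. A hedged sketch of that dispatch order (table and type names are placeholders):

    /* Hypothetical state machine following the new ordering. */
    void example_stats_handle(struct example_bp *bp, int event)
    {
            int state;

            /* 1. act on the event from the current (pre-transition) state */
            example_stm[bp->stats_state][event].action(bp);

            /* 2. then commit the transition atomically */
            spin_lock_bh(&bp->stats_lock);
            state = bp->stats_state;
            bp->stats_state = example_stm[state][event].next_state;
            spin_unlock_bh(&bp->stats_lock);
    }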
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 1024ae158227..a5d5d0b5b155 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c | |||
@@ -281,23 +281,23 @@ static inline int __check_agg_selection_timer(struct port *port) | |||
281 | } | 281 | } |
282 | 282 | ||
283 | /** | 283 | /** |
284 | * __get_rx_machine_lock - lock the port's RX machine | 284 | * __get_state_machine_lock - lock the port's state machines |
285 | * @port: the port we're looking at | 285 | * @port: the port we're looking at |
286 | * | 286 | * |
287 | */ | 287 | */ |
288 | static inline void __get_rx_machine_lock(struct port *port) | 288 | static inline void __get_state_machine_lock(struct port *port) |
289 | { | 289 | { |
290 | spin_lock_bh(&(SLAVE_AD_INFO(port->slave).rx_machine_lock)); | 290 | spin_lock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock)); |
291 | } | 291 | } |
292 | 292 | ||
293 | /** | 293 | /** |
294 | * __release_rx_machine_lock - unlock the port's RX machine | 294 | * __release_state_machine_lock - unlock the port's state machines |
295 | * @port: the port we're looking at | 295 | * @port: the port we're looking at |
296 | * | 296 | * |
297 | */ | 297 | */ |
298 | static inline void __release_rx_machine_lock(struct port *port) | 298 | static inline void __release_state_machine_lock(struct port *port) |
299 | { | 299 | { |
300 | spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).rx_machine_lock)); | 300 | spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock)); |
301 | } | 301 | } |
302 | 302 | ||
303 | /** | 303 | /** |
@@ -388,14 +388,14 @@ static u8 __get_duplex(struct port *port) | |||
388 | } | 388 | } |
389 | 389 | ||
390 | /** | 390 | /** |
391 | * __initialize_port_locks - initialize a port's RX machine spinlock | 391 | * __initialize_port_locks - initialize a port's STATE machine spinlock |
392 | * @port: the port we're looking at | 392 | * @port: the port we're looking at |
393 | * | 393 | * |
394 | */ | 394 | */ |
395 | static inline void __initialize_port_locks(struct port *port) | 395 | static inline void __initialize_port_locks(struct port *port) |
396 | { | 396 | { |
397 | // make sure it isn't called twice | 397 | // make sure it isn't called twice |
398 | spin_lock_init(&(SLAVE_AD_INFO(port->slave).rx_machine_lock)); | 398 | spin_lock_init(&(SLAVE_AD_INFO(port->slave).state_machine_lock)); |
399 | } | 399 | } |
400 | 400 | ||
401 | //conversions | 401 | //conversions |
@@ -1025,9 +1025,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) | |||
1025 | { | 1025 | { |
1026 | rx_states_t last_state; | 1026 | rx_states_t last_state; |
1027 | 1027 | ||
1028 | // Lock to prevent 2 instances of this function to run simultaneously(rx interrupt and periodic machine callback) | ||
1029 | __get_rx_machine_lock(port); | ||
1030 | |||
1031 | // keep current State Machine state to compare later if it was changed | 1028 | // keep current State Machine state to compare later if it was changed |
1032 | last_state = port->sm_rx_state; | 1029 | last_state = port->sm_rx_state; |
1033 | 1030 | ||
@@ -1133,7 +1130,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) | |||
1133 | pr_err("%s: An illegal loopback occurred on adapter (%s).\n" | 1130 | pr_err("%s: An illegal loopback occurred on adapter (%s).\n" |
1134 | "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n", | 1131 | "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n", |
1135 | port->slave->dev->master->name, port->slave->dev->name); | 1132 | port->slave->dev->master->name, port->slave->dev->name); |
1136 | __release_rx_machine_lock(port); | ||
1137 | return; | 1133 | return; |
1138 | } | 1134 | } |
1139 | __update_selected(lacpdu, port); | 1135 | __update_selected(lacpdu, port); |
@@ -1153,7 +1149,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) | |||
1153 | break; | 1149 | break; |
1154 | } | 1150 | } |
1155 | } | 1151 | } |
1156 | __release_rx_machine_lock(port); | ||
1157 | } | 1152 | } |
1158 | 1153 | ||
1159 | /** | 1154 | /** |
@@ -2155,6 +2150,12 @@ void bond_3ad_state_machine_handler(struct work_struct *work) | |||
2155 | goto re_arm; | 2150 | goto re_arm; |
2156 | } | 2151 | } |
2157 | 2152 | ||
2153 | /* Lock around state machines to protect data accessed | ||
2154 | * by all (e.g., port->sm_vars). ad_rx_machine may run | ||
2155 | * concurrently due to incoming LACPDU. | ||
2156 | */ | ||
2157 | __get_state_machine_lock(port); | ||
2158 | |||
2158 | ad_rx_machine(NULL, port); | 2159 | ad_rx_machine(NULL, port); |
2159 | ad_periodic_machine(port); | 2160 | ad_periodic_machine(port); |
2160 | ad_port_selection_logic(port); | 2161 | ad_port_selection_logic(port); |
@@ -2164,6 +2165,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work) | |||
2164 | // turn off the BEGIN bit, since we already handled it | 2165 | // turn off the BEGIN bit, since we already handled it |
2165 | if (port->sm_vars & AD_PORT_BEGIN) | 2166 | if (port->sm_vars & AD_PORT_BEGIN) |
2166 | port->sm_vars &= ~AD_PORT_BEGIN; | 2167 | port->sm_vars &= ~AD_PORT_BEGIN; |
2168 | |||
2169 | __release_state_machine_lock(port); | ||
2167 | } | 2170 | } |
2168 | 2171 | ||
2169 | re_arm: | 2172 | re_arm: |
@@ -2200,7 +2203,10 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u | |||
2200 | case AD_TYPE_LACPDU: | 2203 | case AD_TYPE_LACPDU: |
2201 | pr_debug("Received LACPDU on port %d\n", | 2204 | pr_debug("Received LACPDU on port %d\n", |
2202 | port->actor_port_number); | 2205 | port->actor_port_number); |
2206 | /* Protect against concurrent state machines */ | ||
2207 | __get_state_machine_lock(port); | ||
2203 | ad_rx_machine(lacpdu, port); | 2208 | ad_rx_machine(lacpdu, port); |
2209 | __release_state_machine_lock(port); | ||
2204 | break; | 2210 | break; |
2205 | 2211 | ||
2206 | case AD_TYPE_MARKER: | 2212 | case AD_TYPE_MARKER: |
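The per-port lock is renamed and widened: instead of ad_rx_machine() taking it internally, both callers now hold it around the whole state-machine run, so shared fields such as port->sm_vars cannot be touched concurrently by the periodic worker and an incoming LACPDU. Condensed from the hunks above (machines elided by the hunk context are indicated, not listed):

    /* Periodic worker: one lock spans every machine for the port. */
    __get_state_machine_lock(port);
    ad_rx_machine(NULL, port);
    ad_periodic_machine(port);
    ad_port_selection_logic(port);
    /* ... remaining per-port machines ... */
    if (port->sm_vars & AD_PORT_BEGIN)
            port->sm_vars &= ~AD_PORT_BEGIN;
    __release_state_machine_lock(port);

    /* Receive path: same lock, so an LACPDU cannot interleave mid-run. */
    __get_state_machine_lock(port);
    ad_rx_machine(lacpdu, port);
    __release_state_machine_lock(port);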
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h index 2c46a154f2c6..b28baff70864 100644 --- a/drivers/net/bonding/bond_3ad.h +++ b/drivers/net/bonding/bond_3ad.h | |||
@@ -264,7 +264,8 @@ struct ad_bond_info { | |||
264 | struct ad_slave_info { | 264 | struct ad_slave_info { |
265 | struct aggregator aggregator; // 802.3ad aggregator structure | 265 | struct aggregator aggregator; // 802.3ad aggregator structure |
266 | struct port port; // 802.3ad port structure | 266 | struct port port; // 802.3ad port structure |
267 | spinlock_t rx_machine_lock; // To avoid race condition between callback and receive interrupt | 267 | spinlock_t state_machine_lock; /* mutex state machines vs. |
268 | incoming LACPDU */ | ||
268 | u16 id; | 269 | u16 id; |
269 | }; | 270 | }; |
270 | 271 | ||
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c index 7ab534aee452..7513c4523ac4 100644 --- a/drivers/net/can/mcp251x.c +++ b/drivers/net/can/mcp251x.c | |||
@@ -940,7 +940,7 @@ static int mcp251x_open(struct net_device *net) | |||
940 | goto open_unlock; | 940 | goto open_unlock; |
941 | } | 941 | } |
942 | 942 | ||
943 | priv->wq = create_freezeable_workqueue("mcp251x_wq"); | 943 | priv->wq = create_freezable_workqueue("mcp251x_wq"); |
944 | INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler); | 944 | INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler); |
945 | INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler); | 945 | INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler); |
946 | 946 | ||
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig index 27d1d398e25e..d38706958af6 100644 --- a/drivers/net/can/mscan/Kconfig +++ b/drivers/net/can/mscan/Kconfig | |||
@@ -1,5 +1,5 @@ | |||
1 | config CAN_MSCAN | 1 | config CAN_MSCAN |
2 | depends on CAN_DEV && (PPC || M68K || M68KNOMMU) | 2 | depends on CAN_DEV && (PPC || M68K) |
3 | tristate "Support for Freescale MSCAN based chips" | 3 | tristate "Support for Freescale MSCAN based chips" |
4 | ---help--- | 4 | ---help--- |
5 | The Motorola Scalable Controller Area Network (MSCAN) definition | 5 | The Motorola Scalable Controller Area Network (MSCAN) definition |
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c index c42e97268248..e54712b22c27 100644 --- a/drivers/net/can/pch_can.c +++ b/drivers/net/can/pch_can.c | |||
@@ -185,7 +185,7 @@ struct pch_can_priv { | |||
185 | 185 | ||
186 | static struct can_bittiming_const pch_can_bittiming_const = { | 186 | static struct can_bittiming_const pch_can_bittiming_const = { |
187 | .name = KBUILD_MODNAME, | 187 | .name = KBUILD_MODNAME, |
188 | .tseg1_min = 1, | 188 | .tseg1_min = 2, |
189 | .tseg1_max = 16, | 189 | .tseg1_max = 16, |
190 | .tseg2_min = 1, | 190 | .tseg2_min = 1, |
191 | .tseg2_max = 8, | 191 | .tseg2_max = 8, |
@@ -959,13 +959,13 @@ static void __devexit pch_can_remove(struct pci_dev *pdev) | |||
959 | struct pch_can_priv *priv = netdev_priv(ndev); | 959 | struct pch_can_priv *priv = netdev_priv(ndev); |
960 | 960 | ||
961 | unregister_candev(priv->ndev); | 961 | unregister_candev(priv->ndev); |
962 | pci_iounmap(pdev, priv->regs); | ||
963 | if (priv->use_msi) | 962 | if (priv->use_msi) |
964 | pci_disable_msi(priv->dev); | 963 | pci_disable_msi(priv->dev); |
965 | pci_release_regions(pdev); | 964 | pci_release_regions(pdev); |
966 | pci_disable_device(pdev); | 965 | pci_disable_device(pdev); |
967 | pci_set_drvdata(pdev, NULL); | 966 | pci_set_drvdata(pdev, NULL); |
968 | pch_can_reset(priv); | 967 | pch_can_reset(priv); |
968 | pci_iounmap(pdev, priv->regs); | ||
969 | free_candev(priv->ndev); | 969 | free_candev(priv->ndev); |
970 | } | 970 | } |
971 | 971 | ||
@@ -1238,6 +1238,7 @@ static int __devinit pch_can_probe(struct pci_dev *pdev, | |||
1238 | priv->use_msi = 0; | 1238 | priv->use_msi = 0; |
1239 | } else { | 1239 | } else { |
1240 | netdev_err(ndev, "PCH CAN opened with MSI\n"); | 1240 | netdev_err(ndev, "PCH CAN opened with MSI\n"); |
1241 | pci_set_master(pdev); | ||
1241 | priv->use_msi = 1; | 1242 | priv->use_msi = 1; |
1242 | } | 1243 | } |
1243 | 1244 | ||
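Two ordering points in the pch_can hunks: pci_iounmap() now runs after pch_can_reset(), which still writes through priv->regs, and pci_set_master() is enabled on the MSI path, since MSI delivery is an upstream memory write from the device and generally requires bus mastering. The safe teardown order, condensed from the remove path above:

    unregister_candev(priv->ndev);
    if (priv->use_msi)
            pci_disable_msi(priv->dev);
    pci_release_regions(pdev);
    pci_disable_device(pdev);
    pci_set_drvdata(pdev, NULL);
    pch_can_reset(priv);            /* last access through priv->regs ... */
    pci_iounmap(pdev, priv->regs);  /* ... so the unmap comes after it */
    free_candev(priv->ndev);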
diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig index 8ba81b3ddd90..5de46a9a77bb 100644 --- a/drivers/net/can/softing/Kconfig +++ b/drivers/net/can/softing/Kconfig | |||
@@ -18,7 +18,7 @@ config CAN_SOFTING | |||
18 | config CAN_SOFTING_CS | 18 | config CAN_SOFTING_CS |
19 | tristate "Softing Gmbh CAN pcmcia cards" | 19 | tristate "Softing Gmbh CAN pcmcia cards" |
20 | depends on PCMCIA | 20 | depends on PCMCIA |
21 | select CAN_SOFTING | 21 | depends on CAN_SOFTING |
22 | ---help--- | 22 | ---help--- |
23 | Support for PCMCIA cards from Softing Gmbh & some cards | 23 | Support for PCMCIA cards from Softing Gmbh & some cards |
24 | from Vector Gmbh. | 24 | from Vector Gmbh. |
diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c index 300fe75dd1a7..c11bb4de8630 100644 --- a/drivers/net/can/softing/softing_cs.c +++ b/drivers/net/can/softing/softing_cs.c | |||
@@ -19,6 +19,7 @@ | |||
19 | 19 | ||
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/slab.h> | ||
22 | 23 | ||
23 | #include <pcmcia/cistpl.h> | 24 | #include <pcmcia/cistpl.h> |
24 | #include <pcmcia/ds.h> | 25 | #include <pcmcia/ds.h> |
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c index 5157e15e96eb..aeea9f9ff6e8 100644 --- a/drivers/net/can/softing/softing_main.c +++ b/drivers/net/can/softing/softing_main.c | |||
@@ -633,6 +633,7 @@ static const struct net_device_ops softing_netdev_ops = { | |||
633 | }; | 633 | }; |
634 | 634 | ||
635 | static const struct can_bittiming_const softing_btr_const = { | 635 | static const struct can_bittiming_const softing_btr_const = { |
636 | .name = "softing", | ||
636 | .tseg1_min = 1, | 637 | .tseg1_min = 1, |
637 | .tseg1_max = 16, | 638 | .tseg1_max = 16, |
638 | .tseg2_min = 1, | 639 | .tseg2_min = 1, |
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index 7ff170cbc7dc..302be4aa69d6 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c | |||
@@ -2760,6 +2760,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev) | |||
2760 | u32 status_idx = (u16) *cp->kcq1.status_idx_ptr; | 2760 | u32 status_idx = (u16) *cp->kcq1.status_idx_ptr; |
2761 | int kcqe_cnt; | 2761 | int kcqe_cnt; |
2762 | 2762 | ||
2763 | /* status block index must be read before reading other fields */ | ||
2764 | rmb(); | ||
2763 | cp->kwq_con_idx = *cp->kwq_con_idx_ptr; | 2765 | cp->kwq_con_idx = *cp->kwq_con_idx_ptr; |
2764 | 2766 | ||
2765 | while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) { | 2767 | while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) { |
@@ -2770,6 +2772,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev) | |||
2770 | barrier(); | 2772 | barrier(); |
2771 | if (status_idx != *cp->kcq1.status_idx_ptr) { | 2773 | if (status_idx != *cp->kcq1.status_idx_ptr) { |
2772 | status_idx = (u16) *cp->kcq1.status_idx_ptr; | 2774 | status_idx = (u16) *cp->kcq1.status_idx_ptr; |
2775 | /* status block index must be read first */ | ||
2776 | rmb(); | ||
2773 | cp->kwq_con_idx = *cp->kwq_con_idx_ptr; | 2777 | cp->kwq_con_idx = *cp->kwq_con_idx_ptr; |
2774 | } else | 2778 | } else |
2775 | break; | 2779 | break; |
@@ -2888,6 +2892,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) | |||
2888 | u32 last_status = *info->status_idx_ptr; | 2892 | u32 last_status = *info->status_idx_ptr; |
2889 | int kcqe_cnt; | 2893 | int kcqe_cnt; |
2890 | 2894 | ||
2895 | /* status block index must be read before reading the KCQ */ | ||
2896 | rmb(); | ||
2891 | while ((kcqe_cnt = cnic_get_kcqes(dev, info))) { | 2897 | while ((kcqe_cnt = cnic_get_kcqes(dev, info))) { |
2892 | 2898 | ||
2893 | service_kcqes(dev, kcqe_cnt); | 2899 | service_kcqes(dev, kcqe_cnt); |
@@ -2898,6 +2904,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) | |||
2898 | break; | 2904 | break; |
2899 | 2905 | ||
2900 | last_status = *info->status_idx_ptr; | 2906 | last_status = *info->status_idx_ptr; |
2907 | /* status block index must be read before reading the KCQ */ | ||
2908 | rmb(); | ||
2901 | } | 2909 | } |
2902 | return last_status; | 2910 | return last_status; |
2903 | } | 2911 | } |
@@ -2906,26 +2914,35 @@ static void cnic_service_bnx2x_bh(unsigned long data) | |||
2906 | { | 2914 | { |
2907 | struct cnic_dev *dev = (struct cnic_dev *) data; | 2915 | struct cnic_dev *dev = (struct cnic_dev *) data; |
2908 | struct cnic_local *cp = dev->cnic_priv; | 2916 | struct cnic_local *cp = dev->cnic_priv; |
2909 | u32 status_idx; | 2917 | u32 status_idx, new_status_idx; |
2910 | 2918 | ||
2911 | if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) | 2919 | if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) |
2912 | return; | 2920 | return; |
2913 | 2921 | ||
2914 | status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); | 2922 | while (1) { |
2923 | status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); | ||
2915 | 2924 | ||
2916 | CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); | 2925 | CNIC_WR16(dev, cp->kcq1.io_addr, |
2926 | cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); | ||
2917 | 2927 | ||
2918 | if (BNX2X_CHIP_IS_E2(cp->chip_id)) { | 2928 | if (!BNX2X_CHIP_IS_E2(cp->chip_id)) { |
2919 | status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2); | 2929 | cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, |
2930 | status_idx, IGU_INT_ENABLE, 1); | ||
2931 | break; | ||
2932 | } | ||
2933 | |||
2934 | new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2); | ||
2935 | |||
2936 | if (new_status_idx != status_idx) | ||
2937 | continue; | ||
2920 | 2938 | ||
2921 | CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx + | 2939 | CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx + |
2922 | MAX_KCQ_IDX); | 2940 | MAX_KCQ_IDX); |
2923 | 2941 | ||
2924 | cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, | 2942 | cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, |
2925 | status_idx, IGU_INT_ENABLE, 1); | 2943 | status_idx, IGU_INT_ENABLE, 1); |
2926 | } else { | 2944 | |
2927 | cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, | 2945 | break; |
2928 | status_idx, IGU_INT_ENABLE, 1); | ||
2929 | } | 2946 | } |
2930 | } | 2947 | } |
2931 | 2948 | ||
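The rmb() calls order the read of the status-block index before the queue entries it describes, and the bottom half becomes a loop: on E2 chips it re-services kcq2 until the status index stops moving before acking the IGU, so no completions are stranded between the last poll and the ack. A hedged sketch of the re-poll-until-stable idea, with placeholder helpers:

    /* Sketch: poll until the producer index is stable, then ack. */
    while (1) {
            status_idx = example_service_kcq(dev, &cp->kcq1);
            example_advance_consumer(dev, &cp->kcq1);

            if (!example_chip_has_second_kcq(cp)) {
                    example_ack_interrupt(dev, status_idx);
                    break;
            }

            new_status_idx = example_service_kcq(dev, &cp->kcq2);
            if (new_status_idx != status_idx)
                    continue;       /* more work arrived, go around again */

            example_advance_consumer(dev, &cp->kcq2);
            example_ack_interrupt(dev, status_idx);
            break;
    }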
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c index 56166ae2059f..6aad64df4dcb 100644 --- a/drivers/net/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/cxgb4vf/cxgb4vf_main.c | |||
@@ -2040,7 +2040,7 @@ static int __devinit setup_debugfs(struct adapter *adapter) | |||
2040 | { | 2040 | { |
2041 | int i; | 2041 | int i; |
2042 | 2042 | ||
2043 | BUG_ON(adapter->debugfs_root == NULL); | 2043 | BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root)); |
2044 | 2044 | ||
2045 | /* | 2045 | /* |
2046 | * Debugfs support is best effort. | 2046 | * Debugfs support is best effort. |
@@ -2061,7 +2061,7 @@ static int __devinit setup_debugfs(struct adapter *adapter) | |||
2061 | */ | 2061 | */ |
2062 | static void cleanup_debugfs(struct adapter *adapter) | 2062 | static void cleanup_debugfs(struct adapter *adapter) |
2063 | { | 2063 | { |
2064 | BUG_ON(adapter->debugfs_root == NULL); | 2064 | BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root)); |
2065 | 2065 | ||
2066 | /* | 2066 | /* |
2067 | * Unlike our sister routine cleanup_proc(), we don't need to remove | 2067 | * Unlike our sister routine cleanup_proc(), we don't need to remove |
@@ -2489,17 +2489,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, | |||
2489 | struct net_device *netdev; | 2489 | struct net_device *netdev; |
2490 | 2490 | ||
2491 | /* | 2491 | /* |
2492 | * Vet our module parameters. | ||
2493 | */ | ||
2494 | if (msi != MSI_MSIX && msi != MSI_MSI) { | ||
2495 | dev_err(&pdev->dev, "bad module parameter msi=%d; must be %d" | ||
2496 | " (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX, | ||
2497 | MSI_MSI); | ||
2498 | err = -EINVAL; | ||
2499 | goto err_out; | ||
2500 | } | ||
2501 | |||
2502 | /* | ||
2503 | * Print our driver banner the first time we're called to initialize a | 2492 | * Print our driver banner the first time we're called to initialize a |
2504 | * device. | 2493 | * device. |
2505 | */ | 2494 | */ |
@@ -2711,11 +2700,11 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, | |||
2711 | /* | 2700 | /* |
2712 | * Set up our debugfs entries. | 2701 | * Set up our debugfs entries. |
2713 | */ | 2702 | */ |
2714 | if (cxgb4vf_debugfs_root) { | 2703 | if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) { |
2715 | adapter->debugfs_root = | 2704 | adapter->debugfs_root = |
2716 | debugfs_create_dir(pci_name(pdev), | 2705 | debugfs_create_dir(pci_name(pdev), |
2717 | cxgb4vf_debugfs_root); | 2706 | cxgb4vf_debugfs_root); |
2718 | if (adapter->debugfs_root == NULL) | 2707 | if (IS_ERR_OR_NULL(adapter->debugfs_root)) |
2719 | dev_warn(&pdev->dev, "could not create debugfs" | 2708 | dev_warn(&pdev->dev, "could not create debugfs" |
2720 | " directory"); | 2709 | " directory"); |
2721 | else | 2710 | else |
@@ -2770,7 +2759,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, | |||
2770 | */ | 2759 | */ |
2771 | 2760 | ||
2772 | err_free_debugfs: | 2761 | err_free_debugfs: |
2773 | if (adapter->debugfs_root) { | 2762 | if (!IS_ERR_OR_NULL(adapter->debugfs_root)) { |
2774 | cleanup_debugfs(adapter); | 2763 | cleanup_debugfs(adapter); |
2775 | debugfs_remove_recursive(adapter->debugfs_root); | 2764 | debugfs_remove_recursive(adapter->debugfs_root); |
2776 | } | 2765 | } |
@@ -2802,7 +2791,6 @@ err_release_regions: | |||
2802 | err_disable_device: | 2791 | err_disable_device: |
2803 | pci_disable_device(pdev); | 2792 | pci_disable_device(pdev); |
2804 | 2793 | ||
2805 | err_out: | ||
2806 | return err; | 2794 | return err; |
2807 | } | 2795 | } |
2808 | 2796 | ||
@@ -2840,7 +2828,7 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev) | |||
2840 | /* | 2828 | /* |
2841 | * Tear down our debugfs entries. | 2829 | * Tear down our debugfs entries. |
2842 | */ | 2830 | */ |
2843 | if (adapter->debugfs_root) { | 2831 | if (!IS_ERR_OR_NULL(adapter->debugfs_root)) { |
2844 | cleanup_debugfs(adapter); | 2832 | cleanup_debugfs(adapter); |
2845 | debugfs_remove_recursive(adapter->debugfs_root); | 2833 | debugfs_remove_recursive(adapter->debugfs_root); |
2846 | } | 2834 | } |
@@ -2874,6 +2862,46 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev) | |||
2874 | } | 2862 | } |
2875 | 2863 | ||
2876 | /* | 2864 | /* |
2865 | * "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt | ||
2866 | * delivery. | ||
2867 | */ | ||
2868 | static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev) | ||
2869 | { | ||
2870 | struct adapter *adapter; | ||
2871 | int pidx; | ||
2872 | |||
2873 | adapter = pci_get_drvdata(pdev); | ||
2874 | if (!adapter) | ||
2875 | return; | ||
2876 | |||
2877 | /* | ||
2878 | * Disable all Virtual Interfaces. This will shut down the | ||
2879 | * delivery of all ingress packets into the chip for these | ||
2880 | * Virtual Interfaces. | ||
2881 | */ | ||
2882 | for_each_port(adapter, pidx) { | ||
2883 | struct net_device *netdev; | ||
2884 | struct port_info *pi; | ||
2885 | |||
2886 | if (!test_bit(pidx, &adapter->registered_device_map)) | ||
2887 | continue; | ||
2888 | |||
2889 | netdev = adapter->port[pidx]; | ||
2890 | if (!netdev) | ||
2891 | continue; | ||
2892 | |||
2893 | pi = netdev_priv(netdev); | ||
2894 | t4vf_enable_vi(adapter, pi->viid, false, false); | ||
2895 | } | ||
2896 | |||
2897 | /* | ||
2898 | * Free up all Queues which will prevent further DMA and | ||
2899 | * Interrupts allowing various internal pathways to drain. | ||
2900 | */ | ||
2901 | t4vf_free_sge_resources(adapter); | ||
2902 | } | ||
2903 | |||
2904 | /* | ||
2877 | * PCI Device registration data structures. | 2905 | * PCI Device registration data structures. |
2878 | */ | 2906 | */ |
2879 | #define CH_DEVICE(devid, idx) \ | 2907 | #define CH_DEVICE(devid, idx) \ |
@@ -2906,6 +2934,7 @@ static struct pci_driver cxgb4vf_driver = { | |||
2906 | .id_table = cxgb4vf_pci_tbl, | 2934 | .id_table = cxgb4vf_pci_tbl, |
2907 | .probe = cxgb4vf_pci_probe, | 2935 | .probe = cxgb4vf_pci_probe, |
2908 | .remove = __devexit_p(cxgb4vf_pci_remove), | 2936 | .remove = __devexit_p(cxgb4vf_pci_remove), |
2937 | .shutdown = __devexit_p(cxgb4vf_pci_shutdown), | ||
2909 | }; | 2938 | }; |
2910 | 2939 | ||
2911 | /* | 2940 | /* |
@@ -2915,14 +2944,25 @@ static int __init cxgb4vf_module_init(void) | |||
2915 | { | 2944 | { |
2916 | int ret; | 2945 | int ret; |
2917 | 2946 | ||
2947 | /* | ||
2948 | * Vet our module parameters. | ||
2949 | */ | ||
2950 | if (msi != MSI_MSIX && msi != MSI_MSI) { | ||
2951 | printk(KERN_WARNING KBUILD_MODNAME | ||
2952 | ": bad module parameter msi=%d; must be %d" | ||
2953 | " (MSI-X or MSI) or %d (MSI)\n", | ||
2954 | msi, MSI_MSIX, MSI_MSI); | ||
2955 | return -EINVAL; | ||
2956 | } | ||
2957 | |||
2918 | /* Debugfs support is optional, just warn if this fails */ | 2958 | /* Debugfs support is optional, just warn if this fails */ |
2919 | cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); | 2959 | cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); |
2920 | if (!cxgb4vf_debugfs_root) | 2960 | if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) |
2921 | printk(KERN_WARNING KBUILD_MODNAME ": could not create" | 2961 | printk(KERN_WARNING KBUILD_MODNAME ": could not create" |
2922 | " debugfs entry, continuing\n"); | 2962 | " debugfs entry, continuing\n"); |
2923 | 2963 | ||
2924 | ret = pci_register_driver(&cxgb4vf_driver); | 2964 | ret = pci_register_driver(&cxgb4vf_driver); |
2925 | if (ret < 0) | 2965 | if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) |
2926 | debugfs_remove(cxgb4vf_debugfs_root); | 2966 | debugfs_remove(cxgb4vf_debugfs_root); |
2927 | return ret; | 2967 | return ret; |
2928 | } | 2968 | } |
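Two themes in the cxgb4vf hunks: the msi module-parameter check moves into module_init() so a bad value fails module load once instead of on every probe, and the debugfs tests switch to IS_ERR_OR_NULL() because debugfs_create_dir() may return an ERR_PTR rather than NULL (for instance when debugfs is not compiled in). A short sketch of that return-value handling; the per-device setup helper is a placeholder:

    root = debugfs_create_dir(KBUILD_MODNAME, NULL);
    if (IS_ERR_OR_NULL(root))
            pr_warn("debugfs unavailable, continuing without it\n");
    else
            example_setup_debugfs_files(root);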
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c index 0f51c80475ce..192db226ec7f 100644 --- a/drivers/net/cxgb4vf/t4vf_hw.c +++ b/drivers/net/cxgb4vf/t4vf_hw.c | |||
@@ -171,7 +171,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, | |||
171 | delay_idx = 0; | 171 | delay_idx = 0; |
172 | ms = delay[0]; | 172 | ms = delay[0]; |
173 | 173 | ||
174 | for (i = 0; i < 500; i += ms) { | 174 | for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) { |
175 | if (sleep_ok) { | 175 | if (sleep_ok) { |
176 | ms = delay[delay_idx]; | 176 | ms = delay[delay_idx]; |
177 | if (delay_idx < ARRAY_SIZE(delay) - 1) | 177 | if (delay_idx < ARRAY_SIZE(delay) - 1) |
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c index 2a628d17d178..7018bfe408a4 100644 --- a/drivers/net/davinci_emac.c +++ b/drivers/net/davinci_emac.c | |||
@@ -1008,7 +1008,7 @@ static void emac_rx_handler(void *token, int len, int status) | |||
1008 | int ret; | 1008 | int ret; |
1009 | 1009 | ||
1010 | /* free and bail if we are shutting down */ | 1010 | /* free and bail if we are shutting down */ |
1011 | if (unlikely(!netif_running(ndev))) { | 1011 | if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) { |
1012 | dev_kfree_skb_any(skb); | 1012 | dev_kfree_skb_any(skb); |
1013 | return; | 1013 | return; |
1014 | } | 1014 | } |
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index 2d4c4fc1d900..461dd6f905f7 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c | |||
@@ -802,10 +802,7 @@ dm9000_init_dm9000(struct net_device *dev) | |||
802 | /* Checksum mode */ | 802 | /* Checksum mode */ |
803 | dm9000_set_rx_csum_unlocked(dev, db->rx_csum); | 803 | dm9000_set_rx_csum_unlocked(dev, db->rx_csum); |
804 | 804 | ||
805 | /* GPIO0 on pre-activate PHY */ | ||
806 | iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ | ||
807 | iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ | 805 | iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ |
808 | iow(db, DM9000_GPR, 0); /* Enable PHY */ | ||
809 | 806 | ||
810 | ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; | 807 | ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; |
811 | 808 | ||
@@ -852,8 +849,8 @@ static void dm9000_timeout(struct net_device *dev) | |||
852 | unsigned long flags; | 849 | unsigned long flags; |
853 | 850 | ||
854 | /* Save previous register address */ | 851 | /* Save previous register address */ |
855 | reg_save = readb(db->io_addr); | ||
856 | spin_lock_irqsave(&db->lock, flags); | 852 | spin_lock_irqsave(&db->lock, flags); |
853 | reg_save = readb(db->io_addr); | ||
857 | 854 | ||
858 | netif_stop_queue(dev); | 855 | netif_stop_queue(dev); |
859 | dm9000_reset(db); | 856 | dm9000_reset(db); |
@@ -1194,6 +1191,10 @@ dm9000_open(struct net_device *dev) | |||
1194 | if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev)) | 1191 | if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev)) |
1195 | return -EAGAIN; | 1192 | return -EAGAIN; |
1196 | 1193 | ||
1194 | /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */ | ||
1195 | iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ | ||
1196 | mdelay(1); /* delay needs by DM9000B */ | ||
1197 | |||
1197 | /* Initialize DM9000 board */ | 1198 | /* Initialize DM9000 board */ |
1198 | dm9000_reset(db); | 1199 | dm9000_reset(db); |
1199 | dm9000_init_dm9000(dev); | 1200 | dm9000_init_dm9000(dev); |
diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c index 9d8a20b72fa9..8318ea06cb6d 100644 --- a/drivers/net/dnet.c +++ b/drivers/net/dnet.c | |||
@@ -337,8 +337,6 @@ static int dnet_mii_init(struct dnet *bp) | |||
337 | for (i = 0; i < PHY_MAX_ADDR; i++) | 337 | for (i = 0; i < PHY_MAX_ADDR; i++) |
338 | bp->mii_bus->irq[i] = PHY_POLL; | 338 | bp->mii_bus->irq[i] = PHY_POLL; |
339 | 339 | ||
340 | platform_set_drvdata(bp->dev, bp->mii_bus); | ||
341 | |||
342 | if (mdiobus_register(bp->mii_bus)) { | 340 | if (mdiobus_register(bp->mii_bus)) { |
343 | err = -ENXIO; | 341 | err = -ENXIO; |
344 | goto err_out_free_mdio_irq; | 342 | goto err_out_free_mdio_irq; |
@@ -863,6 +861,7 @@ static int __devinit dnet_probe(struct platform_device *pdev) | |||
863 | bp = netdev_priv(dev); | 861 | bp = netdev_priv(dev); |
864 | bp->dev = dev; | 862 | bp->dev = dev; |
865 | 863 | ||
864 | platform_set_drvdata(pdev, dev); | ||
866 | SET_NETDEV_DEV(dev, &pdev->dev); | 865 | SET_NETDEV_DEV(dev, &pdev->dev); |
867 | 866 | ||
868 | spin_lock_init(&bp->lock); | 867 | spin_lock_init(&bp->lock); |
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c index aed223b1b897..7501d977d992 100644 --- a/drivers/net/e1000/e1000_hw.c +++ b/drivers/net/e1000/e1000_hw.c | |||
@@ -124,6 +124,7 @@ static s32 e1000_set_phy_type(struct e1000_hw *hw) | |||
124 | case M88E1000_I_PHY_ID: | 124 | case M88E1000_I_PHY_ID: |
125 | case M88E1011_I_PHY_ID: | 125 | case M88E1011_I_PHY_ID: |
126 | case M88E1111_I_PHY_ID: | 126 | case M88E1111_I_PHY_ID: |
127 | case M88E1118_E_PHY_ID: | ||
127 | hw->phy_type = e1000_phy_m88; | 128 | hw->phy_type = e1000_phy_m88; |
128 | break; | 129 | break; |
129 | case IGP01E1000_I_PHY_ID: | 130 | case IGP01E1000_I_PHY_ID: |
@@ -3222,7 +3223,8 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw) | |||
3222 | break; | 3223 | break; |
3223 | case e1000_ce4100: | 3224 | case e1000_ce4100: |
3224 | if ((hw->phy_id == RTL8211B_PHY_ID) || | 3225 | if ((hw->phy_id == RTL8211B_PHY_ID) || |
3225 | (hw->phy_id == RTL8201N_PHY_ID)) | 3226 | (hw->phy_id == RTL8201N_PHY_ID) || |
3227 | (hw->phy_id == M88E1118_E_PHY_ID)) | ||
3226 | match = true; | 3228 | match = true; |
3227 | break; | 3229 | break; |
3228 | case e1000_82541: | 3230 | case e1000_82541: |
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h index 196eeda2dd6c..c70b23d52284 100644 --- a/drivers/net/e1000/e1000_hw.h +++ b/drivers/net/e1000/e1000_hw.h | |||
@@ -2917,6 +2917,7 @@ struct e1000_host_command_info { | |||
2917 | #define M88E1000_14_PHY_ID M88E1000_E_PHY_ID | 2917 | #define M88E1000_14_PHY_ID M88E1000_E_PHY_ID |
2918 | #define M88E1011_I_REV_4 0x04 | 2918 | #define M88E1011_I_REV_4 0x04 |
2919 | #define M88E1111_I_PHY_ID 0x01410CC0 | 2919 | #define M88E1111_I_PHY_ID 0x01410CC0 |
2920 | #define M88E1118_E_PHY_ID 0x01410E40 | ||
2920 | #define L1LXT971A_PHY_ID 0x001378E0 | 2921 | #define L1LXT971A_PHY_ID 0x001378E0 |
2921 | 2922 | ||
2922 | #define RTL8211B_PHY_ID 0x001CC910 | 2923 | #define RTL8211B_PHY_ID 0x001CC910 |
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h index 55c1711f1688..33e7c45a4fe4 100644 --- a/drivers/net/e1000/e1000_osdep.h +++ b/drivers/net/e1000/e1000_osdep.h | |||
@@ -42,7 +42,8 @@ | |||
42 | #define GBE_CONFIG_RAM_BASE \ | 42 | #define GBE_CONFIG_RAM_BASE \ |
43 | ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) | 43 | ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) |
44 | 44 | ||
45 | #define GBE_CONFIG_BASE_VIRT phys_to_virt(GBE_CONFIG_RAM_BASE) | 45 | #define GBE_CONFIG_BASE_VIRT \ |
46 | ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE)) | ||
46 | 47 | ||
47 | #define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ | 48 | #define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ |
48 | (iowrite16_rep(base + offset, data, count)) | 49 | (iowrite16_rep(base + offset, data, count)) |
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 1c18f26b0812..2e5022849f18 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -937,6 +937,9 @@ static void e1000_print_hw_hang(struct work_struct *work) | |||
937 | u16 phy_status, phy_1000t_status, phy_ext_status; | 937 | u16 phy_status, phy_1000t_status, phy_ext_status; |
938 | u16 pci_status; | 938 | u16 pci_status; |
939 | 939 | ||
940 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
941 | return; | ||
942 | |||
940 | e1e_rphy(hw, PHY_STATUS, &phy_status); | 943 | e1e_rphy(hw, PHY_STATUS, &phy_status); |
941 | e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); | 944 | e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); |
942 | e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); | 945 | e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); |
@@ -1506,6 +1509,9 @@ static void e1000e_downshift_workaround(struct work_struct *work) | |||
1506 | struct e1000_adapter *adapter = container_of(work, | 1509 | struct e1000_adapter *adapter = container_of(work, |
1507 | struct e1000_adapter, downshift_task); | 1510 | struct e1000_adapter, downshift_task); |
1508 | 1511 | ||
1512 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
1513 | return; | ||
1514 | |||
1509 | e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); | 1515 | e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); |
1510 | } | 1516 | } |
1511 | 1517 | ||
@@ -3338,6 +3344,21 @@ int e1000e_up(struct e1000_adapter *adapter) | |||
3338 | return 0; | 3344 | return 0; |
3339 | } | 3345 | } |
3340 | 3346 | ||
3347 | static void e1000e_flush_descriptors(struct e1000_adapter *adapter) | ||
3348 | { | ||
3349 | struct e1000_hw *hw = &adapter->hw; | ||
3350 | |||
3351 | if (!(adapter->flags2 & FLAG2_DMA_BURST)) | ||
3352 | return; | ||
3353 | |||
3354 | /* flush pending descriptor writebacks to memory */ | ||
3355 | ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); | ||
3356 | ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); | ||
3357 | |||
3358 | /* execute the writes immediately */ | ||
3359 | e1e_flush(); | ||
3360 | } | ||
3361 | |||
3341 | void e1000e_down(struct e1000_adapter *adapter) | 3362 | void e1000e_down(struct e1000_adapter *adapter) |
3342 | { | 3363 | { |
3343 | struct net_device *netdev = adapter->netdev; | 3364 | struct net_device *netdev = adapter->netdev; |
@@ -3377,6 +3398,9 @@ void e1000e_down(struct e1000_adapter *adapter) | |||
3377 | 3398 | ||
3378 | if (!pci_channel_offline(adapter->pdev)) | 3399 | if (!pci_channel_offline(adapter->pdev)) |
3379 | e1000e_reset(adapter); | 3400 | e1000e_reset(adapter); |
3401 | |||
3402 | e1000e_flush_descriptors(adapter); | ||
3403 | |||
3380 | e1000_clean_tx_ring(adapter); | 3404 | e1000_clean_tx_ring(adapter); |
3381 | e1000_clean_rx_ring(adapter); | 3405 | e1000_clean_rx_ring(adapter); |
3382 | 3406 | ||
@@ -3765,6 +3789,10 @@ static void e1000e_update_phy_task(struct work_struct *work) | |||
3765 | { | 3789 | { |
3766 | struct e1000_adapter *adapter = container_of(work, | 3790 | struct e1000_adapter *adapter = container_of(work, |
3767 | struct e1000_adapter, update_phy_task); | 3791 | struct e1000_adapter, update_phy_task); |
3792 | |||
3793 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
3794 | return; | ||
3795 | |||
3768 | e1000_get_phy_info(&adapter->hw); | 3796 | e1000_get_phy_info(&adapter->hw); |
3769 | } | 3797 | } |
3770 | 3798 | ||
@@ -3775,6 +3803,10 @@ static void e1000e_update_phy_task(struct work_struct *work) | |||
3775 | static void e1000_update_phy_info(unsigned long data) | 3803 | static void e1000_update_phy_info(unsigned long data) |
3776 | { | 3804 | { |
3777 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; | 3805 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; |
3806 | |||
3807 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
3808 | return; | ||
3809 | |||
3778 | schedule_work(&adapter->update_phy_task); | 3810 | schedule_work(&adapter->update_phy_task); |
3779 | } | 3811 | } |
3780 | 3812 | ||
@@ -4149,6 +4181,9 @@ static void e1000_watchdog_task(struct work_struct *work) | |||
4149 | u32 link, tctl; | 4181 | u32 link, tctl; |
4150 | int tx_pending = 0; | 4182 | int tx_pending = 0; |
4151 | 4183 | ||
4184 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
4185 | return; | ||
4186 | |||
4152 | link = e1000e_has_link(adapter); | 4187 | link = e1000e_has_link(adapter); |
4153 | if ((netif_carrier_ok(netdev)) && link) { | 4188 | if ((netif_carrier_ok(netdev)) && link) { |
4154 | /* Cancel scheduled suspend requests. */ | 4189 | /* Cancel scheduled suspend requests. */ |
@@ -4309,7 +4344,6 @@ link_up: | |||
4309 | * to get done, so reset controller to flush Tx. | 4344 | * to get done, so reset controller to flush Tx. |
4310 | * (Do the reset outside of interrupt context). | 4345 | * (Do the reset outside of interrupt context). |
4311 | */ | 4346 | */ |
4312 | adapter->tx_timeout_count++; | ||
4313 | schedule_work(&adapter->reset_task); | 4347 | schedule_work(&adapter->reset_task); |
4314 | /* return immediately since reset is imminent */ | 4348 | /* return immediately since reset is imminent */ |
4315 | return; | 4349 | return; |
@@ -4338,19 +4372,12 @@ link_up: | |||
4338 | else | 4372 | else |
4339 | ew32(ICS, E1000_ICS_RXDMT0); | 4373 | ew32(ICS, E1000_ICS_RXDMT0); |
4340 | 4374 | ||
4375 | /* flush pending descriptors to memory before detecting Tx hang */ | ||
4376 | e1000e_flush_descriptors(adapter); | ||
4377 | |||
4341 | /* Force detection of hung controller every watchdog period */ | 4378 | /* Force detection of hung controller every watchdog period */ |
4342 | adapter->detect_tx_hung = 1; | 4379 | adapter->detect_tx_hung = 1; |
4343 | 4380 | ||
4344 | /* flush partial descriptors to memory before detecting Tx hang */ | ||
4345 | if (adapter->flags2 & FLAG2_DMA_BURST) { | ||
4346 | ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); | ||
4347 | ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); | ||
4348 | /* | ||
4349 | * no need to flush the writes because the timeout code does | ||
4350 | * an er32 first thing | ||
4351 | */ | ||
4352 | } | ||
4353 | |||
4354 | /* | 4381 | /* |
4355 | * With 82571 controllers, LAA may be overwritten due to controller | 4382 | * With 82571 controllers, LAA may be overwritten due to controller |
4356 | * reset from the other port. Set the appropriate LAA in RAR[0] | 4383 | * reset from the other port. Set the appropriate LAA in RAR[0] |
@@ -4888,6 +4915,10 @@ static void e1000_reset_task(struct work_struct *work) | |||
4888 | struct e1000_adapter *adapter; | 4915 | struct e1000_adapter *adapter; |
4889 | adapter = container_of(work, struct e1000_adapter, reset_task); | 4916 | adapter = container_of(work, struct e1000_adapter, reset_task); |
4890 | 4917 | ||
4918 | /* don't run the task if already down */ | ||
4919 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
4920 | return; | ||
4921 | |||
4891 | if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) && | 4922 | if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) && |
4892 | (adapter->flags & FLAG_RX_RESTART_NOW))) { | 4923 | (adapter->flags & FLAG_RX_RESTART_NOW))) { |
4893 | e1000e_dump(adapter); | 4924 | e1000e_dump(adapter); |
@@ -5936,7 +5967,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
5936 | /* APME bit in EEPROM is mapped to WUC.APME */ | 5967 | /* APME bit in EEPROM is mapped to WUC.APME */ |
5937 | eeprom_data = er32(WUC); | 5968 | eeprom_data = er32(WUC); |
5938 | eeprom_apme_mask = E1000_WUC_APME; | 5969 | eeprom_apme_mask = E1000_WUC_APME; |
5939 | if (eeprom_data & E1000_WUC_PHY_WAKE) | 5970 | if ((hw->mac.type > e1000_ich10lan) && |
5971 | (eeprom_data & E1000_WUC_PHY_WAKE)) | ||
5940 | adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; | 5972 | adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; |
5941 | } else if (adapter->flags & FLAG_APME_IN_CTRL3) { | 5973 | } else if (adapter->flags & FLAG_APME_IN_CTRL3) { |
5942 | if (adapter->flags & FLAG_APME_CHECK_PORT_B && | 5974 | if (adapter->flags & FLAG_APME_CHECK_PORT_B && |
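Note: the e1000e hunks above add the same early-return guard to several deferred handlers (the hang printer, downshift and PHY workers, the watchdog and the reset task) so that work queued before an interface is taken down no longer touches the hardware afterwards. A minimal sketch of that pattern, with hypothetical names (struct example_adapter, __EXAMPLE_DOWN) standing in for the driver's own:

    #include <linux/kernel.h>
    #include <linux/bitops.h>
    #include <linux/workqueue.h>

    #define __EXAMPLE_DOWN 0                      /* hypothetical state bit */

    struct example_adapter {                      /* hypothetical adapter type */
            unsigned long state;
            struct work_struct some_task;
    };

    static void example_work_handler(struct work_struct *work)
    {
            struct example_adapter *adapter =
                    container_of(work, struct example_adapter, some_task);

            /* Work queued earlier can still fire after teardown has started;
             * bail out before touching registers or rescheduling anything. */
            if (test_bit(__EXAMPLE_DOWN, &adapter->state))
                    return;

            /* ... hardware access / follow-up scheduling goes here ... */
    }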
diff --git a/drivers/net/fec.c b/drivers/net/fec.c index 2a71373719ae..cd0282d5d40f 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c | |||
@@ -74,7 +74,8 @@ static struct platform_device_id fec_devtype[] = { | |||
74 | }, { | 74 | }, { |
75 | .name = "imx28-fec", | 75 | .name = "imx28-fec", |
76 | .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, | 76 | .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, |
77 | } | 77 | }, |
78 | { } | ||
78 | }; | 79 | }; |
79 | 80 | ||
80 | static unsigned char macaddr[ETH_ALEN]; | 81 | static unsigned char macaddr[ETH_ALEN]; |
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index af09296ef0dd..9c0b1bac6af6 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -5645,6 +5645,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
5645 | goto out_error; | 5645 | goto out_error; |
5646 | } | 5646 | } |
5647 | 5647 | ||
5648 | netif_carrier_off(dev); | ||
5649 | |||
5648 | dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n", | 5650 | dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n", |
5649 | dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); | 5651 | dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); |
5650 | 5652 | ||
diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c index 74486a8b009a..af3822f9ea9a 100644 --- a/drivers/net/igbvf/vf.c +++ b/drivers/net/igbvf/vf.c | |||
@@ -220,7 +220,7 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr) | |||
220 | * The parameter rar_count will usually be hw->mac.rar_entry_count | 220 | * The parameter rar_count will usually be hw->mac.rar_entry_count |
221 | * unless there are workarounds that change this. | 221 | * unless there are workarounds that change this. |
222 | **/ | 222 | **/ |
223 | void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, | 223 | static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, |
224 | u8 *mc_addr_list, u32 mc_addr_count, | 224 | u8 *mc_addr_list, u32 mc_addr_count, |
225 | u32 rar_used_count, u32 rar_count) | 225 | u32 rar_used_count, u32 rar_count) |
226 | { | 226 | { |
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index d5ede2df3e42..ebbda7d15254 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c | |||
@@ -1370,6 +1370,9 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) | |||
1370 | hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr); | 1370 | hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr); |
1371 | 1371 | ||
1372 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); | 1372 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); |
1373 | |||
1374 | /* clear VMDq pool/queue selection for RAR 0 */ | ||
1375 | hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); | ||
1373 | } | 1376 | } |
1374 | hw->addr_ctrl.overflow_promisc = 0; | 1377 | hw->addr_ctrl.overflow_promisc = 0; |
1375 | 1378 | ||
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c index 6342d4859790..c54a88274d51 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ixgbe/ixgbe_fcoe.c | |||
@@ -159,13 +159,13 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, | |||
159 | struct scatterlist *sg; | 159 | struct scatterlist *sg; |
160 | unsigned int i, j, dmacount; | 160 | unsigned int i, j, dmacount; |
161 | unsigned int len; | 161 | unsigned int len; |
162 | static const unsigned int bufflen = 4096; | 162 | static const unsigned int bufflen = IXGBE_FCBUFF_MIN; |
163 | unsigned int firstoff = 0; | 163 | unsigned int firstoff = 0; |
164 | unsigned int lastsize; | 164 | unsigned int lastsize; |
165 | unsigned int thisoff = 0; | 165 | unsigned int thisoff = 0; |
166 | unsigned int thislen = 0; | 166 | unsigned int thislen = 0; |
167 | u32 fcbuff, fcdmarw, fcfltrw; | 167 | u32 fcbuff, fcdmarw, fcfltrw; |
168 | dma_addr_t addr; | 168 | dma_addr_t addr = 0; |
169 | 169 | ||
170 | if (!netdev || !sgl) | 170 | if (!netdev || !sgl) |
171 | return 0; | 171 | return 0; |
@@ -254,6 +254,24 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, | |||
254 | /* only the last buffer may have non-full bufflen */ | 254 | /* only the last buffer may have non-full bufflen */ |
255 | lastsize = thisoff + thislen; | 255 | lastsize = thisoff + thislen; |
256 | 256 | ||
257 | /* | ||
258 | * lastsize can not be buffer len. | ||
259 | * If it is then adding another buffer with lastsize = 1. | ||
260 | */ | ||
261 | if (lastsize == bufflen) { | ||
262 | if (j >= IXGBE_BUFFCNT_MAX) { | ||
263 | e_err(drv, "xid=%x:%d,%d,%d:addr=%llx " | ||
264 | "not enough user buffers. We need an extra " | ||
265 | "buffer because lastsize is bufflen.\n", | ||
266 | xid, i, j, dmacount, (u64)addr); | ||
267 | goto out_noddp_free; | ||
268 | } | ||
269 | |||
270 | ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma); | ||
271 | j++; | ||
272 | lastsize = 1; | ||
273 | } | ||
274 | |||
257 | fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); | 275 | fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); |
258 | fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); | 276 | fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); |
259 | fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); | 277 | fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); |
@@ -532,6 +550,24 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) | |||
532 | e_err(drv, "failed to allocated FCoE DDP pool\n"); | 550 | e_err(drv, "failed to allocated FCoE DDP pool\n"); |
533 | 551 | ||
534 | spin_lock_init(&fcoe->lock); | 552 | spin_lock_init(&fcoe->lock); |
553 | |||
554 | /* Extra buffer to be shared by all DDPs for HW work around */ | ||
555 | fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); | ||
556 | if (fcoe->extra_ddp_buffer == NULL) { | ||
557 | e_err(drv, "failed to allocated extra DDP buffer\n"); | ||
558 | goto out_extra_ddp_buffer_alloc; | ||
559 | } | ||
560 | |||
561 | fcoe->extra_ddp_buffer_dma = | ||
562 | dma_map_single(&adapter->pdev->dev, | ||
563 | fcoe->extra_ddp_buffer, | ||
564 | IXGBE_FCBUFF_MIN, | ||
565 | DMA_FROM_DEVICE); | ||
566 | if (dma_mapping_error(&adapter->pdev->dev, | ||
567 | fcoe->extra_ddp_buffer_dma)) { | ||
568 | e_err(drv, "failed to map extra DDP buffer\n"); | ||
569 | goto out_extra_ddp_buffer_dma; | ||
570 | } | ||
535 | } | 571 | } |
536 | 572 | ||
537 | /* Enable L2 eth type filter for FCoE */ | 573 | /* Enable L2 eth type filter for FCoE */ |
@@ -581,6 +617,14 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) | |||
581 | } | 617 | } |
582 | } | 618 | } |
583 | #endif | 619 | #endif |
620 | |||
621 | return; | ||
622 | |||
623 | out_extra_ddp_buffer_dma: | ||
624 | kfree(fcoe->extra_ddp_buffer); | ||
625 | out_extra_ddp_buffer_alloc: | ||
626 | pci_pool_destroy(fcoe->pool); | ||
627 | fcoe->pool = NULL; | ||
584 | } | 628 | } |
585 | 629 | ||
586 | /** | 630 | /** |
@@ -600,6 +644,11 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter) | |||
600 | if (fcoe->pool) { | 644 | if (fcoe->pool) { |
601 | for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) | 645 | for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) |
602 | ixgbe_fcoe_ddp_put(adapter->netdev, i); | 646 | ixgbe_fcoe_ddp_put(adapter->netdev, i); |
647 | dma_unmap_single(&adapter->pdev->dev, | ||
648 | fcoe->extra_ddp_buffer_dma, | ||
649 | IXGBE_FCBUFF_MIN, | ||
650 | DMA_FROM_DEVICE); | ||
651 | kfree(fcoe->extra_ddp_buffer); | ||
603 | pci_pool_destroy(fcoe->pool); | 652 | pci_pool_destroy(fcoe->pool); |
604 | fcoe->pool = NULL; | 653 | fcoe->pool = NULL; |
605 | } | 654 | } |
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h index 4bc2c551c8db..65cc8fb14fe7 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.h +++ b/drivers/net/ixgbe/ixgbe_fcoe.h | |||
@@ -70,6 +70,8 @@ struct ixgbe_fcoe { | |||
70 | spinlock_t lock; | 70 | spinlock_t lock; |
71 | struct pci_pool *pool; | 71 | struct pci_pool *pool; |
72 | struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; | 72 | struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; |
73 | unsigned char *extra_ddp_buffer; | ||
74 | dma_addr_t extra_ddp_buffer_dma; | ||
73 | }; | 75 | }; |
74 | 76 | ||
75 | #endif /* _IXGBE_FCOE_H */ | 77 | #endif /* _IXGBE_FCOE_H */ |
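Note: the ixgbe FCoE hunks above allocate one shared "extra" DDP buffer, DMA-map it, unwind in reverse order (free the buffer, then destroy the pool) when either step fails, and unmap/free it again in ixgbe_cleanup_fcoe(). A simplified sketch of that allocate-then-map shape, using illustrative names rather than the driver's:

    #include <linux/dma-mapping.h>
    #include <linux/slab.h>

    /* Hypothetical helper: allocate a bounce buffer and map it for device
     * reads, releasing the allocation again if the mapping fails. */
    static int example_setup_extra_buffer(struct device *dev, size_t len,
                                          void **buf, dma_addr_t *dma)
    {
            *buf = kmalloc(len, GFP_KERNEL);
            if (!*buf)
                    return -ENOMEM;

            *dma = dma_map_single(dev, *buf, len, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, *dma)) {
                    kfree(*buf);          /* undo the first step on failure */
                    *buf = NULL;
                    return -ENOMEM;
            }
            return 0;
    }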
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 602078b84892..30f9ccfb4f87 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -52,7 +52,7 @@ char ixgbe_driver_name[] = "ixgbe"; | |||
52 | static const char ixgbe_driver_string[] = | 52 | static const char ixgbe_driver_string[] = |
53 | "Intel(R) 10 Gigabit PCI Express Network Driver"; | 53 | "Intel(R) 10 Gigabit PCI Express Network Driver"; |
54 | 54 | ||
55 | #define DRV_VERSION "3.0.12-k2" | 55 | #define DRV_VERSION "3.2.9-k2" |
56 | const char ixgbe_driver_version[] = DRV_VERSION; | 56 | const char ixgbe_driver_version[] = DRV_VERSION; |
57 | static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; | 57 | static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; |
58 | 58 | ||
@@ -3176,9 +3176,16 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) | |||
3176 | u32 mhadd, hlreg0; | 3176 | u32 mhadd, hlreg0; |
3177 | 3177 | ||
3178 | /* Decide whether to use packet split mode or not */ | 3178 | /* Decide whether to use packet split mode or not */ |
3179 | /* On by default */ | ||
3180 | adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; | ||
3181 | |||
3179 | /* Do not use packet split if we're in SR-IOV Mode */ | 3182 | /* Do not use packet split if we're in SR-IOV Mode */ |
3180 | if (!adapter->num_vfs) | 3183 | if (adapter->num_vfs) |
3181 | adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; | 3184 | adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; |
3185 | |||
3186 | /* Disable packet split due to 82599 erratum #45 */ | ||
3187 | if (hw->mac.type == ixgbe_mac_82599EB) | ||
3188 | adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; | ||
3182 | 3189 | ||
3183 | /* Set the RX buffer length according to the mode */ | 3190 | /* Set the RX buffer length according to the mode */ |
3184 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { | 3191 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { |
@@ -3721,7 +3728,8 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) | |||
3721 | * We need to try and force an autonegotiation | 3728 | * We need to try and force an autonegotiation |
3722 | * session, then bring up link. | 3729 | * session, then bring up link. |
3723 | */ | 3730 | */ |
3724 | hw->mac.ops.setup_sfp(hw); | 3731 | if (hw->mac.ops.setup_sfp) |
3732 | hw->mac.ops.setup_sfp(hw); | ||
3725 | if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) | 3733 | if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) |
3726 | schedule_work(&adapter->multispeed_fiber_task); | 3734 | schedule_work(&adapter->multispeed_fiber_task); |
3727 | } else { | 3735 | } else { |
@@ -4863,16 +4871,13 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) | |||
4863 | { | 4871 | { |
4864 | int q_idx, num_q_vectors; | 4872 | int q_idx, num_q_vectors; |
4865 | struct ixgbe_q_vector *q_vector; | 4873 | struct ixgbe_q_vector *q_vector; |
4866 | int napi_vectors; | ||
4867 | int (*poll)(struct napi_struct *, int); | 4874 | int (*poll)(struct napi_struct *, int); |
4868 | 4875 | ||
4869 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | 4876 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { |
4870 | num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | 4877 | num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
4871 | napi_vectors = adapter->num_rx_queues; | ||
4872 | poll = &ixgbe_clean_rxtx_many; | 4878 | poll = &ixgbe_clean_rxtx_many; |
4873 | } else { | 4879 | } else { |
4874 | num_q_vectors = 1; | 4880 | num_q_vectors = 1; |
4875 | napi_vectors = 1; | ||
4876 | poll = &ixgbe_poll; | 4881 | poll = &ixgbe_poll; |
4877 | } | 4882 | } |
4878 | 4883 | ||
@@ -5964,7 +5969,8 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work) | |||
5964 | unregister_netdev(adapter->netdev); | 5969 | unregister_netdev(adapter->netdev); |
5965 | return; | 5970 | return; |
5966 | } | 5971 | } |
5967 | hw->mac.ops.setup_sfp(hw); | 5972 | if (hw->mac.ops.setup_sfp) |
5973 | hw->mac.ops.setup_sfp(hw); | ||
5968 | 5974 | ||
5969 | if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) | 5975 | if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) |
5970 | /* This will also work for DA Twinax connections */ | 5976 | /* This will also work for DA Twinax connections */ |
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c index 47b15738b009..187b3a16ec1f 100644 --- a/drivers/net/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ixgbe/ixgbe_sriov.c | |||
@@ -110,12 +110,10 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, | |||
110 | return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); | 110 | return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); |
111 | } | 111 | } |
112 | 112 | ||
113 | |||
114 | static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) | 113 | static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) |
115 | { | 114 | { |
116 | u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); | 115 | u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); |
117 | vmolr |= (IXGBE_VMOLR_ROMPE | | 116 | vmolr |= (IXGBE_VMOLR_ROMPE | |
118 | IXGBE_VMOLR_ROPE | | ||
119 | IXGBE_VMOLR_BAM); | 117 | IXGBE_VMOLR_BAM); |
120 | if (aupe) | 118 | if (aupe) |
121 | vmolr |= IXGBE_VMOLR_AUPE; | 119 | vmolr |= IXGBE_VMOLR_AUPE; |
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c index 3a8923993ce3..f2518b01067d 100644 --- a/drivers/net/ixgbe/ixgbe_x540.c +++ b/drivers/net/ixgbe/ixgbe_x540.c | |||
@@ -133,17 +133,17 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) | |||
133 | } | 133 | } |
134 | 134 | ||
135 | ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); | 135 | ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); |
136 | IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST)); | 136 | IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit)); |
137 | IXGBE_WRITE_FLUSH(hw); | 137 | IXGBE_WRITE_FLUSH(hw); |
138 | 138 | ||
139 | /* Poll for reset bit to self-clear indicating reset is complete */ | 139 | /* Poll for reset bit to self-clear indicating reset is complete */ |
140 | for (i = 0; i < 10; i++) { | 140 | for (i = 0; i < 10; i++) { |
141 | udelay(1); | 141 | udelay(1); |
142 | ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); | 142 | ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); |
143 | if (!(ctrl & IXGBE_CTRL_RST)) | 143 | if (!(ctrl & reset_bit)) |
144 | break; | 144 | break; |
145 | } | 145 | } |
146 | if (ctrl & IXGBE_CTRL_RST) { | 146 | if (ctrl & reset_bit) { |
147 | status = IXGBE_ERR_RESET_FAILED; | 147 | status = IXGBE_ERR_RESET_FAILED; |
148 | hw_dbg(hw, "Reset polling failed to complete.\n"); | 148 | hw_dbg(hw, "Reset polling failed to complete.\n"); |
149 | } | 149 | } |
diff --git a/drivers/net/macb.c b/drivers/net/macb.c index f69e73e2191e..79ccb54ab00c 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c | |||
@@ -260,7 +260,7 @@ static int macb_mii_init(struct macb *bp) | |||
260 | for (i = 0; i < PHY_MAX_ADDR; i++) | 260 | for (i = 0; i < PHY_MAX_ADDR; i++) |
261 | bp->mii_bus->irq[i] = PHY_POLL; | 261 | bp->mii_bus->irq[i] = PHY_POLL; |
262 | 262 | ||
263 | platform_set_drvdata(bp->dev, bp->mii_bus); | 263 | dev_set_drvdata(&bp->dev->dev, bp->mii_bus); |
264 | 264 | ||
265 | if (mdiobus_register(bp->mii_bus)) | 265 | if (mdiobus_register(bp->mii_bus)) |
266 | goto err_out_free_mdio_irq; | 266 | goto err_out_free_mdio_irq; |
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 5933621ac3ff..fc27a9926d9e 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c | |||
@@ -528,8 +528,9 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, | |||
528 | vnet_hdr_len = q->vnet_hdr_sz; | 528 | vnet_hdr_len = q->vnet_hdr_sz; |
529 | 529 | ||
530 | err = -EINVAL; | 530 | err = -EINVAL; |
531 | if ((len -= vnet_hdr_len) < 0) | 531 | if (len < vnet_hdr_len) |
532 | goto err; | 532 | goto err; |
533 | len -= vnet_hdr_len; | ||
533 | 534 | ||
534 | err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0, | 535 | err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0, |
535 | sizeof(vnet_hdr)); | 536 | sizeof(vnet_hdr)); |
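Note: the macvtap change above replaces the subtract-then-test "(len -= vnet_hdr_len) < 0" with a comparison first, so len is only reduced once it is known to cover the header. A standalone illustration of the test-then-subtract idiom (also the safe form when the length is an unsigned type, where subtracting first would wrap rather than go negative):

    #include <stdbool.h>
    #include <stddef.h>

    /* Consume hdr_len bytes from *len only after validating it. */
    static bool consume_header(size_t *len, size_t hdr_len)
    {
            if (*len < hdr_len)
                    return false;     /* reject without modifying *len */
            *len -= hdr_len;
            return true;
    }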
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h index a0c26a99520f..e1e33c80fb25 100644 --- a/drivers/net/pch_gbe/pch_gbe.h +++ b/drivers/net/pch_gbe/pch_gbe.h | |||
@@ -73,7 +73,7 @@ struct pch_gbe_regs { | |||
73 | struct pch_gbe_regs_mac_adr mac_adr[16]; | 73 | struct pch_gbe_regs_mac_adr mac_adr[16]; |
74 | u32 ADDR_MASK; | 74 | u32 ADDR_MASK; |
75 | u32 MIIM; | 75 | u32 MIIM; |
76 | u32 reserve2; | 76 | u32 MAC_ADDR_LOAD; |
77 | u32 RGMII_ST; | 77 | u32 RGMII_ST; |
78 | u32 RGMII_CTRL; | 78 | u32 RGMII_CTRL; |
79 | u32 reserve3[3]; | 79 | u32 reserve3[3]; |
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c index 1bf12339441b..b99e90aca37d 100644 --- a/drivers/net/pch_gbe/pch_gbe_main.c +++ b/drivers/net/pch_gbe/pch_gbe_main.c | |||
@@ -29,6 +29,7 @@ const char pch_driver_version[] = DRV_VERSION; | |||
29 | #define PCH_GBE_SHORT_PKT 64 | 29 | #define PCH_GBE_SHORT_PKT 64 |
30 | #define DSC_INIT16 0xC000 | 30 | #define DSC_INIT16 0xC000 |
31 | #define PCH_GBE_DMA_ALIGN 0 | 31 | #define PCH_GBE_DMA_ALIGN 0 |
32 | #define PCH_GBE_DMA_PADDING 2 | ||
32 | #define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */ | 33 | #define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */ |
33 | #define PCH_GBE_COPYBREAK_DEFAULT 256 | 34 | #define PCH_GBE_COPYBREAK_DEFAULT 256 |
34 | #define PCH_GBE_PCI_BAR 1 | 35 | #define PCH_GBE_PCI_BAR 1 |
@@ -88,6 +89,12 @@ static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; | |||
88 | static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg); | 89 | static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg); |
89 | static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg, | 90 | static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg, |
90 | int data); | 91 | int data); |
92 | |||
93 | inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw) | ||
94 | { | ||
95 | iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD); | ||
96 | } | ||
97 | |||
91 | /** | 98 | /** |
92 | * pch_gbe_mac_read_mac_addr - Read MAC address | 99 | * pch_gbe_mac_read_mac_addr - Read MAC address |
93 | * @hw: Pointer to the HW structure | 100 | * @hw: Pointer to the HW structure |
@@ -519,7 +526,9 @@ static void pch_gbe_reset_task(struct work_struct *work) | |||
519 | struct pch_gbe_adapter *adapter; | 526 | struct pch_gbe_adapter *adapter; |
520 | adapter = container_of(work, struct pch_gbe_adapter, reset_task); | 527 | adapter = container_of(work, struct pch_gbe_adapter, reset_task); |
521 | 528 | ||
529 | rtnl_lock(); | ||
522 | pch_gbe_reinit_locked(adapter); | 530 | pch_gbe_reinit_locked(adapter); |
531 | rtnl_unlock(); | ||
523 | } | 532 | } |
524 | 533 | ||
525 | /** | 534 | /** |
@@ -528,14 +537,8 @@ static void pch_gbe_reset_task(struct work_struct *work) | |||
528 | */ | 537 | */ |
529 | void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter) | 538 | void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter) |
530 | { | 539 | { |
531 | struct net_device *netdev = adapter->netdev; | 540 | pch_gbe_down(adapter); |
532 | 541 | pch_gbe_up(adapter); | |
533 | rtnl_lock(); | ||
534 | if (netif_running(netdev)) { | ||
535 | pch_gbe_down(adapter); | ||
536 | pch_gbe_up(adapter); | ||
537 | } | ||
538 | rtnl_unlock(); | ||
539 | } | 542 | } |
540 | 543 | ||
541 | /** | 544 | /** |
@@ -1369,16 +1372,13 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, | |||
1369 | struct pch_gbe_buffer *buffer_info; | 1372 | struct pch_gbe_buffer *buffer_info; |
1370 | struct pch_gbe_rx_desc *rx_desc; | 1373 | struct pch_gbe_rx_desc *rx_desc; |
1371 | u32 length; | 1374 | u32 length; |
1372 | unsigned char tmp_packet[ETH_HLEN]; | ||
1373 | unsigned int i; | 1375 | unsigned int i; |
1374 | unsigned int cleaned_count = 0; | 1376 | unsigned int cleaned_count = 0; |
1375 | bool cleaned = false; | 1377 | bool cleaned = false; |
1376 | struct sk_buff *skb; | 1378 | struct sk_buff *skb, *new_skb; |
1377 | u8 dma_status; | 1379 | u8 dma_status; |
1378 | u16 gbec_status; | 1380 | u16 gbec_status; |
1379 | u32 tcp_ip_status; | 1381 | u32 tcp_ip_status; |
1380 | u8 skb_copy_flag = 0; | ||
1381 | u8 skb_padding_flag = 0; | ||
1382 | 1382 | ||
1383 | i = rx_ring->next_to_clean; | 1383 | i = rx_ring->next_to_clean; |
1384 | 1384 | ||
@@ -1422,55 +1422,70 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, | |||
1422 | pr_err("Receive CRC Error\n"); | 1422 | pr_err("Receive CRC Error\n"); |
1423 | } else { | 1423 | } else { |
1424 | /* get receive length */ | 1424 | /* get receive length */ |
1425 | /* length convert[-3], padding[-2] */ | 1425 | /* length convert[-3] */ |
1426 | length = (rx_desc->rx_words_eob) - 3 - 2; | 1426 | length = (rx_desc->rx_words_eob) - 3; |
1427 | 1427 | ||
1428 | /* Decide the data conversion method */ | 1428 | /* Decide the data conversion method */ |
1429 | if (!adapter->rx_csum) { | 1429 | if (!adapter->rx_csum) { |
1430 | /* [Header:14][payload] */ | 1430 | /* [Header:14][payload] */ |
1431 | skb_padding_flag = 0; | 1431 | if (NET_IP_ALIGN) { |
1432 | skb_copy_flag = 1; | 1432 | /* Because alignment differs, |
1433 | * the new_skb is newly allocated, | ||
1434 | * and data is copied to new_skb.*/ | ||
1435 | new_skb = netdev_alloc_skb(netdev, | ||
1436 | length + NET_IP_ALIGN); | ||
1437 | if (!new_skb) { | ||
1438 | /* dorrop error */ | ||
1439 | pr_err("New skb allocation " | ||
1440 | "Error\n"); | ||
1441 | goto dorrop; | ||
1442 | } | ||
1443 | skb_reserve(new_skb, NET_IP_ALIGN); | ||
1444 | memcpy(new_skb->data, skb->data, | ||
1445 | length); | ||
1446 | skb = new_skb; | ||
1447 | } else { | ||
1448 | /* DMA buffer is used as SKB as it is.*/ | ||
1449 | buffer_info->skb = NULL; | ||
1450 | } | ||
1433 | } else { | 1451 | } else { |
1434 | /* [Header:14][padding:2][payload] */ | 1452 | /* [Header:14][padding:2][payload] */ |
1435 | skb_padding_flag = 1; | 1453 | /* The length includes padding length */ |
1436 | if (length < copybreak) | 1454 | length = length - PCH_GBE_DMA_PADDING; |
1437 | skb_copy_flag = 1; | 1455 | if ((length < copybreak) || |
1438 | else | 1456 | (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) { |
1439 | skb_copy_flag = 0; | 1457 | /* Because alignment differs, |
1440 | } | 1458 | * the new_skb is newly allocated, |
1441 | 1459 | * and data is copied to new_skb. | |
1442 | /* Data conversion */ | 1460 | * Padding data is deleted |
1443 | if (skb_copy_flag) { /* recycle skb */ | 1461 | * at the time of a copy.*/ |
1444 | struct sk_buff *new_skb; | 1462 | new_skb = netdev_alloc_skb(netdev, |
1445 | new_skb = | 1463 | length + NET_IP_ALIGN); |
1446 | netdev_alloc_skb(netdev, | 1464 | if (!new_skb) { |
1447 | length + NET_IP_ALIGN); | 1465 | /* dorrop error */ |
1448 | if (new_skb) { | 1466 | pr_err("New skb allocation " |
1449 | if (!skb_padding_flag) { | 1467 | "Error\n"); |
1450 | skb_reserve(new_skb, | 1468 | goto dorrop; |
1451 | NET_IP_ALIGN); | ||
1452 | } | 1469 | } |
1470 | skb_reserve(new_skb, NET_IP_ALIGN); | ||
1453 | memcpy(new_skb->data, skb->data, | 1471 | memcpy(new_skb->data, skb->data, |
1454 | length); | 1472 | ETH_HLEN); |
1455 | /* save the skb | 1473 | memcpy(&new_skb->data[ETH_HLEN], |
1456 | * in buffer_info as good */ | 1474 | &skb->data[ETH_HLEN + |
1475 | PCH_GBE_DMA_PADDING], | ||
1476 | length - ETH_HLEN); | ||
1457 | skb = new_skb; | 1477 | skb = new_skb; |
1458 | } else if (!skb_padding_flag) { | 1478 | } else { |
1459 | /* dorrop error */ | 1479 | /* Padding data is deleted |
1460 | pr_err("New skb allocation Error\n"); | 1480 | * by moving header data.*/ |
1461 | goto dorrop; | 1481 | memmove(&skb->data[PCH_GBE_DMA_PADDING], |
1482 | &skb->data[0], ETH_HLEN); | ||
1483 | skb_reserve(skb, NET_IP_ALIGN); | ||
1484 | buffer_info->skb = NULL; | ||
1462 | } | 1485 | } |
1463 | } else { | ||
1464 | buffer_info->skb = NULL; | ||
1465 | } | 1486 | } |
1466 | if (skb_padding_flag) { | 1487 | /* The length includes FCS length */ |
1467 | memcpy(&tmp_packet[0], &skb->data[0], ETH_HLEN); | 1488 | length = length - ETH_FCS_LEN; |
1468 | memcpy(&skb->data[NET_IP_ALIGN], &tmp_packet[0], | ||
1469 | ETH_HLEN); | ||
1470 | skb_reserve(skb, NET_IP_ALIGN); | ||
1471 | |||
1472 | } | ||
1473 | |||
1474 | /* update status of driver */ | 1489 | /* update status of driver */ |
1475 | adapter->stats.rx_bytes += length; | 1490 | adapter->stats.rx_bytes += length; |
1476 | adapter->stats.rx_packets++; | 1491 | adapter->stats.rx_packets++; |
@@ -2322,6 +2337,7 @@ static int pch_gbe_probe(struct pci_dev *pdev, | |||
2322 | netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO; | 2337 | netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO; |
2323 | pch_gbe_set_ethtool_ops(netdev); | 2338 | pch_gbe_set_ethtool_ops(netdev); |
2324 | 2339 | ||
2340 | pch_gbe_mac_load_mac_addr(&adapter->hw); | ||
2325 | pch_gbe_mac_reset_hw(&adapter->hw); | 2341 | pch_gbe_mac_reset_hw(&adapter->hw); |
2326 | 2342 | ||
2327 | /* setup the private structure */ | 2343 | /* setup the private structure */ |
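Note: in the pch_gbe receive path above, frames arrive as [header:14][padding:2][payload] when checksum offload is enabled; the rewritten code either copies the pieces into a freshly aligned skb or, when the alignment already matches, slides the 14-byte header forward over the 2-byte pad with memmove(). A small standalone sketch of that in-place header move, with the driver's constants replaced by illustrative ones:

    #include <stdint.h>
    #include <string.h>

    #define EX_ETH_HLEN  14      /* Ethernet header length */
    #define EX_DMA_PAD    2      /* pad the MAC inserts after the header */

    /* Move the header up by EX_DMA_PAD bytes so it becomes contiguous with
     * the payload; the frame then starts EX_DMA_PAD bytes into the buffer. */
    static uint8_t *strip_rx_padding(uint8_t *frame)
    {
            memmove(frame + EX_DMA_PAD, frame, EX_ETH_HLEN);
            return frame + EX_DMA_PAD;
    }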
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c index 9226cda4d054..530ab5a10bd3 100644 --- a/drivers/net/pcmcia/fmvj18x_cs.c +++ b/drivers/net/pcmcia/fmvj18x_cs.c | |||
@@ -691,6 +691,7 @@ static struct pcmcia_device_id fmvj18x_ids[] = { | |||
691 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a), | 691 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a), |
692 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01), | 692 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01), |
693 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05), | 693 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05), |
694 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0b05), | ||
694 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101), | 695 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101), |
695 | PCMCIA_DEVICE_NULL, | 696 | PCMCIA_DEVICE_NULL, |
696 | }; | 697 | }; |
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c index 27e6f6d43cac..e3ebd90ae651 100644 --- a/drivers/net/r6040.c +++ b/drivers/net/r6040.c | |||
@@ -49,8 +49,8 @@ | |||
49 | #include <asm/processor.h> | 49 | #include <asm/processor.h> |
50 | 50 | ||
51 | #define DRV_NAME "r6040" | 51 | #define DRV_NAME "r6040" |
52 | #define DRV_VERSION "0.26" | 52 | #define DRV_VERSION "0.27" |
53 | #define DRV_RELDATE "30May2010" | 53 | #define DRV_RELDATE "23Feb2011" |
54 | 54 | ||
55 | /* PHY CHIP Address */ | 55 | /* PHY CHIP Address */ |
56 | #define PHY1_ADDR 1 /* For MAC1 */ | 56 | #define PHY1_ADDR 1 /* For MAC1 */ |
@@ -69,6 +69,8 @@ | |||
69 | 69 | ||
70 | /* MAC registers */ | 70 | /* MAC registers */ |
71 | #define MCR0 0x00 /* Control register 0 */ | 71 | #define MCR0 0x00 /* Control register 0 */ |
72 | #define MCR0_PROMISC 0x0020 /* Promiscuous mode */ | ||
73 | #define MCR0_HASH_EN 0x0100 /* Enable multicast hash table function */ | ||
72 | #define MCR1 0x04 /* Control register 1 */ | 74 | #define MCR1 0x04 /* Control register 1 */ |
73 | #define MAC_RST 0x0001 /* Reset the MAC */ | 75 | #define MAC_RST 0x0001 /* Reset the MAC */ |
74 | #define MBCR 0x08 /* Bus control */ | 76 | #define MBCR 0x08 /* Bus control */ |
@@ -851,77 +853,92 @@ static void r6040_multicast_list(struct net_device *dev) | |||
851 | { | 853 | { |
852 | struct r6040_private *lp = netdev_priv(dev); | 854 | struct r6040_private *lp = netdev_priv(dev); |
853 | void __iomem *ioaddr = lp->base; | 855 | void __iomem *ioaddr = lp->base; |
854 | u16 *adrp; | ||
855 | u16 reg; | ||
856 | unsigned long flags; | 856 | unsigned long flags; |
857 | struct netdev_hw_addr *ha; | 857 | struct netdev_hw_addr *ha; |
858 | int i; | 858 | int i; |
859 | u16 *adrp; | ||
860 | u16 hash_table[4] = { 0 }; | ||
861 | |||
862 | spin_lock_irqsave(&lp->lock, flags); | ||
859 | 863 | ||
860 | /* MAC Address */ | 864 | /* Keep our MAC Address */ |
861 | adrp = (u16 *)dev->dev_addr; | 865 | adrp = (u16 *)dev->dev_addr; |
862 | iowrite16(adrp[0], ioaddr + MID_0L); | 866 | iowrite16(adrp[0], ioaddr + MID_0L); |
863 | iowrite16(adrp[1], ioaddr + MID_0M); | 867 | iowrite16(adrp[1], ioaddr + MID_0M); |
864 | iowrite16(adrp[2], ioaddr + MID_0H); | 868 | iowrite16(adrp[2], ioaddr + MID_0H); |
865 | 869 | ||
866 | /* Promiscous Mode */ | ||
867 | spin_lock_irqsave(&lp->lock, flags); | ||
868 | |||
869 | /* Clear AMCP & PROM bits */ | 870 | /* Clear AMCP & PROM bits */ |
870 | reg = ioread16(ioaddr) & ~0x0120; | 871 | lp->mcr0 = ioread16(ioaddr + MCR0) & ~(MCR0_PROMISC | MCR0_HASH_EN); |
871 | if (dev->flags & IFF_PROMISC) { | ||
872 | reg |= 0x0020; | ||
873 | lp->mcr0 |= 0x0020; | ||
874 | } | ||
875 | /* Too many multicast addresses | ||
876 | * accept all traffic */ | ||
877 | else if ((netdev_mc_count(dev) > MCAST_MAX) || | ||
878 | (dev->flags & IFF_ALLMULTI)) | ||
879 | reg |= 0x0020; | ||
880 | 872 | ||
881 | iowrite16(reg, ioaddr); | 873 | /* Promiscuous mode */ |
882 | spin_unlock_irqrestore(&lp->lock, flags); | 874 | if (dev->flags & IFF_PROMISC) |
875 | lp->mcr0 |= MCR0_PROMISC; | ||
883 | 876 | ||
884 | /* Build the hash table */ | 877 | /* Enable multicast hash table function to |
885 | if (netdev_mc_count(dev) > MCAST_MAX) { | 878 | * receive all multicast packets. */ |
886 | u16 hash_table[4]; | 879 | else if (dev->flags & IFF_ALLMULTI) { |
887 | u32 crc; | 880 | lp->mcr0 |= MCR0_HASH_EN; |
888 | 881 | ||
889 | for (i = 0; i < 4; i++) | 882 | for (i = 0; i < MCAST_MAX ; i++) { |
890 | hash_table[i] = 0; | 883 | iowrite16(0, ioaddr + MID_1L + 8 * i); |
884 | iowrite16(0, ioaddr + MID_1M + 8 * i); | ||
885 | iowrite16(0, ioaddr + MID_1H + 8 * i); | ||
886 | } | ||
891 | 887 | ||
888 | for (i = 0; i < 4; i++) | ||
889 | hash_table[i] = 0xffff; | ||
890 | } | ||
891 | /* Use internal multicast address registers if the number of | ||
892 | * multicast addresses is not greater than MCAST_MAX. */ | ||
893 | else if (netdev_mc_count(dev) <= MCAST_MAX) { | ||
894 | i = 0; | ||
892 | netdev_for_each_mc_addr(ha, dev) { | 895 | netdev_for_each_mc_addr(ha, dev) { |
893 | char *addrs = ha->addr; | 896 | u16 *adrp = (u16 *) ha->addr; |
897 | iowrite16(adrp[0], ioaddr + MID_1L + 8 * i); | ||
898 | iowrite16(adrp[1], ioaddr + MID_1M + 8 * i); | ||
899 | iowrite16(adrp[2], ioaddr + MID_1H + 8 * i); | ||
900 | i++; | ||
901 | } | ||
902 | while (i < MCAST_MAX) { | ||
903 | iowrite16(0, ioaddr + MID_1L + 8 * i); | ||
904 | iowrite16(0, ioaddr + MID_1M + 8 * i); | ||
905 | iowrite16(0, ioaddr + MID_1H + 8 * i); | ||
906 | i++; | ||
907 | } | ||
908 | } | ||
909 | /* Otherwise, Enable multicast hash table function. */ | ||
910 | else { | ||
911 | u32 crc; | ||
894 | 912 | ||
895 | if (!(*addrs & 1)) | 913 | lp->mcr0 |= MCR0_HASH_EN; |
896 | continue; | 914 | |
915 | for (i = 0; i < MCAST_MAX ; i++) { | ||
916 | iowrite16(0, ioaddr + MID_1L + 8 * i); | ||
917 | iowrite16(0, ioaddr + MID_1M + 8 * i); | ||
918 | iowrite16(0, ioaddr + MID_1H + 8 * i); | ||
919 | } | ||
897 | 920 | ||
898 | crc = ether_crc_le(6, addrs); | 921 | /* Build multicast hash table */ |
922 | netdev_for_each_mc_addr(ha, dev) { | ||
923 | u8 *addrs = ha->addr; | ||
924 | |||
925 | crc = ether_crc(ETH_ALEN, addrs); | ||
899 | crc >>= 26; | 926 | crc >>= 26; |
900 | hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); | 927 | hash_table[crc >> 4] |= 1 << (crc & 0xf); |
901 | } | 928 | } |
902 | /* Fill the MAC hash tables with their values */ | 929 | } |
930 | |||
931 | iowrite16(lp->mcr0, ioaddr + MCR0); | ||
932 | |||
933 | /* Fill the MAC hash tables with their values */ | ||
934 | if (lp->mcr0 && MCR0_HASH_EN) { | ||
903 | iowrite16(hash_table[0], ioaddr + MAR0); | 935 | iowrite16(hash_table[0], ioaddr + MAR0); |
904 | iowrite16(hash_table[1], ioaddr + MAR1); | 936 | iowrite16(hash_table[1], ioaddr + MAR1); |
905 | iowrite16(hash_table[2], ioaddr + MAR2); | 937 | iowrite16(hash_table[2], ioaddr + MAR2); |
906 | iowrite16(hash_table[3], ioaddr + MAR3); | 938 | iowrite16(hash_table[3], ioaddr + MAR3); |
907 | } | 939 | } |
908 | /* Multicast Address 1~4 case */ | 940 | |
909 | i = 0; | 941 | spin_unlock_irqrestore(&lp->lock, flags); |
910 | netdev_for_each_mc_addr(ha, dev) { | ||
911 | if (i >= MCAST_MAX) | ||
912 | break; | ||
913 | adrp = (u16 *) ha->addr; | ||
914 | iowrite16(adrp[0], ioaddr + MID_1L + 8 * i); | ||
915 | iowrite16(adrp[1], ioaddr + MID_1M + 8 * i); | ||
916 | iowrite16(adrp[2], ioaddr + MID_1H + 8 * i); | ||
917 | i++; | ||
918 | } | ||
919 | while (i < MCAST_MAX) { | ||
920 | iowrite16(0xffff, ioaddr + MID_1L + 8 * i); | ||
921 | iowrite16(0xffff, ioaddr + MID_1M + 8 * i); | ||
922 | iowrite16(0xffff, ioaddr + MID_1H + 8 * i); | ||
923 | i++; | ||
924 | } | ||
925 | } | 942 | } |
926 | 943 | ||
927 | static void netdev_get_drvinfo(struct net_device *dev, | 944 | static void netdev_get_drvinfo(struct net_device *dev, |
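Note: the reworked r6040 multicast path above programs up to MCAST_MAX addresses into the MID_1* registers directly and otherwise falls back to the 64-bit hash, where the top six bits of the Ethernet CRC select one bit across the four 16-bit MAR registers. A standalone sketch of that bit selection (the CRC itself would come from ether_crc() in the kernel):

    #include <stdint.h>

    /* Map a 32-bit CRC of the MAC address onto four 16-bit hash registers:
     * bits 31..26 pick one of 64 positions. */
    static void set_hash_bit(uint16_t hash_table[4], uint32_t crc)
    {
            uint32_t hash = crc >> 26;                     /* 0..63 */

            hash_table[hash >> 4] |= 1u << (hash & 0xf);   /* word 0..3, bit 0..15 */
    }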
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 59ccf0c5c610..7ffdb80adf40 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/dma-mapping.h> | 25 | #include <linux/dma-mapping.h> |
26 | #include <linux/pm_runtime.h> | 26 | #include <linux/pm_runtime.h> |
27 | #include <linux/firmware.h> | 27 | #include <linux/firmware.h> |
28 | #include <linux/pci-aspm.h> | ||
28 | 29 | ||
29 | #include <asm/system.h> | 30 | #include <asm/system.h> |
30 | #include <asm/io.h> | 31 | #include <asm/io.h> |
@@ -617,8 +618,9 @@ static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data) | |||
617 | } | 618 | } |
618 | } | 619 | } |
619 | 620 | ||
620 | static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd) | 621 | static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd) |
621 | { | 622 | { |
623 | void __iomem *ioaddr = tp->mmio_addr; | ||
622 | int i; | 624 | int i; |
623 | 625 | ||
624 | RTL_W8(ERIDR, cmd); | 626 | RTL_W8(ERIDR, cmd); |
@@ -630,7 +632,7 @@ static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd) | |||
630 | break; | 632 | break; |
631 | } | 633 | } |
632 | 634 | ||
633 | ocp_write(ioaddr, 0x1, 0x30, 0x00000001); | 635 | ocp_write(tp, 0x1, 0x30, 0x00000001); |
634 | } | 636 | } |
635 | 637 | ||
636 | #define OOB_CMD_RESET 0x00 | 638 | #define OOB_CMD_RESET 0x00 |
@@ -2868,8 +2870,11 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) | |||
2868 | { | 2870 | { |
2869 | void __iomem *ioaddr = tp->mmio_addr; | 2871 | void __iomem *ioaddr = tp->mmio_addr; |
2870 | 2872 | ||
2871 | if (tp->mac_version == RTL_GIGA_MAC_VER_27) | 2873 | if (((tp->mac_version == RTL_GIGA_MAC_VER_27) || |
2874 | (tp->mac_version == RTL_GIGA_MAC_VER_28)) && | ||
2875 | (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) { | ||
2872 | return; | 2876 | return; |
2877 | } | ||
2873 | 2878 | ||
2874 | if (((tp->mac_version == RTL_GIGA_MAC_VER_23) || | 2879 | if (((tp->mac_version == RTL_GIGA_MAC_VER_23) || |
2875 | (tp->mac_version == RTL_GIGA_MAC_VER_24)) && | 2880 | (tp->mac_version == RTL_GIGA_MAC_VER_24)) && |
@@ -2891,6 +2896,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) | |||
2891 | switch (tp->mac_version) { | 2896 | switch (tp->mac_version) { |
2892 | case RTL_GIGA_MAC_VER_25: | 2897 | case RTL_GIGA_MAC_VER_25: |
2893 | case RTL_GIGA_MAC_VER_26: | 2898 | case RTL_GIGA_MAC_VER_26: |
2899 | case RTL_GIGA_MAC_VER_27: | ||
2900 | case RTL_GIGA_MAC_VER_28: | ||
2894 | RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); | 2901 | RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); |
2895 | break; | 2902 | break; |
2896 | } | 2903 | } |
@@ -2900,12 +2907,17 @@ static void r8168_pll_power_up(struct rtl8169_private *tp) | |||
2900 | { | 2907 | { |
2901 | void __iomem *ioaddr = tp->mmio_addr; | 2908 | void __iomem *ioaddr = tp->mmio_addr; |
2902 | 2909 | ||
2903 | if (tp->mac_version == RTL_GIGA_MAC_VER_27) | 2910 | if (((tp->mac_version == RTL_GIGA_MAC_VER_27) || |
2911 | (tp->mac_version == RTL_GIGA_MAC_VER_28)) && | ||
2912 | (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) { | ||
2904 | return; | 2913 | return; |
2914 | } | ||
2905 | 2915 | ||
2906 | switch (tp->mac_version) { | 2916 | switch (tp->mac_version) { |
2907 | case RTL_GIGA_MAC_VER_25: | 2917 | case RTL_GIGA_MAC_VER_25: |
2908 | case RTL_GIGA_MAC_VER_26: | 2918 | case RTL_GIGA_MAC_VER_26: |
2919 | case RTL_GIGA_MAC_VER_27: | ||
2920 | case RTL_GIGA_MAC_VER_28: | ||
2909 | RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); | 2921 | RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); |
2910 | break; | 2922 | break; |
2911 | } | 2923 | } |
@@ -3009,6 +3021,11 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3009 | mii->reg_num_mask = 0x1f; | 3021 | mii->reg_num_mask = 0x1f; |
3010 | mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII); | 3022 | mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII); |
3011 | 3023 | ||
3024 | /* disable ASPM completely as that cause random device stop working | ||
3025 | * problems as well as full system hangs for some PCIe devices users */ | ||
3026 | pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | | ||
3027 | PCIE_LINK_STATE_CLKPM); | ||
3028 | |||
3012 | /* enable device (incl. PCI PM wakeup and hotplug setup) */ | 3029 | /* enable device (incl. PCI PM wakeup and hotplug setup) */ |
3013 | rc = pci_enable_device(pdev); | 3030 | rc = pci_enable_device(pdev); |
3014 | if (rc < 0) { | 3031 | if (rc < 0) { |
@@ -3042,7 +3059,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3042 | goto err_out_mwi_2; | 3059 | goto err_out_mwi_2; |
3043 | } | 3060 | } |
3044 | 3061 | ||
3045 | tp->cp_cmd = PCIMulRW | RxChkSum; | 3062 | tp->cp_cmd = RxChkSum; |
3046 | 3063 | ||
3047 | if ((sizeof(dma_addr_t) > 4) && | 3064 | if ((sizeof(dma_addr_t) > 4) && |
3048 | !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { | 3065 | !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { |
@@ -3190,6 +3207,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3190 | if (pci_dev_run_wake(pdev)) | 3207 | if (pci_dev_run_wake(pdev)) |
3191 | pm_runtime_put_noidle(&pdev->dev); | 3208 | pm_runtime_put_noidle(&pdev->dev); |
3192 | 3209 | ||
3210 | netif_carrier_off(dev); | ||
3211 | |||
3193 | out: | 3212 | out: |
3194 | return rc; | 3213 | return rc; |
3195 | 3214 | ||
@@ -3316,7 +3335,8 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp) | |||
3316 | /* Disable interrupts */ | 3335 | /* Disable interrupts */ |
3317 | rtl8169_irq_mask_and_ack(ioaddr); | 3336 | rtl8169_irq_mask_and_ack(ioaddr); |
3318 | 3337 | ||
3319 | if (tp->mac_version == RTL_GIGA_MAC_VER_28) { | 3338 | if (tp->mac_version == RTL_GIGA_MAC_VER_27 || |
3339 | tp->mac_version == RTL_GIGA_MAC_VER_28) { | ||
3320 | while (RTL_R8(TxPoll) & NPQ) | 3340 | while (RTL_R8(TxPoll) & NPQ) |
3321 | udelay(20); | 3341 | udelay(20); |
3322 | 3342 | ||
@@ -3845,8 +3865,7 @@ static void rtl_hw_start_8168(struct net_device *dev) | |||
3845 | Cxpl_dbg_sel | \ | 3865 | Cxpl_dbg_sel | \ |
3846 | ASF | \ | 3866 | ASF | \ |
3847 | PktCntrDisable | \ | 3867 | PktCntrDisable | \ |
3848 | PCIDAC | \ | 3868 | Mac_dbgo_sel) |
3849 | PCIMulRW) | ||
3850 | 3869 | ||
3851 | static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) | 3870 | static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) |
3852 | { | 3871 | { |
@@ -3876,8 +3895,6 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) | |||
3876 | if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) | 3895 | if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) |
3877 | RTL_W8(Config1, cfg1 & ~LEDS0); | 3896 | RTL_W8(Config1, cfg1 & ~LEDS0); |
3878 | 3897 | ||
3879 | RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK); | ||
3880 | |||
3881 | rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); | 3898 | rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); |
3882 | } | 3899 | } |
3883 | 3900 | ||
@@ -3889,8 +3906,6 @@ static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev) | |||
3889 | 3906 | ||
3890 | RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable); | 3907 | RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable); |
3891 | RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); | 3908 | RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); |
3892 | |||
3893 | RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK); | ||
3894 | } | 3909 | } |
3895 | 3910 | ||
3896 | static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev) | 3911 | static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev) |
@@ -3916,6 +3931,8 @@ static void rtl_hw_start_8101(struct net_device *dev) | |||
3916 | } | 3931 | } |
3917 | } | 3932 | } |
3918 | 3933 | ||
3934 | RTL_W8(Cfg9346, Cfg9346_Unlock); | ||
3935 | |||
3919 | switch (tp->mac_version) { | 3936 | switch (tp->mac_version) { |
3920 | case RTL_GIGA_MAC_VER_07: | 3937 | case RTL_GIGA_MAC_VER_07: |
3921 | rtl_hw_start_8102e_1(ioaddr, pdev); | 3938 | rtl_hw_start_8102e_1(ioaddr, pdev); |
@@ -3930,14 +3947,13 @@ static void rtl_hw_start_8101(struct net_device *dev) | |||
3930 | break; | 3947 | break; |
3931 | } | 3948 | } |
3932 | 3949 | ||
3933 | RTL_W8(Cfg9346, Cfg9346_Unlock); | 3950 | RTL_W8(Cfg9346, Cfg9346_Lock); |
3934 | 3951 | ||
3935 | RTL_W8(MaxTxPacketSize, TxPacketMax); | 3952 | RTL_W8(MaxTxPacketSize, TxPacketMax); |
3936 | 3953 | ||
3937 | rtl_set_rx_max_size(ioaddr, rx_buf_sz); | 3954 | rtl_set_rx_max_size(ioaddr, rx_buf_sz); |
3938 | 3955 | ||
3939 | tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW; | 3956 | tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK; |
3940 | |||
3941 | RTL_W16(CPlusCmd, tp->cp_cmd); | 3957 | RTL_W16(CPlusCmd, tp->cp_cmd); |
3942 | 3958 | ||
3943 | RTL_W16(IntrMitigate, 0x0000); | 3959 | RTL_W16(IntrMitigate, 0x0000); |
@@ -3947,14 +3963,10 @@ static void rtl_hw_start_8101(struct net_device *dev) | |||
3947 | RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); | 3963 | RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); |
3948 | rtl_set_rx_tx_config_registers(tp); | 3964 | rtl_set_rx_tx_config_registers(tp); |
3949 | 3965 | ||
3950 | RTL_W8(Cfg9346, Cfg9346_Lock); | ||
3951 | |||
3952 | RTL_R8(IntrMask); | 3966 | RTL_R8(IntrMask); |
3953 | 3967 | ||
3954 | rtl_set_rx_mode(dev); | 3968 | rtl_set_rx_mode(dev); |
3955 | 3969 | ||
3956 | RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); | ||
3957 | |||
3958 | RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); | 3970 | RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); |
3959 | 3971 | ||
3960 | RTL_W16(IntrMask, tp->intr_event); | 3972 | RTL_W16(IntrMask, tp->intr_event); |
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c index 0e8bb19ed60d..ca886d98bdc7 100644 --- a/drivers/net/sfc/ethtool.c +++ b/drivers/net/sfc/ethtool.c | |||
@@ -569,9 +569,14 @@ static void efx_ethtool_self_test(struct net_device *net_dev, | |||
569 | struct ethtool_test *test, u64 *data) | 569 | struct ethtool_test *test, u64 *data) |
570 | { | 570 | { |
571 | struct efx_nic *efx = netdev_priv(net_dev); | 571 | struct efx_nic *efx = netdev_priv(net_dev); |
572 | struct efx_self_tests efx_tests; | 572 | struct efx_self_tests *efx_tests; |
573 | int already_up; | 573 | int already_up; |
574 | int rc; | 574 | int rc = -ENOMEM; |
575 | |||
576 | efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL); | ||
577 | if (!efx_tests) | ||
578 | goto fail; | ||
579 | |||
575 | 580 | ||
576 | ASSERT_RTNL(); | 581 | ASSERT_RTNL(); |
577 | if (efx->state != STATE_RUNNING) { | 582 | if (efx->state != STATE_RUNNING) { |
@@ -589,13 +594,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev, | |||
589 | if (rc) { | 594 | if (rc) { |
590 | netif_err(efx, drv, efx->net_dev, | 595 | netif_err(efx, drv, efx->net_dev, |
591 | "failed opening device.\n"); | 596 | "failed opening device.\n"); |
592 | goto fail2; | 597 | goto fail1; |
593 | } | 598 | } |
594 | } | 599 | } |
595 | 600 | ||
596 | memset(&efx_tests, 0, sizeof(efx_tests)); | 601 | rc = efx_selftest(efx, efx_tests, test->flags); |
597 | |||
598 | rc = efx_selftest(efx, &efx_tests, test->flags); | ||
599 | 602 | ||
600 | if (!already_up) | 603 | if (!already_up) |
601 | dev_close(efx->net_dev); | 604 | dev_close(efx->net_dev); |
@@ -604,10 +607,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev, | |||
604 | rc == 0 ? "passed" : "failed", | 607 | rc == 0 ? "passed" : "failed", |
605 | (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); | 608 | (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); |
606 | 609 | ||
607 | fail2: | 610 | fail1: |
608 | fail1: | ||
609 | /* Fill ethtool results structures */ | 611 | /* Fill ethtool results structures */ |
610 | efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data); | 612 | efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data); |
613 | kfree(efx_tests); | ||
614 | fail: | ||
611 | if (rc) | 615 | if (rc) |
612 | test->flags |= ETH_TEST_FL_FAILED; | 616 | test->flags |= ETH_TEST_FL_FAILED; |
613 | } | 617 | } |
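Note: the sfc change above moves the efx_self_tests results structure off the kernel stack onto the heap and makes every exit path free it before filling in the ethtool results. A trimmed-down standalone sketch of the same allocate/use/free-on-all-paths shape, with a placeholder results type:

    #include <stdlib.h>

    struct example_results { unsigned long counters[512]; };  /* placeholder type */

    static int run_selftest(void)
    {
            struct example_results *res = calloc(1, sizeof(*res));
            int rc;

            if (!res)
                    return -1;            /* report failure, nothing to free */

            /* ... run the tests, recording outcomes in *res ... */
            rc = 0;

            /* ... copy *res into the caller-visible results here ... */

            free(res);                    /* freed on every path that allocated it */
            return rc;
    }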
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c index 5976d1d51df1..640e368ebeee 100644 --- a/drivers/net/sis900.c +++ b/drivers/net/sis900.c | |||
@@ -1777,6 +1777,7 @@ static int sis900_rx(struct net_device *net_dev) | |||
1777 | "cur_rx:%4.4d, dirty_rx:%4.4d\n", | 1777 | "cur_rx:%4.4d, dirty_rx:%4.4d\n", |
1778 | net_dev->name, sis_priv->cur_rx, | 1778 | net_dev->name, sis_priv->cur_rx, |
1779 | sis_priv->dirty_rx); | 1779 | sis_priv->dirty_rx); |
1780 | dev_kfree_skb(skb); | ||
1780 | break; | 1781 | break; |
1781 | } | 1782 | } |
1782 | 1783 | ||
diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 42daf98ba736..35b28f42d208 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c | |||
@@ -3856,9 +3856,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, | |||
3856 | memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); | 3856 | memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); |
3857 | memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); | 3857 | memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); |
3858 | 3858 | ||
3859 | /* device is off until link detection */ | ||
3860 | netif_carrier_off(dev); | ||
3861 | |||
3862 | return dev; | 3859 | return dev; |
3863 | } | 3860 | } |
3864 | 3861 | ||
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index 64bfdae5956f..d70bde95460b 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c | |||
@@ -1178,6 +1178,11 @@ static int smsc911x_open(struct net_device *dev) | |||
1178 | smsc911x_reg_write(pdata, HW_CFG, 0x00050000); | 1178 | smsc911x_reg_write(pdata, HW_CFG, 0x00050000); |
1179 | smsc911x_reg_write(pdata, AFC_CFG, 0x006E3740); | 1179 | smsc911x_reg_write(pdata, AFC_CFG, 0x006E3740); |
1180 | 1180 | ||
1181 | /* Increase the legal frame size of VLAN tagged frames to 1522 bytes */ | ||
1182 | spin_lock_irq(&pdata->mac_lock); | ||
1183 | smsc911x_mac_write(pdata, VLAN1, ETH_P_8021Q); | ||
1184 | spin_unlock_irq(&pdata->mac_lock); | ||
1185 | |||
1181 | /* Make sure EEPROM has finished loading before setting GPIO_CFG */ | 1186 | /* Make sure EEPROM has finished loading before setting GPIO_CFG */ |
1182 | timeout = 50; | 1187 | timeout = 50; |
1183 | while ((smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) && | 1188 | while ((smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) && |
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c index 34a0af3837f9..0e5f03135b50 100644 --- a/drivers/net/stmmac/stmmac_main.c +++ b/drivers/net/stmmac/stmmac_main.c | |||
@@ -1560,8 +1560,10 @@ static int stmmac_mac_device_setup(struct net_device *dev) | |||
1560 | 1560 | ||
1561 | priv->hw = device; | 1561 | priv->hw = device; |
1562 | 1562 | ||
1563 | if (device_can_wakeup(priv->device)) | 1563 | if (device_can_wakeup(priv->device)) { |
1564 | priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ | 1564 | priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ |
1565 | enable_irq_wake(dev->irq); | ||
1566 | } | ||
1565 | 1567 | ||
1566 | return 0; | 1568 | return 0; |
1567 | } | 1569 | } |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 93b32d366611..06c0e5033656 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -11158,7 +11158,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
11158 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) | 11158 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) |
11159 | break; /* We have no PHY */ | 11159 | break; /* We have no PHY */ |
11160 | 11160 | ||
11161 | if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) | 11161 | if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) || |
11162 | ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && | ||
11163 | !netif_running(dev))) | ||
11162 | return -EAGAIN; | 11164 | return -EAGAIN; |
11163 | 11165 | ||
11164 | spin_lock_bh(&tp->lock); | 11166 | spin_lock_bh(&tp->lock); |
@@ -11174,7 +11176,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
11174 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) | 11176 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) |
11175 | break; /* We have no PHY */ | 11177 | break; /* We have no PHY */ |
11176 | 11178 | ||
11177 | if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) | 11179 | if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) || |
11180 | ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && | ||
11181 | !netif_running(dev))) | ||
11178 | return -EAGAIN; | 11182 | return -EAGAIN; |
11179 | 11183 | ||
11180 | spin_lock_bh(&tp->lock); | 11184 | spin_lock_bh(&tp->lock); |
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 04e8ce14a1d0..7113168473cf 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * cdc_ncm.c | 2 | * cdc_ncm.c |
3 | * | 3 | * |
4 | * Copyright (C) ST-Ericsson 2010 | 4 | * Copyright (C) ST-Ericsson 2010-2011 |
5 | * Contact: Alexey Orishko <alexey.orishko@stericsson.com> | 5 | * Contact: Alexey Orishko <alexey.orishko@stericsson.com> |
6 | * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com> | 6 | * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com> |
7 | * | 7 | * |
@@ -54,7 +54,7 @@ | |||
54 | #include <linux/usb/usbnet.h> | 54 | #include <linux/usb/usbnet.h> |
55 | #include <linux/usb/cdc.h> | 55 | #include <linux/usb/cdc.h> |
56 | 56 | ||
57 | #define DRIVER_VERSION "17-Jan-2011" | 57 | #define DRIVER_VERSION "7-Feb-2011" |
58 | 58 | ||
59 | /* CDC NCM subclass 3.2.1 */ | 59 | /* CDC NCM subclass 3.2.1 */ |
60 | #define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10 | 60 | #define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10 |
@@ -77,6 +77,9 @@ | |||
77 | */ | 77 | */ |
78 | #define CDC_NCM_DPT_DATAGRAMS_MAX 32 | 78 | #define CDC_NCM_DPT_DATAGRAMS_MAX 32 |
79 | 79 | ||
80 | /* Maximum amount of IN datagrams in NTB */ | ||
81 | #define CDC_NCM_DPT_DATAGRAMS_IN_MAX 0 /* unlimited */ | ||
82 | |||
80 | /* Restart the timer, if amount of datagrams is less than given value */ | 83 | /* Restart the timer, if amount of datagrams is less than given value */ |
81 | #define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3 | 84 | #define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3 |
82 | 85 | ||
@@ -85,11 +88,6 @@ | |||
85 | (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \ | 88 | (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \ |
86 | (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16)) | 89 | (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16)) |
87 | 90 | ||
88 | struct connection_speed_change { | ||
89 | __le32 USBitRate; /* holds 3GPP downlink value, bits per second */ | ||
90 | __le32 DSBitRate; /* holds 3GPP uplink value, bits per second */ | ||
91 | } __attribute__ ((packed)); | ||
92 | |||
93 | struct cdc_ncm_data { | 91 | struct cdc_ncm_data { |
94 | struct usb_cdc_ncm_nth16 nth16; | 92 | struct usb_cdc_ncm_nth16 nth16; |
95 | struct usb_cdc_ncm_ndp16 ndp16; | 93 | struct usb_cdc_ncm_ndp16 ndp16; |
@@ -198,10 +196,10 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) | |||
198 | { | 196 | { |
199 | struct usb_cdc_notification req; | 197 | struct usb_cdc_notification req; |
200 | u32 val; | 198 | u32 val; |
201 | __le16 max_datagram_size; | ||
202 | u8 flags; | 199 | u8 flags; |
203 | u8 iface_no; | 200 | u8 iface_no; |
204 | int err; | 201 | int err; |
202 | u16 ntb_fmt_supported; | ||
205 | 203 | ||
206 | iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber; | 204 | iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber; |
207 | 205 | ||
@@ -223,6 +221,9 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) | |||
223 | ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder); | 221 | ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder); |
224 | ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor); | 222 | ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor); |
225 | ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment); | 223 | ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment); |
224 | /* devices prior to NCM Errata shall set this field to zero */ | ||
225 | ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams); | ||
226 | ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported); | ||
226 | 227 | ||
227 | if (ctx->func_desc != NULL) | 228 | if (ctx->func_desc != NULL) |
228 | flags = ctx->func_desc->bmNetworkCapabilities; | 229 | flags = ctx->func_desc->bmNetworkCapabilities; |
@@ -231,22 +232,58 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) | |||
231 | 232 | ||
232 | pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u " | 233 | pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u " |
233 | "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u " | 234 | "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u " |
234 | "wNdpOutAlignment=%u flags=0x%x\n", | 235 | "wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n", |
235 | ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus, | 236 | ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus, |
236 | ctx->tx_ndp_modulus, flags); | 237 | ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags); |
237 | 238 | ||
238 | /* max count of tx datagrams without terminating NULL entry */ | 239 | /* max count of tx datagrams */ |
239 | ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX; | 240 | if ((ctx->tx_max_datagrams == 0) || |
241 | (ctx->tx_max_datagrams > CDC_NCM_DPT_DATAGRAMS_MAX)) | ||
242 | ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX; | ||
240 | 243 | ||
241 | /* verify maximum size of received NTB in bytes */ | 244 | /* verify maximum size of received NTB in bytes */ |
242 | if ((ctx->rx_max < | 245 | if (ctx->rx_max < USB_CDC_NCM_NTB_MIN_IN_SIZE) { |
243 | (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) || | 246 | pr_debug("Using min receive length=%d\n", |
244 | (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX)) { | 247 | USB_CDC_NCM_NTB_MIN_IN_SIZE); |
248 | ctx->rx_max = USB_CDC_NCM_NTB_MIN_IN_SIZE; | ||
249 | } | ||
250 | |||
251 | if (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX) { | ||
245 | pr_debug("Using default maximum receive length=%d\n", | 252 | pr_debug("Using default maximum receive length=%d\n", |
246 | CDC_NCM_NTB_MAX_SIZE_RX); | 253 | CDC_NCM_NTB_MAX_SIZE_RX); |
247 | ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX; | 254 | ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX; |
248 | } | 255 | } |
249 | 256 | ||
257 | /* inform device about NTB input size changes */ | ||
258 | if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) { | ||
259 | req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | | ||
260 | USB_RECIP_INTERFACE; | ||
261 | req.bNotificationType = USB_CDC_SET_NTB_INPUT_SIZE; | ||
262 | req.wValue = 0; | ||
263 | req.wIndex = cpu_to_le16(iface_no); | ||
264 | |||
265 | if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) { | ||
266 | struct usb_cdc_ncm_ndp_input_size ndp_in_sz; | ||
267 | |||
268 | req.wLength = 8; | ||
269 | ndp_in_sz.dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); | ||
270 | ndp_in_sz.wNtbInMaxDatagrams = | ||
271 | cpu_to_le16(CDC_NCM_DPT_DATAGRAMS_MAX); | ||
272 | ndp_in_sz.wReserved = 0; | ||
273 | err = cdc_ncm_do_request(ctx, &req, &ndp_in_sz, 0, NULL, | ||
274 | 1000); | ||
275 | } else { | ||
276 | __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); | ||
277 | |||
278 | req.wLength = 4; | ||
279 | err = cdc_ncm_do_request(ctx, &req, &dwNtbInMaxSize, 0, | ||
280 | NULL, 1000); | ||
281 | } | ||
282 | |||
283 | if (err) | ||
284 | pr_debug("Setting NTB Input Size failed\n"); | ||
285 | } | ||
286 | |||
250 | /* verify maximum size of transmitted NTB in bytes */ | 287 | /* verify maximum size of transmitted NTB in bytes */ |
251 | if ((ctx->tx_max < | 288 | if ((ctx->tx_max < |
252 | (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) || | 289 | (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) || |
@@ -297,47 +334,84 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) | |||
297 | /* additional configuration */ | 334 | /* additional configuration */ |
298 | 335 | ||
299 | /* set CRC Mode */ | 336 | /* set CRC Mode */ |
300 | req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE; | 337 | if (flags & USB_CDC_NCM_NCAP_CRC_MODE) { |
301 | req.bNotificationType = USB_CDC_SET_CRC_MODE; | 338 | req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | |
302 | req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED); | 339 | USB_RECIP_INTERFACE; |
303 | req.wIndex = cpu_to_le16(iface_no); | 340 | req.bNotificationType = USB_CDC_SET_CRC_MODE; |
304 | req.wLength = 0; | 341 | req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED); |
305 | 342 | req.wIndex = cpu_to_le16(iface_no); | |
306 | err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); | 343 | req.wLength = 0; |
307 | if (err) | 344 | |
308 | pr_debug("Setting CRC mode off failed\n"); | 345 | err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); |
346 | if (err) | ||
347 | pr_debug("Setting CRC mode off failed\n"); | ||
348 | } | ||
309 | 349 | ||
310 | /* set NTB format */ | 350 | /* set NTB format, if both formats are supported */ |
311 | req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE; | 351 | if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) { |
312 | req.bNotificationType = USB_CDC_SET_NTB_FORMAT; | 352 | req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | |
313 | req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT); | 353 | USB_RECIP_INTERFACE; |
314 | req.wIndex = cpu_to_le16(iface_no); | 354 | req.bNotificationType = USB_CDC_SET_NTB_FORMAT; |
315 | req.wLength = 0; | 355 | req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT); |
356 | req.wIndex = cpu_to_le16(iface_no); | ||
357 | req.wLength = 0; | ||
358 | |||
359 | err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); | ||
360 | if (err) | ||
361 | pr_debug("Setting NTB format to 16-bit failed\n"); | ||
362 | } | ||
316 | 363 | ||
317 | err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); | 364 | ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE; |
318 | if (err) | ||
319 | pr_debug("Setting NTB format to 16-bit failed\n"); | ||
320 | 365 | ||
321 | /* set Max Datagram Size (MTU) */ | 366 | /* set Max Datagram Size (MTU) */ |
322 | req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE; | 367 | if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) { |
323 | req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE; | 368 | __le16 max_datagram_size; |
324 | req.wValue = 0; | 369 | u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); |
325 | req.wIndex = cpu_to_le16(iface_no); | 370 | |
326 | req.wLength = cpu_to_le16(2); | 371 | req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | |
372 | USB_RECIP_INTERFACE; | ||
373 | req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE; | ||
374 | req.wValue = 0; | ||
375 | req.wIndex = cpu_to_le16(iface_no); | ||
376 | req.wLength = cpu_to_le16(2); | ||
377 | |||
378 | err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL, | ||
379 | 1000); | ||
380 | if (err) { | ||
381 | pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n", | ||
382 | CDC_NCM_MIN_DATAGRAM_SIZE); | ||
383 | } else { | ||
384 | ctx->max_datagram_size = le16_to_cpu(max_datagram_size); | ||
385 | /* Check Eth descriptor value */ | ||
386 | if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) { | ||
387 | if (ctx->max_datagram_size > eth_max_sz) | ||
388 | ctx->max_datagram_size = eth_max_sz; | ||
389 | } else { | ||
390 | if (ctx->max_datagram_size > | ||
391 | CDC_NCM_MAX_DATAGRAM_SIZE) | ||
392 | ctx->max_datagram_size = | ||
393 | CDC_NCM_MAX_DATAGRAM_SIZE; | ||
394 | } | ||
327 | 395 | ||
328 | err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL, 1000); | 396 | if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE) |
329 | if (err) { | 397 | ctx->max_datagram_size = |
330 | pr_debug(" GET_MAX_DATAGRAM_SIZE failed, using size=%u\n", | 398 | CDC_NCM_MIN_DATAGRAM_SIZE; |
331 | CDC_NCM_MIN_DATAGRAM_SIZE); | 399 | |
332 | /* use default */ | 400 | /* if value changed, update device */ |
333 | ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE; | 401 | req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | |
334 | } else { | 402 | USB_RECIP_INTERFACE; |
335 | ctx->max_datagram_size = le16_to_cpu(max_datagram_size); | 403 | req.bNotificationType = USB_CDC_SET_MAX_DATAGRAM_SIZE; |
404 | req.wValue = 0; | ||
405 | req.wIndex = cpu_to_le16(iface_no); | ||
406 | req.wLength = 2; | ||
407 | max_datagram_size = cpu_to_le16(ctx->max_datagram_size); | ||
408 | |||
409 | err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, | ||
410 | 0, NULL, 1000); | ||
411 | if (err) | ||
412 | pr_debug("SET_MAX_DATAGRAM_SIZE failed\n"); | ||
413 | } | ||
336 | 414 | ||
337 | if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE) | ||
338 | ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE; | ||
339 | else if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE) | ||
340 | ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE; | ||
341 | } | 415 | } |
342 | 416 | ||
343 | if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN)) | 417 | if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN)) |
@@ -466,19 +540,13 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) | |||
466 | 540 | ||
467 | ctx->ether_desc = | 541 | ctx->ether_desc = |
468 | (const struct usb_cdc_ether_desc *)buf; | 542 | (const struct usb_cdc_ether_desc *)buf; |
469 | |||
470 | dev->hard_mtu = | 543 | dev->hard_mtu = |
471 | le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); | 544 | le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); |
472 | 545 | ||
473 | if (dev->hard_mtu < | 546 | if (dev->hard_mtu < CDC_NCM_MIN_DATAGRAM_SIZE) |
474 | (CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN)) | 547 | dev->hard_mtu = CDC_NCM_MIN_DATAGRAM_SIZE; |
475 | dev->hard_mtu = | 548 | else if (dev->hard_mtu > CDC_NCM_MAX_DATAGRAM_SIZE) |
476 | CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN; | 549 | dev->hard_mtu = CDC_NCM_MAX_DATAGRAM_SIZE; |
477 | |||
478 | else if (dev->hard_mtu > | ||
479 | (CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN)) | ||
480 | dev->hard_mtu = | ||
481 | CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN; | ||
482 | break; | 550 | break; |
483 | 551 | ||
484 | case USB_CDC_NCM_TYPE: | 552 | case USB_CDC_NCM_TYPE: |
@@ -628,13 +696,13 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) | |||
628 | u32 offset; | 696 | u32 offset; |
629 | u32 last_offset; | 697 | u32 last_offset; |
630 | u16 n = 0; | 698 | u16 n = 0; |
631 | u8 timeout = 0; | 699 | u8 ready2send = 0; |
632 | 700 | ||
633 | /* if there is a remaining skb, it gets priority */ | 701 | /* if there is a remaining skb, it gets priority */ |
634 | if (skb != NULL) | 702 | if (skb != NULL) |
635 | swap(skb, ctx->tx_rem_skb); | 703 | swap(skb, ctx->tx_rem_skb); |
636 | else | 704 | else |
637 | timeout = 1; | 705 | ready2send = 1; |
638 | 706 | ||
639 | /* | 707 | /* |
640 | * +----------------+ | 708 | * +----------------+ |
@@ -682,9 +750,10 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) | |||
682 | 750 | ||
683 | for (; n < ctx->tx_max_datagrams; n++) { | 751 | for (; n < ctx->tx_max_datagrams; n++) { |
684 | /* check if end of transmit buffer is reached */ | 752 | /* check if end of transmit buffer is reached */ |
685 | if (offset >= ctx->tx_max) | 753 | if (offset >= ctx->tx_max) { |
754 | ready2send = 1; | ||
686 | break; | 755 | break; |
687 | 756 | } | |
688 | /* compute maximum buffer size */ | 757 | /* compute maximum buffer size */ |
689 | rem = ctx->tx_max - offset; | 758 | rem = ctx->tx_max - offset; |
690 | 759 | ||
@@ -711,9 +780,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) | |||
711 | } | 780 | } |
712 | ctx->tx_rem_skb = skb; | 781 | ctx->tx_rem_skb = skb; |
713 | skb = NULL; | 782 | skb = NULL; |
714 | 783 | ready2send = 1; | |
715 | /* loop one more time */ | ||
716 | timeout = 1; | ||
717 | } | 784 | } |
718 | break; | 785 | break; |
719 | } | 786 | } |
@@ -756,7 +823,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) | |||
756 | ctx->tx_curr_last_offset = last_offset; | 823 | ctx->tx_curr_last_offset = last_offset; |
757 | goto exit_no_skb; | 824 | goto exit_no_skb; |
758 | 825 | ||
759 | } else if ((n < ctx->tx_max_datagrams) && (timeout == 0)) { | 826 | } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) { |
760 | /* wait for more frames */ | 827 | /* wait for more frames */ |
761 | /* push variables */ | 828 | /* push variables */ |
762 | ctx->tx_curr_skb = skb_out; | 829 | ctx->tx_curr_skb = skb_out; |
@@ -813,7 +880,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) | |||
813 | cpu_to_le16(sizeof(ctx->tx_ncm.nth16)); | 880 | cpu_to_le16(sizeof(ctx->tx_ncm.nth16)); |
814 | ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq); | 881 | ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq); |
815 | ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset); | 882 | ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset); |
816 | ctx->tx_ncm.nth16.wFpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16), | 883 | ctx->tx_ncm.nth16.wNdpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16), |
817 | ctx->tx_ndp_modulus); | 884 | ctx->tx_ndp_modulus); |
818 | 885 | ||
819 | memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16)); | 886 | memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16)); |
@@ -825,13 +892,13 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) | |||
825 | rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) * | 892 | rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) * |
826 | sizeof(struct usb_cdc_ncm_dpe16)); | 893 | sizeof(struct usb_cdc_ncm_dpe16)); |
827 | ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem); | 894 | ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem); |
828 | ctx->tx_ncm.ndp16.wNextFpIndex = 0; /* reserved */ | 895 | ctx->tx_ncm.ndp16.wNextNdpIndex = 0; /* reserved */ |
829 | 896 | ||
830 | memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex, | 897 | memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex, |
831 | &(ctx->tx_ncm.ndp16), | 898 | &(ctx->tx_ncm.ndp16), |
832 | sizeof(ctx->tx_ncm.ndp16)); | 899 | sizeof(ctx->tx_ncm.ndp16)); |
833 | 900 | ||
834 | memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex + | 901 | memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex + |
835 | sizeof(ctx->tx_ncm.ndp16), | 902 | sizeof(ctx->tx_ncm.ndp16), |
836 | &(ctx->tx_ncm.dpe16), | 903 | &(ctx->tx_ncm.dpe16), |
837 | (ctx->tx_curr_frame_num + 1) * | 904 | (ctx->tx_curr_frame_num + 1) * |
@@ -961,7 +1028,7 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in) | |||
961 | goto error; | 1028 | goto error; |
962 | } | 1029 | } |
963 | 1030 | ||
964 | temp = le16_to_cpu(ctx->rx_ncm.nth16.wFpIndex); | 1031 | temp = le16_to_cpu(ctx->rx_ncm.nth16.wNdpIndex); |
965 | if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) { | 1032 | if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) { |
966 | pr_debug("invalid DPT16 index\n"); | 1033 | pr_debug("invalid DPT16 index\n"); |
967 | goto error; | 1034 | goto error; |
@@ -1048,10 +1115,10 @@ error: | |||
1048 | 1115 | ||
1049 | static void | 1116 | static void |
1050 | cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx, | 1117 | cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx, |
1051 | struct connection_speed_change *data) | 1118 | struct usb_cdc_speed_change *data) |
1052 | { | 1119 | { |
1053 | uint32_t rx_speed = le32_to_cpu(data->USBitRate); | 1120 | uint32_t rx_speed = le32_to_cpu(data->DLBitRRate); |
1054 | uint32_t tx_speed = le32_to_cpu(data->DSBitRate); | 1121 | uint32_t tx_speed = le32_to_cpu(data->ULBitRate); |
1055 | 1122 | ||
1056 | /* | 1123 | /* |
1057 | * Currently the USB-NET API does not support reporting the actual | 1124 | * Currently the USB-NET API does not support reporting the actual |
@@ -1092,7 +1159,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb) | |||
1092 | /* test for split data in 8-byte chunks */ | 1159 | /* test for split data in 8-byte chunks */ |
1093 | if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) { | 1160 | if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) { |
1094 | cdc_ncm_speed_change(ctx, | 1161 | cdc_ncm_speed_change(ctx, |
1095 | (struct connection_speed_change *)urb->transfer_buffer); | 1162 | (struct usb_cdc_speed_change *)urb->transfer_buffer); |
1096 | return; | 1163 | return; |
1097 | } | 1164 | } |
1098 | 1165 | ||
@@ -1120,12 +1187,12 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb) | |||
1120 | break; | 1187 | break; |
1121 | 1188 | ||
1122 | case USB_CDC_NOTIFY_SPEED_CHANGE: | 1189 | case USB_CDC_NOTIFY_SPEED_CHANGE: |
1123 | if (urb->actual_length < | 1190 | if (urb->actual_length < (sizeof(*event) + |
1124 | (sizeof(*event) + sizeof(struct connection_speed_change))) | 1191 | sizeof(struct usb_cdc_speed_change))) |
1125 | set_bit(EVENT_STS_SPLIT, &dev->flags); | 1192 | set_bit(EVENT_STS_SPLIT, &dev->flags); |
1126 | else | 1193 | else |
1127 | cdc_ncm_speed_change(ctx, | 1194 | cdc_ncm_speed_change(ctx, |
1128 | (struct connection_speed_change *) &event[1]); | 1195 | (struct usb_cdc_speed_change *) &event[1]); |
1129 | break; | 1196 | break; |
1130 | 1197 | ||
1131 | default: | 1198 | default: |
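The cdc_ncm rework above does three related things: it honours wNtbOutMaxDatagrams from the device (falling back to the driver cap when the field is zero), it only issues the optional control requests (SET_CRC_MODE, SET_NTB_FORMAT, GET/SET_MAX_DATAGRAM_SIZE, SET_NTB_INPUT_SIZE) when the matching capability bit is present in bmNetworkCapabilities, and it clamps every negotiated size into the driver's supported range. The clamp-then-write-back step reduces to roughly this (a sketch; the limit macros and the wrapper are placeholders, not the driver's names):

    u16 size = le16_to_cpu(resp);                  /* value the device reported */

    if (size < MIN_DATAGRAM_SIZE)                  /* placeholder lower bound */
            size = MIN_DATAGRAM_SIZE;
    else if (size > MAX_DATAGRAM_SIZE)             /* placeholder upper bound */
            size = MAX_DATAGRAM_SIZE;

    ctx->max_datagram_size = size;

    /* only talk to the device when it advertised the capability */
    if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE)
            set_max_datagram_size(ctx, size);      /* hypothetical wrapper around the control request */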
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 02b622e3b9fb..5002f5be47be 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c | |||
@@ -651,6 +651,10 @@ static const struct usb_device_id products[] = { | |||
651 | .driver_info = (unsigned long)&dm9601_info, | 651 | .driver_info = (unsigned long)&dm9601_info, |
652 | }, | 652 | }, |
653 | { | 653 | { |
654 | USB_DEVICE(0x0fe6, 0x9700), /* DM9601 USB to Fast Ethernet Adapter */ | ||
655 | .driver_info = (unsigned long)&dm9601_info, | ||
656 | }, | ||
657 | { | ||
654 | USB_DEVICE(0x0a46, 0x9000), /* DM9000E */ | 658 | USB_DEVICE(0x0a46, 0x9000), /* DM9000E */ |
655 | .driver_info = (unsigned long)&dm9601_info, | 659 | .driver_info = (unsigned long)&dm9601_info, |
656 | }, | 660 | }, |
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index bed8fcedff49..6d83812603b6 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c | |||
@@ -2628,15 +2628,15 @@ exit: | |||
2628 | 2628 | ||
2629 | static void hso_free_tiomget(struct hso_serial *serial) | 2629 | static void hso_free_tiomget(struct hso_serial *serial) |
2630 | { | 2630 | { |
2631 | struct hso_tiocmget *tiocmget = serial->tiocmget; | 2631 | struct hso_tiocmget *tiocmget; |
2632 | if (!serial) | ||
2633 | return; | ||
2634 | tiocmget = serial->tiocmget; | ||
2632 | if (tiocmget) { | 2635 | if (tiocmget) { |
2633 | if (tiocmget->urb) { | 2636 | usb_free_urb(tiocmget->urb); |
2634 | usb_free_urb(tiocmget->urb); | 2637 | tiocmget->urb = NULL; |
2635 | tiocmget->urb = NULL; | ||
2636 | } | ||
2637 | serial->tiocmget = NULL; | 2638 | serial->tiocmget = NULL; |
2638 | kfree(tiocmget); | 2639 | kfree(tiocmget); |
2639 | |||
2640 | } | 2640 | } |
2641 | } | 2641 | } |
2642 | 2642 | ||
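The hso_free_tiomget() fix above is purely an ordering problem: the old code initialised the local from serial->tiocmget before testing serial for NULL, so the guard could never help. The rule is to assign only after the check; in shorthand (not the driver source):

    struct hso_tiocmget *tiocmget;

    if (!serial)                    /* check the pointer first ... */
            return;
    tiocmget = serial->tiocmget;    /* ... then dereference it */

The separate NULL test on tiocmget->urb could also be dropped because usb_free_urb() already treats a NULL argument as a no-op.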
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index ed9a41643ff4..95c41d56631c 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c | |||
@@ -931,8 +931,10 @@ fail_halt: | |||
931 | if (urb != NULL) { | 931 | if (urb != NULL) { |
932 | clear_bit (EVENT_RX_MEMORY, &dev->flags); | 932 | clear_bit (EVENT_RX_MEMORY, &dev->flags); |
933 | status = usb_autopm_get_interface(dev->intf); | 933 | status = usb_autopm_get_interface(dev->intf); |
934 | if (status < 0) | 934 | if (status < 0) { |
935 | usb_free_urb(urb); | ||
935 | goto fail_lowmem; | 936 | goto fail_lowmem; |
937 | } | ||
936 | if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK) | 938 | if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK) |
937 | resched = 0; | 939 | resched = 0; |
938 | usb_autopm_put_interface(dev->intf); | 940 | usb_autopm_put_interface(dev->intf); |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 90a23e410d1b..82dba5aaf423 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -446,6 +446,20 @@ static void skb_recv_done(struct virtqueue *rvq) | |||
446 | } | 446 | } |
447 | } | 447 | } |
448 | 448 | ||
449 | static void virtnet_napi_enable(struct virtnet_info *vi) | ||
450 | { | ||
451 | napi_enable(&vi->napi); | ||
452 | |||
453 | /* If all buffers were filled by other side before we napi_enabled, we | ||
454 | * won't get another interrupt, so process any outstanding packets | ||
455 | * now. virtnet_poll wants re-enable the queue, so we disable here. | ||
456 | * We synchronize against interrupts via NAPI_STATE_SCHED */ | ||
457 | if (napi_schedule_prep(&vi->napi)) { | ||
458 | virtqueue_disable_cb(vi->rvq); | ||
459 | __napi_schedule(&vi->napi); | ||
460 | } | ||
461 | } | ||
462 | |||
449 | static void refill_work(struct work_struct *work) | 463 | static void refill_work(struct work_struct *work) |
450 | { | 464 | { |
451 | struct virtnet_info *vi; | 465 | struct virtnet_info *vi; |
@@ -454,7 +468,7 @@ static void refill_work(struct work_struct *work) | |||
454 | vi = container_of(work, struct virtnet_info, refill.work); | 468 | vi = container_of(work, struct virtnet_info, refill.work); |
455 | napi_disable(&vi->napi); | 469 | napi_disable(&vi->napi); |
456 | still_empty = !try_fill_recv(vi, GFP_KERNEL); | 470 | still_empty = !try_fill_recv(vi, GFP_KERNEL); |
457 | napi_enable(&vi->napi); | 471 | virtnet_napi_enable(vi); |
458 | 472 | ||
459 | /* In theory, this can happen: if we don't get any buffers in | 473 | /* In theory, this can happen: if we don't get any buffers in |
460 | * we will *never* try to fill again. */ | 474 | * we will *never* try to fill again. */ |
@@ -638,16 +652,7 @@ static int virtnet_open(struct net_device *dev) | |||
638 | { | 652 | { |
639 | struct virtnet_info *vi = netdev_priv(dev); | 653 | struct virtnet_info *vi = netdev_priv(dev); |
640 | 654 | ||
641 | napi_enable(&vi->napi); | 655 | virtnet_napi_enable(vi); |
642 | |||
643 | /* If all buffers were filled by other side before we napi_enabled, we | ||
644 | * won't get another interrupt, so process any outstanding packets | ||
645 | * now. virtnet_poll wants re-enable the queue, so we disable here. | ||
646 | * We synchronize against interrupts via NAPI_STATE_SCHED */ | ||
647 | if (napi_schedule_prep(&vi->napi)) { | ||
648 | virtqueue_disable_cb(vi->rvq); | ||
649 | __napi_schedule(&vi->napi); | ||
650 | } | ||
651 | return 0; | 656 | return 0; |
652 | } | 657 | } |
653 | 658 | ||
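Factoring out virtnet_napi_enable() matters because refill_work() previously re-enabled NAPI with a bare napi_enable(): if the host had filled every buffer while NAPI was disabled, no further interrupt would arrive and receive could stall. The general shape of the "enable, then poll once by hand" pattern for any NAPI driver is roughly (sketch; the rx-notification call is whatever the device uses, virtqueue_disable_cb() in this driver):

    napi_enable(&priv->napi);

    /* events that fired while NAPI was off are not replayed, so take the
     * SCHED bit and run one poll by hand; SCHED also serialises this
     * against a real interrupt arriving at the same time */
    if (napi_schedule_prep(&priv->napi)) {
            virtqueue_disable_cb(priv->rvq);       /* mask device rx notifications first */
            __napi_schedule(&priv->napi);
    }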
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c index 78c26fdccad1..62ce2f4e8605 100644 --- a/drivers/net/wireless/ath/ath5k/phy.c +++ b/drivers/net/wireless/ath/ath5k/phy.c | |||
@@ -282,6 +282,34 @@ int ath5k_hw_phy_disable(struct ath5k_hw *ah) | |||
282 | return 0; | 282 | return 0; |
283 | } | 283 | } |
284 | 284 | ||
285 | /* | ||
286 | * Wait for synth to settle | ||
287 | */ | ||
288 | static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah, | ||
289 | struct ieee80211_channel *channel) | ||
290 | { | ||
291 | /* | ||
292 | * On 5211+ read activation -> rx delay | ||
293 | * and use it (100ns steps). | ||
294 | */ | ||
295 | if (ah->ah_version != AR5K_AR5210) { | ||
296 | u32 delay; | ||
297 | delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) & | ||
298 | AR5K_PHY_RX_DELAY_M; | ||
299 | delay = (channel->hw_value & CHANNEL_CCK) ? | ||
300 | ((delay << 2) / 22) : (delay / 10); | ||
301 | if (ah->ah_bwmode == AR5K_BWMODE_10MHZ) | ||
302 | delay = delay << 1; | ||
303 | if (ah->ah_bwmode == AR5K_BWMODE_5MHZ) | ||
304 | delay = delay << 2; | ||
305 | /* XXX: /2 on turbo ? Let's be safe | ||
306 | * for now */ | ||
307 | udelay(100 + delay); | ||
308 | } else { | ||
309 | mdelay(1); | ||
310 | } | ||
311 | } | ||
312 | |||
285 | 313 | ||
286 | /**********************\ | 314 | /**********************\ |
287 | * RF Gain optimization * | 315 | * RF Gain optimization * |
@@ -1253,6 +1281,7 @@ static int ath5k_hw_channel(struct ath5k_hw *ah, | |||
1253 | case AR5K_RF5111: | 1281 | case AR5K_RF5111: |
1254 | ret = ath5k_hw_rf5111_channel(ah, channel); | 1282 | ret = ath5k_hw_rf5111_channel(ah, channel); |
1255 | break; | 1283 | break; |
1284 | case AR5K_RF2317: | ||
1256 | case AR5K_RF2425: | 1285 | case AR5K_RF2425: |
1257 | ret = ath5k_hw_rf2425_channel(ah, channel); | 1286 | ret = ath5k_hw_rf2425_channel(ah, channel); |
1258 | break; | 1287 | break; |
@@ -3237,6 +3266,13 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, | |||
3237 | /* Failed */ | 3266 | /* Failed */ |
3238 | if (i >= 100) | 3267 | if (i >= 100) |
3239 | return -EIO; | 3268 | return -EIO; |
3269 | |||
3270 | /* Set channel and wait for synth */ | ||
3271 | ret = ath5k_hw_channel(ah, channel); | ||
3272 | if (ret) | ||
3273 | return ret; | ||
3274 | |||
3275 | ath5k_hw_wait_for_synth(ah, channel); | ||
3240 | } | 3276 | } |
3241 | 3277 | ||
3242 | /* | 3278 | /* |
@@ -3251,13 +3287,53 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, | |||
3251 | if (ret) | 3287 | if (ret) |
3252 | return ret; | 3288 | return ret; |
3253 | 3289 | ||
3290 | /* Write OFDM timings on 5212*/ | ||
3291 | if (ah->ah_version == AR5K_AR5212 && | ||
3292 | channel->hw_value & CHANNEL_OFDM) { | ||
3293 | |||
3294 | ret = ath5k_hw_write_ofdm_timings(ah, channel); | ||
3295 | if (ret) | ||
3296 | return ret; | ||
3297 | |||
3298 | /* Spur info is available only from EEPROM versions | ||
3299 | * greater than 5.3, but the EEPROM routines will use | ||
3300 | * static values for older versions */ | ||
3301 | if (ah->ah_mac_srev >= AR5K_SREV_AR5424) | ||
3302 | ath5k_hw_set_spur_mitigation_filter(ah, | ||
3303 | channel); | ||
3304 | } | ||
3305 | |||
3306 | /* If we used fast channel switching | ||
3307 | * we are done, release RF bus and | ||
3308 | * fire up NF calibration. | ||
3309 | * | ||
3310 | * Note: Only NF calibration due to | ||
3311 | * channel change, not AGC calibration | ||
3312 | * since AGC is still running ! | ||
3313 | */ | ||
3314 | if (fast) { | ||
3315 | /* | ||
3316 | * Release RF Bus grant | ||
3317 | */ | ||
3318 | AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ, | ||
3319 | AR5K_PHY_RFBUS_REQ_REQUEST); | ||
3320 | |||
3321 | /* | ||
3322 | * Start NF calibration | ||
3323 | */ | ||
3324 | AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, | ||
3325 | AR5K_PHY_AGCCTL_NF); | ||
3326 | |||
3327 | return ret; | ||
3328 | } | ||
3329 | |||
3254 | /* | 3330 | /* |
3255 | * For 5210 we do all initialization using | 3331 | * For 5210 we do all initialization using |
3256 | * initvals, so we don't have to modify | 3332 | * initvals, so we don't have to modify |
3257 | * any settings (5210 also only supports | 3333 | * any settings (5210 also only supports |
3258 | * a/aturbo modes) | 3334 | * a/aturbo modes) |
3259 | */ | 3335 | */ |
3260 | if ((ah->ah_version != AR5K_AR5210) && !fast) { | 3336 | if (ah->ah_version != AR5K_AR5210) { |
3261 | 3337 | ||
3262 | /* | 3338 | /* |
3263 | * Write initial RF gain settings | 3339 | * Write initial RF gain settings |
@@ -3276,22 +3352,6 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, | |||
3276 | if (ret) | 3352 | if (ret) |
3277 | return ret; | 3353 | return ret; |
3278 | 3354 | ||
3279 | /* Write OFDM timings on 5212*/ | ||
3280 | if (ah->ah_version == AR5K_AR5212 && | ||
3281 | channel->hw_value & CHANNEL_OFDM) { | ||
3282 | |||
3283 | ret = ath5k_hw_write_ofdm_timings(ah, channel); | ||
3284 | if (ret) | ||
3285 | return ret; | ||
3286 | |||
3287 | /* Spur info is available only from EEPROM versions | ||
3288 | * greater than 5.3, but the EEPROM routines will use | ||
3289 | * static values for older versions */ | ||
3290 | if (ah->ah_mac_srev >= AR5K_SREV_AR5424) | ||
3291 | ath5k_hw_set_spur_mitigation_filter(ah, | ||
3292 | channel); | ||
3293 | } | ||
3294 | |||
3295 | /*Enable/disable 802.11b mode on 5111 | 3355 | /*Enable/disable 802.11b mode on 5111 |
3296 | (enable 2111 frequency converter + CCK)*/ | 3356 | (enable 2111 frequency converter + CCK)*/ |
3297 | if (ah->ah_radio == AR5K_RF5111) { | 3357 | if (ah->ah_radio == AR5K_RF5111) { |
@@ -3322,47 +3382,20 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, | |||
3322 | */ | 3382 | */ |
3323 | ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT); | 3383 | ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT); |
3324 | 3384 | ||
3385 | ath5k_hw_wait_for_synth(ah, channel); | ||
3386 | |||
3325 | /* | 3387 | /* |
3326 | * On 5211+ read activation -> rx delay | 3388 | * Perform ADC test to see if baseband is ready |
3327 | * and use it. | 3389 | * Set tx hold and check adc test register |
3328 | */ | 3390 | */ |
3329 | if (ah->ah_version != AR5K_AR5210) { | 3391 | phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1); |
3330 | u32 delay; | 3392 | ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1); |
3331 | delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) & | 3393 | for (i = 0; i <= 20; i++) { |
3332 | AR5K_PHY_RX_DELAY_M; | 3394 | if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10)) |
3333 | delay = (channel->hw_value & CHANNEL_CCK) ? | 3395 | break; |
3334 | ((delay << 2) / 22) : (delay / 10); | 3396 | udelay(200); |
3335 | if (ah->ah_bwmode == AR5K_BWMODE_10MHZ) | ||
3336 | delay = delay << 1; | ||
3337 | if (ah->ah_bwmode == AR5K_BWMODE_5MHZ) | ||
3338 | delay = delay << 2; | ||
3339 | /* XXX: /2 on turbo ? Let's be safe | ||
3340 | * for now */ | ||
3341 | udelay(100 + delay); | ||
3342 | } else { | ||
3343 | mdelay(1); | ||
3344 | } | ||
3345 | |||
3346 | if (fast) | ||
3347 | /* | ||
3348 | * Release RF Bus grant | ||
3349 | */ | ||
3350 | AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ, | ||
3351 | AR5K_PHY_RFBUS_REQ_REQUEST); | ||
3352 | else { | ||
3353 | /* | ||
3354 | * Perform ADC test to see if baseband is ready | ||
3355 | * Set tx hold and check adc test register | ||
3356 | */ | ||
3357 | phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1); | ||
3358 | ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1); | ||
3359 | for (i = 0; i <= 20; i++) { | ||
3360 | if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10)) | ||
3361 | break; | ||
3362 | udelay(200); | ||
3363 | } | ||
3364 | ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1); | ||
3365 | } | 3397 | } |
3398 | ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1); | ||
3366 | 3399 | ||
3367 | /* | 3400 | /* |
3368 | * Start automatic gain control calibration | 3401 | * Start automatic gain control calibration |
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 3681caf54282..1a7fa6ea4cf5 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <linux/device.h> | 21 | #include <linux/device.h> |
22 | #include <linux/leds.h> | 22 | #include <linux/leds.h> |
23 | #include <linux/completion.h> | 23 | #include <linux/completion.h> |
24 | #include <linux/pm_qos_params.h> | ||
25 | 24 | ||
26 | #include "debug.h" | 25 | #include "debug.h" |
27 | #include "common.h" | 26 | #include "common.h" |
@@ -57,8 +56,6 @@ struct ath_node; | |||
57 | 56 | ||
58 | #define A_MAX(a, b) ((a) > (b) ? (a) : (b)) | 57 | #define A_MAX(a, b) ((a) > (b) ? (a) : (b)) |
59 | 58 | ||
60 | #define ATH9K_PM_QOS_DEFAULT_VALUE 55 | ||
61 | |||
62 | #define TSF_TO_TU(_h,_l) \ | 59 | #define TSF_TO_TU(_h,_l) \ |
63 | ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10)) | 60 | ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10)) |
64 | 61 | ||
@@ -218,6 +215,7 @@ struct ath_frame_info { | |||
218 | struct ath_buf_state { | 215 | struct ath_buf_state { |
219 | u8 bf_type; | 216 | u8 bf_type; |
220 | u8 bfs_paprd; | 217 | u8 bfs_paprd; |
218 | unsigned long bfs_paprd_timestamp; | ||
221 | enum ath9k_internal_frame_type bfs_ftype; | 219 | enum ath9k_internal_frame_type bfs_ftype; |
222 | }; | 220 | }; |
223 | 221 | ||
@@ -593,7 +591,6 @@ struct ath_softc { | |||
593 | struct work_struct paprd_work; | 591 | struct work_struct paprd_work; |
594 | struct work_struct hw_check_work; | 592 | struct work_struct hw_check_work; |
595 | struct completion paprd_complete; | 593 | struct completion paprd_complete; |
596 | bool paprd_pending; | ||
597 | 594 | ||
598 | u32 intrstatus; | 595 | u32 intrstatus; |
599 | u32 sc_flags; /* SC_OP_* */ | 596 | u32 sc_flags; /* SC_OP_* */ |
@@ -633,8 +630,6 @@ struct ath_softc { | |||
633 | struct ath_descdma txsdma; | 630 | struct ath_descdma txsdma; |
634 | 631 | ||
635 | struct ath_ant_comb ant_comb; | 632 | struct ath_ant_comb ant_comb; |
636 | |||
637 | struct pm_qos_request_list pm_qos_req; | ||
638 | }; | 633 | }; |
639 | 634 | ||
640 | struct ath_wiphy { | 635 | struct ath_wiphy { |
@@ -666,7 +661,6 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz) | |||
666 | extern struct ieee80211_ops ath9k_ops; | 661 | extern struct ieee80211_ops ath9k_ops; |
667 | extern int ath9k_modparam_nohwcrypt; | 662 | extern int ath9k_modparam_nohwcrypt; |
668 | extern int led_blink; | 663 | extern int led_blink; |
669 | extern int ath9k_pm_qos_value; | ||
670 | extern bool is_ath9k_unloaded; | 664 | extern bool is_ath9k_unloaded; |
671 | 665 | ||
672 | irqreturn_t ath_isr(int irq, void *dev); | 666 | irqreturn_t ath_isr(int irq, void *dev); |
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 5ab3084eb9cb..07b1633b7f3f 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c | |||
@@ -219,8 +219,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev) | |||
219 | struct tx_buf *tx_buf = NULL; | 219 | struct tx_buf *tx_buf = NULL; |
220 | struct sk_buff *nskb = NULL; | 220 | struct sk_buff *nskb = NULL; |
221 | int ret = 0, i; | 221 | int ret = 0, i; |
222 | u16 *hdr, tx_skb_cnt = 0; | 222 | u16 tx_skb_cnt = 0; |
223 | u8 *buf; | 223 | u8 *buf; |
224 | __le16 *hdr; | ||
224 | 225 | ||
225 | if (hif_dev->tx.tx_skb_cnt == 0) | 226 | if (hif_dev->tx.tx_skb_cnt == 0) |
226 | return 0; | 227 | return 0; |
@@ -245,9 +246,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev) | |||
245 | 246 | ||
246 | buf = tx_buf->buf; | 247 | buf = tx_buf->buf; |
247 | buf += tx_buf->offset; | 248 | buf += tx_buf->offset; |
248 | hdr = (u16 *)buf; | 249 | hdr = (__le16 *)buf; |
249 | *hdr++ = nskb->len; | 250 | *hdr++ = cpu_to_le16(nskb->len); |
250 | *hdr++ = ATH_USB_TX_STREAM_MODE_TAG; | 251 | *hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG); |
251 | buf += 4; | 252 | buf += 4; |
252 | memcpy(buf, nskb->data, nskb->len); | 253 | memcpy(buf, nskb->data, nskb->len); |
253 | tx_buf->len = nskb->len + 4; | 254 | tx_buf->len = nskb->len + 4; |
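The hif_usb change converts the stream-mode header stores from host-endian u16 writes to explicit __le16 plus cpu_to_le16(), which is a no-op on little-endian hosts but produces the correct byte order on big-endian ones. The pattern for building such a wire header, in outline (the tag name is a placeholder):

    __le16 *hdr = (__le16 *)buf;            /* buf: start of the 4-byte stream header */
    u16 len;

    *hdr++ = cpu_to_le16(skb->len);         /* length field, little-endian on the wire */
    *hdr++ = cpu_to_le16(STREAM_MODE_TAG);  /* placeholder for the stream-mode magic */

    /* and the matching read side: */
    len = le16_to_cpu(*(__le16 *)buf);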
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 087a6a95edd5..a033d01bf8a0 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c | |||
@@ -41,10 +41,6 @@ static int ath9k_btcoex_enable; | |||
41 | module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444); | 41 | module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444); |
42 | MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence"); | 42 | MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence"); |
43 | 43 | ||
44 | int ath9k_pm_qos_value = ATH9K_PM_QOS_DEFAULT_VALUE; | ||
45 | module_param_named(pmqos, ath9k_pm_qos_value, int, S_IRUSR | S_IRGRP | S_IROTH); | ||
46 | MODULE_PARM_DESC(pmqos, "User specified PM-QOS value"); | ||
47 | |||
48 | bool is_ath9k_unloaded; | 44 | bool is_ath9k_unloaded; |
49 | /* We use the hw_value as an index into our private channel structure */ | 45 | /* We use the hw_value as an index into our private channel structure */ |
50 | 46 | ||
@@ -762,9 +758,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid, | |||
762 | ath_init_leds(sc); | 758 | ath_init_leds(sc); |
763 | ath_start_rfkill_poll(sc); | 759 | ath_start_rfkill_poll(sc); |
764 | 760 | ||
765 | pm_qos_add_request(&sc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, | ||
766 | PM_QOS_DEFAULT_VALUE); | ||
767 | |||
768 | return 0; | 761 | return 0; |
769 | 762 | ||
770 | error_world: | 763 | error_world: |
@@ -831,7 +824,6 @@ void ath9k_deinit_device(struct ath_softc *sc) | |||
831 | } | 824 | } |
832 | 825 | ||
833 | ieee80211_unregister_hw(hw); | 826 | ieee80211_unregister_hw(hw); |
834 | pm_qos_remove_request(&sc->pm_qos_req); | ||
835 | ath_rx_cleanup(sc); | 827 | ath_rx_cleanup(sc); |
836 | ath_tx_cleanup(sc); | 828 | ath_tx_cleanup(sc); |
837 | ath9k_deinit_softc(sc); | 829 | ath9k_deinit_softc(sc); |
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index 180170d3ce25..2915b11edefb 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c | |||
@@ -885,7 +885,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints) | |||
885 | struct ath_common *common = ath9k_hw_common(ah); | 885 | struct ath_common *common = ath9k_hw_common(ah); |
886 | 886 | ||
887 | if (!(ints & ATH9K_INT_GLOBAL)) | 887 | if (!(ints & ATH9K_INT_GLOBAL)) |
888 | ath9k_hw_enable_interrupts(ah); | 888 | ath9k_hw_disable_interrupts(ah); |
889 | 889 | ||
890 | ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints); | 890 | ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints); |
891 | 891 | ||
@@ -963,7 +963,8 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints) | |||
963 | REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER); | 963 | REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER); |
964 | } | 964 | } |
965 | 965 | ||
966 | ath9k_hw_enable_interrupts(ah); | 966 | if (ints & ATH9K_INT_GLOBAL) |
967 | ath9k_hw_enable_interrupts(ah); | ||
967 | 968 | ||
968 | return; | 969 | return; |
969 | } | 970 | } |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 9040c2ff1909..a09d15f7aa6e 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -342,7 +342,6 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int | |||
342 | tx_info->control.rates[1].idx = -1; | 342 | tx_info->control.rates[1].idx = -1; |
343 | 343 | ||
344 | init_completion(&sc->paprd_complete); | 344 | init_completion(&sc->paprd_complete); |
345 | sc->paprd_pending = true; | ||
346 | txctl.paprd = BIT(chain); | 345 | txctl.paprd = BIT(chain); |
347 | 346 | ||
348 | if (ath_tx_start(hw, skb, &txctl) != 0) { | 347 | if (ath_tx_start(hw, skb, &txctl) != 0) { |
@@ -353,7 +352,6 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int | |||
353 | 352 | ||
354 | time_left = wait_for_completion_timeout(&sc->paprd_complete, | 353 | time_left = wait_for_completion_timeout(&sc->paprd_complete, |
355 | msecs_to_jiffies(ATH_PAPRD_TIMEOUT)); | 354 | msecs_to_jiffies(ATH_PAPRD_TIMEOUT)); |
356 | sc->paprd_pending = false; | ||
357 | 355 | ||
358 | if (!time_left) | 356 | if (!time_left) |
359 | ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CALIBRATE, | 357 | ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CALIBRATE, |
@@ -1175,12 +1173,6 @@ static int ath9k_start(struct ieee80211_hw *hw) | |||
1175 | ath9k_btcoex_timer_resume(sc); | 1173 | ath9k_btcoex_timer_resume(sc); |
1176 | } | 1174 | } |
1177 | 1175 | ||
1178 | /* User has the option to provide pm-qos value as a module | ||
1179 | * parameter rather than using the default value of | ||
1180 | * 'ATH9K_PM_QOS_DEFAULT_VALUE'. | ||
1181 | */ | ||
1182 | pm_qos_update_request(&sc->pm_qos_req, ath9k_pm_qos_value); | ||
1183 | |||
1184 | if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en) | 1176 | if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en) |
1185 | common->bus_ops->extn_synch_en(common); | 1177 | common->bus_ops->extn_synch_en(common); |
1186 | 1178 | ||
@@ -1347,8 +1339,6 @@ static void ath9k_stop(struct ieee80211_hw *hw) | |||
1347 | 1339 | ||
1348 | sc->sc_flags |= SC_OP_INVALID; | 1340 | sc->sc_flags |= SC_OP_INVALID; |
1349 | 1341 | ||
1350 | pm_qos_update_request(&sc->pm_qos_req, PM_QOS_DEFAULT_VALUE); | ||
1351 | |||
1352 | mutex_unlock(&sc->mutex); | 1342 | mutex_unlock(&sc->mutex); |
1353 | 1343 | ||
1354 | ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n"); | 1344 | ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n"); |
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 33a37edbaf79..07b7804aec5b 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c | |||
@@ -1725,6 +1725,9 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf, | |||
1725 | ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc, | 1725 | ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc, |
1726 | bf->bf_state.bfs_paprd); | 1726 | bf->bf_state.bfs_paprd); |
1727 | 1727 | ||
1728 | if (txctl->paprd) | ||
1729 | bf->bf_state.bfs_paprd_timestamp = jiffies; | ||
1730 | |||
1728 | ath_tx_send_normal(sc, txctl->txq, tid, &bf_head); | 1731 | ath_tx_send_normal(sc, txctl->txq, tid, &bf_head); |
1729 | } | 1732 | } |
1730 | 1733 | ||
@@ -1886,7 +1889,9 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, | |||
1886 | bf->bf_buf_addr = 0; | 1889 | bf->bf_buf_addr = 0; |
1887 | 1890 | ||
1888 | if (bf->bf_state.bfs_paprd) { | 1891 | if (bf->bf_state.bfs_paprd) { |
1889 | if (!sc->paprd_pending) | 1892 | if (time_after(jiffies, |
1893 | bf->bf_state.bfs_paprd_timestamp + | ||
1894 | msecs_to_jiffies(ATH_PAPRD_TIMEOUT))) | ||
1890 | dev_kfree_skb_any(skb); | 1895 | dev_kfree_skb_any(skb); |
1891 | else | 1896 | else |
1892 | complete(&sc->paprd_complete); | 1897 | complete(&sc->paprd_complete); |
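Replacing the global paprd_pending flag with a per-buffer jiffies stamp lets the completion path decide locally whether anyone is still waiting. Jiffies arithmetic has to go through the wrap-safe helpers; the idiom is (sketch; TIMEOUT_MS and stamp are placeholders):

    #include <linux/jiffies.h>

    unsigned long deadline = stamp + msecs_to_jiffies(TIMEOUT_MS); /* stamp taken when the frame was queued */

    if (time_after(jiffies, deadline))
            dev_kfree_skb_any(skb);   /* waiter already timed out: nobody will consume the completion */
    else
            complete(&done);          /* waiter is still blocked in wait_for_completion_timeout() */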
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c index 939a0e96ed1f..84866a4b8350 100644 --- a/drivers/net/wireless/ath/carl9170/rx.c +++ b/drivers/net/wireless/ath/carl9170/rx.c | |||
@@ -564,7 +564,7 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len) | |||
564 | cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid); | 564 | cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid); |
565 | 565 | ||
566 | /* 2. Maybe the AP wants to send multicast/broadcast data? */ | 566 | /* 2. Maybe the AP wants to send multicast/broadcast data? */ |
567 | cam = !!(tim_ie->bitmap_ctrl & 0x01); | 567 | cam |= !!(tim_ie->bitmap_ctrl & 0x01); |
568 | 568 | ||
569 | if (!cam) { | 569 | if (!cam) { |
570 | /* back to low-power land. */ | 570 | /* back to low-power land. */ |
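The carl9170 fix is a one-character bug: the second assignment used '=' and threw away the result of the TIM lookup, so the multicast test silently overrode the unicast one. Accumulating independent wake-up reasons needs '|='; abbreviated (variable names here are placeholders for the driver's):

    bool cam;

    cam  = ieee80211_check_tim(tim_ie, tim_len, aid);  /* our AID set in the TIM? */
    cam |= !!(tim_ie->bitmap_ctrl & 0x01);             /* pending broadcast/multicast; '=' here lost the line above */

    if (!cam)
            enter_powersave();                          /* hypothetical: nothing buffered for us */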
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c index 537732e5964f..f82c400be288 100644 --- a/drivers/net/wireless/ath/carl9170/usb.c +++ b/drivers/net/wireless/ath/carl9170/usb.c | |||
@@ -118,6 +118,8 @@ static struct usb_device_id carl9170_usb_ids[] = { | |||
118 | { USB_DEVICE(0x057c, 0x8402) }, | 118 | { USB_DEVICE(0x057c, 0x8402) }, |
119 | /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */ | 119 | /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */ |
120 | { USB_DEVICE(0x1668, 0x1200) }, | 120 | { USB_DEVICE(0x1668, 0x1200) }, |
121 | /* Airlive X.USB a/b/g/n */ | ||
122 | { USB_DEVICE(0x1b75, 0x9170) }, | ||
121 | 123 | ||
122 | /* terminate */ | 124 | /* terminate */ |
123 | {} | 125 | {} |
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c index a9b852be4509..39b6f16c87fa 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c | |||
@@ -402,72 +402,6 @@ static void iwl3945_accumulative_statistics(struct iwl_priv *priv, | |||
402 | } | 402 | } |
403 | #endif | 403 | #endif |
404 | 404 | ||
405 | /** | ||
406 | * iwl3945_good_plcp_health - checks for plcp error. | ||
407 | * | ||
408 | * When the plcp error is exceeding the thresholds, reset the radio | ||
409 | * to improve the throughput. | ||
410 | */ | ||
411 | static bool iwl3945_good_plcp_health(struct iwl_priv *priv, | ||
412 | struct iwl_rx_packet *pkt) | ||
413 | { | ||
414 | bool rc = true; | ||
415 | struct iwl3945_notif_statistics current_stat; | ||
416 | int combined_plcp_delta; | ||
417 | unsigned int plcp_msec; | ||
418 | unsigned long plcp_received_jiffies; | ||
419 | |||
420 | if (priv->cfg->base_params->plcp_delta_threshold == | ||
421 | IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) { | ||
422 | IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n"); | ||
423 | return rc; | ||
424 | } | ||
425 | memcpy(¤t_stat, pkt->u.raw, sizeof(struct | ||
426 | iwl3945_notif_statistics)); | ||
427 | /* | ||
428 | * check for plcp_err and trigger radio reset if it exceeds | ||
429 | * the plcp error threshold plcp_delta. | ||
430 | */ | ||
431 | plcp_received_jiffies = jiffies; | ||
432 | plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies - | ||
433 | (long) priv->plcp_jiffies); | ||
434 | priv->plcp_jiffies = plcp_received_jiffies; | ||
435 | /* | ||
436 | * check to make sure plcp_msec is not 0 to prevent division | ||
437 | * by zero. | ||
438 | */ | ||
439 | if (plcp_msec) { | ||
440 | combined_plcp_delta = | ||
441 | (le32_to_cpu(current_stat.rx.ofdm.plcp_err) - | ||
442 | le32_to_cpu(priv->_3945.statistics.rx.ofdm.plcp_err)); | ||
443 | |||
444 | if ((combined_plcp_delta > 0) && | ||
445 | ((combined_plcp_delta * 100) / plcp_msec) > | ||
446 | priv->cfg->base_params->plcp_delta_threshold) { | ||
447 | /* | ||
448 | * if plcp_err exceed the threshold, the following | ||
449 | * data is printed in csv format: | ||
450 | * Text: plcp_err exceeded %d, | ||
451 | * Received ofdm.plcp_err, | ||
452 | * Current ofdm.plcp_err, | ||
453 | * combined_plcp_delta, | ||
454 | * plcp_msec | ||
455 | */ | ||
456 | IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, " | ||
457 | "%u, %d, %u mSecs\n", | ||
458 | priv->cfg->base_params->plcp_delta_threshold, | ||
459 | le32_to_cpu(current_stat.rx.ofdm.plcp_err), | ||
460 | combined_plcp_delta, plcp_msec); | ||
461 | /* | ||
462 | * Reset the RF radio due to the high plcp | ||
463 | * error rate | ||
464 | */ | ||
465 | rc = false; | ||
466 | } | ||
467 | } | ||
468 | return rc; | ||
469 | } | ||
470 | |||
471 | void iwl3945_hw_rx_statistics(struct iwl_priv *priv, | 405 | void iwl3945_hw_rx_statistics(struct iwl_priv *priv, |
472 | struct iwl_rx_mem_buffer *rxb) | 406 | struct iwl_rx_mem_buffer *rxb) |
473 | { | 407 | { |
@@ -2734,7 +2668,6 @@ static struct iwl_lib_ops iwl3945_lib = { | |||
2734 | .isr_ops = { | 2668 | .isr_ops = { |
2735 | .isr = iwl_isr_legacy, | 2669 | .isr = iwl_isr_legacy, |
2736 | }, | 2670 | }, |
2737 | .check_plcp_health = iwl3945_good_plcp_health, | ||
2738 | 2671 | ||
2739 | .debugfs_ops = { | 2672 | .debugfs_ops = { |
2740 | .rx_stats_read = iwl3945_ucode_rx_stats_read, | 2673 | .rx_stats_read = iwl3945_ucode_rx_stats_read, |
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index 79ab0a6b1386..537fb8c84e3a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c | |||
@@ -51,7 +51,7 @@ | |||
51 | #include "iwl-agn-debugfs.h" | 51 | #include "iwl-agn-debugfs.h" |
52 | 52 | ||
53 | /* Highest firmware API version supported */ | 53 | /* Highest firmware API version supported */ |
54 | #define IWL5000_UCODE_API_MAX 2 | 54 | #define IWL5000_UCODE_API_MAX 5 |
55 | #define IWL5150_UCODE_API_MAX 2 | 55 | #define IWL5150_UCODE_API_MAX 2 |
56 | 56 | ||
57 | /* Lowest firmware API version supported */ | 57 | /* Lowest firmware API version supported */ |
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c index af505bcd7ae0..ef36aff1bb43 100644 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c | |||
@@ -681,6 +681,8 @@ struct iwl_cfg iwl6000i_2bg_cfg = { | |||
681 | .fw_name_pre = IWL6050_FW_PRE, \ | 681 | .fw_name_pre = IWL6050_FW_PRE, \ |
682 | .ucode_api_max = IWL6050_UCODE_API_MAX, \ | 682 | .ucode_api_max = IWL6050_UCODE_API_MAX, \ |
683 | .ucode_api_min = IWL6050_UCODE_API_MIN, \ | 683 | .ucode_api_min = IWL6050_UCODE_API_MIN, \ |
684 | .valid_tx_ant = ANT_AB, /* .cfg overwrite */ \ | ||
685 | .valid_rx_ant = ANT_AB, /* .cfg overwrite */ \ | ||
684 | .ops = &iwl6050_ops, \ | 686 | .ops = &iwl6050_ops, \ |
685 | .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \ | 687 | .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \ |
686 | .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \ | 688 | .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \ |
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 36335b1b54d4..c1cfd9952e52 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c | |||
@@ -1157,6 +1157,9 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv) | |||
1157 | /* only Re-enable if disabled by irq */ | 1157 | /* only Re-enable if disabled by irq */ |
1158 | if (test_bit(STATUS_INT_ENABLED, &priv->status)) | 1158 | if (test_bit(STATUS_INT_ENABLED, &priv->status)) |
1159 | iwl_enable_interrupts(priv); | 1159 | iwl_enable_interrupts(priv); |
1160 | /* Re-enable RF_KILL if it occurred */ | ||
1161 | else if (handled & CSR_INT_BIT_RF_KILL) | ||
1162 | iwl_enable_rfkill_int(priv); | ||
1160 | 1163 | ||
1161 | #ifdef CONFIG_IWLWIFI_DEBUG | 1164 | #ifdef CONFIG_IWLWIFI_DEBUG |
1162 | if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { | 1165 | if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { |
@@ -1371,6 +1374,9 @@ static void iwl_irq_tasklet(struct iwl_priv *priv) | |||
1371 | /* only Re-enable if disabled by irq */ | 1374 | /* only Re-enable if disabled by irq */ |
1372 | if (test_bit(STATUS_INT_ENABLED, &priv->status)) | 1375 | if (test_bit(STATUS_INT_ENABLED, &priv->status)) |
1373 | iwl_enable_interrupts(priv); | 1376 | iwl_enable_interrupts(priv); |
1377 | /* Re-enable RF_KILL if it occurred */ | ||
1378 | else if (handled & CSR_INT_BIT_RF_KILL) | ||
1379 | iwl_enable_rfkill_int(priv); | ||
1374 | } | 1380 | } |
1375 | 1381 | ||
1376 | /* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */ | 1382 | /* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */ |
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c index 1eacba4daa5b..0494d7b102d4 100644 --- a/drivers/net/wireless/p54/p54pci.c +++ b/drivers/net/wireless/p54/p54pci.c | |||
@@ -199,6 +199,7 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index, | |||
199 | while (i != idx) { | 199 | while (i != idx) { |
200 | u16 len; | 200 | u16 len; |
201 | struct sk_buff *skb; | 201 | struct sk_buff *skb; |
202 | dma_addr_t dma_addr; | ||
202 | desc = &ring[i]; | 203 | desc = &ring[i]; |
203 | len = le16_to_cpu(desc->len); | 204 | len = le16_to_cpu(desc->len); |
204 | skb = rx_buf[i]; | 205 | skb = rx_buf[i]; |
@@ -216,17 +217,20 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index, | |||
216 | 217 | ||
217 | len = priv->common.rx_mtu; | 218 | len = priv->common.rx_mtu; |
218 | } | 219 | } |
220 | dma_addr = le32_to_cpu(desc->host_addr); | ||
221 | pci_dma_sync_single_for_cpu(priv->pdev, dma_addr, | ||
222 | priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); | ||
219 | skb_put(skb, len); | 223 | skb_put(skb, len); |
220 | 224 | ||
221 | if (p54_rx(dev, skb)) { | 225 | if (p54_rx(dev, skb)) { |
222 | pci_unmap_single(priv->pdev, | 226 | pci_unmap_single(priv->pdev, dma_addr, |
223 | le32_to_cpu(desc->host_addr), | 227 | priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); |
224 | priv->common.rx_mtu + 32, | ||
225 | PCI_DMA_FROMDEVICE); | ||
226 | rx_buf[i] = NULL; | 228 | rx_buf[i] = NULL; |
227 | desc->host_addr = 0; | 229 | desc->host_addr = cpu_to_le32(0); |
228 | } else { | 230 | } else { |
229 | skb_trim(skb, 0); | 231 | skb_trim(skb, 0); |
232 | pci_dma_sync_single_for_device(priv->pdev, dma_addr, | ||
233 | priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); | ||
230 | desc->len = cpu_to_le16(priv->common.rx_mtu + 32); | 234 | desc->len = cpu_to_le16(priv->common.rx_mtu + 32); |
231 | } | 235 | } |
232 | 236 | ||
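Note: the p54pci rx loop now brackets every CPU access to a streaming DMA buffer: pci_dma_sync_single_for_cpu() before the skb payload is read, pci_unmap_single() when the frame is handed to mac80211, and pci_dma_sync_single_for_device() when the buffer is recycled back onto the ring. A simplified sketch of that ownership hand-off (consume_frame() is a stand-in for the p54_rx() call, not a real function):

	/* Streaming-DMA ownership around one rx buffer, per the hunk above. */
	pci_dma_sync_single_for_cpu(pdev, dma_addr, buf_len, PCI_DMA_FROMDEVICE);
	if (consume_frame(skb)) {
		/* frame goes up the stack; mapping is torn down for good */
		pci_unmap_single(pdev, dma_addr, buf_len, PCI_DMA_FROMDEVICE);
	} else {
		/* buffer stays on the ring; give ownership back to the device */
		pci_dma_sync_single_for_device(pdev, dma_addr, buf_len,
					       PCI_DMA_FROMDEVICE);
	}
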
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index 21713a7638c4..9b344a921e74 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c | |||
@@ -98,6 +98,7 @@ static struct usb_device_id p54u_table[] __devinitdata = { | |||
98 | {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */ | 98 | {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */ |
99 | {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ | 99 | {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ |
100 | {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */ | 100 | {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */ |
101 | {USB_DEVICE(0x1740, 0x1000)}, /* Senao NUB-350 */ | ||
101 | {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ | 102 | {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ |
102 | {USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */ | 103 | {USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */ |
103 | {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */ | 104 | {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */ |
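Note: the Senao NUB-350 (USB ID 1740:1000) joins the p54u_table device list; no other driver change is needed for the device to bind, since a table like this is what the USB core matches on and it is normally exported for module autoloading as well. Minimal sketch of the pattern, trimmed to the new entry (the MODULE_DEVICE_TABLE line is the usual convention, not part of this hunk):

	static struct usb_device_id p54u_table[] __devinitdata = {
		/* ... existing entries ... */
		{USB_DEVICE(0x1740, 0x1000)},	/* Senao NUB-350 */
		{}				/* terminating entry */
	};
	MODULE_DEVICE_TABLE(usb, p54u_table);
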
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index 848cc2cce247..518542b4bf9e 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c | |||
@@ -2597,6 +2597,9 @@ static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, | |||
2597 | __le32 mode; | 2597 | __le32 mode; |
2598 | int ret; | 2598 | int ret; |
2599 | 2599 | ||
2600 | if (priv->device_type != RNDIS_BCM4320B) | ||
2601 | return -ENOTSUPP; | ||
2602 | |||
2600 | netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__, | 2603 | netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__, |
2601 | enabled ? "enabled" : "disabled", | 2604 | enabled ? "enabled" : "disabled", |
2602 | timeout); | 2605 | timeout); |
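Note: rndis_set_power_mgmt() now refuses anything other than the RNDIS_BCM4320B device type, returning -ENOTSUPP before any RNDIS command is issued; on other chips the power-save path is simply not attempted. The guard is the usual early-return pattern:

	/* Bail out before touching the device if power save is unsupported. */
	if (priv->device_type != RNDIS_BCM4320B)
		return -ENOTSUPP;

From userspace this surfaces as a clean error on the power-save request rather than an attempt to drive an unvalidated feature.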
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index aa97971a38af..3b3f1e45ab3e 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c | |||
@@ -652,6 +652,12 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry, | |||
652 | */ | 652 | */ |
653 | rxdesc->flags |= RX_FLAG_IV_STRIPPED; | 653 | rxdesc->flags |= RX_FLAG_IV_STRIPPED; |
654 | 654 | ||
655 | /* | ||
656 | * The hardware has already checked the Michael Mic and has | ||
657 | * stripped it from the frame. Signal this to mac80211. | ||
658 | */ | ||
659 | rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; | ||
660 | |||
655 | if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) | 661 | if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) |
656 | rxdesc->flags |= RX_FLAG_DECRYPTED; | 662 | rxdesc->flags |= RX_FLAG_DECRYPTED; |
657 | else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) | 663 | else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) |
@@ -1065,6 +1071,8 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = { | |||
1065 | { PCI_DEVICE(0x1814, 0x3390), PCI_DEVICE_DATA(&rt2800pci_ops) }, | 1071 | { PCI_DEVICE(0x1814, 0x3390), PCI_DEVICE_DATA(&rt2800pci_ops) }, |
1066 | #endif | 1072 | #endif |
1067 | #ifdef CONFIG_RT2800PCI_RT35XX | 1073 | #ifdef CONFIG_RT2800PCI_RT35XX |
1074 | { PCI_DEVICE(0x1432, 0x7711), PCI_DEVICE_DATA(&rt2800pci_ops) }, | ||
1075 | { PCI_DEVICE(0x1432, 0x7722), PCI_DEVICE_DATA(&rt2800pci_ops) }, | ||
1068 | { PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) }, | 1076 | { PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) }, |
1069 | { PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) }, | 1077 | { PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) }, |
1070 | { PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) }, | 1078 | { PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) }, |
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index b97a4a54ff4c..197a36c05fda 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c | |||
@@ -486,6 +486,12 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry, | |||
486 | */ | 486 | */ |
487 | rxdesc->flags |= RX_FLAG_IV_STRIPPED; | 487 | rxdesc->flags |= RX_FLAG_IV_STRIPPED; |
488 | 488 | ||
489 | /* | ||
490 | * The hardware has already checked the Michael Mic and has | ||
491 | * stripped it from the frame. Signal this to mac80211. | ||
492 | */ | ||
493 | rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; | ||
494 | |||
489 | if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) | 495 | if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) |
490 | rxdesc->flags |= RX_FLAG_DECRYPTED; | 496 | rxdesc->flags |= RX_FLAG_DECRYPTED; |
491 | else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) | 497 | else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) |
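Note: rt2800pci and rt2800usb receive the same fix in their fill_rxdone paths. Because the hardware both verifies and strips the Michael MIC, the rx descriptor must carry RX_FLAG_MMIC_STRIPPED; without it, mac80211 would try to verify a MIC that is no longer present at the end of decrypted TKIP frames. Condensed view of the flags now set for a hardware-decrypted frame, taken from the hunks above:

	rxdesc->flags |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
	if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
		rxdesc->flags |= RX_FLAG_DECRYPTED;

The rt2800pci portion also adds two 1432:77xx RT35xx PCI IDs to the device table, guarded by CONFIG_RT2800PCI_RT35XX.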
diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/wl1251/main.c index 012e1a4016fe..40372bac9482 100644 --- a/drivers/net/wireless/wl1251/main.c +++ b/drivers/net/wireless/wl1251/main.c | |||
@@ -1039,6 +1039,9 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw, | |||
1039 | 1039 | ||
1040 | if (changed & BSS_CHANGED_BEACON) { | 1040 | if (changed & BSS_CHANGED_BEACON) { |
1041 | beacon = ieee80211_beacon_get(hw, vif); | 1041 | beacon = ieee80211_beacon_get(hw, vif); |
1042 | if (!beacon) | ||
1043 | goto out_sleep; | ||
1044 | |||
1042 | ret = wl1251_cmd_template_set(wl, CMD_BEACON, beacon->data, | 1045 | ret = wl1251_cmd_template_set(wl, CMD_BEACON, beacon->data, |
1043 | beacon->len); | 1046 | beacon->len); |
1044 | 1047 | ||
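Note: ieee80211_beacon_get() can legitimately return NULL (no beacon template available yet), and the old wl1251 code would then have dereferenced beacon->data. The new check jumps to the function's existing out_sleep label instead. The guarded pattern, condensed from the hunk:

	beacon = ieee80211_beacon_get(hw, vif);
	if (!beacon)
		goto out_sleep;		/* nothing to upload yet, skip quietly */

	ret = wl1251_cmd_template_set(wl, CMD_BEACON, beacon->data,
				      beacon->len);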