Diffstat (limited to 'drivers/net/ethernet')
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c       |  10
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c        |   2
 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h            |  19
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c       |  29
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h        |   3
 drivers/net/ethernet/freescale/gianfar_ptp.c          |   2
 drivers/net/ethernet/mellanox/mlx4/en_tx.c            |   3
 drivers/net/ethernet/mellanox/mlx4/eq.c               |  24
 drivers/net/ethernet/mellanox/mlx4/main.c             |   5
 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c |   6
 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c  | 116
 drivers/net/ethernet/qlogic/qla3xxx.c                 |   9
 drivers/net/ethernet/ti/Kconfig                       |   2
 drivers/net/ethernet/tile/tilegx.c                    |  35
 14 files changed, 119 insertions(+), 146 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 24220992413f..4833b6a9031c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2957,9 +2957,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			skb_shinfo(skb)->nr_frags +
 			BDS_PER_TX_PKT +
 			NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
-		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
-		netif_tx_stop_queue(txq);
-		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
+		/* Handle special storage cases separately */
+		if (txdata->tx_ring_size != 0) {
+			BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
+			bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
+			netif_tx_stop_queue(txq);
+		}
+
 		return NETDEV_TX_BUSY;
 	}
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 71971a161bd1..614981c02264 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -126,7 +126,7 @@ static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
 		/* Check if this request is ok */
 		rc = o->validate(bp, o->owner, elem);
 		if (rc) {
-			BNX2X_ERR("Preamble failed: %d\n", rc);
+			DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
 			goto free_and_exit;
 		}
 	}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index a4da893ac1e1..378988b5709a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -251,6 +251,8 @@ struct adapter_params {
 	unsigned char rev;             /* chip revision */
 	unsigned char offload;
 
+	unsigned char bypass;
+
 	unsigned int ofldq_wr_cred;
 };
 
@@ -642,6 +644,23 @@ extern int dbfifo_int_thresh;
 #define for_each_port(adapter, iter) \
 	for (iter = 0; iter < (adapter)->params.nports; ++iter)
 
+static inline int is_bypass(struct adapter *adap)
+{
+	return adap->params.bypass;
+}
+
+static inline int is_bypass_device(int device)
+{
+	/* this should be set based upon device capabilities */
+	switch (device) {
+	case 0x440b:
+	case 0x440c:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
 static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
 {
 	return adap->params.vpd.cclk / 1000;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 604f4f87f550..c1cde11b0c6d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3513,18 +3513,6 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
 	if (ret < 0)
 		goto bye;
 
-#ifndef CONFIG_CHELSIO_T4_OFFLOAD
-	/*
-	 * If we're a pure NIC driver then disable all offloading facilities.
-	 * This will allow the firmware to optimize aspects of the hardware
-	 * configuration which will result in improved performance.
-	 */
-	caps_cmd.ofldcaps = 0;
-	caps_cmd.iscsicaps = 0;
-	caps_cmd.rdmacaps = 0;
-	caps_cmd.fcoecaps = 0;
-#endif
-
 	if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
 		if (!vf_acls)
 			caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
@@ -3745,6 +3733,7 @@ static int adap_init0(struct adapter *adap)
 	u32 v, port_vec;
 	enum dev_state state;
 	u32 params[7], val[7];
+	struct fw_caps_config_cmd caps_cmd;
 	int reset = 1, j;
 
 	/*
@@ -3898,6 +3887,9 @@ static int adap_init0(struct adapter *adap)
 		goto bye;
 	}
 
+	if (is_bypass_device(adap->pdev->device))
+		adap->params.bypass = 1;
+
 	/*
 	 * Grab some of our basic fundamental operating parameters.
 	 */
@@ -3940,13 +3932,12 @@ static int adap_init0(struct adapter *adap)
 		adap->tids.aftid_end = val[1];
 	}
 
-#ifdef CONFIG_CHELSIO_T4_OFFLOAD
 	/*
 	 * Get device capabilities so we can determine what resources we need
 	 * to manage.
 	 */
 	memset(&caps_cmd, 0, sizeof(caps_cmd));
-	caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
 				     FW_CMD_REQUEST | FW_CMD_READ);
 	caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
 	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
@@ -3991,15 +3982,6 @@ static int adap_init0(struct adapter *adap)
 		adap->vres.ddp.size = val[4] - val[3] + 1;
 		adap->params.ofldq_wr_cred = val[5];
 
-		params[0] = FW_PARAM_PFVF(ETHOFLD_START);
-		params[1] = FW_PARAM_PFVF(ETHOFLD_END);
-		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
-				      params, val);
-		if ((val[0] != val[1]) && (ret >= 0)) {
-			adap->tids.uotid_base = val[0];
-			adap->tids.nuotids = val[1] - val[0] + 1;
-		}
-
 		adap->params.offload = 1;
 	}
 	if (caps_cmd.rdmacaps) {
@@ -4048,7 +4030,6 @@ static int adap_init0(struct adapter *adap)
 	}
 #undef FW_PARAM_PFVF
 #undef FW_PARAM_DEV
-#endif /* CONFIG_CHELSIO_T4_OFFLOAD */
 
 	/*
 	 * These are finalized by FW initialization, load their values now.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 1b899fea1a91..39bec73ff87c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -102,6 +102,9 @@ struct tid_info {
 	unsigned int ftid_base;
 	unsigned int aftid_base;
 	unsigned int aftid_end;
+	/* Server filter region */
+	unsigned int sftid_base;
+	unsigned int nsftids;
 
 	spinlock_t atid_lock ____cacheline_aligned_in_smp;
 	union aopen_entry *afree;
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index b9db0e040563..2e5daee0438a 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -478,7 +478,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
 		pr_err("no resource\n");
 		goto no_resource;
 	}
-	if (request_resource(&ioport_resource, etsects->rsrc)) {
+	if (request_resource(&iomem_resource, etsects->rsrc)) {
 		pr_err("resource busy\n");
 		goto no_resource;
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index c10e3a6de09f..b35094c590ba 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -143,7 +143,6 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
 	mlx4_bf_free(mdev->dev, &ring->bf);
 	mlx4_qp_remove(mdev->dev, &ring->qp);
 	mlx4_qp_free(mdev->dev, &ring->qp);
-	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
 	mlx4_en_unmap_buffer(&ring->wqres.buf);
 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
 	kfree(ring->bounce_buf);
@@ -712,7 +711,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (bounce)
 		tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
 
-	if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
+	if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tx_tag_present(skb)) {
 		*(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
 		op_own |= htonl((bf_index & 0xffff) << 8);
 		/* Ensure new descirptor hits memory
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 51c764901ad2..b84a88bc44dc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -329,9 +329,6 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
 	ctx = &priv->mfunc.master.slave_state[slave];
 	spin_lock_irqsave(&ctx->lock, flags);
 
-	mlx4_dbg(dev, "%s: slave: %d, current state: %d new event :%d\n",
-		 __func__, slave, cur_state, event);
-
 	switch (cur_state) {
 	case SLAVE_PORT_DOWN:
 		if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event)
@@ -366,9 +363,6 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
 		goto out;
 	}
 	ret = mlx4_get_slave_port_state(dev, slave, port);
-	mlx4_dbg(dev, "%s: slave: %d, current state: %d new event"
-		 " :%d gen_event: %d\n",
-		 __func__, slave, cur_state, event, *gen_event);
 
 out:
 	spin_unlock_irqrestore(&ctx->lock, flags);
@@ -843,6 +837,18 @@ static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
 	return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
 }
 
+static void mlx4_unmap_uar(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int i;
+
+	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
+		if (priv->eq_table.uar_map[i]) {
+			iounmap(priv->eq_table.uar_map[i]);
+			priv->eq_table.uar_map[i] = NULL;
+		}
+}
+
 static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
 			  u8 intr, struct mlx4_eq *eq)
 {
@@ -1207,6 +1213,7 @@ err_out_unmap:
 	mlx4_free_irqs(dev);
 
 err_out_bitmap:
+	mlx4_unmap_uar(dev);
 	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
 
 err_out_free:
@@ -1231,10 +1238,7 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
 	if (!mlx4_is_slave(dev))
 		mlx4_unmap_clr_int(dev);
 
-	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
-		if (priv->eq_table.uar_map[i])
-			iounmap(priv->eq_table.uar_map[i]);
-
+	mlx4_unmap_uar(dev);
 	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
 
 	kfree(priv->eq_table.uar_map);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 80df2ab0177c..2aa80afd98d2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1405,7 +1405,10 @@ unmap_bf:
 	unmap_bf_area(dev);
 
 err_close:
-	mlx4_close_hca(dev);
+	if (mlx4_is_slave(dev))
+		mlx4_slave_exit(dev);
+	else
+		mlx4_CLOSE_HCA(dev, 0);
 
 err_free_icm:
 	if (!mlx4_is_slave(dev))
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 926c911c0ac4..b05705f50f0f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -330,9 +330,6 @@ static void update_pkey_index(struct mlx4_dev *dev, int slave,
 
 	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
 	*(u8 *)(inbox->buf + 35) = new_index;
-
-	mlx4_dbg(dev, "port = %d, orig pkey index = %d, "
-		 "new pkey index = %d\n", port, orig_index, new_index);
 }
 
 static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
@@ -351,9 +348,6 @@ static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
 		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
 			qp_ctx->alt_path.mgid_index = slave & 0x7F;
 	}
-
-	mlx4_dbg(dev, "slave %d, new gid index: 0x%x ",
-		 slave, qp_ctx->pri_path.mgid_index);
 }
 
 static int mpt_mask(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index b2a94d02a521..4c4fe5b1a29a 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -339,26 +339,6 @@ static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
 }
 
 /**
- * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context
- * @reg:	Pointer of register
- * @busy:	Busy bit
- */
-static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit)
-{
-	u32 tmp;
-	int ret = -1;
-	/* wait busy */
-	tmp = 20;
-	while ((ioread32(reg) & bit) && --tmp)
-		udelay(5);
-	if (!tmp)
-		pr_err("Error: busy bit is not cleared\n");
-	else
-		ret = 0;
-	return ret;
-}
-
-/**
  * pch_gbe_mac_mar_set - Set MAC address register
  * @hw:	    Pointer to the HW structure
  * @addr:   Pointer to the MAC address
@@ -409,15 +389,20 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
 	return;
 }
 
-static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
-{
-	/* Read the MAC addresses. and store to the private data */
-	pch_gbe_mac_read_mac_addr(hw);
-	iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
-	pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
-	/* Setup the MAC addresses */
-	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
-	return;
+static void pch_gbe_disable_mac_rx(struct pch_gbe_hw *hw)
+{
+	u32 rctl;
+	/* Disables Receive MAC */
+	rctl = ioread32(&hw->reg->MAC_RX_EN);
+	iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
+}
+
+static void pch_gbe_enable_mac_rx(struct pch_gbe_hw *hw)
+{
+	u32 rctl;
+	/* Enables Receive MAC */
+	rctl = ioread32(&hw->reg->MAC_RX_EN);
+	iowrite32((rctl | PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
 }
 
 /**
@@ -913,7 +898,7 @@ static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
 static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
 {
 	struct pch_gbe_hw *hw = &adapter->hw;
-	u32 rdba, rdlen, rctl, rxdma;
+	u32 rdba, rdlen, rxdma;
 
 	pr_debug("dma adr = 0x%08llx  size = 0x%08x\n",
 		 (unsigned long long)adapter->rx_ring->dma,
@@ -921,9 +906,7 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
 
 	pch_gbe_mac_force_mac_fc(hw);
 
-	/* Disables Receive MAC */
-	rctl = ioread32(&hw->reg->MAC_RX_EN);
-	iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
+	pch_gbe_disable_mac_rx(hw);
 
 	/* Disables Receive DMA */
 	rxdma = ioread32(&hw->reg->DMA_CTRL);
@@ -1316,38 +1299,17 @@ void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
 	spin_unlock_irqrestore(&adapter->stats_lock, flags);
 }
 
-static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
+static void pch_gbe_disable_dma_rx(struct pch_gbe_hw *hw)
 {
-	struct pch_gbe_hw *hw = &adapter->hw;
 	u32 rxdma;
-	u16 value;
-	int ret;
 
 	/* Disable Receive DMA */
 	rxdma = ioread32(&hw->reg->DMA_CTRL);
 	rxdma &= ~PCH_GBE_RX_DMA_EN;
 	iowrite32(rxdma, &hw->reg->DMA_CTRL);
-	/* Wait Rx DMA BUS is IDLE */
-	ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK);
-	if (ret) {
-		/* Disable Bus master */
-		pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
-		value &= ~PCI_COMMAND_MASTER;
-		pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
-		/* Stop Receive */
-		pch_gbe_mac_reset_rx(hw);
-		/* Enable Bus master */
-		value |= PCI_COMMAND_MASTER;
-		pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
-	} else {
-		/* Stop Receive */
-		pch_gbe_mac_reset_rx(hw);
-	}
-	/* reprogram multicast address register after reset */
-	pch_gbe_set_multi(adapter->netdev);
 }
 
-static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
+static void pch_gbe_enable_dma_rx(struct pch_gbe_hw *hw)
 {
 	u32 rxdma;
 
@@ -1355,9 +1317,6 @@ static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
 	rxdma = ioread32(&hw->reg->DMA_CTRL);
 	rxdma |= PCH_GBE_RX_DMA_EN;
 	iowrite32(rxdma, &hw->reg->DMA_CTRL);
-	/* Enables Receive */
-	iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
-	return;
 }
 
 /**
@@ -1393,7 +1352,7 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
 		int_en = ioread32(&hw->reg->INT_EN);
 		iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
 			  &hw->reg->INT_EN);
-		pch_gbe_stop_receive(adapter);
+		pch_gbe_disable_dma_rx(&adapter->hw);
 		int_st |= ioread32(&hw->reg->INT_ST);
 		int_st = int_st & ioread32(&hw->reg->INT_EN);
 	}
@@ -1971,12 +1930,12 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
 	struct net_device *netdev = adapter->netdev;
 	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
 	struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
-	int err;
+	int err = -EINVAL;
 
 	/* Ensure we have a valid MAC */
 	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
 		pr_err("Error: Invalid MAC address\n");
-		return -EINVAL;
+		goto out;
 	}
 
 	/* hardware has been reset, we need to reload some things */
@@ -1989,18 +1948,19 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
 
 	err = pch_gbe_request_irq(adapter);
 	if (err) {
-		pr_err("Error: can't bring device up\n");
-		return err;
+		pr_err("Error: can't bring device up - irq request failed\n");
+		goto out;
 	}
 	err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
 	if (err) {
-		pr_err("Error: can't bring device up\n");
-		return err;
+		pr_err("Error: can't bring device up - alloc rx buffers pool failed\n");
+		goto freeirq;
 	}
 	pch_gbe_alloc_tx_buffers(adapter, tx_ring);
 	pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
 	adapter->tx_queue_len = netdev->tx_queue_len;
-	pch_gbe_start_receive(&adapter->hw);
+	pch_gbe_enable_dma_rx(&adapter->hw);
+	pch_gbe_enable_mac_rx(&adapter->hw);
 
 	mod_timer(&adapter->watchdog_timer, jiffies);
 
@@ -2009,6 +1969,11 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
 	netif_start_queue(adapter->netdev);
 
 	return 0;
+
+freeirq:
+	pch_gbe_free_irq(adapter);
+out:
+	return err;
 }
 
 /**
@@ -2405,7 +2370,6 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
 	int work_done = 0;
 	bool poll_end_flag = false;
 	bool cleaned = false;
-	u32 int_en;
 
 	pr_debug("budget : %d\n", budget);
 
@@ -2422,19 +2386,13 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
 
 	if (poll_end_flag) {
 		napi_complete(napi);
-		if (adapter->rx_stop_flag) {
-			adapter->rx_stop_flag = false;
-			pch_gbe_start_receive(&adapter->hw);
-		}
 		pch_gbe_irq_enable(adapter);
-	} else
-		if (adapter->rx_stop_flag) {
-			adapter->rx_stop_flag = false;
-			pch_gbe_start_receive(&adapter->hw);
-			int_en = ioread32(&adapter->hw.reg->INT_EN);
-			iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
-				  &adapter->hw.reg->INT_EN);
-		}
+	}
+
+	if (adapter->rx_stop_flag) {
+		adapter->rx_stop_flag = false;
+		pch_gbe_enable_dma_rx(&adapter->hw);
+	}
 
 	pr_debug("poll_end_flag : %d work_done : %d budget : %d\n",
 		 poll_end_flag, work_done, budget);
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index df09b1cb742f..6407d0d77e81 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -2525,6 +2525,13 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
 	qdev->req_q_size =
 	    (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
 
+	qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
+
+	/* The barrier is required to ensure request and response queue
+	 * addr writes to the registers.
+	 */
+	wmb();
+
 	qdev->req_q_virt_addr =
 	    pci_alloc_consistent(qdev->pdev,
 				 (size_t) qdev->req_q_size,
@@ -2536,8 +2543,6 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
 		return -ENOMEM;
 	}
 
-	qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
-
 	qdev->rsp_q_virt_addr =
 	    pci_alloc_consistent(qdev->pdev,
 				 (size_t) qdev->rsp_q_size,
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index b26cbda5efa9..2c41894d5472 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -5,7 +5,7 @@
 config NET_VENDOR_TI
 	bool "Texas Instruments (TI) devices"
 	default y
-	depends on PCI || EISA || AR7 || (ARM && (ARCH_DAVINCI || ARCH_OMAP3))
+	depends on PCI || EISA || AR7 || (ARM && (ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX))
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y
 	  and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 4e2a1628484d..4e9810013850 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -1334,11 +1334,11 @@ static int tso_count_edescs(struct sk_buff *skb)
 {
 	struct skb_shared_info *sh = skb_shinfo(skb);
 	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
-	unsigned int data_len = skb->data_len + skb->hdr_len - sh_len;
+	unsigned int data_len = skb->len - sh_len;
 	unsigned int p_len = sh->gso_size;
 	long f_id = -1;    /* id of the current fragment */
-	long f_size = skb->hdr_len;  /* size of the current fragment */
-	long f_used = sh_len;  /* bytes used from the current fragment */
+	long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
+	long f_used = 0;  /* bytes used from the current fragment */
 	long n;            /* size of the current piece of payload */
 	int num_edescs = 0;
 	int segment;
@@ -1353,7 +1353,7 @@ static int tso_count_edescs(struct sk_buff *skb)
 		/* Advance as needed. */
 		while (f_used >= f_size) {
 			f_id++;
-			f_size = sh->frags[f_id].size;
+			f_size = skb_frag_size(&sh->frags[f_id]);
 			f_used = 0;
 		}
 
@@ -1384,13 +1384,13 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
 	struct iphdr *ih;
 	struct tcphdr *th;
 	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
-	unsigned int data_len = skb->data_len + skb->hdr_len - sh_len;
+	unsigned int data_len = skb->len - sh_len;
 	unsigned char *data = skb->data;
 	unsigned int ih_off, th_off, p_len;
 	unsigned int isum_seed, tsum_seed, id, seq;
 	long f_id = -1;    /* id of the current fragment */
-	long f_size = skb->hdr_len;  /* size of the current fragment */
-	long f_used = sh_len;  /* bytes used from the current fragment */
+	long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
+	long f_used = 0;  /* bytes used from the current fragment */
 	long n;            /* size of the current piece of payload */
 	int segment;
 
@@ -1405,7 +1405,7 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
 	isum_seed = ((0xFFFF - ih->check) +
 		     (0xFFFF - ih->tot_len) +
 		     (0xFFFF - ih->id));
-	tsum_seed = th->check + (0xFFFF ^ htons(sh_len + data_len));
+	tsum_seed = th->check + (0xFFFF ^ htons(skb->len));
 	id = ntohs(ih->id);
 	seq = ntohl(th->seq);
 
@@ -1444,7 +1444,7 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
 		/* Advance as needed. */
 		while (f_used >= f_size) {
 			f_id++;
-			f_size = sh->frags[f_id].size;
+			f_size = skb_frag_size(&sh->frags[f_id]);
 			f_used = 0;
 		}
 
@@ -1478,14 +1478,14 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
 	struct tile_net_priv *priv = netdev_priv(dev);
 	struct skb_shared_info *sh = skb_shinfo(skb);
 	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
-	unsigned int data_len = skb->data_len + skb->hdr_len - sh_len;
+	unsigned int data_len = skb->len - sh_len;
 	unsigned int p_len = sh->gso_size;
 	gxio_mpipe_edesc_t edesc_head = { { 0 } };
 	gxio_mpipe_edesc_t edesc_body = { { 0 } };
 	long f_id = -1;    /* id of the current fragment */
-	long f_size = skb->hdr_len;  /* size of the current fragment */
-	long f_used = sh_len;  /* bytes used from the current fragment */
-	void *f_data = skb->data;
+	long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
+	long f_used = 0;  /* bytes used from the current fragment */
+	void *f_data = skb->data + sh_len;
 	long n;            /* size of the current piece of payload */
 	unsigned long tx_packets = 0, tx_bytes = 0;
 	unsigned int csum_start;
@@ -1516,15 +1516,18 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
 
 	/* Egress the payload. */
 	while (p_used < p_len) {
+		void *va;
 
 		/* Advance as needed. */
 		while (f_used >= f_size) {
 			f_id++;
-			f_size = sh->frags[f_id].size;
-			f_used = 0;
+			f_size = skb_frag_size(&sh->frags[f_id]);
 			f_data = tile_net_frag_buf(&sh->frags[f_id]);
+			f_used = 0;
 		}
 
+		va = f_data + f_used;
+
 		/* Use bytes from the current fragment. */
 		n = p_len - p_used;
 		if (n > f_size - f_used)
@@ -1533,7 +1536,7 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
 		p_used += n;
 
 		/* Egress a piece of the payload. */
-		edesc_body.va = va_to_tile_io_addr(f_data) + f_used;
+		edesc_body.va = va_to_tile_io_addr(va);
 		edesc_body.xfer_size = n;
 		edesc_body.bound = !(p_used < p_len);
 		gxio_mpipe_equeue_put_at(equeue, edesc_body, slot);