about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorMichał Mirosław <mirq-linux@rere.qmqm.pl>2011-04-18 09:31:21 -0400
committerDavid S. Miller <davem@davemloft.net>2011-04-19 02:03:59 -0400
commitfeb990d467f76abe90ae68437eb1db351e67c674 (patch)
treeb08458ac6f8be26055bbc0731cb103461fb7c902
parent30f554f925335abad89aaa38eec6828242b27527 (diff)
net: vxge: convert to hw_features
Side effect: ->gro_enable is removed as napi_gro_receive() does the fallback itself.

Signed-off-by: Michał Mirosław <mirq-linux@rere.qmqm.pl>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/vxge/vxge-ethtool.c72
-rw-r--r--drivers/net/vxge/vxge-main.c100
-rw-r--r--drivers/net/vxge/vxge-main.h14
3 files changed, 59 insertions, 127 deletions
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
index 43c458323f83..5aef6c893aee 100644
--- a/drivers/net/vxge/vxge-ethtool.c
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -1071,35 +1071,6 @@ static int vxge_ethtool_get_regs_len(struct net_device *dev)
1071 return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath; 1071 return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
1072} 1072}
1073 1073
1074static u32 vxge_get_rx_csum(struct net_device *dev)
1075{
1076 struct vxgedev *vdev = netdev_priv(dev);
1077
1078 return vdev->rx_csum;
1079}
1080
1081static int vxge_set_rx_csum(struct net_device *dev, u32 data)
1082{
1083 struct vxgedev *vdev = netdev_priv(dev);
1084
1085 if (data)
1086 vdev->rx_csum = 1;
1087 else
1088 vdev->rx_csum = 0;
1089
1090 return 0;
1091}
1092
1093static int vxge_ethtool_op_set_tso(struct net_device *dev, u32 data)
1094{
1095 if (data)
1096 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
1097 else
1098 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
1099
1100 return 0;
1101}
1102
1103static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset) 1074static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
1104{ 1075{
1105 struct vxgedev *vdev = netdev_priv(dev); 1076 struct vxgedev *vdev = netdev_priv(dev);
@@ -1119,40 +1090,6 @@ static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
1119 } 1090 }
1120} 1091}
1121 1092
1122static int vxge_set_flags(struct net_device *dev, u32 data)
1123{
1124 struct vxgedev *vdev = netdev_priv(dev);
1125 enum vxge_hw_status status;
1126
1127 if (ethtool_invalid_flags(dev, data, ETH_FLAG_RXHASH))
1128 return -EINVAL;
1129
1130 if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en)
1131 return 0;
1132
1133 if (netif_running(dev) || (vdev->config.rth_steering == NO_STEERING))
1134 return -EINVAL;
1135
1136 vdev->devh->config.rth_en = !!(data & ETH_FLAG_RXHASH);
1137
1138 /* Enabling RTH requires some of the logic in vxge_device_register and a
1139 * vpath reset. Due to these restrictions, only allow modification
1140 * while the interface is down.
1141 */
1142 status = vxge_reset_all_vpaths(vdev);
1143 if (status != VXGE_HW_OK) {
1144 vdev->devh->config.rth_en = !vdev->devh->config.rth_en;
1145 return -EFAULT;
1146 }
1147
1148 if (vdev->devh->config.rth_en)
1149 dev->features |= NETIF_F_RXHASH;
1150 else
1151 dev->features &= ~NETIF_F_RXHASH;
1152
1153 return 0;
1154}
1155
1156static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms) 1093static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms)
1157{ 1094{
1158 struct vxgedev *vdev = netdev_priv(dev); 1095 struct vxgedev *vdev = netdev_priv(dev);
@@ -1181,19 +1118,10 @@ static const struct ethtool_ops vxge_ethtool_ops = {
1181 .get_link = ethtool_op_get_link, 1118 .get_link = ethtool_op_get_link,
1182 .get_pauseparam = vxge_ethtool_getpause_data, 1119 .get_pauseparam = vxge_ethtool_getpause_data,
1183 .set_pauseparam = vxge_ethtool_setpause_data, 1120 .set_pauseparam = vxge_ethtool_setpause_data,
1184 .get_rx_csum = vxge_get_rx_csum,
1185 .set_rx_csum = vxge_set_rx_csum,
1186 .get_tx_csum = ethtool_op_get_tx_csum,
1187 .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
1188 .get_sg = ethtool_op_get_sg,
1189 .set_sg = ethtool_op_set_sg,
1190 .get_tso = ethtool_op_get_tso,
1191 .set_tso = vxge_ethtool_op_set_tso,
1192 .get_strings = vxge_ethtool_get_strings, 1121 .get_strings = vxge_ethtool_get_strings,
1193 .set_phys_id = vxge_ethtool_idnic, 1122 .set_phys_id = vxge_ethtool_idnic,
1194 .get_sset_count = vxge_ethtool_get_sset_count, 1123 .get_sset_count = vxge_ethtool_get_sset_count,
1195 .get_ethtool_stats = vxge_get_ethtool_stats, 1124 .get_ethtool_stats = vxge_get_ethtool_stats,
1196 .set_flags = vxge_set_flags,
1197 .flash_device = vxge_fw_flash, 1125 .flash_device = vxge_fw_flash,
1198}; 1126};
1199 1127
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index d192dad8ff21..fc837cf6bd4d 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -304,22 +304,14 @@ vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
304 "%s: %s:%d skb protocol = %d", 304 "%s: %s:%d skb protocol = %d",
305 ring->ndev->name, __func__, __LINE__, skb->protocol); 305 ring->ndev->name, __func__, __LINE__, skb->protocol);
306 306
307 if (ring->gro_enable) { 307 if (ring->vlgrp && ext_info->vlan &&
308 if (ring->vlgrp && ext_info->vlan && 308 (ring->vlan_tag_strip ==
309 (ring->vlan_tag_strip == 309 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
310 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)) 310 vlan_gro_receive(ring->napi_p, ring->vlgrp,
311 vlan_gro_receive(ring->napi_p, ring->vlgrp, 311 ext_info->vlan, skb);
312 ext_info->vlan, skb); 312 else
313 else 313 napi_gro_receive(ring->napi_p, skb);
314 napi_gro_receive(ring->napi_p, skb); 314
315 } else {
316 if (ring->vlgrp && vlan &&
317 (ring->vlan_tag_strip ==
318 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
319 vlan_hwaccel_receive_skb(skb, ring->vlgrp, vlan);
320 else
321 netif_receive_skb(skb);
322 }
323 vxge_debug_entryexit(VXGE_TRACE, 315 vxge_debug_entryexit(VXGE_TRACE,
324 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__); 316 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
325} 317}
@@ -490,7 +482,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
490 482
491 if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) && 483 if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
492 !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) && 484 !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
493 ring->rx_csum && /* Offload Rx side CSUM */ 485 (dev->features & NETIF_F_RXCSUM) && /* Offload Rx side CSUM */
494 ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK && 486 ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
495 ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK) 487 ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
496 skb->ip_summed = CHECKSUM_UNNECESSARY; 488 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -2094,11 +2086,9 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
2094 vdev->config.fifo_indicate_max_pkts; 2086 vdev->config.fifo_indicate_max_pkts;
2095 vpath->fifo.tx_vector_no = 0; 2087 vpath->fifo.tx_vector_no = 0;
2096 vpath->ring.rx_vector_no = 0; 2088 vpath->ring.rx_vector_no = 0;
2097 vpath->ring.rx_csum = vdev->rx_csum;
2098 vpath->ring.rx_hwts = vdev->rx_hwts; 2089 vpath->ring.rx_hwts = vdev->rx_hwts;
2099 vpath->is_open = 1; 2090 vpath->is_open = 1;
2100 vdev->vp_handles[i] = vpath->handle; 2091 vdev->vp_handles[i] = vpath->handle;
2101 vpath->ring.gro_enable = vdev->config.gro_enable;
2102 vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip; 2092 vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
2103 vdev->stats.vpaths_open++; 2093 vdev->stats.vpaths_open++;
2104 } else { 2094 } else {
@@ -2670,6 +2660,40 @@ static void vxge_poll_vp_lockup(unsigned long data)
2670 mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000); 2660 mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
2671} 2661}
2672 2662
2663static u32 vxge_fix_features(struct net_device *dev, u32 features)
2664{
2665 u32 changed = dev->features ^ features;
2666
2667 /* Enabling RTH requires some of the logic in vxge_device_register and a
2668 * vpath reset. Due to these restrictions, only allow modification
2669 * while the interface is down.
2670 */
2671 if ((changed & NETIF_F_RXHASH) && netif_running(dev))
2672 features ^= NETIF_F_RXHASH;
2673
2674 return features;
2675}
2676
2677static int vxge_set_features(struct net_device *dev, u32 features)
2678{
2679 struct vxgedev *vdev = netdev_priv(dev);
2680 u32 changed = dev->features ^ features;
2681
2682 if (!(changed & NETIF_F_RXHASH))
2683 return 0;
2684
2685 /* !netif_running() ensured by vxge_fix_features() */
2686
2687 vdev->devh->config.rth_en = !!(features & NETIF_F_RXHASH);
2688 if (vxge_reset_all_vpaths(vdev) != VXGE_HW_OK) {
2689 dev->features = features ^ NETIF_F_RXHASH;
2690 vdev->devh->config.rth_en = !!(dev->features & NETIF_F_RXHASH);
2691 return -EIO;
2692 }
2693
2694 return 0;
2695}
2696
2673/** 2697/**
2674 * vxge_open 2698 * vxge_open
2675 * @dev: pointer to the device structure. 2699 * @dev: pointer to the device structure.
@@ -3369,6 +3393,8 @@ static const struct net_device_ops vxge_netdev_ops = {
3369 .ndo_do_ioctl = vxge_ioctl, 3393 .ndo_do_ioctl = vxge_ioctl,
3370 .ndo_set_mac_address = vxge_set_mac_addr, 3394 .ndo_set_mac_address = vxge_set_mac_addr,
3371 .ndo_change_mtu = vxge_change_mtu, 3395 .ndo_change_mtu = vxge_change_mtu,
3396 .ndo_fix_features = vxge_fix_features,
3397 .ndo_set_features = vxge_set_features,
3372 .ndo_vlan_rx_register = vxge_vlan_rx_register, 3398 .ndo_vlan_rx_register = vxge_vlan_rx_register,
3373 .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid, 3399 .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid,
3374 .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid, 3400 .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid,
@@ -3415,14 +3441,21 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3415 vdev->devh = hldev; 3441 vdev->devh = hldev;
3416 vdev->pdev = hldev->pdev; 3442 vdev->pdev = hldev->pdev;
3417 memcpy(&vdev->config, config, sizeof(struct vxge_config)); 3443 memcpy(&vdev->config, config, sizeof(struct vxge_config));
3418 vdev->rx_csum = 1; /* Enable Rx CSUM by default. */
3419 vdev->rx_hwts = 0; 3444 vdev->rx_hwts = 0;
3420 vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION); 3445 vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);
3421 3446
3422 SET_NETDEV_DEV(ndev, &vdev->pdev->dev); 3447 SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
3423 3448
3424 ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | 3449 ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG |
3425 NETIF_F_HW_VLAN_FILTER; 3450 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3451 NETIF_F_TSO | NETIF_F_TSO6 |
3452 NETIF_F_HW_VLAN_TX;
3453 if (vdev->config.rth_steering != NO_STEERING)
3454 ndev->hw_features |= NETIF_F_RXHASH;
3455
3456 ndev->features |= ndev->hw_features |
3457 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3458
3426 /* Driver entry points */ 3459 /* Driver entry points */
3427 ndev->irq = vdev->pdev->irq; 3460 ndev->irq = vdev->pdev->irq;
3428 ndev->base_addr = (unsigned long) hldev->bar0; 3461 ndev->base_addr = (unsigned long) hldev->bar0;
@@ -3434,11 +3467,6 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3434 3467
3435 vxge_initialize_ethtool_ops(ndev); 3468 vxge_initialize_ethtool_ops(ndev);
3436 3469
3437 if (vdev->config.rth_steering != NO_STEERING) {
3438 ndev->features |= NETIF_F_RXHASH;
3439 hldev->config.rth_en = VXGE_HW_RTH_ENABLE;
3440 }
3441
3442 /* Allocate memory for vpath */ 3470 /* Allocate memory for vpath */
3443 vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) * 3471 vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
3444 no_of_vpath, GFP_KERNEL); 3472 no_of_vpath, GFP_KERNEL);
@@ -3450,9 +3478,6 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3450 goto _out1; 3478 goto _out1;
3451 } 3479 }
3452 3480
3453 ndev->features |= NETIF_F_SG;
3454
3455 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3456 vxge_debug_init(vxge_hw_device_trace_level_get(hldev), 3481 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3457 "%s : checksuming enabled", __func__); 3482 "%s : checksuming enabled", __func__);
3458 3483
@@ -3462,11 +3487,6 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3462 "%s : using High DMA", __func__); 3487 "%s : using High DMA", __func__);
3463 } 3488 }
3464 3489
3465 ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
3466
3467 if (vdev->config.gro_enable)
3468 ndev->features |= NETIF_F_GRO;
3469
3470 ret = register_netdev(ndev); 3490 ret = register_netdev(ndev);
3471 if (ret) { 3491 if (ret) {
3472 vxge_debug_init(vxge_hw_device_trace_level_get(hldev), 3492 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
@@ -3996,15 +4016,6 @@ static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
3996 vdev->config.tx_steering_type = 0; 4016 vdev->config.tx_steering_type = 0;
3997 } 4017 }
3998 4018
3999 if (vdev->config.gro_enable) {
4000 vxge_debug_init(VXGE_ERR,
4001 "%s: Generic receive offload enabled",
4002 vdev->ndev->name);
4003 } else
4004 vxge_debug_init(VXGE_TRACE,
4005 "%s: Generic receive offload disabled",
4006 vdev->ndev->name);
4007
4008 if (vdev->config.addr_learn_en) 4019 if (vdev->config.addr_learn_en)
4009 vxge_debug_init(VXGE_TRACE, 4020 vxge_debug_init(VXGE_TRACE,
4010 "%s: MAC Address learning enabled", vdev->ndev->name); 4021 "%s: MAC Address learning enabled", vdev->ndev->name);
@@ -4589,7 +4600,6 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4589 /* set private device info */ 4600 /* set private device info */
4590 pci_set_drvdata(pdev, hldev); 4601 pci_set_drvdata(pdev, hldev);
4591 4602
4592 ll_config->gro_enable = VXGE_GRO_ALWAYS_AGGREGATE;
4593 ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS; 4603 ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
4594 ll_config->addr_learn_en = addr_learn_en; 4604 ll_config->addr_learn_en = addr_learn_en;
4595 ll_config->rth_algorithm = RTH_ALG_JENKINS; 4605 ll_config->rth_algorithm = RTH_ALG_JENKINS;
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index 40474f0da576..ed120aba443d 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -168,9 +168,6 @@ struct vxge_config {
168 168
169#define NEW_NAPI_WEIGHT 64 169#define NEW_NAPI_WEIGHT 64
170 int napi_weight; 170 int napi_weight;
171#define VXGE_GRO_DONOT_AGGREGATE 0
172#define VXGE_GRO_ALWAYS_AGGREGATE 1
173 int gro_enable;
174 int intr_type; 171 int intr_type;
175#define INTA 0 172#define INTA 0
176#define MSI 1 173#define MSI 1
@@ -290,13 +287,11 @@ struct vxge_ring {
290 unsigned long interrupt_count; 287 unsigned long interrupt_count;
291 unsigned long jiffies; 288 unsigned long jiffies;
292 289
293 /* copy of the flag indicating whether rx_csum is to be used */ 290 /* copy of the flag indicating whether rx_hwts is to be used */
294 u32 rx_csum:1, 291 u32 rx_hwts:1;
295 rx_hwts:1;
296 292
297 int pkts_processed; 293 int pkts_processed;
298 int budget; 294 int budget;
299 int gro_enable;
300 295
301 struct napi_struct napi; 296 struct napi_struct napi;
302 struct napi_struct *napi_p; 297 struct napi_struct *napi_p;
@@ -369,9 +364,8 @@ struct vxgedev {
369 */ 364 */
370 u16 all_multi_flg; 365 u16 all_multi_flg;
371 366
372 /* A flag indicating whether rx_csum is to be used or not. */ 367 /* A flag indicating whether rx_hwts is to be used or not. */
373 u32 rx_csum:1, 368 u32 rx_hwts:1,
374 rx_hwts:1,
375 titan1:1; 369 titan1:1;
376 370
377 struct vxge_msix_entry *vxge_entries; 371 struct vxge_msix_entry *vxge_entries;