| author | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500 |
|---|---|---|
| committer | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500 |
| commit | c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch) | |
| tree | ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/net/enic/enic_main.c | |
| parent | ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff) | |
| parent | 6a00f206debf8a5c8899055726ad127dbeeed098 (diff) | |
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'drivers/net/enic/enic_main.c')
| -rw-r--r-- | drivers/net/enic/enic_main.c | 1032 |
1 file changed, 486 insertions, 546 deletions
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 9aab85366d21..2f433fbfca0c 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
| @@ -35,6 +35,7 @@ | |||
| 35 | #include <linux/ipv6.h> | 35 | #include <linux/ipv6.h> |
| 36 | #include <linux/tcp.h> | 36 | #include <linux/tcp.h> |
| 37 | #include <linux/rtnetlink.h> | 37 | #include <linux/rtnetlink.h> |
| 38 | #include <linux/prefetch.h> | ||
| 38 | #include <net/ip6_checksum.h> | 39 | #include <net/ip6_checksum.h> |
| 39 | 40 | ||
| 40 | #include "cq_enet_desc.h" | 41 | #include "cq_enet_desc.h" |
| @@ -44,6 +45,8 @@ | |||
| 44 | #include "vnic_vic.h" | 45 | #include "vnic_vic.h" |
| 45 | #include "enic_res.h" | 46 | #include "enic_res.h" |
| 46 | #include "enic.h" | 47 | #include "enic.h" |
| 48 | #include "enic_dev.h" | ||
| 49 | #include "enic_pp.h" | ||
| 47 | 50 | ||
| 48 | #define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ) | 51 | #define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ) |
| 49 | #define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS) | 52 | #define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS) |
| @@ -122,6 +125,51 @@ static int enic_is_dynamic(struct enic *enic) | |||
| 122 | return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN; | 125 | return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN; |
| 123 | } | 126 | } |
| 124 | 127 | ||
| 128 | static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq) | ||
| 129 | { | ||
| 130 | return rq; | ||
| 131 | } | ||
| 132 | |||
| 133 | static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq) | ||
| 134 | { | ||
| 135 | return enic->rq_count + wq; | ||
| 136 | } | ||
| 137 | |||
| 138 | static inline unsigned int enic_legacy_io_intr(void) | ||
| 139 | { | ||
| 140 | return 0; | ||
| 141 | } | ||
| 142 | |||
| 143 | static inline unsigned int enic_legacy_err_intr(void) | ||
| 144 | { | ||
| 145 | return 1; | ||
| 146 | } | ||
| 147 | |||
| 148 | static inline unsigned int enic_legacy_notify_intr(void) | ||
| 149 | { | ||
| 150 | return 2; | ||
| 151 | } | ||
| 152 | |||
| 153 | static inline unsigned int enic_msix_rq_intr(struct enic *enic, unsigned int rq) | ||
| 154 | { | ||
| 155 | return rq; | ||
| 156 | } | ||
| 157 | |||
| 158 | static inline unsigned int enic_msix_wq_intr(struct enic *enic, unsigned int wq) | ||
| 159 | { | ||
| 160 | return enic->rq_count + wq; | ||
| 161 | } | ||
| 162 | |||
| 163 | static inline unsigned int enic_msix_err_intr(struct enic *enic) | ||
| 164 | { | ||
| 165 | return enic->rq_count + enic->wq_count; | ||
| 166 | } | ||
| 167 | |||
| 168 | static inline unsigned int enic_msix_notify_intr(struct enic *enic) | ||
| 169 | { | ||
| 170 | return enic->rq_count + enic->wq_count + 1; | ||
| 171 | } | ||
| 172 | |||
| 125 | static int enic_get_settings(struct net_device *netdev, | 173 | static int enic_get_settings(struct net_device *netdev, |
| 126 | struct ethtool_cmd *ecmd) | 174 | struct ethtool_cmd *ecmd) |
| 127 | { | 175 | { |
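The helpers introduced in the hunk above encode the driver's new multi-queue resource layout: completion queues are ordered RQs first and then WQs, and MSI-X vectors are laid out as RQ vectors, WQ vectors, then one error and one notify vector, while legacy INTx keeps the fixed 0/1/2 assignment. The standalone sketch below uses made-up queue counts and only prints the indices those helpers would compute; it is illustrative, not driver code.

    /* Illustration of the index layout implied by the helpers above;
     * rq_count and wq_count are invented for the example. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int rq_count = 4, wq_count = 1;
        unsigned int rq, wq;

        for (rq = 0; rq < rq_count; rq++)   /* enic_cq_rq(), enic_msix_rq_intr() */
            printf("rq%u -> cq %u, msix vector %u\n", rq, rq, rq);
        for (wq = 0; wq < wq_count; wq++)   /* enic_cq_wq(), enic_msix_wq_intr() */
            printf("wq%u -> cq %u, msix vector %u\n",
                   wq, rq_count + wq, rq_count + wq);
        printf("err    -> msix vector %u\n", rq_count + wq_count);     /* enic_msix_err_intr() */
        printf("notify -> msix vector %u\n", rq_count + wq_count + 1); /* enic_msix_notify_intr() */
        return 0;
    }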
| @@ -133,10 +181,10 @@ static int enic_get_settings(struct net_device *netdev, | |||
| 133 | ecmd->transceiver = XCVR_EXTERNAL; | 181 | ecmd->transceiver = XCVR_EXTERNAL; |
| 134 | 182 | ||
| 135 | if (netif_carrier_ok(netdev)) { | 183 | if (netif_carrier_ok(netdev)) { |
| 136 | ecmd->speed = vnic_dev_port_speed(enic->vdev); | 184 | ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev)); |
| 137 | ecmd->duplex = DUPLEX_FULL; | 185 | ecmd->duplex = DUPLEX_FULL; |
| 138 | } else { | 186 | } else { |
| 139 | ecmd->speed = -1; | 187 | ethtool_cmd_speed_set(ecmd, -1); |
| 140 | ecmd->duplex = -1; | 188 | ecmd->duplex = -1; |
| 141 | } | 189 | } |
| 142 | 190 | ||
| @@ -145,18 +193,6 @@ static int enic_get_settings(struct net_device *netdev, | |||
| 145 | return 0; | 193 | return 0; |
| 146 | } | 194 | } |
| 147 | 195 | ||
| 148 | static int enic_dev_fw_info(struct enic *enic, | ||
| 149 | struct vnic_devcmd_fw_info **fw_info) | ||
| 150 | { | ||
| 151 | int err; | ||
| 152 | |||
| 153 | spin_lock(&enic->devcmd_lock); | ||
| 154 | err = vnic_dev_fw_info(enic->vdev, fw_info); | ||
| 155 | spin_unlock(&enic->devcmd_lock); | ||
| 156 | |||
| 157 | return err; | ||
| 158 | } | ||
| 159 | |||
| 160 | static void enic_get_drvinfo(struct net_device *netdev, | 196 | static void enic_get_drvinfo(struct net_device *netdev, |
| 161 | struct ethtool_drvinfo *drvinfo) | 197 | struct ethtool_drvinfo *drvinfo) |
| 162 | { | 198 | { |
| @@ -201,17 +237,6 @@ static int enic_get_sset_count(struct net_device *netdev, int sset) | |||
| 201 | } | 237 | } |
| 202 | } | 238 | } |
| 203 | 239 | ||
| 204 | static int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats) | ||
| 205 | { | ||
| 206 | int err; | ||
| 207 | |||
| 208 | spin_lock(&enic->devcmd_lock); | ||
| 209 | err = vnic_dev_stats_dump(enic->vdev, vstats); | ||
| 210 | spin_unlock(&enic->devcmd_lock); | ||
| 211 | |||
| 212 | return err; | ||
| 213 | } | ||
| 214 | |||
| 215 | static void enic_get_ethtool_stats(struct net_device *netdev, | 240 | static void enic_get_ethtool_stats(struct net_device *netdev, |
| 216 | struct ethtool_stats *stats, u64 *data) | 241 | struct ethtool_stats *stats, u64 *data) |
| 217 | { | 242 | { |
| @@ -227,56 +252,6 @@ static void enic_get_ethtool_stats(struct net_device *netdev, | |||
| 227 | *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset]; | 252 | *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset]; |
| 228 | } | 253 | } |
| 229 | 254 | ||
| 230 | static u32 enic_get_rx_csum(struct net_device *netdev) | ||
| 231 | { | ||
| 232 | struct enic *enic = netdev_priv(netdev); | ||
| 233 | return enic->csum_rx_enabled; | ||
| 234 | } | ||
| 235 | |||
| 236 | static int enic_set_rx_csum(struct net_device *netdev, u32 data) | ||
| 237 | { | ||
| 238 | struct enic *enic = netdev_priv(netdev); | ||
| 239 | |||
| 240 | if (data && !ENIC_SETTING(enic, RXCSUM)) | ||
| 241 | return -EINVAL; | ||
| 242 | |||
| 243 | enic->csum_rx_enabled = !!data; | ||
| 244 | |||
| 245 | return 0; | ||
| 246 | } | ||
| 247 | |||
| 248 | static int enic_set_tx_csum(struct net_device *netdev, u32 data) | ||
| 249 | { | ||
| 250 | struct enic *enic = netdev_priv(netdev); | ||
| 251 | |||
| 252 | if (data && !ENIC_SETTING(enic, TXCSUM)) | ||
| 253 | return -EINVAL; | ||
| 254 | |||
| 255 | if (data) | ||
| 256 | netdev->features |= NETIF_F_HW_CSUM; | ||
| 257 | else | ||
| 258 | netdev->features &= ~NETIF_F_HW_CSUM; | ||
| 259 | |||
| 260 | return 0; | ||
| 261 | } | ||
| 262 | |||
| 263 | static int enic_set_tso(struct net_device *netdev, u32 data) | ||
| 264 | { | ||
| 265 | struct enic *enic = netdev_priv(netdev); | ||
| 266 | |||
| 267 | if (data && !ENIC_SETTING(enic, TSO)) | ||
| 268 | return -EINVAL; | ||
| 269 | |||
| 270 | if (data) | ||
| 271 | netdev->features |= | ||
| 272 | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN; | ||
| 273 | else | ||
| 274 | netdev->features &= | ||
| 275 | ~(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN); | ||
| 276 | |||
| 277 | return 0; | ||
| 278 | } | ||
| 279 | |||
| 280 | static u32 enic_get_msglevel(struct net_device *netdev) | 255 | static u32 enic_get_msglevel(struct net_device *netdev) |
| 281 | { | 256 | { |
| 282 | struct enic *enic = netdev_priv(netdev); | 257 | struct enic *enic = netdev_priv(netdev); |
| @@ -306,6 +281,7 @@ static int enic_set_coalesce(struct net_device *netdev, | |||
| 306 | struct enic *enic = netdev_priv(netdev); | 281 | struct enic *enic = netdev_priv(netdev); |
| 307 | u32 tx_coalesce_usecs; | 282 | u32 tx_coalesce_usecs; |
| 308 | u32 rx_coalesce_usecs; | 283 | u32 rx_coalesce_usecs; |
| 284 | unsigned int i, intr; | ||
| 309 | 285 | ||
| 310 | tx_coalesce_usecs = min_t(u32, | 286 | tx_coalesce_usecs = min_t(u32, |
| 311 | INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX), | 287 | INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX), |
| @@ -319,7 +295,8 @@ static int enic_set_coalesce(struct net_device *netdev, | |||
| 319 | if (tx_coalesce_usecs != rx_coalesce_usecs) | 295 | if (tx_coalesce_usecs != rx_coalesce_usecs) |
| 320 | return -EINVAL; | 296 | return -EINVAL; |
| 321 | 297 | ||
| 322 | vnic_intr_coalescing_timer_set(&enic->intr[ENIC_INTX_WQ_RQ], | 298 | intr = enic_legacy_io_intr(); |
| 299 | vnic_intr_coalescing_timer_set(&enic->intr[intr], | ||
| 323 | INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs)); | 300 | INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs)); |
| 324 | break; | 301 | break; |
| 325 | case VNIC_DEV_INTR_MODE_MSI: | 302 | case VNIC_DEV_INTR_MODE_MSI: |
| @@ -330,10 +307,18 @@ static int enic_set_coalesce(struct net_device *netdev, | |||
| 330 | INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs)); | 307 | INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs)); |
| 331 | break; | 308 | break; |
| 332 | case VNIC_DEV_INTR_MODE_MSIX: | 309 | case VNIC_DEV_INTR_MODE_MSIX: |
| 333 | vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_WQ], | 310 | for (i = 0; i < enic->wq_count; i++) { |
| 334 | INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs)); | 311 | intr = enic_msix_wq_intr(enic, i); |
| 335 | vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_RQ], | 312 | vnic_intr_coalescing_timer_set(&enic->intr[intr], |
| 336 | INTR_COALESCE_USEC_TO_HW(rx_coalesce_usecs)); | 313 | INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs)); |
| 314 | } | ||
| 315 | |||
| 316 | for (i = 0; i < enic->rq_count; i++) { | ||
| 317 | intr = enic_msix_rq_intr(enic, i); | ||
| 318 | vnic_intr_coalescing_timer_set(&enic->intr[intr], | ||
| 319 | INTR_COALESCE_USEC_TO_HW(rx_coalesce_usecs)); | ||
| 320 | } | ||
| 321 | |||
| 337 | break; | 322 | break; |
| 338 | default: | 323 | default: |
| 339 | break; | 324 | break; |
| @@ -354,17 +339,8 @@ static const struct ethtool_ops enic_ethtool_ops = { | |||
| 354 | .get_strings = enic_get_strings, | 339 | .get_strings = enic_get_strings, |
| 355 | .get_sset_count = enic_get_sset_count, | 340 | .get_sset_count = enic_get_sset_count, |
| 356 | .get_ethtool_stats = enic_get_ethtool_stats, | 341 | .get_ethtool_stats = enic_get_ethtool_stats, |
| 357 | .get_rx_csum = enic_get_rx_csum, | ||
| 358 | .set_rx_csum = enic_set_rx_csum, | ||
| 359 | .get_tx_csum = ethtool_op_get_tx_csum, | ||
| 360 | .set_tx_csum = enic_set_tx_csum, | ||
| 361 | .get_sg = ethtool_op_get_sg, | ||
| 362 | .set_sg = ethtool_op_set_sg, | ||
| 363 | .get_tso = ethtool_op_get_tso, | ||
| 364 | .set_tso = enic_set_tso, | ||
| 365 | .get_coalesce = enic_get_coalesce, | 342 | .get_coalesce = enic_get_coalesce, |
| 366 | .set_coalesce = enic_set_coalesce, | 343 | .set_coalesce = enic_set_coalesce, |
| 367 | .get_flags = ethtool_op_get_flags, | ||
| 368 | }; | 344 | }; |
| 369 | 345 | ||
| 370 | static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) | 346 | static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) |
| @@ -482,34 +458,37 @@ static irqreturn_t enic_isr_legacy(int irq, void *data) | |||
| 482 | { | 458 | { |
| 483 | struct net_device *netdev = data; | 459 | struct net_device *netdev = data; |
| 484 | struct enic *enic = netdev_priv(netdev); | 460 | struct enic *enic = netdev_priv(netdev); |
| 461 | unsigned int io_intr = enic_legacy_io_intr(); | ||
| 462 | unsigned int err_intr = enic_legacy_err_intr(); | ||
| 463 | unsigned int notify_intr = enic_legacy_notify_intr(); | ||
| 485 | u32 pba; | 464 | u32 pba; |
| 486 | 465 | ||
| 487 | vnic_intr_mask(&enic->intr[ENIC_INTX_WQ_RQ]); | 466 | vnic_intr_mask(&enic->intr[io_intr]); |
| 488 | 467 | ||
| 489 | pba = vnic_intr_legacy_pba(enic->legacy_pba); | 468 | pba = vnic_intr_legacy_pba(enic->legacy_pba); |
| 490 | if (!pba) { | 469 | if (!pba) { |
| 491 | vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]); | 470 | vnic_intr_unmask(&enic->intr[io_intr]); |
| 492 | return IRQ_NONE; /* not our interrupt */ | 471 | return IRQ_NONE; /* not our interrupt */ |
| 493 | } | 472 | } |
| 494 | 473 | ||
| 495 | if (ENIC_TEST_INTR(pba, ENIC_INTX_NOTIFY)) { | 474 | if (ENIC_TEST_INTR(pba, notify_intr)) { |
| 496 | vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_NOTIFY]); | 475 | vnic_intr_return_all_credits(&enic->intr[notify_intr]); |
| 497 | enic_notify_check(enic); | 476 | enic_notify_check(enic); |
| 498 | } | 477 | } |
| 499 | 478 | ||
| 500 | if (ENIC_TEST_INTR(pba, ENIC_INTX_ERR)) { | 479 | if (ENIC_TEST_INTR(pba, err_intr)) { |
| 501 | vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_ERR]); | 480 | vnic_intr_return_all_credits(&enic->intr[err_intr]); |
| 502 | enic_log_q_error(enic); | 481 | enic_log_q_error(enic); |
| 503 | /* schedule recovery from WQ/RQ error */ | 482 | /* schedule recovery from WQ/RQ error */ |
| 504 | schedule_work(&enic->reset); | 483 | schedule_work(&enic->reset); |
| 505 | return IRQ_HANDLED; | 484 | return IRQ_HANDLED; |
| 506 | } | 485 | } |
| 507 | 486 | ||
| 508 | if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) { | 487 | if (ENIC_TEST_INTR(pba, io_intr)) { |
| 509 | if (napi_schedule_prep(&enic->napi)) | 488 | if (napi_schedule_prep(&enic->napi[0])) |
| 510 | __napi_schedule(&enic->napi); | 489 | __napi_schedule(&enic->napi[0]); |
| 511 | } else { | 490 | } else { |
| 512 | vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]); | 491 | vnic_intr_unmask(&enic->intr[io_intr]); |
| 513 | } | 492 | } |
| 514 | 493 | ||
| 515 | return IRQ_HANDLED; | 494 | return IRQ_HANDLED; |
| @@ -535,17 +514,17 @@ static irqreturn_t enic_isr_msi(int irq, void *data) | |||
| 535 | * writes). | 514 | * writes). |
| 536 | */ | 515 | */ |
| 537 | 516 | ||
| 538 | napi_schedule(&enic->napi); | 517 | napi_schedule(&enic->napi[0]); |
| 539 | 518 | ||
| 540 | return IRQ_HANDLED; | 519 | return IRQ_HANDLED; |
| 541 | } | 520 | } |
| 542 | 521 | ||
| 543 | static irqreturn_t enic_isr_msix_rq(int irq, void *data) | 522 | static irqreturn_t enic_isr_msix_rq(int irq, void *data) |
| 544 | { | 523 | { |
| 545 | struct enic *enic = data; | 524 | struct napi_struct *napi = data; |
| 546 | 525 | ||
| 547 | /* schedule NAPI polling for RQ cleanup */ | 526 | /* schedule NAPI polling for RQ cleanup */ |
| 548 | napi_schedule(&enic->napi); | 527 | napi_schedule(napi); |
| 549 | 528 | ||
| 550 | return IRQ_HANDLED; | 529 | return IRQ_HANDLED; |
| 551 | } | 530 | } |
| @@ -553,13 +532,15 @@ static irqreturn_t enic_isr_msix_rq(int irq, void *data) | |||
| 553 | static irqreturn_t enic_isr_msix_wq(int irq, void *data) | 532 | static irqreturn_t enic_isr_msix_wq(int irq, void *data) |
| 554 | { | 533 | { |
| 555 | struct enic *enic = data; | 534 | struct enic *enic = data; |
| 535 | unsigned int cq = enic_cq_wq(enic, 0); | ||
| 536 | unsigned int intr = enic_msix_wq_intr(enic, 0); | ||
| 556 | unsigned int wq_work_to_do = -1; /* no limit */ | 537 | unsigned int wq_work_to_do = -1; /* no limit */ |
| 557 | unsigned int wq_work_done; | 538 | unsigned int wq_work_done; |
| 558 | 539 | ||
| 559 | wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ], | 540 | wq_work_done = vnic_cq_service(&enic->cq[cq], |
| 560 | wq_work_to_do, enic_wq_service, NULL); | 541 | wq_work_to_do, enic_wq_service, NULL); |
| 561 | 542 | ||
| 562 | vnic_intr_return_credits(&enic->intr[ENIC_MSIX_WQ], | 543 | vnic_intr_return_credits(&enic->intr[intr], |
| 563 | wq_work_done, | 544 | wq_work_done, |
| 564 | 1 /* unmask intr */, | 545 | 1 /* unmask intr */, |
| 565 | 1 /* reset intr timer */); | 546 | 1 /* reset intr timer */); |
| @@ -570,8 +551,9 @@ static irqreturn_t enic_isr_msix_wq(int irq, void *data) | |||
| 570 | static irqreturn_t enic_isr_msix_err(int irq, void *data) | 551 | static irqreturn_t enic_isr_msix_err(int irq, void *data) |
| 571 | { | 552 | { |
| 572 | struct enic *enic = data; | 553 | struct enic *enic = data; |
| 554 | unsigned int intr = enic_msix_err_intr(enic); | ||
| 573 | 555 | ||
| 574 | vnic_intr_return_all_credits(&enic->intr[ENIC_MSIX_ERR]); | 556 | vnic_intr_return_all_credits(&enic->intr[intr]); |
| 575 | 557 | ||
| 576 | enic_log_q_error(enic); | 558 | enic_log_q_error(enic); |
| 577 | 559 | ||
| @@ -584,8 +566,9 @@ static irqreturn_t enic_isr_msix_err(int irq, void *data) | |||
| 584 | static irqreturn_t enic_isr_msix_notify(int irq, void *data) | 566 | static irqreturn_t enic_isr_msix_notify(int irq, void *data) |
| 585 | { | 567 | { |
| 586 | struct enic *enic = data; | 568 | struct enic *enic = data; |
| 569 | unsigned int intr = enic_msix_notify_intr(enic); | ||
| 587 | 570 | ||
| 588 | vnic_intr_return_all_credits(&enic->intr[ENIC_MSIX_NOTIFY]); | 571 | vnic_intr_return_all_credits(&enic->intr[intr]); |
| 589 | enic_notify_check(enic); | 572 | enic_notify_check(enic); |
| 590 | 573 | ||
| 591 | return IRQ_HANDLED; | 574 | return IRQ_HANDLED; |
| @@ -640,7 +623,7 @@ static inline void enic_queue_wq_skb_csum_l4(struct enic *enic, | |||
| 640 | { | 623 | { |
| 641 | unsigned int head_len = skb_headlen(skb); | 624 | unsigned int head_len = skb_headlen(skb); |
| 642 | unsigned int len_left = skb->len - head_len; | 625 | unsigned int len_left = skb->len - head_len; |
| 643 | unsigned int hdr_len = skb_transport_offset(skb); | 626 | unsigned int hdr_len = skb_checksum_start_offset(skb); |
| 644 | unsigned int csum_offset = hdr_len + skb->csum_offset; | 627 | unsigned int csum_offset = hdr_len + skb->csum_offset; |
| 645 | int eop = (len_left == 0); | 628 | int eop = (len_left == 0); |
| 646 | 629 | ||
| @@ -743,7 +726,7 @@ static inline void enic_queue_wq_skb(struct enic *enic, | |||
| 743 | int vlan_tag_insert = 0; | 726 | int vlan_tag_insert = 0; |
| 744 | int loopback = 0; | 727 | int loopback = 0; |
| 745 | 728 | ||
| 746 | if (enic->vlan_group && vlan_tx_tag_present(skb)) { | 729 | if (vlan_tx_tag_present(skb)) { |
| 747 | /* VLAN tag from trunking driver */ | 730 | /* VLAN tag from trunking driver */ |
| 748 | vlan_tag_insert = 1; | 731 | vlan_tag_insert = 1; |
| 749 | vlan_tag = vlan_tx_tag_get(skb); | 732 | vlan_tag = vlan_tx_tag_get(skb); |
| @@ -834,9 +817,10 @@ static struct net_device_stats *enic_get_stats(struct net_device *netdev) | |||
| 834 | return net_stats; | 817 | return net_stats; |
| 835 | } | 818 | } |
| 836 | 819 | ||
| 837 | static void enic_reset_multicast_list(struct enic *enic) | 820 | void enic_reset_addr_lists(struct enic *enic) |
| 838 | { | 821 | { |
| 839 | enic->mc_count = 0; | 822 | enic->mc_count = 0; |
| 823 | enic->uc_count = 0; | ||
| 840 | enic->flags = 0; | 824 | enic->flags = 0; |
| 841 | } | 825 | } |
| 842 | 826 | ||
| @@ -857,32 +841,6 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr) | |||
| 857 | return 0; | 841 | return 0; |
| 858 | } | 842 | } |
| 859 | 843 | ||
| 860 | static int enic_dev_add_station_addr(struct enic *enic) | ||
| 861 | { | ||
| 862 | int err = 0; | ||
| 863 | |||
| 864 | if (is_valid_ether_addr(enic->netdev->dev_addr)) { | ||
| 865 | spin_lock(&enic->devcmd_lock); | ||
| 866 | err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr); | ||
| 867 | spin_unlock(&enic->devcmd_lock); | ||
| 868 | } | ||
| 869 | |||
| 870 | return err; | ||
| 871 | } | ||
| 872 | |||
| 873 | static int enic_dev_del_station_addr(struct enic *enic) | ||
| 874 | { | ||
| 875 | int err = 0; | ||
| 876 | |||
| 877 | if (is_valid_ether_addr(enic->netdev->dev_addr)) { | ||
| 878 | spin_lock(&enic->devcmd_lock); | ||
| 879 | err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr); | ||
| 880 | spin_unlock(&enic->devcmd_lock); | ||
| 881 | } | ||
| 882 | |||
| 883 | return err; | ||
| 884 | } | ||
| 885 | |||
| 886 | static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p) | 844 | static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p) |
| 887 | { | 845 | { |
| 888 | struct enic *enic = netdev_priv(netdev); | 846 | struct enic *enic = netdev_priv(netdev); |
| @@ -911,67 +869,35 @@ static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p) | |||
| 911 | 869 | ||
| 912 | static int enic_set_mac_address(struct net_device *netdev, void *p) | 870 | static int enic_set_mac_address(struct net_device *netdev, void *p) |
| 913 | { | 871 | { |
| 914 | return -EOPNOTSUPP; | 872 | struct sockaddr *saddr = p; |
| 915 | } | 873 | char *addr = saddr->sa_data; |
| 916 | 874 | struct enic *enic = netdev_priv(netdev); | |
| 917 | static int enic_dev_packet_filter(struct enic *enic, int directed, | ||
| 918 | int multicast, int broadcast, int promisc, int allmulti) | ||
| 919 | { | ||
| 920 | int err; | ||
| 921 | |||
| 922 | spin_lock(&enic->devcmd_lock); | ||
| 923 | err = vnic_dev_packet_filter(enic->vdev, directed, | ||
| 924 | multicast, broadcast, promisc, allmulti); | ||
| 925 | spin_unlock(&enic->devcmd_lock); | ||
| 926 | |||
| 927 | return err; | ||
| 928 | } | ||
| 929 | |||
| 930 | static int enic_dev_add_multicast_addr(struct enic *enic, u8 *addr) | ||
| 931 | { | ||
| 932 | int err; | 875 | int err; |
| 933 | 876 | ||
| 934 | spin_lock(&enic->devcmd_lock); | 877 | err = enic_dev_del_station_addr(enic); |
| 935 | err = vnic_dev_add_addr(enic->vdev, addr); | 878 | if (err) |
| 936 | spin_unlock(&enic->devcmd_lock); | 879 | return err; |
| 937 | |||
| 938 | return err; | ||
| 939 | } | ||
| 940 | |||
| 941 | static int enic_dev_del_multicast_addr(struct enic *enic, u8 *addr) | ||
| 942 | { | ||
| 943 | int err; | ||
| 944 | 880 | ||
| 945 | spin_lock(&enic->devcmd_lock); | 881 | err = enic_set_mac_addr(netdev, addr); |
| 946 | err = vnic_dev_del_addr(enic->vdev, addr); | 882 | if (err) |
| 947 | spin_unlock(&enic->devcmd_lock); | 883 | return err; |
| 948 | 884 | ||
| 949 | return err; | 885 | return enic_dev_add_station_addr(enic); |
| 950 | } | 886 | } |
| 951 | 887 | ||
| 952 | /* netif_tx_lock held, BHs disabled */ | 888 | static void enic_update_multicast_addr_list(struct enic *enic) |
| 953 | static void enic_set_multicast_list(struct net_device *netdev) | ||
| 954 | { | 889 | { |
| 955 | struct enic *enic = netdev_priv(netdev); | 890 | struct net_device *netdev = enic->netdev; |
| 956 | struct netdev_hw_addr *ha; | 891 | struct netdev_hw_addr *ha; |
| 957 | int directed = 1; | ||
| 958 | int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0; | ||
| 959 | int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0; | ||
| 960 | int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0; | ||
| 961 | unsigned int mc_count = netdev_mc_count(netdev); | 892 | unsigned int mc_count = netdev_mc_count(netdev); |
| 962 | int allmulti = (netdev->flags & IFF_ALLMULTI) || | ||
| 963 | mc_count > ENIC_MULTICAST_PERFECT_FILTERS; | ||
| 964 | unsigned int flags = netdev->flags | (allmulti ? IFF_ALLMULTI : 0); | ||
| 965 | u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN]; | 893 | u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN]; |
| 966 | unsigned int i, j; | 894 | unsigned int i, j; |
| 967 | 895 | ||
| 968 | if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) | 896 | if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) { |
| 897 | netdev_warn(netdev, "Registering only %d out of %d " | ||
| 898 | "multicast addresses\n", | ||
| 899 | ENIC_MULTICAST_PERFECT_FILTERS, mc_count); | ||
| 969 | mc_count = ENIC_MULTICAST_PERFECT_FILTERS; | 900 | mc_count = ENIC_MULTICAST_PERFECT_FILTERS; |
| 970 | |||
| 971 | if (enic->flags != flags) { | ||
| 972 | enic->flags = flags; | ||
| 973 | enic_dev_packet_filter(enic, directed, | ||
| 974 | multicast, broadcast, promisc, allmulti); | ||
| 975 | } | 901 | } |
| 976 | 902 | ||
| 977 | /* Is there an easier way? Trying to minimize to | 903 | /* Is there an easier way? Trying to minimize to |
| @@ -993,7 +919,7 @@ static void enic_set_multicast_list(struct net_device *netdev) | |||
| 993 | mc_addr[j]) == 0) | 919 | mc_addr[j]) == 0) |
| 994 | break; | 920 | break; |
| 995 | if (j == mc_count) | 921 | if (j == mc_count) |
| 996 | enic_dev_del_multicast_addr(enic, enic->mc_addr[i]); | 922 | enic_dev_del_addr(enic, enic->mc_addr[i]); |
| 997 | } | 923 | } |
| 998 | 924 | ||
| 999 | for (i = 0; i < mc_count; i++) { | 925 | for (i = 0; i < mc_count; i++) { |
| @@ -1002,7 +928,7 @@ static void enic_set_multicast_list(struct net_device *netdev) | |||
| 1002 | enic->mc_addr[j]) == 0) | 928 | enic->mc_addr[j]) == 0) |
| 1003 | break; | 929 | break; |
| 1004 | if (j == enic->mc_count) | 930 | if (j == enic->mc_count) |
| 1005 | enic_dev_add_multicast_addr(enic, mc_addr[i]); | 931 | enic_dev_add_addr(enic, mc_addr[i]); |
| 1006 | } | 932 | } |
| 1007 | 933 | ||
| 1008 | /* Save the list to compare against next time | 934 | /* Save the list to compare against next time |
| @@ -1014,151 +940,140 @@ static void enic_set_multicast_list(struct net_device *netdev) | |||
| 1014 | enic->mc_count = mc_count; | 940 | enic->mc_count = mc_count; |
| 1015 | } | 941 | } |
| 1016 | 942 | ||
| 1017 | /* rtnl lock is held */ | 943 | static void enic_update_unicast_addr_list(struct enic *enic) |
| 1018 | static void enic_vlan_rx_register(struct net_device *netdev, | ||
| 1019 | struct vlan_group *vlan_group) | ||
| 1020 | { | 944 | { |
| 1021 | struct enic *enic = netdev_priv(netdev); | 945 | struct net_device *netdev = enic->netdev; |
| 1022 | enic->vlan_group = vlan_group; | 946 | struct netdev_hw_addr *ha; |
| 1023 | } | 947 | unsigned int uc_count = netdev_uc_count(netdev); |
| 948 | u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN]; | ||
| 949 | unsigned int i, j; | ||
| 1024 | 950 | ||
| 1025 | /* rtnl lock is held */ | 951 | if (uc_count > ENIC_UNICAST_PERFECT_FILTERS) { |
| 1026 | static void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid) | 952 | netdev_warn(netdev, "Registering only %d out of %d " |
| 1027 | { | 953 | "unicast addresses\n", |
| 1028 | struct enic *enic = netdev_priv(netdev); | 954 | ENIC_UNICAST_PERFECT_FILTERS, uc_count); |
| 955 | uc_count = ENIC_UNICAST_PERFECT_FILTERS; | ||
| 956 | } | ||
| 1029 | 957 | ||
| 1030 | spin_lock(&enic->devcmd_lock); | 958 | /* Is there an easier way? Trying to minimize to |
| 1031 | enic_add_vlan(enic, vid); | 959 | * calls to add/del unicast addrs. We keep the |
| 1032 | spin_unlock(&enic->devcmd_lock); | 960 | * addrs from the last call in enic->uc_addr and |
| 1033 | } | 961 | * look for changes to add/del. |
| 962 | */ | ||
| 1034 | 963 | ||
| 1035 | /* rtnl lock is held */ | 964 | i = 0; |
| 1036 | static void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | 965 | netdev_for_each_uc_addr(ha, netdev) { |
| 1037 | { | 966 | if (i == uc_count) |
| 1038 | struct enic *enic = netdev_priv(netdev); | 967 | break; |
| 968 | memcpy(uc_addr[i++], ha->addr, ETH_ALEN); | ||
| 969 | } | ||
| 1039 | 970 | ||
| 1040 | spin_lock(&enic->devcmd_lock); | 971 | for (i = 0; i < enic->uc_count; i++) { |
| 1041 | enic_del_vlan(enic, vid); | 972 | for (j = 0; j < uc_count; j++) |
| 1042 | spin_unlock(&enic->devcmd_lock); | 973 | if (compare_ether_addr(enic->uc_addr[i], |
| 1043 | } | 974 | uc_addr[j]) == 0) |
| 975 | break; | ||
| 976 | if (j == uc_count) | ||
| 977 | enic_dev_del_addr(enic, enic->uc_addr[i]); | ||
| 978 | } | ||
| 1044 | 979 | ||
| 1045 | /* netif_tx_lock held, BHs disabled */ | 980 | for (i = 0; i < uc_count; i++) { |
| 1046 | static void enic_tx_timeout(struct net_device *netdev) | 981 | for (j = 0; j < enic->uc_count; j++) |
| 1047 | { | 982 | if (compare_ether_addr(uc_addr[i], |
| 1048 | struct enic *enic = netdev_priv(netdev); | 983 | enic->uc_addr[j]) == 0) |
| 1049 | schedule_work(&enic->reset); | 984 | break; |
| 1050 | } | 985 | if (j == enic->uc_count) |
| 986 | enic_dev_add_addr(enic, uc_addr[i]); | ||
| 987 | } | ||
| 1051 | 988 | ||
| 1052 | static int enic_vnic_dev_deinit(struct enic *enic) | 989 | /* Save the list to compare against next time |
| 1053 | { | 990 | */ |
| 1054 | int err; | ||
| 1055 | 991 | ||
| 1056 | spin_lock(&enic->devcmd_lock); | 992 | for (i = 0; i < uc_count; i++) |
| 1057 | err = vnic_dev_deinit(enic->vdev); | 993 | memcpy(enic->uc_addr[i], uc_addr[i], ETH_ALEN); |
| 1058 | spin_unlock(&enic->devcmd_lock); | ||
| 1059 | 994 | ||
| 1060 | return err; | 995 | enic->uc_count = uc_count; |
| 1061 | } | 996 | } |
| 1062 | 997 | ||
| 1063 | static int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp) | 998 | /* netif_tx_lock held, BHs disabled */ |
| 999 | static void enic_set_rx_mode(struct net_device *netdev) | ||
| 1064 | { | 1000 | { |
| 1065 | int err; | 1001 | struct enic *enic = netdev_priv(netdev); |
| 1002 | int directed = 1; | ||
| 1003 | int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0; | ||
| 1004 | int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0; | ||
| 1005 | int promisc = (netdev->flags & IFF_PROMISC) || | ||
| 1006 | netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS; | ||
| 1007 | int allmulti = (netdev->flags & IFF_ALLMULTI) || | ||
| 1008 | netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS; | ||
| 1009 | unsigned int flags = netdev->flags | | ||
| 1010 | (allmulti ? IFF_ALLMULTI : 0) | | ||
| 1011 | (promisc ? IFF_PROMISC : 0); | ||
| 1066 | 1012 | ||
| 1067 | spin_lock(&enic->devcmd_lock); | 1013 | if (enic->flags != flags) { |
| 1068 | err = vnic_dev_init_prov(enic->vdev, | 1014 | enic->flags = flags; |
| 1069 | (u8 *)vp, vic_provinfo_size(vp)); | 1015 | enic_dev_packet_filter(enic, directed, |
| 1070 | spin_unlock(&enic->devcmd_lock); | 1016 | multicast, broadcast, promisc, allmulti); |
| 1017 | } | ||
| 1071 | 1018 | ||
| 1072 | return err; | 1019 | if (!promisc) { |
| 1020 | enic_update_unicast_addr_list(enic); | ||
| 1021 | if (!allmulti) | ||
| 1022 | enic_update_multicast_addr_list(enic); | ||
| 1023 | } | ||
| 1073 | } | 1024 | } |
| 1074 | 1025 | ||
| 1075 | static int enic_dev_init_done(struct enic *enic, int *done, int *error) | 1026 | /* rtnl lock is held */ |
| 1027 | static void enic_vlan_rx_register(struct net_device *netdev, | ||
| 1028 | struct vlan_group *vlan_group) | ||
| 1076 | { | 1029 | { |
| 1077 | int err; | 1030 | struct enic *enic = netdev_priv(netdev); |
| 1078 | 1031 | enic->vlan_group = vlan_group; | |
| 1079 | spin_lock(&enic->devcmd_lock); | ||
| 1080 | err = vnic_dev_init_done(enic->vdev, done, error); | ||
| 1081 | spin_unlock(&enic->devcmd_lock); | ||
| 1082 | |||
| 1083 | return err; | ||
| 1084 | } | 1032 | } |
| 1085 | 1033 | ||
| 1086 | static int enic_set_port_profile(struct enic *enic, u8 *mac) | 1034 | /* netif_tx_lock held, BHs disabled */ |
| 1035 | static void enic_tx_timeout(struct net_device *netdev) | ||
| 1087 | { | 1036 | { |
| 1088 | struct vic_provinfo *vp; | 1037 | struct enic *enic = netdev_priv(netdev); |
| 1089 | u8 oui[3] = VIC_PROVINFO_CISCO_OUI; | 1038 | schedule_work(&enic->reset); |
| 1090 | char uuid_str[38]; | 1039 | } |
| 1091 | int err; | ||
| 1092 | |||
| 1093 | err = enic_vnic_dev_deinit(enic); | ||
| 1094 | if (err) | ||
| 1095 | return err; | ||
| 1096 | |||
| 1097 | switch (enic->pp.request) { | ||
| 1098 | |||
| 1099 | case PORT_REQUEST_ASSOCIATE: | ||
| 1100 | |||
| 1101 | if (!(enic->pp.set & ENIC_SET_NAME) || !strlen(enic->pp.name)) | ||
| 1102 | return -EINVAL; | ||
| 1103 | |||
| 1104 | if (!is_valid_ether_addr(mac)) | ||
| 1105 | return -EADDRNOTAVAIL; | ||
| 1106 | |||
| 1107 | vp = vic_provinfo_alloc(GFP_KERNEL, oui, | ||
| 1108 | VIC_PROVINFO_LINUX_TYPE); | ||
| 1109 | if (!vp) | ||
| 1110 | return -ENOMEM; | ||
| 1111 | |||
| 1112 | vic_provinfo_add_tlv(vp, | ||
| 1113 | VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR, | ||
| 1114 | strlen(enic->pp.name) + 1, enic->pp.name); | ||
| 1115 | |||
| 1116 | vic_provinfo_add_tlv(vp, | ||
| 1117 | VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR, | ||
| 1118 | ETH_ALEN, mac); | ||
| 1119 | |||
| 1120 | if (enic->pp.set & ENIC_SET_INSTANCE) { | ||
| 1121 | sprintf(uuid_str, "%pUB", enic->pp.instance_uuid); | ||
| 1122 | vic_provinfo_add_tlv(vp, | ||
| 1123 | VIC_LINUX_PROV_TLV_CLIENT_UUID_STR, | ||
| 1124 | sizeof(uuid_str), uuid_str); | ||
| 1125 | } | ||
| 1126 | |||
| 1127 | if (enic->pp.set & ENIC_SET_HOST) { | ||
| 1128 | sprintf(uuid_str, "%pUB", enic->pp.host_uuid); | ||
| 1129 | vic_provinfo_add_tlv(vp, | ||
| 1130 | VIC_LINUX_PROV_TLV_HOST_UUID_STR, | ||
| 1131 | sizeof(uuid_str), uuid_str); | ||
| 1132 | } | ||
| 1133 | 1040 | ||
| 1134 | err = enic_dev_init_prov(enic, vp); | 1041 | static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) |
| 1135 | vic_provinfo_free(vp); | 1042 | { |
| 1136 | if (err) | 1043 | struct enic *enic = netdev_priv(netdev); |
| 1137 | return err; | ||
| 1138 | break; | ||
| 1139 | 1044 | ||
| 1140 | case PORT_REQUEST_DISASSOCIATE: | 1045 | if (vf != PORT_SELF_VF) |
| 1141 | break; | 1046 | return -EOPNOTSUPP; |
| 1142 | 1047 | ||
| 1143 | default: | 1048 | /* Ignore the vf argument for now. We can assume the request |
| 1049 | * is coming on a vf. | ||
| 1050 | */ | ||
| 1051 | if (is_valid_ether_addr(mac)) { | ||
| 1052 | memcpy(enic->pp.vf_mac, mac, ETH_ALEN); | ||
| 1053 | return 0; | ||
| 1054 | } else | ||
| 1144 | return -EINVAL; | 1055 | return -EINVAL; |
| 1145 | } | ||
| 1146 | |||
| 1147 | enic->pp.set |= ENIC_SET_APPLIED; | ||
| 1148 | return 0; | ||
| 1149 | } | 1056 | } |
| 1150 | 1057 | ||
| 1151 | static int enic_set_vf_port(struct net_device *netdev, int vf, | 1058 | static int enic_set_vf_port(struct net_device *netdev, int vf, |
| 1152 | struct nlattr *port[]) | 1059 | struct nlattr *port[]) |
| 1153 | { | 1060 | { |
| 1154 | struct enic *enic = netdev_priv(netdev); | 1061 | struct enic *enic = netdev_priv(netdev); |
| 1062 | struct enic_port_profile prev_pp; | ||
| 1063 | int err = 0, restore_pp = 1; | ||
| 1064 | |||
| 1065 | /* don't support VFs, yet */ | ||
| 1066 | if (vf != PORT_SELF_VF) | ||
| 1067 | return -EOPNOTSUPP; | ||
| 1155 | 1068 | ||
| 1069 | if (!port[IFLA_PORT_REQUEST]) | ||
| 1070 | return -EOPNOTSUPP; | ||
| 1071 | |||
| 1072 | memcpy(&prev_pp, &enic->pp, sizeof(enic->pp)); | ||
| 1156 | memset(&enic->pp, 0, sizeof(enic->pp)); | 1073 | memset(&enic->pp, 0, sizeof(enic->pp)); |
| 1157 | 1074 | ||
| 1158 | if (port[IFLA_PORT_REQUEST]) { | 1075 | enic->pp.set |= ENIC_SET_REQUEST; |
| 1159 | enic->pp.set |= ENIC_SET_REQUEST; | 1076 | enic->pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]); |
| 1160 | enic->pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]); | ||
| 1161 | } | ||
| 1162 | 1077 | ||
| 1163 | if (port[IFLA_PORT_PROFILE]) { | 1078 | if (port[IFLA_PORT_PROFILE]) { |
| 1164 | enic->pp.set |= ENIC_SET_NAME; | 1079 | enic->pp.set |= ENIC_SET_NAME; |
| @@ -1178,59 +1093,55 @@ static int enic_set_vf_port(struct net_device *netdev, int vf, | |||
| 1178 | nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX); | 1093 | nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX); |
| 1179 | } | 1094 | } |
| 1180 | 1095 | ||
| 1181 | /* don't support VFs, yet */ | 1096 | /* Special case handling: mac came from IFLA_VF_MAC */ |
| 1182 | if (vf != PORT_SELF_VF) | 1097 | if (!is_zero_ether_addr(prev_pp.vf_mac)) |
| 1183 | return -EOPNOTSUPP; | 1098 | memcpy(enic->pp.mac_addr, prev_pp.vf_mac, ETH_ALEN); |
| 1184 | 1099 | ||
| 1185 | if (!(enic->pp.set & ENIC_SET_REQUEST)) | 1100 | if (is_zero_ether_addr(netdev->dev_addr)) |
| 1186 | return -EOPNOTSUPP; | 1101 | random_ether_addr(netdev->dev_addr); |
| 1187 | |||
| 1188 | if (enic->pp.request == PORT_REQUEST_ASSOCIATE) { | ||
| 1189 | 1102 | ||
| 1190 | /* If the interface mac addr hasn't been assigned, | 1103 | err = enic_process_set_pp_request(enic, &prev_pp, &restore_pp); |
| 1191 | * assign a random mac addr before setting port- | 1104 | if (err) { |
| 1192 | * profile. | 1105 | if (restore_pp) { |
| 1106 | /* Things are still the way they were: Implicit | ||
| 1107 | * DISASSOCIATE failed | ||
| 1108 | */ | ||
| 1109 | memcpy(&enic->pp, &prev_pp, sizeof(enic->pp)); | ||
| 1110 | } else { | ||
| 1111 | memset(&enic->pp, 0, sizeof(enic->pp)); | ||
| 1112 | memset(netdev->dev_addr, 0, ETH_ALEN); | ||
| 1113 | } | ||
| 1114 | } else { | ||
| 1115 | /* Set flag to indicate that the port assoc/disassoc | ||
| 1116 | * request has been sent out to fw | ||
| 1193 | */ | 1117 | */ |
| 1118 | enic->pp.set |= ENIC_PORT_REQUEST_APPLIED; | ||
| 1194 | 1119 | ||
| 1195 | if (is_zero_ether_addr(netdev->dev_addr)) | 1120 | /* If DISASSOCIATE, clean up all assigned/saved macaddresses */ |
| 1196 | random_ether_addr(netdev->dev_addr); | 1121 | if (enic->pp.request == PORT_REQUEST_DISASSOCIATE) { |
| 1122 | memset(enic->pp.mac_addr, 0, ETH_ALEN); | ||
| 1123 | memset(netdev->dev_addr, 0, ETH_ALEN); | ||
| 1124 | } | ||
| 1197 | } | 1125 | } |
| 1198 | 1126 | ||
| 1199 | return enic_set_port_profile(enic, netdev->dev_addr); | 1127 | memset(enic->pp.vf_mac, 0, ETH_ALEN); |
| 1128 | |||
| 1129 | return err; | ||
| 1200 | } | 1130 | } |
| 1201 | 1131 | ||
| 1202 | static int enic_get_vf_port(struct net_device *netdev, int vf, | 1132 | static int enic_get_vf_port(struct net_device *netdev, int vf, |
| 1203 | struct sk_buff *skb) | 1133 | struct sk_buff *skb) |
| 1204 | { | 1134 | { |
| 1205 | struct enic *enic = netdev_priv(netdev); | 1135 | struct enic *enic = netdev_priv(netdev); |
| 1206 | int err, error, done; | ||
| 1207 | u16 response = PORT_PROFILE_RESPONSE_SUCCESS; | 1136 | u16 response = PORT_PROFILE_RESPONSE_SUCCESS; |
| 1137 | int err; | ||
| 1208 | 1138 | ||
| 1209 | if (!(enic->pp.set & ENIC_SET_APPLIED)) | 1139 | if (!(enic->pp.set & ENIC_PORT_REQUEST_APPLIED)) |
| 1210 | return -ENODATA; | 1140 | return -ENODATA; |
| 1211 | 1141 | ||
| 1212 | err = enic_dev_init_done(enic, &done, &error); | 1142 | err = enic_process_get_pp_request(enic, enic->pp.request, &response); |
| 1213 | if (err) | 1143 | if (err) |
| 1214 | error = err; | 1144 | return err; |
| 1215 | |||
| 1216 | switch (error) { | ||
| 1217 | case ERR_SUCCESS: | ||
| 1218 | if (!done) | ||
| 1219 | response = PORT_PROFILE_RESPONSE_INPROGRESS; | ||
| 1220 | break; | ||
| 1221 | case ERR_EINVAL: | ||
| 1222 | response = PORT_PROFILE_RESPONSE_INVALID; | ||
| 1223 | break; | ||
| 1224 | case ERR_EBADSTATE: | ||
| 1225 | response = PORT_PROFILE_RESPONSE_BADSTATE; | ||
| 1226 | break; | ||
| 1227 | case ERR_ENOMEM: | ||
| 1228 | response = PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES; | ||
| 1229 | break; | ||
| 1230 | default: | ||
| 1231 | response = PORT_PROFILE_RESPONSE_ERROR; | ||
| 1232 | break; | ||
| 1233 | } | ||
| 1234 | 1145 | ||
| 1235 | NLA_PUT_U16(skb, IFLA_PORT_REQUEST, enic->pp.request); | 1146 | NLA_PUT_U16(skb, IFLA_PORT_REQUEST, enic->pp.request); |
| 1236 | NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response); | 1147 | NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response); |
| @@ -1284,62 +1195,6 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq) | |||
| 1284 | return 0; | 1195 | return 0; |
| 1285 | } | 1196 | } |
| 1286 | 1197 | ||
| 1287 | static int enic_rq_alloc_buf_a1(struct vnic_rq *rq) | ||
| 1288 | { | ||
| 1289 | struct rq_enet_desc *desc = vnic_rq_next_desc(rq); | ||
| 1290 | |||
| 1291 | if (vnic_rq_posting_soon(rq)) { | ||
| 1292 | |||
| 1293 | /* SW workaround for A0 HW erratum: if we're just about | ||
| 1294 | * to write posted_index, insert a dummy desc | ||
| 1295 | * of type resvd | ||
| 1296 | */ | ||
| 1297 | |||
| 1298 | rq_enet_desc_enc(desc, 0, RQ_ENET_TYPE_RESV2, 0); | ||
| 1299 | vnic_rq_post(rq, 0, 0, 0, 0); | ||
| 1300 | } else { | ||
| 1301 | return enic_rq_alloc_buf(rq); | ||
| 1302 | } | ||
| 1303 | |||
| 1304 | return 0; | ||
| 1305 | } | ||
| 1306 | |||
| 1307 | static int enic_dev_hw_version(struct enic *enic, | ||
| 1308 | enum vnic_dev_hw_version *hw_ver) | ||
| 1309 | { | ||
| 1310 | int err; | ||
| 1311 | |||
| 1312 | spin_lock(&enic->devcmd_lock); | ||
| 1313 | err = vnic_dev_hw_version(enic->vdev, hw_ver); | ||
| 1314 | spin_unlock(&enic->devcmd_lock); | ||
| 1315 | |||
| 1316 | return err; | ||
| 1317 | } | ||
| 1318 | |||
| 1319 | static int enic_set_rq_alloc_buf(struct enic *enic) | ||
| 1320 | { | ||
| 1321 | enum vnic_dev_hw_version hw_ver; | ||
| 1322 | int err; | ||
| 1323 | |||
| 1324 | err = enic_dev_hw_version(enic, &hw_ver); | ||
| 1325 | if (err) | ||
| 1326 | return err; | ||
| 1327 | |||
| 1328 | switch (hw_ver) { | ||
| 1329 | case VNIC_DEV_HW_VER_A1: | ||
| 1330 | enic->rq_alloc_buf = enic_rq_alloc_buf_a1; | ||
| 1331 | break; | ||
| 1332 | case VNIC_DEV_HW_VER_A2: | ||
| 1333 | case VNIC_DEV_HW_VER_UNKNOWN: | ||
| 1334 | enic->rq_alloc_buf = enic_rq_alloc_buf; | ||
| 1335 | break; | ||
| 1336 | default: | ||
| 1337 | return -ENODEV; | ||
| 1338 | } | ||
| 1339 | |||
| 1340 | return 0; | ||
| 1341 | } | ||
| 1342 | |||
| 1343 | static void enic_rq_indicate_buf(struct vnic_rq *rq, | 1198 | static void enic_rq_indicate_buf(struct vnic_rq *rq, |
| 1344 | struct cq_desc *cq_desc, struct vnic_rq_buf *buf, | 1199 | struct cq_desc *cq_desc, struct vnic_rq_buf *buf, |
| 1345 | int skipped, void *opaque) | 1200 | int skipped, void *opaque) |
| @@ -1396,7 +1251,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, | |||
| 1396 | skb_put(skb, bytes_written); | 1251 | skb_put(skb, bytes_written); |
| 1397 | skb->protocol = eth_type_trans(skb, netdev); | 1252 | skb->protocol = eth_type_trans(skb, netdev); |
| 1398 | 1253 | ||
| 1399 | if (enic->csum_rx_enabled && !csum_not_calc) { | 1254 | if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) { |
| 1400 | skb->csum = htons(checksum); | 1255 | skb->csum = htons(checksum); |
| 1401 | skb->ip_summed = CHECKSUM_COMPLETE; | 1256 | skb->ip_summed = CHECKSUM_COMPLETE; |
| 1402 | } | 1257 | } |
| @@ -1407,8 +1262,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, | |||
| 1407 | (vlan_tci & CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_MASK)) { | 1262 | (vlan_tci & CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_MASK)) { |
| 1408 | 1263 | ||
| 1409 | if (netdev->features & NETIF_F_GRO) | 1264 | if (netdev->features & NETIF_F_GRO) |
| 1410 | vlan_gro_receive(&enic->napi, enic->vlan_group, | 1265 | vlan_gro_receive(&enic->napi[q_number], |
| 1411 | vlan_tci, skb); | 1266 | enic->vlan_group, vlan_tci, skb); |
| 1412 | else | 1267 | else |
| 1413 | vlan_hwaccel_receive_skb(skb, | 1268 | vlan_hwaccel_receive_skb(skb, |
| 1414 | enic->vlan_group, vlan_tci); | 1269 | enic->vlan_group, vlan_tci); |
| @@ -1416,12 +1271,11 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, | |||
| 1416 | } else { | 1271 | } else { |
| 1417 | 1272 | ||
| 1418 | if (netdev->features & NETIF_F_GRO) | 1273 | if (netdev->features & NETIF_F_GRO) |
| 1419 | napi_gro_receive(&enic->napi, skb); | 1274 | napi_gro_receive(&enic->napi[q_number], skb); |
| 1420 | else | 1275 | else |
| 1421 | netif_receive_skb(skb); | 1276 | netif_receive_skb(skb); |
| 1422 | 1277 | ||
| 1423 | } | 1278 | } |
| 1424 | |||
| 1425 | } else { | 1279 | } else { |
| 1426 | 1280 | ||
| 1427 | /* Buffer overflow | 1281 | /* Buffer overflow |
| @@ -1445,7 +1299,11 @@ static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, | |||
| 1445 | 1299 | ||
| 1446 | static int enic_poll(struct napi_struct *napi, int budget) | 1300 | static int enic_poll(struct napi_struct *napi, int budget) |
| 1447 | { | 1301 | { |
| 1448 | struct enic *enic = container_of(napi, struct enic, napi); | 1302 | struct net_device *netdev = napi->dev; |
| 1303 | struct enic *enic = netdev_priv(netdev); | ||
| 1304 | unsigned int cq_rq = enic_cq_rq(enic, 0); | ||
| 1305 | unsigned int cq_wq = enic_cq_wq(enic, 0); | ||
| 1306 | unsigned int intr = enic_legacy_io_intr(); | ||
| 1449 | unsigned int rq_work_to_do = budget; | 1307 | unsigned int rq_work_to_do = budget; |
| 1450 | unsigned int wq_work_to_do = -1; /* no limit */ | 1308 | unsigned int wq_work_to_do = -1; /* no limit */ |
| 1451 | unsigned int work_done, rq_work_done, wq_work_done; | 1309 | unsigned int work_done, rq_work_done, wq_work_done; |
| @@ -1454,10 +1312,10 @@ static int enic_poll(struct napi_struct *napi, int budget) | |||
| 1454 | /* Service RQ (first) and WQ | 1312 | /* Service RQ (first) and WQ |
| 1455 | */ | 1313 | */ |
| 1456 | 1314 | ||
| 1457 | rq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ], | 1315 | rq_work_done = vnic_cq_service(&enic->cq[cq_rq], |
| 1458 | rq_work_to_do, enic_rq_service, NULL); | 1316 | rq_work_to_do, enic_rq_service, NULL); |
| 1459 | 1317 | ||
| 1460 | wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ], | 1318 | wq_work_done = vnic_cq_service(&enic->cq[cq_wq], |
| 1461 | wq_work_to_do, enic_wq_service, NULL); | 1319 | wq_work_to_do, enic_wq_service, NULL); |
| 1462 | 1320 | ||
| 1463 | /* Accumulate intr event credits for this polling | 1321 | /* Accumulate intr event credits for this polling |
| @@ -1468,12 +1326,12 @@ static int enic_poll(struct napi_struct *napi, int budget) | |||
| 1468 | work_done = rq_work_done + wq_work_done; | 1326 | work_done = rq_work_done + wq_work_done; |
| 1469 | 1327 | ||
| 1470 | if (work_done > 0) | 1328 | if (work_done > 0) |
| 1471 | vnic_intr_return_credits(&enic->intr[ENIC_INTX_WQ_RQ], | 1329 | vnic_intr_return_credits(&enic->intr[intr], |
| 1472 | work_done, | 1330 | work_done, |
| 1473 | 0 /* don't unmask intr */, | 1331 | 0 /* don't unmask intr */, |
| 1474 | 0 /* don't reset intr timer */); | 1332 | 0 /* don't reset intr timer */); |
| 1475 | 1333 | ||
| 1476 | err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf); | 1334 | err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); |
| 1477 | 1335 | ||
| 1478 | /* Buffer allocation failed. Stay in polling | 1336 | /* Buffer allocation failed. Stay in polling |
| 1479 | * mode so we can try to fill the ring again. | 1337 | * mode so we can try to fill the ring again. |
| @@ -1489,7 +1347,7 @@ static int enic_poll(struct napi_struct *napi, int budget) | |||
| 1489 | */ | 1347 | */ |
| 1490 | 1348 | ||
| 1491 | napi_complete(napi); | 1349 | napi_complete(napi); |
| 1492 | vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]); | 1350 | vnic_intr_unmask(&enic->intr[intr]); |
| 1493 | } | 1351 | } |
| 1494 | 1352 | ||
| 1495 | return rq_work_done; | 1353 | return rq_work_done; |
| @@ -1497,7 +1355,11 @@ static int enic_poll(struct napi_struct *napi, int budget) | |||
| 1497 | 1355 | ||
| 1498 | static int enic_poll_msix(struct napi_struct *napi, int budget) | 1356 | static int enic_poll_msix(struct napi_struct *napi, int budget) |
| 1499 | { | 1357 | { |
| 1500 | struct enic *enic = container_of(napi, struct enic, napi); | 1358 | struct net_device *netdev = napi->dev; |
| 1359 | struct enic *enic = netdev_priv(netdev); | ||
| 1360 | unsigned int rq = (napi - &enic->napi[0]); | ||
| 1361 | unsigned int cq = enic_cq_rq(enic, rq); | ||
| 1362 | unsigned int intr = enic_msix_rq_intr(enic, rq); | ||
| 1501 | unsigned int work_to_do = budget; | 1363 | unsigned int work_to_do = budget; |
| 1502 | unsigned int work_done; | 1364 | unsigned int work_done; |
| 1503 | int err; | 1365 | int err; |
| @@ -1505,7 +1367,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget) | |||
| 1505 | /* Service RQ | 1367 | /* Service RQ |
| 1506 | */ | 1368 | */ |
| 1507 | 1369 | ||
| 1508 | work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ], | 1370 | work_done = vnic_cq_service(&enic->cq[cq], |
| 1509 | work_to_do, enic_rq_service, NULL); | 1371 | work_to_do, enic_rq_service, NULL); |
| 1510 | 1372 | ||
| 1511 | /* Return intr event credits for this polling | 1373 | /* Return intr event credits for this polling |
| @@ -1514,12 +1376,12 @@ static int enic_poll_msix(struct napi_struct *napi, int budget) | |||
| 1514 | */ | 1376 | */ |
| 1515 | 1377 | ||
| 1516 | if (work_done > 0) | 1378 | if (work_done > 0) |
| 1517 | vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ], | 1379 | vnic_intr_return_credits(&enic->intr[intr], |
| 1518 | work_done, | 1380 | work_done, |
| 1519 | 0 /* don't unmask intr */, | 1381 | 0 /* don't unmask intr */, |
| 1520 | 0 /* don't reset intr timer */); | 1382 | 0 /* don't reset intr timer */); |
| 1521 | 1383 | ||
| 1522 | err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf); | 1384 | err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf); |
| 1523 | 1385 | ||
| 1524 | /* Buffer allocation failed. Stay in polling mode | 1386 | /* Buffer allocation failed. Stay in polling mode |
| 1525 | * so we can try to fill the ring again. | 1387 | * so we can try to fill the ring again. |
| @@ -1535,7 +1397,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget) | |||
| 1535 | */ | 1397 | */ |
| 1536 | 1398 | ||
| 1537 | napi_complete(napi); | 1399 | napi_complete(napi); |
| 1538 | vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]); | 1400 | vnic_intr_unmask(&enic->intr[intr]); |
| 1539 | } | 1401 | } |
| 1540 | 1402 | ||
| 1541 | return work_done; | 1403 | return work_done; |
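In the hunks above, enic_poll_msix() recovers its receive-queue number from the napi_struct pointer itself, rq = napi - &enic->napi[0], which works because the per-RQ napi contexts are stored contiguously in one array inside struct enic. A standalone illustration of that pointer-difference idiom, with an invented struct standing in for napi_struct:

    /* Illustration of recovering an array index from an element pointer,
     * as enic_poll_msix() does with napi - &enic->napi[0]. */
    #include <stdio.h>

    struct fake_napi { int dummy; };

    int main(void)
    {
        struct fake_napi napi[4];
        struct fake_napi *arg = &napi[2];   /* what the ISR would hand to poll */
        unsigned int rq = arg - &napi[0];   /* pointer difference = element index */

        printf("rq index = %u\n", rq);      /* prints 2 */
        return 0;
    }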
| @@ -1577,7 +1439,7 @@ static void enic_free_intr(struct enic *enic) | |||
| 1577 | static int enic_request_intr(struct enic *enic) | 1439 | static int enic_request_intr(struct enic *enic) |
| 1578 | { | 1440 | { |
| 1579 | struct net_device *netdev = enic->netdev; | 1441 | struct net_device *netdev = enic->netdev; |
| 1580 | unsigned int i; | 1442 | unsigned int i, intr; |
| 1581 | int err = 0; | 1443 | int err = 0; |
| 1582 | 1444 | ||
| 1583 | switch (vnic_dev_get_intr_mode(enic->vdev)) { | 1445 | switch (vnic_dev_get_intr_mode(enic->vdev)) { |
| @@ -1596,27 +1458,38 @@ static int enic_request_intr(struct enic *enic) | |||
| 1596 | 1458 | ||
| 1597 | case VNIC_DEV_INTR_MODE_MSIX: | 1459 | case VNIC_DEV_INTR_MODE_MSIX: |
| 1598 | 1460 | ||
| 1599 | sprintf(enic->msix[ENIC_MSIX_RQ].devname, | 1461 | for (i = 0; i < enic->rq_count; i++) { |
| 1600 | "%.11s-rx-0", netdev->name); | 1462 | intr = enic_msix_rq_intr(enic, i); |
| 1601 | enic->msix[ENIC_MSIX_RQ].isr = enic_isr_msix_rq; | 1463 | sprintf(enic->msix[intr].devname, |
| 1602 | enic->msix[ENIC_MSIX_RQ].devid = enic; | 1464 | "%.11s-rx-%d", netdev->name, i); |
| 1465 | enic->msix[intr].isr = enic_isr_msix_rq; | ||
| 1466 | enic->msix[intr].devid = &enic->napi[i]; | ||
| 1467 | } | ||
| 1603 | 1468 | ||
| 1604 | sprintf(enic->msix[ENIC_MSIX_WQ].devname, | 1469 | for (i = 0; i < enic->wq_count; i++) { |
| 1605 | "%.11s-tx-0", netdev->name); | 1470 | intr = enic_msix_wq_intr(enic, i); |
| 1606 | enic->msix[ENIC_MSIX_WQ].isr = enic_isr_msix_wq; | 1471 | sprintf(enic->msix[intr].devname, |
| 1607 | enic->msix[ENIC_MSIX_WQ].devid = enic; | 1472 | "%.11s-tx-%d", netdev->name, i); |
| 1473 | enic->msix[intr].isr = enic_isr_msix_wq; | ||
| 1474 | enic->msix[intr].devid = enic; | ||
| 1475 | } | ||
| 1608 | 1476 | ||
| 1609 | sprintf(enic->msix[ENIC_MSIX_ERR].devname, | 1477 | intr = enic_msix_err_intr(enic); |
| 1478 | sprintf(enic->msix[intr].devname, | ||
| 1610 | "%.11s-err", netdev->name); | 1479 | "%.11s-err", netdev->name); |
| 1611 | enic->msix[ENIC_MSIX_ERR].isr = enic_isr_msix_err; | 1480 | enic->msix[intr].isr = enic_isr_msix_err; |
| 1612 | enic->msix[ENIC_MSIX_ERR].devid = enic; | 1481 | enic->msix[intr].devid = enic; |
| 1613 | 1482 | ||
| 1614 | sprintf(enic->msix[ENIC_MSIX_NOTIFY].devname, | 1483 | intr = enic_msix_notify_intr(enic); |
| 1484 | sprintf(enic->msix[intr].devname, | ||
| 1615 | "%.11s-notify", netdev->name); | 1485 | "%.11s-notify", netdev->name); |
| 1616 | enic->msix[ENIC_MSIX_NOTIFY].isr = enic_isr_msix_notify; | 1486 | enic->msix[intr].isr = enic_isr_msix_notify; |
| 1617 | enic->msix[ENIC_MSIX_NOTIFY].devid = enic; | 1487 | enic->msix[intr].devid = enic; |
| 1618 | 1488 | ||
| 1619 | for (i = 0; i < ARRAY_SIZE(enic->msix); i++) { | 1489 | for (i = 0; i < ARRAY_SIZE(enic->msix); i++) |
| 1490 | enic->msix[i].requested = 0; | ||
| 1491 | |||
| 1492 | for (i = 0; i < enic->intr_count; i++) { | ||
| 1620 | err = request_irq(enic->msix_entry[i].vector, | 1493 | err = request_irq(enic->msix_entry[i].vector, |
| 1621 | enic->msix[i].isr, 0, | 1494 | enic->msix[i].isr, 0, |
| 1622 | enic->msix[i].devname, | 1495 | enic->msix[i].devname, |
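With the per-queue MSI-X setup in the hunk above, the registered IRQ names follow a fixed pattern of per-RQ "rx" and per-WQ "tx" vectors plus one "err" and one "notify" vector. The standalone sketch below just prints the names the loops above would generate, using an invented device name and queue counts:

    /* Sketch of the MSI-X vector naming scheme set up above; the device
     * name and queue counts are invented for the example. */
    #include <stdio.h>

    int main(void)
    {
        const char *name = "eth0";
        unsigned int rq_count = 2, wq_count = 1, i;

        for (i = 0; i < rq_count; i++)
            printf("%.11s-rx-%u\n", name, i);
        for (i = 0; i < wq_count; i++)
            printf("%.11s-tx-%u\n", name, i);
        printf("%.11s-err\n", name);
        printf("%.11s-notify\n", name);
        return 0;
    }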
| @@ -1662,10 +1535,12 @@ static int enic_dev_notify_set(struct enic *enic) | |||
| 1662 | spin_lock(&enic->devcmd_lock); | 1535 | spin_lock(&enic->devcmd_lock); |
| 1663 | switch (vnic_dev_get_intr_mode(enic->vdev)) { | 1536 | switch (vnic_dev_get_intr_mode(enic->vdev)) { |
| 1664 | case VNIC_DEV_INTR_MODE_INTX: | 1537 | case VNIC_DEV_INTR_MODE_INTX: |
| 1665 | err = vnic_dev_notify_set(enic->vdev, ENIC_INTX_NOTIFY); | 1538 | err = vnic_dev_notify_set(enic->vdev, |
| 1539 | enic_legacy_notify_intr()); | ||
| 1666 | break; | 1540 | break; |
| 1667 | case VNIC_DEV_INTR_MODE_MSIX: | 1541 | case VNIC_DEV_INTR_MODE_MSIX: |
| 1668 | err = vnic_dev_notify_set(enic->vdev, ENIC_MSIX_NOTIFY); | 1542 | err = vnic_dev_notify_set(enic->vdev, |
| 1543 | enic_msix_notify_intr(enic)); | ||
| 1669 | break; | 1544 | break; |
| 1670 | default: | 1545 | default: |
| 1671 | err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */); | 1546 | err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */); |
| @@ -1676,39 +1551,6 @@ static int enic_dev_notify_set(struct enic *enic) | |||
| 1676 | return err; | 1551 | return err; |
| 1677 | } | 1552 | } |
| 1678 | 1553 | ||
| 1679 | static int enic_dev_notify_unset(struct enic *enic) | ||
| 1680 | { | ||
| 1681 | int err; | ||
| 1682 | |||
| 1683 | spin_lock(&enic->devcmd_lock); | ||
| 1684 | err = vnic_dev_notify_unset(enic->vdev); | ||
| 1685 | spin_unlock(&enic->devcmd_lock); | ||
| 1686 | |||
| 1687 | return err; | ||
| 1688 | } | ||
| 1689 | |||
| 1690 | static int enic_dev_enable(struct enic *enic) | ||
| 1691 | { | ||
| 1692 | int err; | ||
| 1693 | |||
| 1694 | spin_lock(&enic->devcmd_lock); | ||
| 1695 | err = vnic_dev_enable(enic->vdev); | ||
| 1696 | spin_unlock(&enic->devcmd_lock); | ||
| 1697 | |||
| 1698 | return err; | ||
| 1699 | } | ||
| 1700 | |||
| 1701 | static int enic_dev_disable(struct enic *enic) | ||
| 1702 | { | ||
| 1703 | int err; | ||
| 1704 | |||
| 1705 | spin_lock(&enic->devcmd_lock); | ||
| 1706 | err = vnic_dev_disable(enic->vdev); | ||
| 1707 | spin_unlock(&enic->devcmd_lock); | ||
| 1708 | |||
| 1709 | return err; | ||
| 1710 | } | ||
| 1711 | |||
| 1712 | static void enic_notify_timer_start(struct enic *enic) | 1554 | static void enic_notify_timer_start(struct enic *enic) |
| 1713 | { | 1555 | { |
| 1714 | switch (vnic_dev_get_intr_mode(enic->vdev)) { | 1556 | switch (vnic_dev_get_intr_mode(enic->vdev)) { |
| @@ -1742,7 +1584,7 @@ static int enic_open(struct net_device *netdev) | |||
| 1742 | } | 1584 | } |
| 1743 | 1585 | ||
| 1744 | for (i = 0; i < enic->rq_count; i++) { | 1586 | for (i = 0; i < enic->rq_count; i++) { |
| 1745 | vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf); | 1587 | vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf); |
| 1746 | /* Need at least one buffer on ring to get going */ | 1588 | /* Need at least one buffer on ring to get going */ |
| 1747 | if (vnic_rq_desc_used(&enic->rq[i]) == 0) { | 1589 | if (vnic_rq_desc_used(&enic->rq[i]) == 0) { |
| 1748 | netdev_err(netdev, "Unable to alloc receive buffers\n"); | 1590 | netdev_err(netdev, "Unable to alloc receive buffers\n"); |
| @@ -1756,11 +1598,17 @@ static int enic_open(struct net_device *netdev) | |||
| 1756 | for (i = 0; i < enic->rq_count; i++) | 1598 | for (i = 0; i < enic->rq_count; i++) |
| 1757 | vnic_rq_enable(&enic->rq[i]); | 1599 | vnic_rq_enable(&enic->rq[i]); |
| 1758 | 1600 | ||
| 1759 | enic_dev_add_station_addr(enic); | 1601 | if (enic_is_dynamic(enic) && !is_zero_ether_addr(enic->pp.mac_addr)) |
| 1760 | enic_set_multicast_list(netdev); | 1602 | enic_dev_add_addr(enic, enic->pp.mac_addr); |
| 1603 | else | ||
| 1604 | enic_dev_add_station_addr(enic); | ||
| 1605 | enic_set_rx_mode(netdev); | ||
| 1761 | 1606 | ||
| 1762 | netif_wake_queue(netdev); | 1607 | netif_wake_queue(netdev); |
| 1763 | napi_enable(&enic->napi); | 1608 | |
| 1609 | for (i = 0; i < enic->rq_count; i++) | ||
| 1610 | napi_enable(&enic->napi[i]); | ||
| 1611 | |||
| 1764 | enic_dev_enable(enic); | 1612 | enic_dev_enable(enic); |
| 1765 | 1613 | ||
| 1766 | for (i = 0; i < enic->intr_count; i++) | 1614 | for (i = 0; i < enic->intr_count; i++) |
| @@ -1795,10 +1643,16 @@ static int enic_stop(struct net_device *netdev) | |||
| 1795 | del_timer_sync(&enic->notify_timer); | 1643 | del_timer_sync(&enic->notify_timer); |
| 1796 | 1644 | ||
| 1797 | enic_dev_disable(enic); | 1645 | enic_dev_disable(enic); |
| 1798 | napi_disable(&enic->napi); | 1646 | |
| 1647 | for (i = 0; i < enic->rq_count; i++) | ||
| 1648 | napi_disable(&enic->napi[i]); | ||
| 1649 | |||
| 1799 | netif_carrier_off(netdev); | 1650 | netif_carrier_off(netdev); |
| 1800 | netif_tx_disable(netdev); | 1651 | netif_tx_disable(netdev); |
| 1801 | enic_dev_del_station_addr(enic); | 1652 | if (enic_is_dynamic(enic) && !is_zero_ether_addr(enic->pp.mac_addr)) |
| 1653 | enic_dev_del_addr(enic, enic->pp.mac_addr); | ||
| 1654 | else | ||
| 1655 | enic_dev_del_station_addr(enic); | ||
| 1802 | 1656 | ||
| 1803 | for (i = 0; i < enic->wq_count; i++) { | 1657 | for (i = 0; i < enic->wq_count; i++) { |
| 1804 | err = vnic_wq_disable(&enic->wq[i]); | 1658 | err = vnic_wq_disable(&enic->wq[i]); |
| @@ -1855,11 +1709,17 @@ static void enic_poll_controller(struct net_device *netdev) | |||
| 1855 | { | 1709 | { |
| 1856 | struct enic *enic = netdev_priv(netdev); | 1710 | struct enic *enic = netdev_priv(netdev); |
| 1857 | struct vnic_dev *vdev = enic->vdev; | 1711 | struct vnic_dev *vdev = enic->vdev; |
| 1712 | unsigned int i, intr; | ||
| 1858 | 1713 | ||
| 1859 | switch (vnic_dev_get_intr_mode(vdev)) { | 1714 | switch (vnic_dev_get_intr_mode(vdev)) { |
| 1860 | case VNIC_DEV_INTR_MODE_MSIX: | 1715 | case VNIC_DEV_INTR_MODE_MSIX: |
| 1861 | enic_isr_msix_rq(enic->pdev->irq, enic); | 1716 | for (i = 0; i < enic->rq_count; i++) { |
| 1862 | enic_isr_msix_wq(enic->pdev->irq, enic); | 1717 | intr = enic_msix_rq_intr(enic, i); |
| 1718 | enic_isr_msix_rq(enic->msix_entry[intr].vector, | ||
| 1719 | &enic->napi[i]); | ||
| 1720 | } | ||
| 1721 | intr = enic_msix_wq_intr(enic, i); | ||
| 1722 | enic_isr_msix_wq(enic->msix_entry[intr].vector, enic); | ||
| 1863 | break; | 1723 | break; |
| 1864 | case VNIC_DEV_INTR_MODE_MSI: | 1724 | case VNIC_DEV_INTR_MODE_MSI: |
| 1865 | enic_isr_msi(enic->pdev->irq, enic); | 1725 | enic_isr_msi(enic->pdev->irq, enic); |
| @@ -1934,54 +1794,114 @@ static int enic_dev_hang_reset(struct enic *enic) | |||
| 1934 | return err; | 1794 | return err; |
| 1935 | } | 1795 | } |
| 1936 | 1796 | ||
| 1937 | static int enic_set_niccfg(struct enic *enic) | 1797 | static int enic_set_rsskey(struct enic *enic) |
| 1938 | { | 1798 | { |
| 1939 | const u8 rss_default_cpu = 0; | 1799 | dma_addr_t rss_key_buf_pa; |
| 1940 | const u8 rss_hash_type = 0; | 1800 | union vnic_rss_key *rss_key_buf_va = NULL; |
| 1941 | const u8 rss_hash_bits = 0; | 1801 | union vnic_rss_key rss_key = { |
| 1942 | const u8 rss_base_cpu = 0; | 1802 | .key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101}, |
| 1943 | const u8 rss_enable = 0; | 1803 | .key[1].b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101}, |
| 1944 | const u8 tso_ipid_split_en = 0; | 1804 | .key[2].b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115}, |
| 1945 | const u8 ig_vlan_strip_en = 1; | 1805 | .key[3].b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108}, |
| 1806 | }; | ||
| 1946 | int err; | 1807 | int err; |
| 1947 | 1808 | ||
| 1948 | /* Enable VLAN tag stripping. RSS not enabled (yet). | 1809 | rss_key_buf_va = pci_alloc_consistent(enic->pdev, |
| 1949 | */ | 1810 | sizeof(union vnic_rss_key), &rss_key_buf_pa); |
| 1811 | if (!rss_key_buf_va) | ||
| 1812 | return -ENOMEM; | ||
| 1813 | |||
| 1814 | memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key)); | ||
| 1950 | 1815 | ||
| 1951 | spin_lock(&enic->devcmd_lock); | 1816 | spin_lock(&enic->devcmd_lock); |
| 1952 | err = enic_set_nic_cfg(enic, | 1817 | err = enic_set_rss_key(enic, |
| 1953 | rss_default_cpu, rss_hash_type, | 1818 | rss_key_buf_pa, |
| 1954 | rss_hash_bits, rss_base_cpu, | 1819 | sizeof(union vnic_rss_key)); |
| 1955 | rss_enable, tso_ipid_split_en, | ||
| 1956 | ig_vlan_strip_en); | ||
| 1957 | spin_unlock(&enic->devcmd_lock); | 1820 | spin_unlock(&enic->devcmd_lock); |
| 1958 | 1821 | ||
| 1822 | pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key), | ||
| 1823 | rss_key_buf_va, rss_key_buf_pa); | ||
| 1824 | |||
| 1959 | return err; | 1825 | return err; |
| 1960 | } | 1826 | } |
| 1961 | 1827 | ||
| 1962 | static int enic_dev_hang_notify(struct enic *enic) | 1828 | static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits) |
| 1963 | { | 1829 | { |
| 1830 | dma_addr_t rss_cpu_buf_pa; | ||
| 1831 | union vnic_rss_cpu *rss_cpu_buf_va = NULL; | ||
| 1832 | unsigned int i; | ||
| 1964 | int err; | 1833 | int err; |
| 1965 | 1834 | ||
| 1835 | rss_cpu_buf_va = pci_alloc_consistent(enic->pdev, | ||
| 1836 | sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa); | ||
| 1837 | if (!rss_cpu_buf_va) | ||
| 1838 | return -ENOMEM; | ||
| 1839 | |||
| 1840 | for (i = 0; i < (1 << rss_hash_bits); i++) | ||
| 1841 | (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count; | ||
| 1842 | |||
| 1966 | spin_lock(&enic->devcmd_lock); | 1843 | spin_lock(&enic->devcmd_lock); |
| 1967 | err = vnic_dev_hang_notify(enic->vdev); | 1844 | err = enic_set_rss_cpu(enic, |
| 1845 | rss_cpu_buf_pa, | ||
| 1846 | sizeof(union vnic_rss_cpu)); | ||
| 1968 | spin_unlock(&enic->devcmd_lock); | 1847 | spin_unlock(&enic->devcmd_lock); |
| 1969 | 1848 | ||
| 1849 | pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu), | ||
| 1850 | rss_cpu_buf_va, rss_cpu_buf_pa); | ||
| 1851 | |||
| 1970 | return err; | 1852 | return err; |
| 1971 | } | 1853 | } |
| 1972 | 1854 | ||
| 1973 | int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic) | 1855 | static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu, |
| 1856 | u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable) | ||
| 1974 | { | 1857 | { |
| 1858 | const u8 tso_ipid_split_en = 0; | ||
| 1859 | const u8 ig_vlan_strip_en = 1; | ||
| 1975 | int err; | 1860 | int err; |
| 1976 | 1861 | ||
| 1862 | /* Enable VLAN tag stripping. | ||
| 1863 | */ | ||
| 1864 | |||
| 1977 | spin_lock(&enic->devcmd_lock); | 1865 | spin_lock(&enic->devcmd_lock); |
| 1978 | err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev, | 1866 | err = enic_set_nic_cfg(enic, |
| 1979 | IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN); | 1867 | rss_default_cpu, rss_hash_type, |
| 1868 | rss_hash_bits, rss_base_cpu, | ||
| 1869 | rss_enable, tso_ipid_split_en, | ||
| 1870 | ig_vlan_strip_en); | ||
| 1980 | spin_unlock(&enic->devcmd_lock); | 1871 | spin_unlock(&enic->devcmd_lock); |
| 1981 | 1872 | ||
| 1982 | return err; | 1873 | return err; |
| 1983 | } | 1874 | } |
| 1984 | 1875 | ||
| 1876 | static int enic_set_rss_nic_cfg(struct enic *enic) | ||
| 1877 | { | ||
| 1878 | struct device *dev = enic_get_dev(enic); | ||
| 1879 | const u8 rss_default_cpu = 0; | ||
| 1880 | const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 | | ||
| 1881 | NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 | | ||
| 1882 | NIC_CFG_RSS_HASH_TYPE_IPV6 | | ||
| 1883 | NIC_CFG_RSS_HASH_TYPE_TCP_IPV6; | ||
| 1884 | const u8 rss_hash_bits = 7; | ||
| 1885 | const u8 rss_base_cpu = 0; | ||
| 1886 | u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1); | ||
| 1887 | |||
| 1888 | if (rss_enable) { | ||
| 1889 | if (!enic_set_rsskey(enic)) { | ||
| 1890 | if (enic_set_rsscpu(enic, rss_hash_bits)) { | ||
| 1891 | rss_enable = 0; | ||
| 1892 | dev_warn(dev, "RSS disabled, " | ||
| 1893 | "Failed to set RSS cpu indirection table."); | ||
| 1894 | } | ||
| 1895 | } else { | ||
| 1896 | rss_enable = 0; | ||
| 1897 | dev_warn(dev, "RSS disabled, Failed to set RSS key.\n"); | ||
| 1898 | } | ||
| 1899 | } | ||
| 1900 | |||
| 1901 | return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type, | ||
| 1902 | rss_hash_bits, rss_base_cpu, rss_enable); | ||
| 1903 | } | ||
| 1904 | |||
| 1985 | static void enic_reset(struct work_struct *work) | 1905 | static void enic_reset(struct work_struct *work) |
| 1986 | { | 1906 | { |
| 1987 | struct enic *enic = container_of(work, struct enic, reset); | 1907 | struct enic *enic = container_of(work, struct enic, reset); |
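The RSS plumbing added in this hunk loads a 40-byte hash key through a DMA-consistent bounce buffer and then fills the CPU indirection table: with rss_hash_bits = 7 there are 1 << 7 = 128 entries, packed four single-byte entries per word via cpu[i/4].b[i%4], and each entry is assigned round-robin with i % enic->rq_count. The standalone sketch below only reproduces that indirection arithmetic; the queue count and the printed buckets are illustrative assumptions, not driver values.

```c
#include <stdio.h>

#define RSS_HASH_BITS 7                      /* matches rss_hash_bits above */
#define RSS_TABLE_LEN (1 << RSS_HASH_BITS)   /* 128 indirection entries */

int main(void)
{
    unsigned int rq_count = 4;               /* hypothetical RQ count */
    unsigned char table[RSS_TABLE_LEN];

    /* Same spread as enic_set_rsscpu(): entry i maps to receive queue
     * i % rq_count. */
    for (unsigned int i = 0; i < RSS_TABLE_LEN; i++)
        table[i] = (unsigned char)(i % rq_count);

    /* A receive hash is reduced to RSS_HASH_BITS bits and used as an index,
     * so flows land on RQs 0..rq_count-1 in equal proportion. */
    for (unsigned int i = 0; i < 8; i++)
        printf("bucket %u -> rq %u\n", i, (unsigned int)table[i]);
    return 0;
}
```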
| @@ -1994,9 +1914,9 @@ static void enic_reset(struct work_struct *work) | |||
| 1994 | enic_dev_hang_notify(enic); | 1914 | enic_dev_hang_notify(enic); |
| 1995 | enic_stop(enic->netdev); | 1915 | enic_stop(enic->netdev); |
| 1996 | enic_dev_hang_reset(enic); | 1916 | enic_dev_hang_reset(enic); |
| 1997 | enic_reset_multicast_list(enic); | 1917 | enic_reset_addr_lists(enic); |
| 1998 | enic_init_vnic_resources(enic); | 1918 | enic_init_vnic_resources(enic); |
| 1999 | enic_set_niccfg(enic); | 1919 | enic_set_rss_nic_cfg(enic); |
| 2000 | enic_dev_set_ig_vlan_rewrite_mode(enic); | 1920 | enic_dev_set_ig_vlan_rewrite_mode(enic); |
| 2001 | enic_open(enic->netdev); | 1921 | enic_open(enic->netdev); |
| 2002 | 1922 | ||
| @@ -2005,12 +1925,12 @@ static void enic_reset(struct work_struct *work) | |||
| 2005 | 1925 | ||
| 2006 | static int enic_set_intr_mode(struct enic *enic) | 1926 | static int enic_set_intr_mode(struct enic *enic) |
| 2007 | { | 1927 | { |
| 2008 | unsigned int n = 1; | 1928 | unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX); |
| 2009 | unsigned int m = 1; | 1929 | unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX); |
| 2010 | unsigned int i; | 1930 | unsigned int i; |
| 2011 | 1931 | ||
| 2012 | /* Set interrupt mode (INTx, MSI, MSI-X) depending | 1932 | /* Set interrupt mode (INTx, MSI, MSI-X) depending |
| 2013 | * system capabilities. | 1933 | * on system capabilities. |
| 2014 | * | 1934 | * |
| 2015 | * Try MSI-X first | 1935 | * Try MSI-X first |
| 2016 | * | 1936 | * |
| @@ -2023,21 +1943,47 @@ static int enic_set_intr_mode(struct enic *enic) | |||
| 2023 | for (i = 0; i < n + m + 2; i++) | 1943 | for (i = 0; i < n + m + 2; i++) |
| 2024 | enic->msix_entry[i].entry = i; | 1944 | enic->msix_entry[i].entry = i; |
| 2025 | 1945 | ||
| 2026 | if (enic->config.intr_mode < 1 && | 1946 | /* Use multiple RQs if RSS is enabled |
| 1947 | */ | ||
| 1948 | |||
| 1949 | if (ENIC_SETTING(enic, RSS) && | ||
| 1950 | enic->config.intr_mode < 1 && | ||
| 2027 | enic->rq_count >= n && | 1951 | enic->rq_count >= n && |
| 2028 | enic->wq_count >= m && | 1952 | enic->wq_count >= m && |
| 2029 | enic->cq_count >= n + m && | 1953 | enic->cq_count >= n + m && |
| 2030 | enic->intr_count >= n + m + 2 && | 1954 | enic->intr_count >= n + m + 2) { |
| 2031 | !pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) { | ||
| 2032 | 1955 | ||
| 2033 | enic->rq_count = n; | 1956 | if (!pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) { |
| 2034 | enic->wq_count = m; | ||
| 2035 | enic->cq_count = n + m; | ||
| 2036 | enic->intr_count = n + m + 2; | ||
| 2037 | 1957 | ||
| 2038 | vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX); | 1958 | enic->rq_count = n; |
| 1959 | enic->wq_count = m; | ||
| 1960 | enic->cq_count = n + m; | ||
| 1961 | enic->intr_count = n + m + 2; | ||
| 2039 | 1962 | ||
| 2040 | return 0; | 1963 | vnic_dev_set_intr_mode(enic->vdev, |
| 1964 | VNIC_DEV_INTR_MODE_MSIX); | ||
| 1965 | |||
| 1966 | return 0; | ||
| 1967 | } | ||
| 1968 | } | ||
| 1969 | |||
| 1970 | if (enic->config.intr_mode < 1 && | ||
| 1971 | enic->rq_count >= 1 && | ||
| 1972 | enic->wq_count >= m && | ||
| 1973 | enic->cq_count >= 1 + m && | ||
| 1974 | enic->intr_count >= 1 + m + 2) { | ||
| 1975 | if (!pci_enable_msix(enic->pdev, enic->msix_entry, 1 + m + 2)) { | ||
| 1976 | |||
| 1977 | enic->rq_count = 1; | ||
| 1978 | enic->wq_count = m; | ||
| 1979 | enic->cq_count = 1 + m; | ||
| 1980 | enic->intr_count = 1 + m + 2; | ||
| 1981 | |||
| 1982 | vnic_dev_set_intr_mode(enic->vdev, | ||
| 1983 | VNIC_DEV_INTR_MODE_MSIX); | ||
| 1984 | |||
| 1985 | return 0; | ||
| 1986 | } | ||
| 2041 | } | 1987 | } |
| 2042 | 1988 | ||
| 2043 | /* Next try MSI | 1989 | /* Next try MSI |
| @@ -2113,7 +2059,8 @@ static const struct net_device_ops enic_netdev_dynamic_ops = { | |||
| 2113 | .ndo_start_xmit = enic_hard_start_xmit, | 2059 | .ndo_start_xmit = enic_hard_start_xmit, |
| 2114 | .ndo_get_stats = enic_get_stats, | 2060 | .ndo_get_stats = enic_get_stats, |
| 2115 | .ndo_validate_addr = eth_validate_addr, | 2061 | .ndo_validate_addr = eth_validate_addr, |
| 2116 | .ndo_set_multicast_list = enic_set_multicast_list, | 2062 | .ndo_set_rx_mode = enic_set_rx_mode, |
| 2063 | .ndo_set_multicast_list = enic_set_rx_mode, | ||
| 2117 | .ndo_set_mac_address = enic_set_mac_address_dynamic, | 2064 | .ndo_set_mac_address = enic_set_mac_address_dynamic, |
| 2118 | .ndo_change_mtu = enic_change_mtu, | 2065 | .ndo_change_mtu = enic_change_mtu, |
| 2119 | .ndo_vlan_rx_register = enic_vlan_rx_register, | 2066 | .ndo_vlan_rx_register = enic_vlan_rx_register, |
| @@ -2122,6 +2069,7 @@ static const struct net_device_ops enic_netdev_dynamic_ops = { | |||
| 2122 | .ndo_tx_timeout = enic_tx_timeout, | 2069 | .ndo_tx_timeout = enic_tx_timeout, |
| 2123 | .ndo_set_vf_port = enic_set_vf_port, | 2070 | .ndo_set_vf_port = enic_set_vf_port, |
| 2124 | .ndo_get_vf_port = enic_get_vf_port, | 2071 | .ndo_get_vf_port = enic_get_vf_port, |
| 2072 | .ndo_set_vf_mac = enic_set_vf_mac, | ||
| 2125 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2073 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 2126 | .ndo_poll_controller = enic_poll_controller, | 2074 | .ndo_poll_controller = enic_poll_controller, |
| 2127 | #endif | 2075 | #endif |
| @@ -2134,7 +2082,8 @@ static const struct net_device_ops enic_netdev_ops = { | |||
| 2134 | .ndo_get_stats = enic_get_stats, | 2082 | .ndo_get_stats = enic_get_stats, |
| 2135 | .ndo_validate_addr = eth_validate_addr, | 2083 | .ndo_validate_addr = eth_validate_addr, |
| 2136 | .ndo_set_mac_address = enic_set_mac_address, | 2084 | .ndo_set_mac_address = enic_set_mac_address, |
| 2137 | .ndo_set_multicast_list = enic_set_multicast_list, | 2085 | .ndo_set_rx_mode = enic_set_rx_mode, |
| 2086 | .ndo_set_multicast_list = enic_set_rx_mode, | ||
| 2138 | .ndo_change_mtu = enic_change_mtu, | 2087 | .ndo_change_mtu = enic_change_mtu, |
| 2139 | .ndo_vlan_rx_register = enic_vlan_rx_register, | 2088 | .ndo_vlan_rx_register = enic_vlan_rx_register, |
| 2140 | .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid, | 2089 | .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid, |
| @@ -2145,28 +2094,22 @@ static const struct net_device_ops enic_netdev_ops = { | |||
| 2145 | #endif | 2094 | #endif |
| 2146 | }; | 2095 | }; |
| 2147 | 2096 | ||
| 2148 | void enic_dev_deinit(struct enic *enic) | 2097 | static void enic_dev_deinit(struct enic *enic) |
| 2149 | { | ||
| 2150 | netif_napi_del(&enic->napi); | ||
| 2151 | enic_free_vnic_resources(enic); | ||
| 2152 | enic_clear_intr_mode(enic); | ||
| 2153 | } | ||
| 2154 | |||
| 2155 | static int enic_dev_stats_clear(struct enic *enic) | ||
| 2156 | { | 2098 | { |
| 2157 | int err; | 2099 | unsigned int i; |
| 2158 | 2100 | ||
| 2159 | spin_lock(&enic->devcmd_lock); | 2101 | for (i = 0; i < enic->rq_count; i++) |
| 2160 | err = vnic_dev_stats_clear(enic->vdev); | 2102 | netif_napi_del(&enic->napi[i]); |
| 2161 | spin_unlock(&enic->devcmd_lock); | ||
| 2162 | 2103 | ||
| 2163 | return err; | 2104 | enic_free_vnic_resources(enic); |
| 2105 | enic_clear_intr_mode(enic); | ||
| 2164 | } | 2106 | } |
| 2165 | 2107 | ||
| 2166 | int enic_dev_init(struct enic *enic) | 2108 | static int enic_dev_init(struct enic *enic) |
| 2167 | { | 2109 | { |
| 2168 | struct device *dev = enic_get_dev(enic); | 2110 | struct device *dev = enic_get_dev(enic); |
| 2169 | struct net_device *netdev = enic->netdev; | 2111 | struct net_device *netdev = enic->netdev; |
| 2112 | unsigned int i; | ||
| 2170 | int err; | 2113 | int err; |
| 2171 | 2114 | ||
| 2172 | /* Get vNIC configuration | 2115 | /* Get vNIC configuration |
| @@ -2205,35 +2148,20 @@ int enic_dev_init(struct enic *enic) | |||
| 2205 | 2148 | ||
| 2206 | enic_init_vnic_resources(enic); | 2149 | enic_init_vnic_resources(enic); |
| 2207 | 2150 | ||
| 2208 | /* Clear LIF stats | 2151 | err = enic_set_rss_nic_cfg(enic); |
| 2209 | */ | ||
| 2210 | enic_dev_stats_clear(enic); | ||
| 2211 | |||
| 2212 | err = enic_set_rq_alloc_buf(enic); | ||
| 2213 | if (err) { | ||
| 2214 | dev_err(dev, "Failed to set RQ buffer allocator, aborting\n"); | ||
| 2215 | goto err_out_free_vnic_resources; | ||
| 2216 | } | ||
| 2217 | |||
| 2218 | err = enic_set_niccfg(enic); | ||
| 2219 | if (err) { | 2152 | if (err) { |
| 2220 | dev_err(dev, "Failed to config nic, aborting\n"); | 2153 | dev_err(dev, "Failed to config nic, aborting\n"); |
| 2221 | goto err_out_free_vnic_resources; | 2154 | goto err_out_free_vnic_resources; |
| 2222 | } | 2155 | } |
| 2223 | 2156 | ||
| 2224 | err = enic_dev_set_ig_vlan_rewrite_mode(enic); | ||
| 2225 | if (err) { | ||
| 2226 | netdev_err(netdev, | ||
| 2227 | "Failed to set ingress vlan rewrite mode, aborting.\n"); | ||
| 2228 | goto err_out_free_vnic_resources; | ||
| 2229 | } | ||
| 2230 | |||
| 2231 | switch (vnic_dev_get_intr_mode(enic->vdev)) { | 2157 | switch (vnic_dev_get_intr_mode(enic->vdev)) { |
| 2232 | default: | 2158 | default: |
| 2233 | netif_napi_add(netdev, &enic->napi, enic_poll, 64); | 2159 | netif_napi_add(netdev, &enic->napi[0], enic_poll, 64); |
| 2234 | break; | 2160 | break; |
| 2235 | case VNIC_DEV_INTR_MODE_MSIX: | 2161 | case VNIC_DEV_INTR_MODE_MSIX: |
| 2236 | netif_napi_add(netdev, &enic->napi, enic_poll_msix, 64); | 2162 | for (i = 0; i < enic->rq_count; i++) |
| 2163 | netif_napi_add(netdev, &enic->napi[i], | ||
| 2164 | enic_poll_msix, 64); | ||
| 2237 | break; | 2165 | break; |
| 2238 | } | 2166 | } |
| 2239 | 2167 | ||
| @@ -2364,6 +2292,22 @@ static int __devinit enic_probe(struct pci_dev *pdev, | |||
| 2364 | goto err_out_vnic_unregister; | 2292 | goto err_out_vnic_unregister; |
| 2365 | } | 2293 | } |
| 2366 | 2294 | ||
| 2295 | /* Setup devcmd lock | ||
| 2296 | */ | ||
| 2297 | |||
| 2298 | spin_lock_init(&enic->devcmd_lock); | ||
| 2299 | |||
| 2300 | /* | ||
| 2301 | * Set ingress vlan rewrite mode before vnic initialization | ||
| 2302 | */ | ||
| 2303 | |||
| 2304 | err = enic_dev_set_ig_vlan_rewrite_mode(enic); | ||
| 2305 | if (err) { | ||
| 2306 | dev_err(dev, | ||
| 2307 | "Failed to set ingress vlan rewrite mode, aborting.\n"); | ||
| 2308 | goto err_out_dev_close; | ||
| 2309 | } | ||
| 2310 | |||
| 2367 | /* Issue device init to initialize the vnic-to-switch link. | 2311 | /* Issue device init to initialize the vnic-to-switch link. |
| 2368 | * We'll start with carrier off and wait for link UP | 2312 | * We'll start with carrier off and wait for link UP |
| 2369 | * notification later to turn on carrier. We don't need | 2313 | * notification later to turn on carrier. We don't need |
| @@ -2387,11 +2331,6 @@ static int __devinit enic_probe(struct pci_dev *pdev, | |||
| 2387 | } | 2331 | } |
| 2388 | } | 2332 | } |
| 2389 | 2333 | ||
| 2390 | /* Setup devcmd lock | ||
| 2391 | */ | ||
| 2392 | |||
| 2393 | spin_lock_init(&enic->devcmd_lock); | ||
| 2394 | |||
| 2395 | err = enic_dev_init(enic); | 2334 | err = enic_dev_init(enic); |
| 2396 | if (err) { | 2335 | if (err) { |
| 2397 | dev_err(dev, "Device initialization failed, aborting\n"); | 2336 | dev_err(dev, "Device initialization failed, aborting\n"); |
| @@ -2441,17 +2380,18 @@ static int __devinit enic_probe(struct pci_dev *pdev, | |||
| 2441 | dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag); | 2380 | dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag); |
| 2442 | } | 2381 | } |
| 2443 | if (ENIC_SETTING(enic, TXCSUM)) | 2382 | if (ENIC_SETTING(enic, TXCSUM)) |
| 2444 | netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; | 2383 | netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM; |
| 2445 | if (ENIC_SETTING(enic, TSO)) | 2384 | if (ENIC_SETTING(enic, TSO)) |
| 2446 | netdev->features |= NETIF_F_TSO | | 2385 | netdev->hw_features |= NETIF_F_TSO | |
| 2447 | NETIF_F_TSO6 | NETIF_F_TSO_ECN; | 2386 | NETIF_F_TSO6 | NETIF_F_TSO_ECN; |
| 2448 | if (ENIC_SETTING(enic, LRO)) | 2387 | if (ENIC_SETTING(enic, RXCSUM)) |
| 2449 | netdev->features |= NETIF_F_GRO; | 2388 | netdev->hw_features |= NETIF_F_RXCSUM; |
| 2389 | |||
| 2390 | netdev->features |= netdev->hw_features; | ||
| 2391 | |||
| 2450 | if (using_dac) | 2392 | if (using_dac) |
| 2451 | netdev->features |= NETIF_F_HIGHDMA; | 2393 | netdev->features |= NETIF_F_HIGHDMA; |
| 2452 | 2394 | ||
| 2453 | enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM); | ||
| 2454 | |||
| 2455 | err = register_netdev(netdev); | 2395 | err = register_netdev(netdev); |
| 2456 | if (err) { | 2396 | if (err) { |
| 2457 | dev_err(dev, "Cannot register net device, aborting\n"); | 2397 | dev_err(dev, "Cannot register net device, aborting\n"); |
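The probe path above now advertises SG/checksum, TSO and RX-checksum offloads in netdev->hw_features, the set that ethtool -K is allowed to toggle, and enables them all at once by folding hw_features into features; NETIF_F_HIGHDMA is set directly in features, presumably because it follows the DMA mask rather than a user choice. The toy model below illustrates that split with made-up flag values; it is not kernel code.

```c
#include <stdio.h>
#include <stdint.h>

/* Toy flag values; the real NETIF_F_* bits live in the kernel headers. */
#define F_SG      (1u << 0)
#define F_HW_CSUM (1u << 1)
#define F_TSO     (1u << 2)
#define F_RXCSUM  (1u << 3)
#define F_HIGHDMA (1u << 4)   /* tied to the DMA mask, not a user choice */

int main(void)
{
    uint32_t hw_features = F_SG | F_HW_CSUM | F_TSO | F_RXCSUM;
    uint32_t features    = hw_features | F_HIGHDMA;

    /* Turning an offload off (e.g. TSO) clears the bit from features but
     * leaves hw_features alone, so it can be re-enabled later. */
    features &= ~F_TSO;
    printf("hw_features=0x%x features=0x%x\n",
           (unsigned int)hw_features, (unsigned int)features);
    return 0;
}
```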
| @@ -2486,7 +2426,7 @@ static void __devexit enic_remove(struct pci_dev *pdev) | |||
| 2486 | if (netdev) { | 2426 | if (netdev) { |
| 2487 | struct enic *enic = netdev_priv(netdev); | 2427 | struct enic *enic = netdev_priv(netdev); |
| 2488 | 2428 | ||
| 2489 | flush_scheduled_work(); | 2429 | cancel_work_sync(&enic->reset); |
| 2490 | unregister_netdev(netdev); | 2430 | unregister_netdev(netdev); |
| 2491 | enic_dev_deinit(enic); | 2431 | enic_dev_deinit(enic); |
| 2492 | vnic_dev_close(enic->vdev); | 2432 | vnic_dev_close(enic->vdev); |
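enic_remove() now cancels only the driver's own reset work rather than calling flush_scheduled_work(), which waits on every item queued to the shared system workqueue. A minimal kernel-style sketch of the same teardown pattern; the structure and function names here are illustrative, not taken from the driver.

```c
#include <linux/workqueue.h>

/* Illustrative names only; not the driver's own structures. */
struct my_priv {
    struct work_struct reset;
};

static void my_remove(struct my_priv *priv)
{
    /* Cancel this device's pending reset work and wait for a running
     * handler to finish; unrelated items on the shared workqueue are
     * left untouched, unlike flush_scheduled_work(). */
    cancel_work_sync(&priv->reset);
}
```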
