diff options
| author | Vipul Pandya <vipul@chelsio.com> | 2012-05-18 05:59:26 -0400 |
|---|---|---|
| committer | Roland Dreier <roland@purestorage.com> | 2012-05-18 16:22:28 -0400 |
| commit | 3069ee9bc451d90a2fa8c3c7ef2774744d9d3bb0 (patch) | |
| tree | d07c27b92130db1e5c76cd6a3d38899c00900325 | |
| parent | 8caa1e8446948afefdb4dd2050a465f6da777652 (diff) | |
cxgb4: DB Drop Recovery for RDMA and LLD queues
Recover LLD EQs for DB drop interrupts. This includes adding a new
db_lock, a spin lock disabling BH too, used by the recovery thread and
the ring_tx_db() paths to allow db drop recovery.
Clean up initial DB avoidance code.
Add read_eq_indices() - this allows the LLD to use the PCIe mw to
efficiently read hw eq contexts.
Add cxgb4_sync_txq_pidx() - called by iw_cxgb4 to sync up the sw/hw
pidx value.
Add flush_eq_cache() and cxgb4_flush_eq_cache(). This allows iw_cxgb4
to flush the sge eq context cache before beginning db drop recovery.
Add module parameter, dbfifo_int_thresh, to allow tuning the db
interrupt threshold value.
Add dbfifo_int_thresh to cxgb4_lld_info so iw_cxgb4 knows the threshold.
Add module parameter, dbfifo_drain_delay, to allow tuning the amount
of time delay between DB FULL and EMPTY upcalls to iw_cxgb4.
Signed-off-by: Vipul Pandya <vipul@chelsio.com>
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
| -rw-r--r-- | drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 16 | ||||
| -rw-r--r-- | drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 214 | ||||
| -rw-r--r-- | drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 4 | ||||
| -rw-r--r-- | drivers/net/ethernet/chelsio/cxgb4/sge.c | 20 | ||||
| -rw-r--r-- | drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 53 | ||||
| -rw-r--r-- | drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 15 |
6 files changed, 280 insertions, 42 deletions
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 5f3c0a728e18..ec2dafe8ae5b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | |||
| @@ -51,6 +51,8 @@ | |||
| 51 | #define FW_VERSION_MINOR 1 | 51 | #define FW_VERSION_MINOR 1 |
| 52 | #define FW_VERSION_MICRO 0 | 52 | #define FW_VERSION_MICRO 0 |
| 53 | 53 | ||
| 54 | #define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) | ||
| 55 | |||
| 54 | enum { | 56 | enum { |
| 55 | MAX_NPORTS = 4, /* max # of ports */ | 57 | MAX_NPORTS = 4, /* max # of ports */ |
| 56 | SERNUM_LEN = 24, /* Serial # length */ | 58 | SERNUM_LEN = 24, /* Serial # length */ |
| @@ -64,6 +66,15 @@ enum { | |||
| 64 | MEM_MC | 66 | MEM_MC |
| 65 | }; | 67 | }; |
| 66 | 68 | ||
| 69 | enum { | ||
| 70 | MEMWIN0_APERTURE = 65536, | ||
| 71 | MEMWIN0_BASE = 0x30000, | ||
| 72 | MEMWIN1_APERTURE = 32768, | ||
| 73 | MEMWIN1_BASE = 0x28000, | ||
| 74 | MEMWIN2_APERTURE = 2048, | ||
| 75 | MEMWIN2_BASE = 0x1b800, | ||
| 76 | }; | ||
| 77 | |||
| 67 | enum dev_master { | 78 | enum dev_master { |
| 68 | MASTER_CANT, | 79 | MASTER_CANT, |
| 69 | MASTER_MAY, | 80 | MASTER_MAY, |
| @@ -403,6 +414,9 @@ struct sge_txq { | |||
| 403 | struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */ | 414 | struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */ |
| 404 | struct sge_qstat *stat; /* queue status entry */ | 415 | struct sge_qstat *stat; /* queue status entry */ |
| 405 | dma_addr_t phys_addr; /* physical address of the ring */ | 416 | dma_addr_t phys_addr; /* physical address of the ring */ |
| 417 | spinlock_t db_lock; | ||
| 418 | int db_disabled; | ||
| 419 | unsigned short db_pidx; | ||
| 406 | }; | 420 | }; |
| 407 | 421 | ||
| 408 | struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */ | 422 | struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */ |
| @@ -475,6 +489,7 @@ struct adapter { | |||
| 475 | void __iomem *regs; | 489 | void __iomem *regs; |
| 476 | struct pci_dev *pdev; | 490 | struct pci_dev *pdev; |
| 477 | struct device *pdev_dev; | 491 | struct device *pdev_dev; |
| 492 | unsigned int mbox; | ||
| 478 | unsigned int fn; | 493 | unsigned int fn; |
| 479 | unsigned int flags; | 494 | unsigned int flags; |
| 480 | 495 | ||
| @@ -607,6 +622,7 @@ irqreturn_t t4_sge_intr_msix(int irq, void *cookie); | |||
| 607 | void t4_sge_init(struct adapter *adap); | 622 | void t4_sge_init(struct adapter *adap); |
| 608 | void t4_sge_start(struct adapter *adap); | 623 | void t4_sge_start(struct adapter *adap); |
| 609 | void t4_sge_stop(struct adapter *adap); | 624 | void t4_sge_stop(struct adapter *adap); |
| 625 | extern int dbfifo_int_thresh; | ||
| 610 | 626 | ||
| 611 | #define for_each_port(adapter, iter) \ | 627 | #define for_each_port(adapter, iter) \ |
| 612 | for (iter = 0; iter < (adapter)->params.nports; ++iter) | 628 | for (iter = 0; iter < (adapter)->params.nports; ++iter) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index c243f932099e..e1f96fbb48c1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | |||
| @@ -149,15 +149,6 @@ static unsigned int pfvfres_pmask(struct adapter *adapter, | |||
| 149 | #endif | 149 | #endif |
| 150 | 150 | ||
| 151 | enum { | 151 | enum { |
| 152 | MEMWIN0_APERTURE = 65536, | ||
| 153 | MEMWIN0_BASE = 0x30000, | ||
| 154 | MEMWIN1_APERTURE = 32768, | ||
| 155 | MEMWIN1_BASE = 0x28000, | ||
| 156 | MEMWIN2_APERTURE = 2048, | ||
| 157 | MEMWIN2_BASE = 0x1b800, | ||
| 158 | }; | ||
| 159 | |||
| 160 | enum { | ||
| 161 | MAX_TXQ_ENTRIES = 16384, | 152 | MAX_TXQ_ENTRIES = 16384, |
| 162 | MAX_CTRL_TXQ_ENTRIES = 1024, | 153 | MAX_CTRL_TXQ_ENTRIES = 1024, |
| 163 | MAX_RSPQ_ENTRIES = 16384, | 154 | MAX_RSPQ_ENTRIES = 16384, |
| @@ -371,6 +362,15 @@ static int set_addr_filters(const struct net_device *dev, bool sleep) | |||
| 371 | uhash | mhash, sleep); | 362 | uhash | mhash, sleep); |
| 372 | } | 363 | } |
| 373 | 364 | ||
| 365 | int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */ | ||
| 366 | module_param(dbfifo_int_thresh, int, 0644); | ||
| 367 | MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold"); | ||
| 368 | |||
| 369 | int dbfifo_drain_delay = 1000; /* usecs to sleep while draining the dbfifo */ | ||
| 370 | module_param(dbfifo_drain_delay, int, 0644); | ||
| 371 | MODULE_PARM_DESC(dbfifo_drain_delay, | ||
| 372 | "usecs to sleep while draining the dbfifo"); | ||
| 373 | |||
| 374 | /* | 374 | /* |
| 375 | * Set Rx properties of a port, such as promiscruity, address filters, and MTU. | 375 | * Set Rx properties of a port, such as promiscruity, address filters, and MTU. |
| 376 | * If @mtu is -1 it is left unchanged. | 376 | * If @mtu is -1 it is left unchanged. |
| @@ -389,6 +389,8 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) | |||
| 389 | return ret; | 389 | return ret; |
| 390 | } | 390 | } |
| 391 | 391 | ||
| 392 | static struct workqueue_struct *workq; | ||
| 393 | |||
| 392 | /** | 394 | /** |
| 393 | * link_start - enable a port | 395 | * link_start - enable a port |
| 394 | * @dev: the port to enable | 396 | * @dev: the port to enable |
| @@ -2196,7 +2198,7 @@ static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan, | |||
| 2196 | adap->tid_release_head = (void **)((uintptr_t)p | chan); | 2198 | adap->tid_release_head = (void **)((uintptr_t)p | chan); |
| 2197 | if (!adap->tid_release_task_busy) { | 2199 | if (!adap->tid_release_task_busy) { |
| 2198 | adap->tid_release_task_busy = true; | 2200 | adap->tid_release_task_busy = true; |
| 2199 | schedule_work(&adap->tid_release_task); | 2201 | queue_work(workq, &adap->tid_release_task); |
| 2200 | } | 2202 | } |
| 2201 | spin_unlock_bh(&adap->tid_release_lock); | 2203 | spin_unlock_bh(&adap->tid_release_lock); |
| 2202 | } | 2204 | } |
| @@ -2423,6 +2425,59 @@ void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask, | |||
| 2423 | } | 2425 | } |
| 2424 | EXPORT_SYMBOL(cxgb4_iscsi_init); | 2426 | EXPORT_SYMBOL(cxgb4_iscsi_init); |
| 2425 | 2427 | ||
| 2428 | int cxgb4_flush_eq_cache(struct net_device *dev) | ||
| 2429 | { | ||
| 2430 | struct adapter *adap = netdev2adap(dev); | ||
| 2431 | int ret; | ||
| 2432 | |||
| 2433 | ret = t4_fwaddrspace_write(adap, adap->mbox, | ||
| 2434 | 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000); | ||
| 2435 | return ret; | ||
| 2436 | } | ||
| 2437 | EXPORT_SYMBOL(cxgb4_flush_eq_cache); | ||
| 2438 | |||
| 2439 | static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx) | ||
| 2440 | { | ||
| 2441 | u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8; | ||
| 2442 | __be64 indices; | ||
| 2443 | int ret; | ||
| 2444 | |||
| 2445 | ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8); | ||
| 2446 | if (!ret) { | ||
| 2447 | indices = be64_to_cpu(indices); | ||
| 2448 | *cidx = (indices >> 25) & 0xffff; | ||
| 2449 | *pidx = (indices >> 9) & 0xffff; | ||
| 2450 | } | ||
| 2451 | return ret; | ||
| 2452 | } | ||
| 2453 | |||
| 2454 | int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, | ||
| 2455 | u16 size) | ||
| 2456 | { | ||
| 2457 | struct adapter *adap = netdev2adap(dev); | ||
| 2458 | u16 hw_pidx, hw_cidx; | ||
| 2459 | int ret; | ||
| 2460 | |||
| 2461 | ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx); | ||
| 2462 | if (ret) | ||
| 2463 | goto out; | ||
| 2464 | |||
| 2465 | if (pidx != hw_pidx) { | ||
| 2466 | u16 delta; | ||
| 2467 | |||
| 2468 | if (pidx >= hw_pidx) | ||
| 2469 | delta = pidx - hw_pidx; | ||
| 2470 | else | ||
| 2471 | delta = size - hw_pidx + pidx; | ||
| 2472 | wmb(); | ||
| 2473 | t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL), | ||
| 2474 | V_QID(qid) | V_PIDX(delta)); | ||
| 2475 | } | ||
| 2476 | out: | ||
| 2477 | return ret; | ||
| 2478 | } | ||
| 2479 | EXPORT_SYMBOL(cxgb4_sync_txq_pidx); | ||
| 2480 | |||
| 2426 | static struct pci_driver cxgb4_driver; | 2481 | static struct pci_driver cxgb4_driver; |
| 2427 | 2482 | ||
| 2428 | static void check_neigh_update(struct neighbour *neigh) | 2483 | static void check_neigh_update(struct neighbour *neigh) |
| @@ -2456,6 +2511,95 @@ static struct notifier_block cxgb4_netevent_nb = { | |||
| 2456 | .notifier_call = netevent_cb | 2511 | .notifier_call = netevent_cb |
| 2457 | }; | 2512 | }; |
| 2458 | 2513 | ||
| 2514 | static void drain_db_fifo(struct adapter *adap, int usecs) | ||
| 2515 | { | ||
| 2516 | u32 v; | ||
| 2517 | |||
| 2518 | do { | ||
| 2519 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
| 2520 | schedule_timeout(usecs_to_jiffies(usecs)); | ||
| 2521 | v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); | ||
| 2522 | if (G_LP_COUNT(v) == 0 && G_HP_COUNT(v) == 0) | ||
| 2523 | break; | ||
| 2524 | } while (1); | ||
| 2525 | } | ||
| 2526 | |||
| 2527 | static void disable_txq_db(struct sge_txq *q) | ||
| 2528 | { | ||
| 2529 | spin_lock_irq(&q->db_lock); | ||
| 2530 | q->db_disabled = 1; | ||
| 2531 | spin_unlock_irq(&q->db_lock); | ||
| 2532 | } | ||
| 2533 | |||
| 2534 | static void enable_txq_db(struct sge_txq *q) | ||
| 2535 | { | ||
| 2536 | spin_lock_irq(&q->db_lock); | ||
| 2537 | q->db_disabled = 0; | ||
| 2538 | spin_unlock_irq(&q->db_lock); | ||
| 2539 | } | ||
| 2540 | |||
| 2541 | static void disable_dbs(struct adapter *adap) | ||
| 2542 | { | ||
| 2543 | int i; | ||
| 2544 | |||
| 2545 | for_each_ethrxq(&adap->sge, i) | ||
| 2546 | disable_txq_db(&adap->sge.ethtxq[i].q); | ||
| 2547 | for_each_ofldrxq(&adap->sge, i) | ||
| 2548 | disable_txq_db(&adap->sge.ofldtxq[i].q); | ||
| 2549 | for_each_port(adap, i) | ||
| 2550 | disable_txq_db(&adap->sge.ctrlq[i].q); | ||
| 2551 | } | ||
| 2552 | |||
| 2553 | static void enable_dbs(struct adapter *adap) | ||
| 2554 | { | ||
| 2555 | int i; | ||
| 2556 | |||
| 2557 | for_each_ethrxq(&adap->sge, i) | ||
| 2558 | enable_txq_db(&adap->sge.ethtxq[i].q); | ||
| 2559 | for_each_ofldrxq(&adap->sge, i) | ||
| 2560 | enable_txq_db(&adap->sge.ofldtxq[i].q); | ||
| 2561 | for_each_port(adap, i) | ||
| 2562 | enable_txq_db(&adap->sge.ctrlq[i].q); | ||
| 2563 | } | ||
| 2564 | |||
| 2565 | static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q) | ||
| 2566 | { | ||
| 2567 | u16 hw_pidx, hw_cidx; | ||
| 2568 | int ret; | ||
| 2569 | |||
| 2570 | spin_lock_bh(&q->db_lock); | ||
| 2571 | ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx); | ||
| 2572 | if (ret) | ||
| 2573 | goto out; | ||
| 2574 | if (q->db_pidx != hw_pidx) { | ||
| 2575 | u16 delta; | ||
| 2576 | |||
| 2577 | if (q->db_pidx >= hw_pidx) | ||
| 2578 | delta = q->db_pidx - hw_pidx; | ||
| 2579 | else | ||
| 2580 | delta = q->size - hw_pidx + q->db_pidx; | ||
| 2581 | wmb(); | ||
| 2582 | t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL), | ||
| 2583 | V_QID(q->cntxt_id) | V_PIDX(delta)); | ||
| 2584 | } | ||
| 2585 | out: | ||
| 2586 | q->db_disabled = 0; | ||
| 2587 | spin_unlock_bh(&q->db_lock); | ||
| 2588 | if (ret) | ||
| 2589 | CH_WARN(adap, "DB drop recovery failed.\n"); | ||
| 2590 | } | ||
| 2591 | static void recover_all_queues(struct adapter *adap) | ||
| 2592 | { | ||
| 2593 | int i; | ||
| 2594 | |||
| 2595 | for_each_ethrxq(&adap->sge, i) | ||
| 2596 | sync_txq_pidx(adap, &adap->sge.ethtxq[i].q); | ||
| 2597 | for_each_ofldrxq(&adap->sge, i) | ||
| 2598 | sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q); | ||
| 2599 | for_each_port(adap, i) | ||
| 2600 | sync_txq_pidx(adap, &adap->sge.ctrlq[i].q); | ||
| 2601 | } | ||
| 2602 | |||
| 2459 | static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd) | 2603 | static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd) |
| 2460 | { | 2604 | { |
| 2461 | mutex_lock(&uld_mutex); | 2605 | mutex_lock(&uld_mutex); |
| @@ -2468,55 +2612,41 @@ static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd) | |||
| 2468 | static void process_db_full(struct work_struct *work) | 2612 | static void process_db_full(struct work_struct *work) |
| 2469 | { | 2613 | { |
| 2470 | struct adapter *adap; | 2614 | struct adapter *adap; |
| 2471 | static int delay = 1000; | ||
| 2472 | u32 v; | ||
| 2473 | 2615 | ||
| 2474 | adap = container_of(work, struct adapter, db_full_task); | 2616 | adap = container_of(work, struct adapter, db_full_task); |
| 2475 | 2617 | ||
| 2476 | |||
| 2477 | /* stop LLD queues */ | ||
| 2478 | |||
| 2479 | notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); | 2618 | notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); |
| 2480 | do { | 2619 | drain_db_fifo(adap, dbfifo_drain_delay); |
| 2481 | set_current_state(TASK_UNINTERRUPTIBLE); | 2620 | t4_set_reg_field(adap, A_SGE_INT_ENABLE3, |
| 2482 | schedule_timeout(usecs_to_jiffies(delay)); | 2621 | F_DBFIFO_HP_INT | F_DBFIFO_LP_INT, |
| 2483 | v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); | 2622 | F_DBFIFO_HP_INT | F_DBFIFO_LP_INT); |
| 2484 | if (G_LP_COUNT(v) == 0 && G_HP_COUNT(v) == 0) | ||
| 2485 | break; | ||
| 2486 | } while (1); | ||
| 2487 | notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); | 2623 | notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); |
| 2488 | |||
| 2489 | |||
| 2490 | /* | ||
| 2491 | * The more we get db full interrupts, the more we'll delay | ||
| 2492 | * in re-enabling db rings on queues, capped off at 200ms. | ||
| 2493 | */ | ||
| 2494 | delay = min(delay << 1, 200000); | ||
| 2495 | |||
| 2496 | /* resume LLD queues */ | ||
| 2497 | } | 2624 | } |
| 2498 | 2625 | ||
| 2499 | static void process_db_drop(struct work_struct *work) | 2626 | static void process_db_drop(struct work_struct *work) |
| 2500 | { | 2627 | { |
| 2501 | struct adapter *adap; | 2628 | struct adapter *adap; |
| 2502 | adap = container_of(work, struct adapter, db_drop_task); | ||
| 2503 | 2629 | ||
| 2630 | adap = container_of(work, struct adapter, db_drop_task); | ||
| 2504 | 2631 | ||
| 2505 | /* | 2632 | t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0); |
| 2506 | * sync the PIDX values in HW and SW for LLD queues. | 2633 | disable_dbs(adap); |
| 2507 | */ | ||
| 2508 | |||
| 2509 | notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP); | 2634 | notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP); |
| 2635 | drain_db_fifo(adap, 1); | ||
| 2636 | recover_all_queues(adap); | ||
| 2637 | enable_dbs(adap); | ||
| 2510 | } | 2638 | } |
| 2511 | 2639 | ||
| 2512 | void t4_db_full(struct adapter *adap) | 2640 | void t4_db_full(struct adapter *adap) |
| 2513 | { | 2641 | { |
| 2514 | schedule_work(&adap->db_full_task); | 2642 | t4_set_reg_field(adap, A_SGE_INT_ENABLE3, |
| 2643 | F_DBFIFO_HP_INT | F_DBFIFO_LP_INT, 0); | ||
| 2644 | queue_work(workq, &adap->db_full_task); | ||
| 2515 | } | 2645 | } |
| 2516 | 2646 | ||
| 2517 | void t4_db_dropped(struct adapter *adap) | 2647 | void t4_db_dropped(struct adapter *adap) |
| 2518 | { | 2648 | { |
| 2519 | schedule_work(&adap->db_drop_task); | 2649 | queue_work(workq, &adap->db_drop_task); |
| 2520 | } | 2650 | } |
| 2521 | 2651 | ||
| 2522 | static void uld_attach(struct adapter *adap, unsigned int uld) | 2652 | static void uld_attach(struct adapter *adap, unsigned int uld) |
| @@ -2552,6 +2682,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld) | |||
| 2552 | lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS); | 2682 | lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS); |
| 2553 | lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL); | 2683 | lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL); |
| 2554 | lli.fw_vers = adap->params.fw_vers; | 2684 | lli.fw_vers = adap->params.fw_vers; |
| 2685 | lli.dbfifo_int_thresh = dbfifo_int_thresh; | ||
| 2555 | 2686 | ||
| 2556 | handle = ulds[uld].add(&lli); | 2687 | handle = ulds[uld].add(&lli); |
| 2557 | if (IS_ERR(handle)) { | 2688 | if (IS_ERR(handle)) { |
| @@ -3668,6 +3799,7 @@ static int __devinit init_one(struct pci_dev *pdev, | |||
| 3668 | 3799 | ||
| 3669 | adapter->pdev = pdev; | 3800 | adapter->pdev = pdev; |
| 3670 | adapter->pdev_dev = &pdev->dev; | 3801 | adapter->pdev_dev = &pdev->dev; |
| 3802 | adapter->mbox = func; | ||
| 3671 | adapter->fn = func; | 3803 | adapter->fn = func; |
| 3672 | adapter->msg_enable = dflt_msg_enable; | 3804 | adapter->msg_enable = dflt_msg_enable; |
| 3673 | memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); | 3805 | memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); |
| @@ -3865,6 +3997,10 @@ static int __init cxgb4_init_module(void) | |||
| 3865 | { | 3997 | { |
| 3866 | int ret; | 3998 | int ret; |
| 3867 | 3999 | ||
| 4000 | workq = create_singlethread_workqueue("cxgb4"); | ||
| 4001 | if (!workq) | ||
| 4002 | return -ENOMEM; | ||
| 4003 | |||
| 3868 | /* Debugfs support is optional, just warn if this fails */ | 4004 | /* Debugfs support is optional, just warn if this fails */ |
| 3869 | cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); | 4005 | cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); |
| 3870 | if (!cxgb4_debugfs_root) | 4006 | if (!cxgb4_debugfs_root) |
| @@ -3880,6 +4016,8 @@ static void __exit cxgb4_cleanup_module(void) | |||
| 3880 | { | 4016 | { |
| 3881 | pci_unregister_driver(&cxgb4_driver); | 4017 | pci_unregister_driver(&cxgb4_driver); |
| 3882 | debugfs_remove(cxgb4_debugfs_root); /* NULL ok */ | 4018 | debugfs_remove(cxgb4_debugfs_root); /* NULL ok */ |
| 4019 | flush_workqueue(workq); | ||
| 4020 | destroy_workqueue(workq); | ||
| 3883 | } | 4021 | } |
| 3884 | 4022 | ||
| 3885 | module_init(cxgb4_init_module); | 4023 | module_init(cxgb4_init_module); |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index 5cc2f27d60c7..d79980c5fc63 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | |||
| @@ -218,6 +218,7 @@ struct cxgb4_lld_info { | |||
| 218 | unsigned short ucq_density; /* # of user CQs/page */ | 218 | unsigned short ucq_density; /* # of user CQs/page */ |
| 219 | void __iomem *gts_reg; /* address of GTS register */ | 219 | void __iomem *gts_reg; /* address of GTS register */ |
| 220 | void __iomem *db_reg; /* address of kernel doorbell */ | 220 | void __iomem *db_reg; /* address of kernel doorbell */ |
| 221 | int dbfifo_int_thresh; /* doorbell fifo int threshold */ | ||
| 221 | }; | 222 | }; |
| 222 | 223 | ||
| 223 | struct cxgb4_uld_info { | 224 | struct cxgb4_uld_info { |
| @@ -226,6 +227,7 @@ struct cxgb4_uld_info { | |||
| 226 | int (*rx_handler)(void *handle, const __be64 *rsp, | 227 | int (*rx_handler)(void *handle, const __be64 *rsp, |
| 227 | const struct pkt_gl *gl); | 228 | const struct pkt_gl *gl); |
| 228 | int (*state_change)(void *handle, enum cxgb4_state new_state); | 229 | int (*state_change)(void *handle, enum cxgb4_state new_state); |
| 230 | int (*control)(void *handle, enum cxgb4_control control, ...); | ||
| 229 | }; | 231 | }; |
| 230 | 232 | ||
| 231 | int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p); | 233 | int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p); |
| @@ -243,4 +245,6 @@ void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask, | |||
| 243 | const unsigned int *pgsz_order); | 245 | const unsigned int *pgsz_order); |
| 244 | struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, | 246 | struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, |
| 245 | unsigned int skb_len, unsigned int pull_len); | 247 | unsigned int skb_len, unsigned int pull_len); |
| 248 | int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size); | ||
| 249 | int cxgb4_flush_eq_cache(struct net_device *dev); | ||
| 246 | #endif /* !__CXGB4_OFLD_H */ | 250 | #endif /* !__CXGB4_OFLD_H */ |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 234c157a4879..e111d974afd8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c | |||
| @@ -767,8 +767,13 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q, | |||
| 767 | static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) | 767 | static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) |
| 768 | { | 768 | { |
| 769 | wmb(); /* write descriptors before telling HW */ | 769 | wmb(); /* write descriptors before telling HW */ |
| 770 | t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), | 770 | spin_lock(&q->db_lock); |
| 771 | QID(q->cntxt_id) | PIDX(n)); | 771 | if (!q->db_disabled) { |
| 772 | t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL), | ||
| 773 | V_QID(q->cntxt_id) | V_PIDX(n)); | ||
| 774 | } | ||
| 775 | q->db_pidx = q->pidx; | ||
| 776 | spin_unlock(&q->db_lock); | ||
| 772 | } | 777 | } |
| 773 | 778 | ||
| 774 | /** | 779 | /** |
| @@ -2081,6 +2086,7 @@ static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) | |||
| 2081 | q->stops = q->restarts = 0; | 2086 | q->stops = q->restarts = 0; |
| 2082 | q->stat = (void *)&q->desc[q->size]; | 2087 | q->stat = (void *)&q->desc[q->size]; |
| 2083 | q->cntxt_id = id; | 2088 | q->cntxt_id = id; |
| 2089 | spin_lock_init(&q->db_lock); | ||
| 2084 | adap->sge.egr_map[id - adap->sge.egr_start] = q; | 2090 | adap->sge.egr_map[id - adap->sge.egr_start] = q; |
| 2085 | } | 2091 | } |
| 2086 | 2092 | ||
| @@ -2415,9 +2421,15 @@ void t4_sge_init(struct adapter *adap) | |||
| 2415 | RXPKTCPLMODE | | 2421 | RXPKTCPLMODE | |
| 2416 | (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0)); | 2422 | (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0)); |
| 2417 | 2423 | ||
| 2424 | /* | ||
| 2425 | * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows | ||
| 2426 | * and generate an interrupt when this occurs so we can recover. | ||
| 2427 | */ | ||
| 2418 | t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS, | 2428 | t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS, |
| 2419 | V_HP_INT_THRESH(5) | V_LP_INT_THRESH(5), | 2429 | V_HP_INT_THRESH(M_HP_INT_THRESH) | |
| 2420 | V_HP_INT_THRESH(5) | V_LP_INT_THRESH(5)); | 2430 | V_LP_INT_THRESH(M_LP_INT_THRESH), |
| 2431 | V_HP_INT_THRESH(dbfifo_int_thresh) | | ||
| 2432 | V_LP_INT_THRESH(dbfifo_int_thresh)); | ||
| 2421 | t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP, | 2433 | t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP, |
| 2422 | F_ENABLE_DROP); | 2434 | F_ENABLE_DROP); |
| 2423 | 2435 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 0adc5bcec7c4..111fc323f155 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | |||
| @@ -190,6 +190,59 @@ | |||
| 190 | #define SGE_DEBUG_DATA_LOW 0x10d4 | 190 | #define SGE_DEBUG_DATA_LOW 0x10d4 |
| 191 | #define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4 | 191 | #define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4 |
| 192 | 192 | ||
| 193 | #define S_LP_INT_THRESH 12 | ||
| 194 | #define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH) | ||
| 195 | #define S_HP_INT_THRESH 28 | ||
| 196 | #define V_HP_INT_THRESH(x) ((x) << S_HP_INT_THRESH) | ||
| 197 | #define A_SGE_DBFIFO_STATUS 0x10a4 | ||
| 198 | |||
| 199 | #define S_ENABLE_DROP 13 | ||
| 200 | #define V_ENABLE_DROP(x) ((x) << S_ENABLE_DROP) | ||
| 201 | #define F_ENABLE_DROP V_ENABLE_DROP(1U) | ||
| 202 | #define A_SGE_DOORBELL_CONTROL 0x10a8 | ||
| 203 | |||
| 204 | #define A_SGE_CTXT_CMD 0x11fc | ||
| 205 | #define A_SGE_DBQ_CTXT_BADDR 0x1084 | ||
| 206 | |||
| 207 | #define A_SGE_PF_KDOORBELL 0x0 | ||
| 208 | |||
| 209 | #define S_QID 15 | ||
| 210 | #define V_QID(x) ((x) << S_QID) | ||
| 211 | |||
| 212 | #define S_PIDX 0 | ||
| 213 | #define V_PIDX(x) ((x) << S_PIDX) | ||
| 214 | |||
| 215 | #define M_LP_COUNT 0x7ffU | ||
| 216 | #define S_LP_COUNT 0 | ||
| 217 | #define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT) | ||
| 218 | |||
| 219 | #define M_HP_COUNT 0x7ffU | ||
| 220 | #define S_HP_COUNT 16 | ||
| 221 | #define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT) | ||
| 222 | |||
| 223 | #define A_SGE_INT_ENABLE3 0x1040 | ||
| 224 | |||
| 225 | #define S_DBFIFO_HP_INT 8 | ||
| 226 | #define V_DBFIFO_HP_INT(x) ((x) << S_DBFIFO_HP_INT) | ||
| 227 | #define F_DBFIFO_HP_INT V_DBFIFO_HP_INT(1U) | ||
| 228 | |||
| 229 | #define S_DBFIFO_LP_INT 7 | ||
| 230 | #define V_DBFIFO_LP_INT(x) ((x) << S_DBFIFO_LP_INT) | ||
| 231 | #define F_DBFIFO_LP_INT V_DBFIFO_LP_INT(1U) | ||
| 232 | |||
| 233 | #define S_DROPPED_DB 0 | ||
| 234 | #define V_DROPPED_DB(x) ((x) << S_DROPPED_DB) | ||
| 235 | #define F_DROPPED_DB V_DROPPED_DB(1U) | ||
| 236 | |||
| 237 | #define S_ERR_DROPPED_DB 18 | ||
| 238 | #define V_ERR_DROPPED_DB(x) ((x) << S_ERR_DROPPED_DB) | ||
| 239 | #define F_ERR_DROPPED_DB V_ERR_DROPPED_DB(1U) | ||
| 240 | |||
| 241 | #define A_PCIE_MEM_ACCESS_OFFSET 0x306c | ||
| 242 | |||
| 243 | #define M_HP_INT_THRESH 0xfU | ||
| 244 | #define M_LP_INT_THRESH 0xfU | ||
| 245 | |||
| 193 | #define PCIE_PF_CLI 0x44 | 246 | #define PCIE_PF_CLI 0x44 |
| 194 | #define PCIE_INT_CAUSE 0x3004 | 247 | #define PCIE_INT_CAUSE 0x3004 |
| 195 | #define UNXSPLCPLERR 0x20000000U | 248 | #define UNXSPLCPLERR 0x20000000U |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index edcfd7ec7802..ad53f796b574 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | |||
| @@ -1620,4 +1620,19 @@ struct fw_hdr { | |||
| 1620 | #define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff) | 1620 | #define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff) |
| 1621 | #define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff) | 1621 | #define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff) |
| 1622 | #define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff) | 1622 | #define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff) |
| 1623 | |||
| 1624 | #define S_FW_CMD_OP 24 | ||
| 1625 | #define V_FW_CMD_OP(x) ((x) << S_FW_CMD_OP) | ||
| 1626 | |||
| 1627 | #define S_FW_CMD_REQUEST 23 | ||
| 1628 | #define V_FW_CMD_REQUEST(x) ((x) << S_FW_CMD_REQUEST) | ||
| 1629 | #define F_FW_CMD_REQUEST V_FW_CMD_REQUEST(1U) | ||
| 1630 | |||
| 1631 | #define S_FW_CMD_WRITE 21 | ||
| 1632 | #define V_FW_CMD_WRITE(x) ((x) << S_FW_CMD_WRITE) | ||
| 1633 | #define F_FW_CMD_WRITE V_FW_CMD_WRITE(1U) | ||
| 1634 | |||
| 1635 | #define S_FW_LDST_CMD_ADDRSPACE 0 | ||
| 1636 | #define V_FW_LDST_CMD_ADDRSPACE(x) ((x) << S_FW_LDST_CMD_ADDRSPACE) | ||
| 1637 | |||
| 1623 | #endif /* _T4FW_INTERFACE_H_ */ | 1638 | #endif /* _T4FW_INTERFACE_H_ */ |
