author     Michael S. Tsirkin <mst@mellanox.co.il>    2006-01-09 17:04:40 -0500
committer  Roland Dreier <rolandd@cisco.com>          2006-01-09 17:04:40 -0500
commit     92898522e3ee1a0ba54140aad1974d9e868f74ae
tree       25f398798849c6806830aa3a32a98c72640a12e3 /drivers/infiniband/hw
parent     6627fa662e86c400284b64c13661fdf6bff05983
IB/mthca: prevent event queue overrun
I am seeing EQ overruns in SDP stress tests: if the CQ completion handler arms a CQ, this can generate more EQEs, so the EQ never becomes empty and the consumer index never gets updated.

This is similar to what we have with the command interface:

        /*
         * cmd_event() may add more commands.
         * The card will think the queue has overflowed if
         * we don't tell it we've been processing events.
         */

However, for completion events we *don't* want to update the consumer index on each event.

So, perform EQ doorbell coalescing: allocate EQs with some spare EQEs, and update the consumer index once we run out of them. The value 0x80 was selected to avoid any performance impact.

Signed-off-by: Michael S. Tsirkin <mst@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
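As an illustration of the doorbell-coalescing pattern described above, here is a minimal standalone sketch. The helper names (poll_eqe(), dispatch_eqe(), write_consumer_index()) and the struct layout are placeholders for illustration, not the mthca driver's real API; the actual change is in the diff below.

        /*
         * Sketch of EQ doorbell coalescing: consume event queue entries,
         * but only report the consumer index to the HCA every
         * NUM_SPARE_EQE entries instead of once per entry.  Helper
         * names are placeholders, not the mthca driver's functions.
         */
        #include <stdint.h>
        #include <stddef.h>

        #define NUM_SPARE_EQE 0x80      /* extra EQEs allocated per queue */

        struct eq {
                uint32_t cons_index;    /* software consumer index */
        };

        /* Placeholder helpers standing in for driver internals. */
        extern void *poll_eqe(struct eq *eq);            /* next unread EQE, or NULL */
        extern void dispatch_eqe(void *eqe);             /* handle one event         */
        extern void write_consumer_index(struct eq *eq); /* doorbell: report progress */

        static void eq_poll(struct eq *eq)
        {
                void *eqe;
                int pending = 0;

                while ((eqe = poll_eqe(eq)) != NULL) {
                        dispatch_eqe(eqe);
                        ++eq->cons_index;

                        /*
                         * The spare entries give us room to defer the
                         * doorbell: ring it only once NUM_SPARE_EQE
                         * entries have been consumed, so the HCA never
                         * sees the queue as full.
                         */
                        if (++pending >= NUM_SPARE_EQE) {
                                write_consumer_index(eq);
                                pending = 0;
                        }
                }

                /* Report any remaining progress before returning. */
                if (pending)
                        write_consumer_index(eq);
        }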
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c | 28
1 file changed, 15 insertions(+), 13 deletions(-)
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index e8a948f087c0..2eabb27804cd 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -45,6 +45,7 @@
 enum {
 	MTHCA_NUM_ASYNC_EQE = 0x80,
 	MTHCA_NUM_CMD_EQE   = 0x80,
+	MTHCA_NUM_SPARE_EQE = 0x80,
 	MTHCA_EQ_ENTRY_SIZE = 0x20
 };
 
@@ -277,11 +278,10 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
 {
 	struct mthca_eqe *eqe;
 	int disarm_cqn;
 	int eqes_found = 0;
+	int set_ci = 0;
 
 	while ((eqe = next_eqe_sw(eq))) {
-		int set_ci = 0;
-
 		/*
 		 * Make sure we read EQ entry contents after we've
 		 * checked the ownership bit.
@@ -345,12 +345,6 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
 				       be16_to_cpu(eqe->event.cmd.token),
 				       eqe->event.cmd.status,
 				       be64_to_cpu(eqe->event.cmd.out_param));
-			/*
-			 * cmd_event() may add more commands.
-			 * The card will think the queue has overflowed if
-			 * we don't tell it we've been processing events.
-			 */
-			set_ci = 1;
 			break;
 
 		case MTHCA_EVENT_TYPE_PORT_CHANGE:
@@ -385,8 +379,16 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
 		set_eqe_hw(eqe);
 		++eq->cons_index;
 		eqes_found = 1;
+		++set_ci;
 
-		if (unlikely(set_ci)) {
+		/*
+		 * The HCA will think the queue has overflowed if we
+		 * don't tell it we've been processing events.  We
+		 * create our EQs with MTHCA_NUM_SPARE_EQE extra
+		 * entries, so we must update our consumer index at
+		 * least that often.
+		 */
+		if (unlikely(set_ci >= MTHCA_NUM_SPARE_EQE)) {
 			/*
 			 * Conditional on hca_type is OK here because
 			 * this is a rare case, not the fast path.
@@ -862,19 +864,19 @@ int __devinit mthca_init_eq_table(struct mthca_dev *dev)
 	intr = (dev->mthca_flags & MTHCA_FLAG_MSI) ?
 		128 : dev->eq_table.inta_pin;
 
-	err = mthca_create_eq(dev, dev->limits.num_cqs,
+	err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE,
 			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,
 			      &dev->eq_table.eq[MTHCA_EQ_COMP]);
 	if (err)
 		goto err_out_unmap;
 
-	err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE,
+	err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE + MTHCA_NUM_SPARE_EQE,
 			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr,
 			      &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
 	if (err)
 		goto err_out_comp;
 
-	err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE,
+	err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE + MTHCA_NUM_SPARE_EQE,
 			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr,
 			      &dev->eq_table.eq[MTHCA_EQ_CMD]);
 	if (err)