about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
authorRoland Dreier <rolandd@cisco.com>2010-03-02 02:51:55 -0500
committerRoland Dreier <rolandd@cisco.com>2010-03-02 02:51:55 -0500
commit216fe702f7f5d440a5c54488f020624897fd5e2d (patch)
tree3d1446d152f5da1ec1c00cd9ff3db5684f82e294 /drivers
parente8094e667aa89a7c455da1da9daf8a5ed063da8d (diff)
parent68baf495d8e559a82787f595fecc30a43bb89bb7 (diff)
Merge branch 'cxgb3' into for-next
Diffstat (limited to 'drivers')
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c15
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.h4
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_wr.h17
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.c80
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.h2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c9
-rw-r--r--drivers/net/cxgb3/adapter.h5
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c57
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.h5
-rw-r--r--drivers/net/cxgb3/regs.h16
-rw-r--r--drivers/net/cxgb3/sge.c10
-rw-r--r--drivers/net/cxgb3/t3_hw.c5
13 files changed, 203 insertions(+), 24 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 0677fc7dfd51..a28e862f2d68 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -109,7 +109,6 @@ int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq,
109 while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) { 109 while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) {
110 udelay(1); 110 udelay(1);
111 if (i++ > 1000000) { 111 if (i++ > 1000000) {
112 BUG_ON(1);
113 printk(KERN_ERR "%s: stalled rnic\n", 112 printk(KERN_ERR "%s: stalled rnic\n",
114 rdev_p->dev_name); 113 rdev_p->dev_name);
115 return -EIO; 114 return -EIO;
@@ -155,7 +154,7 @@ static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
155 return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb); 154 return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
156} 155}
157 156
158int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq) 157int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
159{ 158{
160 struct rdma_cq_setup setup; 159 struct rdma_cq_setup setup;
161 int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe); 160 int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);
@@ -163,12 +162,12 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
163 cq->cqid = cxio_hal_get_cqid(rdev_p->rscp); 162 cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
164 if (!cq->cqid) 163 if (!cq->cqid)
165 return -ENOMEM; 164 return -ENOMEM;
166 cq->sw_queue = kzalloc(size, GFP_KERNEL); 165 if (kernel) {
167 if (!cq->sw_queue) 166 cq->sw_queue = kzalloc(size, GFP_KERNEL);
168 return -ENOMEM; 167 if (!cq->sw_queue)
169 cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), 168 return -ENOMEM;
170 (1UL << (cq->size_log2)) * 169 }
171 sizeof(struct t3_cqe), 170 cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), size,
172 &(cq->dma_addr), GFP_KERNEL); 171 &(cq->dma_addr), GFP_KERNEL);
173 if (!cq->queue) { 172 if (!cq->queue) {
174 kfree(cq->sw_queue); 173 kfree(cq->sw_queue);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index f3d440cc68f2..073373c2c560 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -53,7 +53,7 @@
53#define T3_MAX_PBL_SIZE 256 53#define T3_MAX_PBL_SIZE 256
54#define T3_MAX_RQ_SIZE 1024 54#define T3_MAX_RQ_SIZE 1024
55#define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1) 55#define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1)
56#define T3_MAX_CQ_DEPTH 8192 56#define T3_MAX_CQ_DEPTH 262144
57#define T3_MAX_NUM_STAG (1<<15) 57#define T3_MAX_NUM_STAG (1<<15)
58#define T3_MAX_MR_SIZE 0x100000000ULL 58#define T3_MAX_MR_SIZE 0x100000000ULL
59#define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */ 59#define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
@@ -157,7 +157,7 @@ int cxio_rdev_open(struct cxio_rdev *rdev);
157void cxio_rdev_close(struct cxio_rdev *rdev); 157void cxio_rdev_close(struct cxio_rdev *rdev);
158int cxio_hal_cq_op(struct cxio_rdev *rdev, struct t3_cq *cq, 158int cxio_hal_cq_op(struct cxio_rdev *rdev, struct t3_cq *cq,
159 enum t3_cq_opcode op, u32 credit); 159 enum t3_cq_opcode op, u32 credit);
160int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq); 160int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq, int kernel);
161int cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq); 161int cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
162int cxio_resize_cq(struct cxio_rdev *rdev, struct t3_cq *cq); 162int cxio_resize_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
163void cxio_release_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx); 163void cxio_release_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index a197a5b7ac7f..15073b2da1c5 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -730,7 +730,22 @@ struct t3_cq {
730 730
731static inline void cxio_set_wq_in_error(struct t3_wq *wq) 731static inline void cxio_set_wq_in_error(struct t3_wq *wq)
732{ 732{
733 wq->queue->wq_in_err.err = 1; 733 wq->queue->wq_in_err.err |= 1;
734}
735
736static inline void cxio_disable_wq_db(struct t3_wq *wq)
737{
738 wq->queue->wq_in_err.err |= 2;
739}
740
741static inline void cxio_enable_wq_db(struct t3_wq *wq)
742{
743 wq->queue->wq_in_err.err &= ~2;
744}
745
746static inline int cxio_wq_db_enabled(struct t3_wq *wq)
747{
748 return !(wq->queue->wq_in_err.err & 2);
734} 749}
735 750
736static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq) 751static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index b0ea0105ddf6..ee1d8b4d4541 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -65,6 +65,46 @@ struct cxgb3_client t3c_client = {
65static LIST_HEAD(dev_list); 65static LIST_HEAD(dev_list);
66static DEFINE_MUTEX(dev_mutex); 66static DEFINE_MUTEX(dev_mutex);
67 67
68static int disable_qp_db(int id, void *p, void *data)
69{
70 struct iwch_qp *qhp = p;
71
72 cxio_disable_wq_db(&qhp->wq);
73 return 0;
74}
75
76static int enable_qp_db(int id, void *p, void *data)
77{
78 struct iwch_qp *qhp = p;
79
80 if (data)
81 ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell, qhp->wq.qpid);
82 cxio_enable_wq_db(&qhp->wq);
83 return 0;
84}
85
86static void disable_dbs(struct iwch_dev *rnicp)
87{
88 spin_lock_irq(&rnicp->lock);
89 idr_for_each(&rnicp->qpidr, disable_qp_db, NULL);
90 spin_unlock_irq(&rnicp->lock);
91}
92
93static void enable_dbs(struct iwch_dev *rnicp, int ring_db)
94{
95 spin_lock_irq(&rnicp->lock);
96 idr_for_each(&rnicp->qpidr, enable_qp_db,
97 (void *)(unsigned long)ring_db);
98 spin_unlock_irq(&rnicp->lock);
99}
100
101static void iwch_db_drop_task(struct work_struct *work)
102{
103 struct iwch_dev *rnicp = container_of(work, struct iwch_dev,
104 db_drop_task.work);
105 enable_dbs(rnicp, 1);
106}
107
68static void rnic_init(struct iwch_dev *rnicp) 108static void rnic_init(struct iwch_dev *rnicp)
69{ 109{
70 PDBG("%s iwch_dev %p\n", __func__, rnicp); 110 PDBG("%s iwch_dev %p\n", __func__, rnicp);
@@ -72,6 +112,7 @@ static void rnic_init(struct iwch_dev *rnicp)
72 idr_init(&rnicp->qpidr); 112 idr_init(&rnicp->qpidr);
73 idr_init(&rnicp->mmidr); 113 idr_init(&rnicp->mmidr);
74 spin_lock_init(&rnicp->lock); 114 spin_lock_init(&rnicp->lock);
115 INIT_DELAYED_WORK(&rnicp->db_drop_task, iwch_db_drop_task);
75 116
76 rnicp->attr.max_qps = T3_MAX_NUM_QP - 32; 117 rnicp->attr.max_qps = T3_MAX_NUM_QP - 32;
77 rnicp->attr.max_wrs = T3_MAX_QP_DEPTH; 118 rnicp->attr.max_wrs = T3_MAX_QP_DEPTH;
@@ -147,6 +188,8 @@ static void close_rnic_dev(struct t3cdev *tdev)
147 mutex_lock(&dev_mutex); 188 mutex_lock(&dev_mutex);
148 list_for_each_entry_safe(dev, tmp, &dev_list, entry) { 189 list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
149 if (dev->rdev.t3cdev_p == tdev) { 190 if (dev->rdev.t3cdev_p == tdev) {
191 dev->rdev.flags = CXIO_ERROR_FATAL;
192 cancel_delayed_work_sync(&dev->db_drop_task);
150 list_del(&dev->entry); 193 list_del(&dev->entry);
151 iwch_unregister_device(dev); 194 iwch_unregister_device(dev);
152 cxio_rdev_close(&dev->rdev); 195 cxio_rdev_close(&dev->rdev);
@@ -165,7 +208,8 @@ static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
165 struct cxio_rdev *rdev = tdev->ulp; 208 struct cxio_rdev *rdev = tdev->ulp;
166 struct iwch_dev *rnicp; 209 struct iwch_dev *rnicp;
167 struct ib_event event; 210 struct ib_event event;
168 u32 portnum = port_id + 1; 211 u32 portnum = port_id + 1;
212 int dispatch = 0;
169 213
170 if (!rdev) 214 if (!rdev)
171 return; 215 return;
@@ -174,21 +218,49 @@ static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
174 case OFFLOAD_STATUS_DOWN: { 218 case OFFLOAD_STATUS_DOWN: {
175 rdev->flags = CXIO_ERROR_FATAL; 219 rdev->flags = CXIO_ERROR_FATAL;
176 event.event = IB_EVENT_DEVICE_FATAL; 220 event.event = IB_EVENT_DEVICE_FATAL;
221 dispatch = 1;
177 break; 222 break;
178 } 223 }
179 case OFFLOAD_PORT_DOWN: { 224 case OFFLOAD_PORT_DOWN: {
180 event.event = IB_EVENT_PORT_ERR; 225 event.event = IB_EVENT_PORT_ERR;
226 dispatch = 1;
181 break; 227 break;
182 } 228 }
183 case OFFLOAD_PORT_UP: { 229 case OFFLOAD_PORT_UP: {
184 event.event = IB_EVENT_PORT_ACTIVE; 230 event.event = IB_EVENT_PORT_ACTIVE;
231 dispatch = 1;
232 break;
233 }
234 case OFFLOAD_DB_FULL: {
235 disable_dbs(rnicp);
236 break;
237 }
238 case OFFLOAD_DB_EMPTY: {
239 enable_dbs(rnicp, 1);
240 break;
241 }
242 case OFFLOAD_DB_DROP: {
243 unsigned long delay = 1000;
244 unsigned short r;
245
246 disable_dbs(rnicp);
247 get_random_bytes(&r, 2);
248 delay += r & 1023;
249
250 /*
251 * delay is between 1000-2023 usecs.
252 */
253 schedule_delayed_work(&rnicp->db_drop_task,
254 usecs_to_jiffies(delay));
185 break; 255 break;
186 } 256 }
187 } 257 }
188 258
189 event.device = &rnicp->ibdev; 259 if (dispatch) {
190 event.element.port_num = portnum; 260 event.device = &rnicp->ibdev;
191 ib_dispatch_event(&event); 261 event.element.port_num = portnum;
262 ib_dispatch_event(&event);
263 }
192 264
193 return; 265 return;
194} 266}
diff --git a/drivers/infiniband/hw/cxgb3/iwch.h b/drivers/infiniband/hw/cxgb3/iwch.h
index 84735506333f..a1c44578e039 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.h
+++ b/drivers/infiniband/hw/cxgb3/iwch.h
@@ -36,6 +36,7 @@
36#include <linux/list.h> 36#include <linux/list.h>
37#include <linux/spinlock.h> 37#include <linux/spinlock.h>
38#include <linux/idr.h> 38#include <linux/idr.h>
39#include <linux/workqueue.h>
39 40
40#include <rdma/ib_verbs.h> 41#include <rdma/ib_verbs.h>
41 42
@@ -110,6 +111,7 @@ struct iwch_dev {
110 struct idr mmidr; 111 struct idr mmidr;
111 spinlock_t lock; 112 spinlock_t lock;
112 struct list_head entry; 113 struct list_head entry;
114 struct delayed_work db_drop_task;
113}; 115};
114 116
115static inline struct iwch_dev *to_iwch_dev(struct ib_device *ibdev) 117static inline struct iwch_dev *to_iwch_dev(struct ib_device *ibdev)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index ed7175549ebd..47b35c6608d2 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -187,7 +187,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
187 entries = roundup_pow_of_two(entries); 187 entries = roundup_pow_of_two(entries);
188 chp->cq.size_log2 = ilog2(entries); 188 chp->cq.size_log2 = ilog2(entries);
189 189
190 if (cxio_create_cq(&rhp->rdev, &chp->cq)) { 190 if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
191 kfree(chp); 191 kfree(chp);
192 return ERR_PTR(-ENOMEM); 192 return ERR_PTR(-ENOMEM);
193 } 193 }
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 3eb8cecf81d7..b4d893de3650 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -452,7 +452,8 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
452 ++(qhp->wq.sq_wptr); 452 ++(qhp->wq.sq_wptr);
453 } 453 }
454 spin_unlock_irqrestore(&qhp->lock, flag); 454 spin_unlock_irqrestore(&qhp->lock, flag);
455 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid); 455 if (cxio_wq_db_enabled(&qhp->wq))
456 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
456 457
457out: 458out:
458 if (err) 459 if (err)
@@ -514,7 +515,8 @@ int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
514 num_wrs--; 515 num_wrs--;
515 } 516 }
516 spin_unlock_irqrestore(&qhp->lock, flag); 517 spin_unlock_irqrestore(&qhp->lock, flag);
517 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid); 518 if (cxio_wq_db_enabled(&qhp->wq))
519 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
518 520
519out: 521out:
520 if (err) 522 if (err)
@@ -597,7 +599,8 @@ int iwch_bind_mw(struct ib_qp *qp,
597 ++(qhp->wq.sq_wptr); 599 ++(qhp->wq.sq_wptr);
598 spin_unlock_irqrestore(&qhp->lock, flag); 600 spin_unlock_irqrestore(&qhp->lock, flag);
599 601
600 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid); 602 if (cxio_wq_db_enabled(&qhp->wq))
603 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
601 604
602 return err; 605 return err;
603} 606}
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 3e8618b4efbc..4cd7f420766a 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -264,6 +264,10 @@ struct adapter {
264 struct work_struct fatal_error_handler_task; 264 struct work_struct fatal_error_handler_task;
265 struct work_struct link_fault_handler_task; 265 struct work_struct link_fault_handler_task;
266 266
267 struct work_struct db_full_task;
268 struct work_struct db_empty_task;
269 struct work_struct db_drop_task;
270
267 struct dentry *debugfs_root; 271 struct dentry *debugfs_root;
268 272
269 struct mutex mdio_lock; 273 struct mutex mdio_lock;
@@ -335,6 +339,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
335int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx, 339int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
336 unsigned char *data); 340 unsigned char *data);
337irqreturn_t t3_sge_intr_msix(int irq, void *cookie); 341irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
342extern struct workqueue_struct *cxgb3_wq;
338 343
339int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size); 344int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size);
340 345
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 89bec9c3c141..37945fce7fa5 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -45,6 +45,7 @@
45#include <linux/firmware.h> 45#include <linux/firmware.h>
46#include <linux/log2.h> 46#include <linux/log2.h>
47#include <linux/stringify.h> 47#include <linux/stringify.h>
48#include <linux/sched.h>
48#include <asm/uaccess.h> 49#include <asm/uaccess.h>
49 50
50#include "common.h" 51#include "common.h"
@@ -140,7 +141,7 @@ MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
140 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting 141 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
141 * for our work to complete. Get our own work queue to solve this. 142 * for our work to complete. Get our own work queue to solve this.
142 */ 143 */
143static struct workqueue_struct *cxgb3_wq; 144struct workqueue_struct *cxgb3_wq;
144 145
145/** 146/**
146 * link_report - show link status and link speed/duplex 147 * link_report - show link status and link speed/duplex
@@ -590,6 +591,19 @@ static void setup_rss(struct adapter *adap)
590 V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map); 591 V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
591} 592}
592 593
594static void ring_dbs(struct adapter *adap)
595{
596 int i, j;
597
598 for (i = 0; i < SGE_QSETS; i++) {
599 struct sge_qset *qs = &adap->sge.qs[i];
600
601 if (qs->adap)
602 for (j = 0; j < SGE_TXQ_PER_SET; j++)
603 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
604 }
605}
606
593static void init_napi(struct adapter *adap) 607static void init_napi(struct adapter *adap)
594{ 608{
595 int i; 609 int i;
@@ -2754,6 +2768,42 @@ static void t3_adap_check_task(struct work_struct *work)
2754 spin_unlock_irq(&adapter->work_lock); 2768 spin_unlock_irq(&adapter->work_lock);
2755} 2769}
2756 2770
2771static void db_full_task(struct work_struct *work)
2772{
2773 struct adapter *adapter = container_of(work, struct adapter,
2774 db_full_task);
2775
2776 cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
2777}
2778
2779static void db_empty_task(struct work_struct *work)
2780{
2781 struct adapter *adapter = container_of(work, struct adapter,
2782 db_empty_task);
2783
2784 cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
2785}
2786
2787static void db_drop_task(struct work_struct *work)
2788{
2789 struct adapter *adapter = container_of(work, struct adapter,
2790 db_drop_task);
2791 unsigned long delay = 1000;
2792 unsigned short r;
2793
2794 cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
2795
2796 /*
2797 * Sleep a while before ringing the driver qset dbs.
2798 * The delay is between 1000-2023 usecs.
2799 */
2800 get_random_bytes(&r, 2);
2801 delay += r & 1023;
2802 set_current_state(TASK_UNINTERRUPTIBLE);
2803 schedule_timeout(usecs_to_jiffies(delay));
2804 ring_dbs(adapter);
2805}
2806
2757/* 2807/*
2758 * Processes external (PHY) interrupts in process context. 2808 * Processes external (PHY) interrupts in process context.
2759 */ 2809 */
@@ -3222,6 +3272,11 @@ static int __devinit init_one(struct pci_dev *pdev,
3222 INIT_LIST_HEAD(&adapter->adapter_list); 3272 INIT_LIST_HEAD(&adapter->adapter_list);
3223 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task); 3273 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3224 INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task); 3274 INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3275
3276 INIT_WORK(&adapter->db_full_task, db_full_task);
3277 INIT_WORK(&adapter->db_empty_task, db_empty_task);
3278 INIT_WORK(&adapter->db_drop_task, db_drop_task);
3279
3225 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task); 3280 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3226 3281
3227 for (i = 0; i < ai->nports0 + ai->nports1; ++i) { 3282 for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h
index 670aa62042da..929c298115ca 100644
--- a/drivers/net/cxgb3/cxgb3_offload.h
+++ b/drivers/net/cxgb3/cxgb3_offload.h
@@ -73,7 +73,10 @@ enum {
73 OFFLOAD_STATUS_UP, 73 OFFLOAD_STATUS_UP,
74 OFFLOAD_STATUS_DOWN, 74 OFFLOAD_STATUS_DOWN,
75 OFFLOAD_PORT_DOWN, 75 OFFLOAD_PORT_DOWN,
76 OFFLOAD_PORT_UP 76 OFFLOAD_PORT_UP,
77 OFFLOAD_DB_FULL,
78 OFFLOAD_DB_EMPTY,
79 OFFLOAD_DB_DROP
77}; 80};
78 81
79struct cxgb3_client { 82struct cxgb3_client {
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
index 1b5327b5a965..cb42353c9fdd 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
@@ -254,6 +254,22 @@
254#define V_LOPIODRBDROPERR(x) ((x) << S_LOPIODRBDROPERR) 254#define V_LOPIODRBDROPERR(x) ((x) << S_LOPIODRBDROPERR)
255#define F_LOPIODRBDROPERR V_LOPIODRBDROPERR(1U) 255#define F_LOPIODRBDROPERR V_LOPIODRBDROPERR(1U)
256 256
257#define S_HIPRIORITYDBFULL 7
258#define V_HIPRIORITYDBFULL(x) ((x) << S_HIPRIORITYDBFULL)
259#define F_HIPRIORITYDBFULL V_HIPRIORITYDBFULL(1U)
260
261#define S_HIPRIORITYDBEMPTY 6
262#define V_HIPRIORITYDBEMPTY(x) ((x) << S_HIPRIORITYDBEMPTY)
263#define F_HIPRIORITYDBEMPTY V_HIPRIORITYDBEMPTY(1U)
264
265#define S_LOPRIORITYDBFULL 5
266#define V_LOPRIORITYDBFULL(x) ((x) << S_LOPRIORITYDBFULL)
267#define F_LOPRIORITYDBFULL V_LOPRIORITYDBFULL(1U)
268
269#define S_LOPRIORITYDBEMPTY 4
270#define V_LOPRIORITYDBEMPTY(x) ((x) << S_LOPRIORITYDBEMPTY)
271#define F_LOPRIORITYDBEMPTY V_LOPRIORITYDBEMPTY(1U)
272
257#define S_RSPQDISABLED 3 273#define S_RSPQDISABLED 3
258#define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED) 274#define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED)
259#define F_RSPQDISABLED V_RSPQDISABLED(1U) 275#define F_RSPQDISABLED V_RSPQDISABLED(1U)
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 318a018ca7c5..9b434461c4f1 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -42,6 +42,7 @@
42#include "sge_defs.h" 42#include "sge_defs.h"
43#include "t3_cpl.h" 43#include "t3_cpl.h"
44#include "firmware_exports.h" 44#include "firmware_exports.h"
45#include "cxgb3_offload.h"
45 46
46#define USE_GTS 0 47#define USE_GTS 0
47 48
@@ -2833,8 +2834,13 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
2833 } 2834 }
2834 2835
2835 if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR)) 2836 if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
2836 CH_ALERT(adapter, "SGE dropped %s priority doorbell\n", 2837 queue_work(cxgb3_wq, &adapter->db_drop_task);
2837 status & F_HIPIODRBDROPERR ? "high" : "lo"); 2838
2839 if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
2840 queue_work(cxgb3_wq, &adapter->db_full_task);
2841
2842 if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
2843 queue_work(cxgb3_wq, &adapter->db_empty_task);
2838 2844
2839 t3_write_reg(adapter, A_SG_INT_CAUSE, status); 2845 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2840 if (status & SGE_FATALERR) 2846 if (status & SGE_FATALERR)
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 032cfe065570..c38fc717a0d1 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -1432,7 +1432,10 @@ static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1432 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \ 1432 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
1433 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \ 1433 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
1434 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \ 1434 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
1435 F_HIRCQPARITYERROR) 1435 F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \
1436 F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \
1437 F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \
1438 F_LOPIODRBDROPERR)
1436#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \ 1439#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1437 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \ 1440 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1438 F_NFASRCHFAIL) 1441 F_NFASRCHFAIL)