-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_wr.h  |  17
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch.c     |  79
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch.h     |   2
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_qp.c  |   9
-rw-r--r--  drivers/net/cxgb3/adapter.h            |   5
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c         |  57
-rw-r--r--  drivers/net/cxgb3/cxgb3_offload.h      |   5
-rw-r--r--  drivers/net/cxgb3/regs.h               |  16
-rw-r--r--  drivers/net/cxgb3/sge.c                |  10
-rw-r--r--  drivers/net/cxgb3/t3_hw.c              |   5
10 files changed, 192 insertions, 13 deletions
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index a197a5b7ac7f..15073b2da1c5 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -730,7 +730,22 @@ struct t3_cq {
 
 static inline void cxio_set_wq_in_error(struct t3_wq *wq)
 {
-	wq->queue->wq_in_err.err = 1;
+	wq->queue->wq_in_err.err |= 1;
+}
+
+static inline void cxio_disable_wq_db(struct t3_wq *wq)
+{
+	wq->queue->wq_in_err.err |= 2;
+}
+
+static inline void cxio_enable_wq_db(struct t3_wq *wq)
+{
+	wq->queue->wq_in_err.err &= ~2;
+}
+
+static inline int cxio_wq_db_enabled(struct t3_wq *wq)
+{
+	return !(wq->queue->wq_in_err.err & 2);
 }
 
 static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)
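These helpers overload the queue's wq_in_err word as a small bit mask: bit 0 still marks the work queue as in error, while bit 1 gates user doorbell rings. Below is a minimal userspace sketch of that flag scheme; the struct and helper names are simplified stand-ins for the driver's t3_wq and cxio_* inlines, not the real types.

#include <assert.h>
#include <stdio.h>

/* Simplified stand-in for the driver's queue state word:
 * bit 0 marks the WQ as in error, bit 1 disables doorbell rings. */
struct fake_wq { unsigned int err; };

static void set_in_error(struct fake_wq *wq)     { wq->err |= 1; }
static void disable_db(struct fake_wq *wq)       { wq->err |= 2; }
static void enable_db(struct fake_wq *wq)        { wq->err &= ~2; }
static int  db_enabled(const struct fake_wq *wq) { return !(wq->err & 2); }

int main(void)
{
	struct fake_wq wq = { 0 };

	disable_db(&wq);		/* DB-full event: stop ringing doorbells */
	assert(!db_enabled(&wq));

	set_in_error(&wq);		/* error flag is independent of DB gating */
	enable_db(&wq);			/* DB-empty event: resume ringing */
	assert(db_enabled(&wq) && (wq.err & 1));

	printf("err flags = 0x%x\n", wq.err);
	return 0;
}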
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index b0ea0105ddf6..d992543890ee 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -65,6 +65,46 @@ struct cxgb3_client t3c_client = {
 static LIST_HEAD(dev_list);
 static DEFINE_MUTEX(dev_mutex);
 
+static int disable_qp_db(int id, void *p, void *data)
+{
+	struct iwch_qp *qhp = p;
+
+	cxio_disable_wq_db(&qhp->wq);
+	return 0;
+}
+
+static int enable_qp_db(int id, void *p, void *data)
+{
+	struct iwch_qp *qhp = p;
+
+	if (data)
+		ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell, qhp->wq.qpid);
+	cxio_enable_wq_db(&qhp->wq);
+	return 0;
+}
+
+static void disable_dbs(struct iwch_dev *rnicp)
+{
+	spin_lock_irq(&rnicp->lock);
+	idr_for_each(&rnicp->qpidr, disable_qp_db, NULL);
+	spin_unlock_irq(&rnicp->lock);
+}
+
+static void enable_dbs(struct iwch_dev *rnicp, int ring_db)
+{
+	spin_lock_irq(&rnicp->lock);
+	idr_for_each(&rnicp->qpidr, enable_qp_db,
+		     (void *)(unsigned long)ring_db);
+	spin_unlock_irq(&rnicp->lock);
+}
+
+static void iwch_db_drop_task(struct work_struct *work)
+{
+	struct iwch_dev *rnicp = container_of(work, struct iwch_dev,
+					      db_drop_task.work);
+	enable_dbs(rnicp, 1);
+}
+
 static void rnic_init(struct iwch_dev *rnicp)
 {
 	PDBG("%s iwch_dev %p\n", __func__, rnicp);
@@ -72,6 +112,7 @@ static void rnic_init(struct iwch_dev *rnicp)
 	idr_init(&rnicp->qpidr);
 	idr_init(&rnicp->mmidr);
 	spin_lock_init(&rnicp->lock);
+	INIT_DELAYED_WORK(&rnicp->db_drop_task, iwch_db_drop_task);
 
 	rnicp->attr.max_qps = T3_MAX_NUM_QP - 32;
 	rnicp->attr.max_wrs = T3_MAX_QP_DEPTH;
@@ -147,6 +188,7 @@ static void close_rnic_dev(struct t3cdev *tdev)
 	mutex_lock(&dev_mutex);
 	list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
 		if (dev->rdev.t3cdev_p == tdev) {
+			cancel_delayed_work_sync(&dev->db_drop_task);
 			list_del(&dev->entry);
 			iwch_unregister_device(dev);
 			cxio_rdev_close(&dev->rdev);
@@ -165,7 +207,8 @@ static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
 	struct cxio_rdev *rdev = tdev->ulp;
 	struct iwch_dev *rnicp;
 	struct ib_event event;
 	u32 portnum = port_id + 1;
+	int dispatch = 0;
 
 	if (!rdev)
 		return;
@@ -174,21 +217,49 @@ static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
 	case OFFLOAD_STATUS_DOWN: {
 		rdev->flags = CXIO_ERROR_FATAL;
 		event.event = IB_EVENT_DEVICE_FATAL;
+		dispatch = 1;
 		break;
 	}
 	case OFFLOAD_PORT_DOWN: {
 		event.event = IB_EVENT_PORT_ERR;
+		dispatch = 1;
 		break;
 	}
 	case OFFLOAD_PORT_UP: {
 		event.event = IB_EVENT_PORT_ACTIVE;
+		dispatch = 1;
+		break;
+	}
+	case OFFLOAD_DB_FULL: {
+		disable_dbs(rnicp);
+		break;
+	}
+	case OFFLOAD_DB_EMPTY: {
+		enable_dbs(rnicp, 1);
+		break;
+	}
+	case OFFLOAD_DB_DROP: {
+		unsigned long delay = 1000;
+		unsigned short r;
+
+		disable_dbs(rnicp);
+		get_random_bytes(&r, 2);
+		delay += r & 1023;
+
+		/*
+		 * delay is between 1000-2023 usecs.
+		 */
+		schedule_delayed_work(&rnicp->db_drop_task,
+				      usecs_to_jiffies(delay));
 		break;
 	}
 	}
 
-	event.device = &rnicp->ibdev;
-	event.element.port_num = portnum;
-	ib_dispatch_event(&event);
+	if (dispatch) {
+		event.device = &rnicp->ibdev;
+		event.element.port_num = portnum;
+		ib_dispatch_event(&event);
+	}
 
 	return;
 }
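On OFFLOAD_DB_DROP the handler disables doorbells immediately and re-enables them from delayed work after a randomized backoff of 1000 plus the low 10 bits of two random bytes, i.e. 1000-2023 usecs. A standalone sketch of that delay arithmetic follows; rand() stands in for the kernel's get_random_bytes(), so this only illustrates the range, not the driver's entropy source.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Mirror the patch's backoff: base of 1000 usecs plus a random 0-1023 usecs. */
static unsigned long db_drop_delay_usecs(void)
{
	unsigned short r = (unsigned short)rand();	/* stand-in for get_random_bytes() */

	return 1000 + (r & 1023);			/* always in 1000..2023 */
}

int main(void)
{
	srand((unsigned)time(NULL));

	for (int i = 0; i < 5; i++)
		printf("delay = %lu usecs\n", db_drop_delay_usecs());
	return 0;
}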
diff --git a/drivers/infiniband/hw/cxgb3/iwch.h b/drivers/infiniband/hw/cxgb3/iwch.h
index 84735506333f..a1c44578e039 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.h
+++ b/drivers/infiniband/hw/cxgb3/iwch.h
@@ -36,6 +36,7 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/idr.h>
+#include <linux/workqueue.h>
 
 #include <rdma/ib_verbs.h>
 
@@ -110,6 +111,7 @@ struct iwch_dev {
 	struct idr mmidr;
 	spinlock_t lock;
 	struct list_head entry;
+	struct delayed_work db_drop_task;
 };
 
 static inline struct iwch_dev *to_iwch_dev(struct ib_device *ibdev)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 3eb8cecf81d7..b4d893de3650 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -452,7 +452,8 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		++(qhp->wq.sq_wptr);
 	}
 	spin_unlock_irqrestore(&qhp->lock, flag);
-	ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
+	if (cxio_wq_db_enabled(&qhp->wq))
+		ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
 
 out:
 	if (err)
@@ -514,7 +515,8 @@ int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		num_wrs--;
 	}
 	spin_unlock_irqrestore(&qhp->lock, flag);
-	ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
+	if (cxio_wq_db_enabled(&qhp->wq))
+		ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
 
 out:
 	if (err)
@@ -597,7 +599,8 @@ int iwch_bind_mw(struct ib_qp *qp,
 	++(qhp->wq.sq_wptr);
 	spin_unlock_irqrestore(&qhp->lock, flag);
 
-	ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
+	if (cxio_wq_db_enabled(&qhp->wq))
+		ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
 
 	return err;
 }
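The post paths keep accepting work requests while doorbells are disabled: the write pointer still advances under the lock and only the final ring is skipped, so queued work can be picked up later by a single catch-up ring per QP. A simplified userspace sketch of that "post now, ring only if enabled" pattern is below; the struct and ring_doorbell() here are illustrative stand-ins, not the driver's versions.

#include <stdio.h>

struct fake_wq {
	unsigned int err;	/* bit 1 set: doorbells disabled */
	unsigned int sq_wptr;	/* software write pointer */
};

static int db_enabled(const struct fake_wq *wq) { return !(wq->err & 2); }

static void ring_doorbell(struct fake_wq *wq)
{
	printf("doorbell rung, wptr=%u\n", wq->sq_wptr);
}

/* Queue one work request; ring the doorbell only when allowed. */
static void post_send(struct fake_wq *wq)
{
	wq->sq_wptr++;			/* the WR is written and counted regardless */
	if (db_enabled(wq))
		ring_doorbell(wq);	/* skipped while the DB FIFO is full */
}

int main(void)
{
	struct fake_wq wq = { 0, 0 };

	post_send(&wq);		/* rings */
	wq.err |= 2;		/* DB-full notification arrives */
	post_send(&wq);		/* queued, no ring */
	wq.err &= ~2;		/* DB-empty / drop recovery */
	ring_doorbell(&wq);	/* catch-up ring covers the queued work */
	return 0;
}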
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 3e8618b4efbc..4cd7f420766a 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -264,6 +264,10 @@ struct adapter {
 	struct work_struct fatal_error_handler_task;
 	struct work_struct link_fault_handler_task;
 
+	struct work_struct db_full_task;
+	struct work_struct db_empty_task;
+	struct work_struct db_drop_task;
+
 	struct dentry *debugfs_root;
 
 	struct mutex mdio_lock;
@@ -335,6 +339,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
 int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
 		unsigned char *data);
 irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
+extern struct workqueue_struct *cxgb3_wq;
 
 int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size);
 
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 89bec9c3c141..37945fce7fa5 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -45,6 +45,7 @@
 #include <linux/firmware.h>
 #include <linux/log2.h>
 #include <linux/stringify.h>
+#include <linux/sched.h>
 #include <asm/uaccess.h>
 
 #include "common.h"
@@ -140,7 +141,7 @@ MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
  * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
  * for our work to complete.  Get our own work queue to solve this.
  */
-static struct workqueue_struct *cxgb3_wq;
+struct workqueue_struct *cxgb3_wq;
 
 /**
  *	link_report - show link status and link speed/duplex
@@ -590,6 +591,19 @@ static void setup_rss(struct adapter *adap)
 		 V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
 }
 
+static void ring_dbs(struct adapter *adap)
+{
+	int i, j;
+
+	for (i = 0; i < SGE_QSETS; i++) {
+		struct sge_qset *qs = &adap->sge.qs[i];
+
+		if (qs->adap)
+			for (j = 0; j < SGE_TXQ_PER_SET; j++)
+				t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
+	}
+}
+
 static void init_napi(struct adapter *adap)
 {
 	int i;
@@ -2754,6 +2768,42 @@ static void t3_adap_check_task(struct work_struct *work)
 	spin_unlock_irq(&adapter->work_lock);
 }
 
+static void db_full_task(struct work_struct *work)
+{
+	struct adapter *adapter = container_of(work, struct adapter,
+					       db_full_task);
+
+	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
+}
+
+static void db_empty_task(struct work_struct *work)
+{
+	struct adapter *adapter = container_of(work, struct adapter,
+					       db_empty_task);
+
+	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
+}
+
+static void db_drop_task(struct work_struct *work)
+{
+	struct adapter *adapter = container_of(work, struct adapter,
+					       db_drop_task);
+	unsigned long delay = 1000;
+	unsigned short r;
+
+	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
+
+	/*
+	 * Sleep a while before ringing the driver qset dbs.
+	 * The delay is between 1000-2023 usecs.
+	 */
+	get_random_bytes(&r, 2);
+	delay += r & 1023;
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	schedule_timeout(usecs_to_jiffies(delay));
+	ring_dbs(adapter);
+}
+
 /*
  * Processes external (PHY) interrupts in process context.
  */
@@ -3222,6 +3272,11 @@ static int __devinit init_one(struct pci_dev *pdev,
 	INIT_LIST_HEAD(&adapter->adapter_list);
 	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
 	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
+
+	INIT_WORK(&adapter->db_full_task, db_full_task);
+	INIT_WORK(&adapter->db_empty_task, db_empty_task);
+	INIT_WORK(&adapter->db_drop_task, db_drop_task);
+
 	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
 
 	for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h
index 670aa62042da..929c298115ca 100644
--- a/drivers/net/cxgb3/cxgb3_offload.h
+++ b/drivers/net/cxgb3/cxgb3_offload.h
@@ -73,7 +73,10 @@ enum {
 	OFFLOAD_STATUS_UP,
 	OFFLOAD_STATUS_DOWN,
 	OFFLOAD_PORT_DOWN,
-	OFFLOAD_PORT_UP
+	OFFLOAD_PORT_UP,
+	OFFLOAD_DB_FULL,
+	OFFLOAD_DB_EMPTY,
+	OFFLOAD_DB_DROP
 };
 
 struct cxgb3_client {
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
index 1b5327b5a965..cb42353c9fdd 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
@@ -254,6 +254,22 @@
 #define V_LOPIODRBDROPERR(x) ((x) << S_LOPIODRBDROPERR)
 #define F_LOPIODRBDROPERR V_LOPIODRBDROPERR(1U)
 
+#define S_HIPRIORITYDBFULL 7
+#define V_HIPRIORITYDBFULL(x) ((x) << S_HIPRIORITYDBFULL)
+#define F_HIPRIORITYDBFULL V_HIPRIORITYDBFULL(1U)
+
+#define S_HIPRIORITYDBEMPTY 6
+#define V_HIPRIORITYDBEMPTY(x) ((x) << S_HIPRIORITYDBEMPTY)
+#define F_HIPRIORITYDBEMPTY V_HIPRIORITYDBEMPTY(1U)
+
+#define S_LOPRIORITYDBFULL 5
+#define V_LOPRIORITYDBFULL(x) ((x) << S_LOPRIORITYDBFULL)
+#define F_LOPRIORITYDBFULL V_LOPRIORITYDBFULL(1U)
+
+#define S_LOPRIORITYDBEMPTY 4
+#define V_LOPRIORITYDBEMPTY(x) ((x) << S_LOPRIORITYDBEMPTY)
+#define F_LOPRIORITYDBEMPTY V_LOPRIORITYDBEMPTY(1U)
+
 #define S_RSPQDISABLED 3
 #define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED)
 #define F_RSPQDISABLED V_RSPQDISABLED(1U)
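The new definitions follow the driver's usual register-macro convention: S_<NAME> is the bit position, V_<NAME>(x) shifts a value into place, and F_<NAME> is the single-bit flag. The following small self-contained check reproduces two of the added macros (values copied from the hunk above) purely to illustrate the convention; it is not part of the patch.

#include <assert.h>
#include <stdio.h>

/* Same S_/V_/F_ pattern as regs.h, duplicated here for illustration only. */
#define S_HIPRIORITYDBFULL 7
#define V_HIPRIORITYDBFULL(x) ((x) << S_HIPRIORITYDBFULL)
#define F_HIPRIORITYDBFULL V_HIPRIORITYDBFULL(1U)

#define S_LOPRIORITYDBEMPTY 4
#define V_LOPRIORITYDBEMPTY(x) ((x) << S_LOPRIORITYDBEMPTY)
#define F_LOPRIORITYDBEMPTY V_LOPRIORITYDBEMPTY(1U)

int main(void)
{
	unsigned int status = F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY;

	assert(F_HIPRIORITYDBFULL == 0x80);	/* bit 7 */
	assert(F_LOPRIORITYDBEMPTY == 0x10);	/* bit 4 */
	printf("status = 0x%x\n", status);	/* prints 0x90 */
	return 0;
}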
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 318a018ca7c5..9b434461c4f1 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -42,6 +42,7 @@
 #include "sge_defs.h"
 #include "t3_cpl.h"
 #include "firmware_exports.h"
+#include "cxgb3_offload.h"
 
 #define USE_GTS 0
 
@@ -2833,8 +2834,13 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
 	}
 
 	if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
-		CH_ALERT(adapter, "SGE dropped %s priority doorbell\n",
-			 status & F_HIPIODRBDROPERR ? "high" : "lo");
+		queue_work(cxgb3_wq, &adapter->db_drop_task);
+
+	if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
+		queue_work(cxgb3_wq, &adapter->db_full_task);
+
+	if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
+		queue_work(cxgb3_wq, &adapter->db_empty_task);
 
 	t3_write_reg(adapter, A_SG_INT_CAUSE, status);
 	if (status & SGE_FATALERR)
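With this change the SGE error interrupt handler turns three classes of doorbell status bits into deferred work items instead of only logging a dropped doorbell. A userspace sketch of that dispatch shape follows; the bit values are made up for illustration and printf() stands in for queue_work() on cxgb3_wq.

#include <stdio.h>

/* Illustrative bit values only; the real masks come from regs.h. */
#define DB_DROP		0x1
#define DB_FULL		0x2
#define DB_EMPTY	0x4

static void dispatch_db_status(unsigned int status)
{
	/* Each match defers the heavy lifting to process context,
	 * as the driver does with queue_work() on its own workqueue. */
	if (status & DB_DROP)
		printf("queue db_drop_task\n");
	if (status & DB_FULL)
		printf("queue db_full_task\n");
	if (status & DB_EMPTY)
		printf("queue db_empty_task\n");
}

int main(void)
{
	dispatch_db_status(DB_FULL);
	dispatch_db_status(DB_DROP | DB_EMPTY);
	return 0;
}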
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 032cfe065570..c38fc717a0d1 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -1432,7 +1432,10 @@ static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
 		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
 		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
 		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
-		       F_HIRCQPARITYERROR)
+		       F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \
+		       F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \
+		       F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \
+		       F_LOPIODRBDROPERR)
 #define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
 		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
 		       F_NFASRCHFAIL)