 drivers/scsi/snic/snic_fwint.h |  4 +-
 drivers/scsi/snic/snic_io.c    | 62 +++++++++++++++++++++++++++++++++++++---
 2 files changed, 59 insertions(+), 7 deletions(-)
diff --git a/drivers/scsi/snic/snic_fwint.h b/drivers/scsi/snic/snic_fwint.h
index 2cfaf2dc915f..c5f9e1917a8e 100644
--- a/drivers/scsi/snic/snic_fwint.h
+++ b/drivers/scsi/snic/snic_fwint.h
@@ -414,7 +414,7 @@ enum snic_ev_type {
 /* Payload 88 bytes = 128 - 24 - 16 */
 #define SNIC_HOST_REQ_PAYLOAD	((int)(SNIC_HOST_REQ_LEN - \
 				sizeof(struct snic_io_hdr) - \
-				(2 * sizeof(u64))))
+				(2 * sizeof(u64)) - sizeof(ulong)))
 
 /*
  * snic_host_req: host -> firmware request
@@ -448,6 +448,8 @@ struct snic_host_req {
 		/* hba reset */
 		struct snic_hba_reset	reset;
 	} u;
+
+	ulong req_pa;
 }; /* end of snic_host_req structure */
 
 
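
For context, the macro change above shrinks the payload to reserve space in the fixed-size request for the new req_pa member. Below is a standalone sketch of the arithmetic, not part of the patch, assuming SNIC_HOST_REQ_LEN is 128 (per the "128 - 24 - 16" comment), a 24-byte snic_io_hdr, and a 64-bit build where sizeof(ulong) is 8:

/*
 * Standalone sketch (not part of the patch): payload arithmetic for
 * SNIC_HOST_REQ_PAYLOAD under the assumptions stated above.
 */
#include <stdio.h>

int main(void)
{
	int req_len = 128;	/* SNIC_HOST_REQ_LEN (assumed)         */
	int io_hdr  = 24;	/* sizeof(struct snic_io_hdr)          */
	int two_u64 = 2 * 8;	/* 2 * sizeof(u64)                     */
	int req_pa  = 8;	/* sizeof(ulong), newly subtracted     */

	printf("old payload: %d\n", req_len - io_hdr - two_u64);          /* 88 */
	printf("new payload: %d\n", req_len - io_hdr - two_u64 - req_pa); /* 80 */
	return 0;
}

On such a build the payload drops from 88 to 80 bytes; note the "88 bytes" comment itself is left untouched by the patch.
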
diff --git a/drivers/scsi/snic/snic_io.c b/drivers/scsi/snic/snic_io.c
index 993db7de4e4b..8e69548395b9 100644
--- a/drivers/scsi/snic/snic_io.c
+++ b/drivers/scsi/snic/snic_io.c
@@ -48,7 +48,7 @@ snic_wq_cmpl_frame_send(struct vnic_wq *wq,
 	SNIC_TRC(snic->shost->host_no, 0, 0,
 		 ((ulong)(buf->os_buf) - sizeof(struct snic_req_info)), 0, 0,
 		 0);
-	pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE);
+
 	buf->os_buf = NULL;
 }
 
@@ -137,13 +137,36 @@ snic_select_wq(struct snic *snic)
 	return 0;
 }
 
+static int
+snic_wqdesc_avail(struct snic *snic, int q_num, int req_type)
+{
+	int nr_wqdesc = snic->config.wq_enet_desc_count;
+
+	if (q_num > 0) {
+		/*
+		 * Multi Queue case, additional care is required.
+		 * Per WQ active requests need to be maintained.
+		 */
+		SNIC_HOST_INFO(snic->shost, "desc_avail: Multi Queue case.\n");
+		SNIC_BUG_ON(q_num > 0);
+
+		return -1;
+	}
+
+	nr_wqdesc -= atomic64_read(&snic->s_stats.fw.actv_reqs);
+
+	return ((req_type == SNIC_REQ_HBA_RESET) ? nr_wqdesc : nr_wqdesc - 1);
+}
+
 int
 snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
 {
 	dma_addr_t pa = 0;
 	unsigned long flags;
 	struct snic_fw_stats *fwstats = &snic->s_stats.fw;
+	struct snic_host_req *req = (struct snic_host_req *) os_buf;
 	long act_reqs;
+	long desc_avail = 0;
 	int q_num = 0;
 
 	snic_print_desc(__func__, os_buf, len);
@@ -156,11 +179,15 @@ snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
 		return -ENOMEM;
 	}
 
+	req->req_pa = (ulong)pa;
+
 	q_num = snic_select_wq(snic);
 
 	spin_lock_irqsave(&snic->wq_lock[q_num], flags);
-	if (!svnic_wq_desc_avail(snic->wq)) {
+	desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type);
+	if (desc_avail <= 0) {
 		pci_unmap_single(snic->pdev, pa, len, PCI_DMA_TODEVICE);
+		req->req_pa = 0;
 		spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
 		atomic64_inc(&snic->s_stats.misc.wq_alloc_fail);
 		SNIC_DBG("host = %d, WQ is Full\n", snic->shost->host_no);
@@ -169,10 +196,13 @@ snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
 	}
 
 	snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1);
+	/*
+	 * Update stats
+	 * note: when multi queue enabled, fw actv_reqs should be per queue.
+	 */
+	act_reqs = atomic64_inc_return(&fwstats->actv_reqs);
 	spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
 
-	/* Update stats */
-	act_reqs = atomic64_inc_return(&fwstats->actv_reqs);
 	if (act_reqs > atomic64_read(&fwstats->max_actv_reqs))
 		atomic64_set(&fwstats->max_actv_reqs, act_reqs);
 
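
The new snic_wqdesc_avail() above gates queueing on the firmware's active-request count rather than on svnic_wq_desc_avail(), and it keeps one descriptor in reserve for every request type except HBA reset, so a reset can still be queued when the ring is otherwise full. A standalone model of that reservation policy, not part of the patch (names and counts are made up):

/*
 * Standalone model (not part of the patch) of the reservation policy in
 * snic_wqdesc_avail(): all request types except HBA reset must leave one
 * descriptor free. Ring size and in-flight counts are made up.
 */
#include <stdio.h>

enum { REQ_ICMND, REQ_HBA_RESET };

static long wqdesc_avail(long ring_size, long active, int req_type)
{
	long nr = ring_size - active;

	/* reserve the last descriptor for an HBA reset */
	return (req_type == REQ_HBA_RESET) ? nr : nr - 1;
}

int main(void)
{
	/* ring of 64 descriptors, 63 requests already in flight */
	printf("icmnd: %ld\n", wqdesc_avail(64, 63, REQ_ICMND));     /* 0: refused */
	printf("reset: %ld\n", wqdesc_avail(64, 63, REQ_HBA_RESET)); /* 1: allowed */
	return 0;
}
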
@@ -318,11 +348,31 @@ snic_req_free(struct snic *snic, struct snic_req_info *rqi)
318 "Req_free:rqi %p:ioreq %p:abt %p:dr %p\n", 348 "Req_free:rqi %p:ioreq %p:abt %p:dr %p\n",
319 rqi, rqi->req, rqi->abort_req, rqi->dr_req); 349 rqi, rqi->req, rqi->abort_req, rqi->dr_req);
320 350
321 if (rqi->abort_req) 351 if (rqi->abort_req) {
352 if (rqi->abort_req->req_pa)
353 pci_unmap_single(snic->pdev,
354 rqi->abort_req->req_pa,
355 sizeof(struct snic_host_req),
356 PCI_DMA_TODEVICE);
357
322 mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]); 358 mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
359 }
360
361 if (rqi->dr_req) {
362 if (rqi->dr_req->req_pa)
363 pci_unmap_single(snic->pdev,
364 rqi->dr_req->req_pa,
365 sizeof(struct snic_host_req),
366 PCI_DMA_TODEVICE);
323 367
324 if (rqi->dr_req)
325 mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]); 368 mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
369 }
370
371 if (rqi->req->req_pa)
372 pci_unmap_single(snic->pdev,
373 rqi->req->req_pa,
374 rqi->req_len,
375 PCI_DMA_TODEVICE);
326 376
327 mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]); 377 mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]);
328} 378}
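
Taken together, the snic_io.c hunks move DMA unmapping out of the WQ completion handler and into snic_req_free(), with the bus address carried in the request itself so each mapping is released exactly once. A self-contained sketch of that lifecycle, not part of the patch; dev_map() and dev_unmap() are hypothetical stand-ins for pci_map_single() and pci_unmap_single():

/*
 * Standalone sketch (not part of the patch) of the map/stash/unmap
 * lifecycle the patch establishes. dev_map()/dev_unmap() are hypothetical
 * stand-ins for the PCI DMA API.
 */
#include <stddef.h>
#include <stdint.h>

struct req {
	unsigned long req_pa;	/* mirrors snic_host_req::req_pa */
	char payload[80];
};

static unsigned long dev_map(void *buf, size_t len)
{
	(void)len;
	return (unsigned long)(uintptr_t)buf;	/* fake bus address */
}

static void dev_unmap(unsigned long pa, size_t len)
{
	(void)pa; (void)len;
}

/* queue path: map, then stash the bus address inside the request;
 * on a queueing failure the caller would dev_unmap() and clear req_pa,
 * as snic_queue_wq_desc() now does */
static void queue_req(struct req *r)
{
	r->req_pa = dev_map(r, sizeof(*r));
}

/* free path: unmap only if the request is still mapped, then clear
 * req_pa so a second free cannot unmap twice */
static void free_req(struct req *r)
{
	if (r->req_pa) {
		dev_unmap(r->req_pa, sizeof(*r));
		r->req_pa = 0;
	}
}

int main(void)
{
	struct req r = { 0 };

	queue_req(&r);
	free_req(&r);
	return 0;
}
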