author     James.Smart@Emulex.Com <James.Smart@Emulex.Com>  2005-11-29 16:32:13 -0500
committer  James Bottomley <jejb@mulgrave.(none)>  2005-12-13 20:34:14 -0500
commit     875fbdfe9b1b4c8f12622a8d8d81428ff0984875 (patch)
tree       63c787cb9a7d4a92eaedcc9f8a89a00d5d8f6603
parent     5cc36b3cd0e3610ad7c7e2514160998276798fc0 (diff)
[SCSI] lpfc 8.1.1 : Add polled-mode support
- Add functionality to run in polled mode only. Includes a run-time attribute
  to enable the mode.
- Enable runtime-writable HBA settings for the coalescing and delay parameters.

Customers have requested a mode in the driver to run strictly polled. This is
generally to support an environment where the server is extremely loaded and is
looking to reclaim some cpu cycles from adapter interrupt handling.

This patch adds a new "poll" attribute, with the following behavior:

if value is 0 (default):
  The driver uses the normal method for i/o completion: the firmware feature of
  interrupt coalescing. The firmware allows a minimum number of i/o completions
  before an interrupt, or a maximum time delay between interrupts. By default,
  the driver sets these to no delay (disabled) and 1 i/o - meaning coalescing is
  disabled. Attributes were provided to change the coalescing values, but they
  were module-load time only and global across all adapters. This patch makes
  them writable on a per-adapter basis.

if value is 1:
  Interrupts are left enabled, expecting that the user has tuned the interrupt
  coalescing values. With this setting, the driver will attempt to service
  completed i/o whenever new i/o is submitted to the adapter. If the coalescing
  values are large and the i/o generation rate steady, an interrupt will be
  avoided by servicing completed i/o before the coalescing thresholds kick in.
  However, if the i/o completion load is high enough or i/o generation slow,
  the coalescing values will ensure that completed i/o is serviced in a timely
  fashion.

if value is 3:
  Turns off FCP i/o interrupts altogether. The coalescing values now have no
  effect. A new attribute "poll_tmo" (default 10ms) sets the polling interval
  for i/o completion. With this setting, the driver will attempt to service
  completed i/o and restart the interval timer whenever new i/o is submitted.
  This behavior allows completed i/o to be serviced sooner than the interval
  timer would, but ensures that if no i/o is being issued, the interval timer
  will kick in to service the outstanding i/o.

Signed-off-by: James Smart <James.Smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
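For reference, a minimal user-space sketch (not part of the patch) of how the
two flag bits added to lpfc.h combine into the "poll" values described above.
The enum values mirror the patch; the two helper functions are hypothetical and
exist only for illustration.

/*
 * Sketch only: maps cfg_poll values 0, 1 and 3 onto the two behaviors
 * the commit message describes (poll on submit, back-stop poll timer).
 */
#include <stdio.h>

enum lpfc_polling_flags {
	ENABLE_FCP_RING_POLLING = 0x1,	/* service FCP completions when new i/o is queued */
	DISABLE_FCP_RING_INT    = 0x2	/* additionally mask FCP ring interrupts */
};

/* poll=1 or poll=3: check the FCP response ring on every submit */
static int poll_on_submit(unsigned int cfg_poll)
{
	return (cfg_poll & ENABLE_FCP_RING_POLLING) != 0;
}

/* poll=3 only: interrupts are off, so the poll_tmo timer must back-stop completions */
static int needs_poll_timer(unsigned int cfg_poll)
{
	return (cfg_poll & ENABLE_FCP_RING_POLLING) &&
	       (cfg_poll & DISABLE_FCP_RING_INT);
}

int main(void)
{
	unsigned int modes[] = { 0x0, 0x1, 0x3 };
	int i;

	for (i = 0; i < 3; i++)
		printf("poll=%#x: poll on submit=%d, poll timer=%d\n",
		       modes[i], poll_on_submit(modes[i]),
		       needs_poll_timer(modes[i]));
	return 0;
}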
-rw-r--r--  drivers/scsi/lpfc/lpfc.h       |  10
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c  |  92
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h  |   3
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c  |  16
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c  |  72
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c   | 217
6 files changed, 377 insertions, 33 deletions
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index dc73a2f8f12c..1f3873ae9d68 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -45,6 +45,11 @@ struct lpfc_sli2_slim;
 
 #define MAX_HBAEVT	32
 
+enum lpfc_polling_flags {
+	ENABLE_FCP_RING_POLLING = 0x1,
+	DISABLE_FCP_RING_INT = 0x2
+};
+
 /* Provide DMA memory definitions the driver uses per port instance. */
 struct lpfc_dmabuf {
 	struct list_head list;
@@ -287,6 +292,8 @@ struct lpfc_hba {
 	uint32_t cfg_fcp_bind_method;
 	uint32_t cfg_discovery_threads;
 	uint32_t cfg_max_luns;
+	uint32_t cfg_poll;
+	uint32_t cfg_poll_tmo;
 	uint32_t cfg_sg_seg_cnt;
 	uint32_t cfg_sg_dma_buf_size;
 
@@ -338,7 +345,9 @@ struct lpfc_hba {
 #define VPD_PORT 0x8 /* valid vpd port data */
 #define VPD_MASK 0xf /* mask for any vpd data */
 
+	struct timer_list fcp_poll_timer;
 	struct timer_list els_tmofunc;
+
 	/*
 	 * stat counters
 	 */
@@ -349,6 +358,7 @@ struct lpfc_hba {
 	struct lpfc_sysfs_mbox sysfs_mbox;
 
 	/* fastpath list. */
+	spinlock_t scsi_buf_list_lock;
 	struct list_head lpfc_scsi_buf_list;
 	uint32_t total_scsi_bufs;
 	struct list_head lpfc_iocb_list;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 89e8222bc7cc..5625a8c2a8fd 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -278,6 +278,71 @@ lpfc_board_online_store(struct class_device *cdev, const char *buf,
 	return -EIO;
 }
 
+static ssize_t
+lpfc_poll_show(struct class_device *cdev, char *buf)
+{
+	struct Scsi_Host *host = class_to_shost(cdev);
+	struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+
+	return snprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
+}
+
+static ssize_t
+lpfc_poll_store(struct class_device *cdev, const char *buf,
+		size_t count)
+{
+	struct Scsi_Host *host = class_to_shost(cdev);
+	struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
+	uint32_t creg_val;
+	uint32_t old_val;
+	int val=0;
+
+	if (!isdigit(buf[0]))
+		return -EINVAL;
+
+	if (sscanf(buf, "%i", &val) != 1)
+		return -EINVAL;
+
+	if ((val & 0x3) != val)
+		return -EINVAL;
+
+	spin_lock_irq(phba->host->host_lock);
+
+	old_val = phba->cfg_poll;
+
+	if (val & ENABLE_FCP_RING_POLLING) {
+		if ((val & DISABLE_FCP_RING_INT) &&
+		    !(old_val & DISABLE_FCP_RING_INT)) {
+			creg_val = readl(phba->HCregaddr);
+			creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
+			writel(creg_val, phba->HCregaddr);
+			readl(phba->HCregaddr); /* flush */
+
+			lpfc_poll_start_timer(phba);
+		}
+	} else if (val != 0x0) {
+		spin_unlock_irq(phba->host->host_lock);
+		return -EINVAL;
+	}
+
+	if (!(val & DISABLE_FCP_RING_INT) &&
+	    (old_val & DISABLE_FCP_RING_INT))
+	{
+		spin_unlock_irq(phba->host->host_lock);
+		del_timer(&phba->fcp_poll_timer);
+		spin_lock_irq(phba->host->host_lock);
+		creg_val = readl(phba->HCregaddr);
+		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
+		writel(creg_val, phba->HCregaddr);
+		readl(phba->HCregaddr); /* flush */
+	}
+
+	phba->cfg_poll = val;
+
+	spin_unlock_irq(phba->host->host_lock);
+
+	return strlen(buf);
+}
 
 #define lpfc_param_show(attr)	\
 static ssize_t \
@@ -416,6 +481,15 @@ static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show,
 static CLASS_DEVICE_ATTR(board_online, S_IRUGO | S_IWUSR,
 			 lpfc_board_online_show, lpfc_board_online_store);
 
+static int lpfc_poll = 0;
+module_param(lpfc_poll, int, 0);
+MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
+		" 0 - none,"
+		" 1 - poll with interrupts enabled"
+		" 3 - poll and disable FCP ring interrupts");
+
+static CLASS_DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
+		lpfc_poll_show, lpfc_poll_store);
 
 /*
 # lpfc_log_verbose: Only turn this flag on if you are willing to risk being
@@ -523,10 +597,10 @@ LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
 # is 0. Default value of cr_count is 1. The cr_count feature is disabled if
 # cr_delay is set to 0.
 */
-LPFC_ATTR(cr_delay, 0, 0, 63, "A count of milliseconds after which an"
+LPFC_ATTR_RW(cr_delay, 0, 0, 63, "A count of milliseconds after which an"
 		"interrupt response is generated");
 
-LPFC_ATTR(cr_count, 1, 1, 255, "A count of I/O completions after which an"
+LPFC_ATTR_RW(cr_count, 1, 1, 255, "A count of I/O completions after which an"
 		"interrupt response is generated");
 
 /*
@@ -553,6 +627,13 @@ LPFC_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands"
 LPFC_ATTR_R(max_luns, 256, 1, 32768,
 	    "Maximum number of LUNs per target driver will support");
 
+/*
+# lpfc_poll_tmo: .Milliseconds driver will wait between polling FCP ring.
+# Value range is [1,255], default value is 10.
+*/
+LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
+	     "Milliseconds driver will wait between polling FCP ring");
+
 struct class_device_attribute *lpfc_host_attrs[] = {
 	&class_device_attr_info,
 	&class_device_attr_serialnum,
@@ -575,11 +656,15 @@ struct class_device_attribute *lpfc_host_attrs[] = {
 	&class_device_attr_lpfc_topology,
 	&class_device_attr_lpfc_scan_down,
 	&class_device_attr_lpfc_link_speed,
+	&class_device_attr_lpfc_cr_delay,
+	&class_device_attr_lpfc_cr_count,
 	&class_device_attr_lpfc_fdmi_on,
 	&class_device_attr_lpfc_max_luns,
 	&class_device_attr_nport_evt_cnt,
 	&class_device_attr_management_version,
 	&class_device_attr_board_online,
+	&class_device_attr_lpfc_poll,
+	&class_device_attr_lpfc_poll_tmo,
 	NULL,
 };
 
@@ -1292,6 +1377,9 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_fdmi_on_init(phba, lpfc_fdmi_on);
 	lpfc_discovery_threads_init(phba, lpfc_discovery_threads);
 	lpfc_max_luns_init(phba, lpfc_max_luns);
+	lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
+
+	phba->cfg_poll = lpfc_poll;
 
 	/*
 	 * The total number of segments is the configuration value plus 2
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index d527d05a607f..f1e708946e66 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -143,6 +143,9 @@ LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
 int lpfc_mem_alloc(struct lpfc_hba *);
 void lpfc_mem_free(struct lpfc_hba *);
 
+void lpfc_poll_timeout(unsigned long ptr);
+void lpfc_poll_start_timer(struct lpfc_hba * phba);
+void lpfc_sli_poll_fcp_ring(struct lpfc_hba * hba);
 struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *);
 void lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
 uint16_t lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 766dac230a6c..db3c2ad4e941 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -370,6 +370,10 @@ lpfc_config_port_post(struct lpfc_hba * phba)
 	if (psli->num_rings > 3)
 		status |= HC_R3INT_ENA;
 
+	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
+	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
+		status &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
+
 	writel(status, phba->HCregaddr);
 	readl(phba->HCregaddr); /* flush */
 	spin_unlock_irq(phba->host->host_lock);
@@ -1237,6 +1241,7 @@ lpfc_stop_timer(struct lpfc_hba * phba)
 		}
 	}
 
+	del_timer_sync(&phba->fcp_poll_timer);
 	del_timer_sync(&phba->fc_estabtmo);
 	del_timer_sync(&phba->fc_disctmo);
 	del_timer_sync(&phba->fc_fdmitmo);
@@ -1416,6 +1421,10 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 	psli->mbox_tmo.function = lpfc_mbox_timeout;
 	psli->mbox_tmo.data = (unsigned long)phba;
 
+	init_timer(&phba->fcp_poll_timer);
+	phba->fcp_poll_timer.function = lpfc_poll_timeout;
+	phba->fcp_poll_timer.data = (unsigned long)phba;
+
 	/*
 	 * Get all the module params for configuring this host and then
 	 * establish the host parameters.
@@ -1530,6 +1539,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 	host->max_cmd_len = 16;
 
 	/* Initialize the list of scsi buffers used by driver for scsi IO. */
+	spin_lock_init(&phba->scsi_buf_list_lock);
 	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
 
 	host->transportt = lpfc_transport_template;
@@ -1561,6 +1571,12 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 	if (error)
 		goto out_free_irq;
 
+	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+		spin_lock_irq(phba->host->host_lock);
+		lpfc_poll_start_timer(phba);
+		spin_unlock_irq(phba->host->host_lock);
+	}
+
 	/*
 	 * set fixed host attributes
 	 * Must done after lpfc_sli_hba_setup()
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 7dc7810b7482..c422220db0ae 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -151,18 +151,22 @@ lpfc_new_scsi_buf(struct lpfc_hba * phba)
 }
 
 struct lpfc_scsi_buf*
-lpfc_sli_get_scsi_buf(struct lpfc_hba * phba)
+lpfc_get_scsi_buf(struct lpfc_hba * phba)
 {
 	struct lpfc_scsi_buf * lpfc_cmd = NULL;
 	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
+	unsigned long iflag = 0;
 
+	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
 	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
+	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
 	return lpfc_cmd;
 }
 
 static void
 lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
 {
+	unsigned long iflag = 0;
 	/*
 	 * There are only two special cases to consider. (1) the scsi command
 	 * requested scatter-gather usage or (2) the scsi command allocated
@@ -180,8 +184,10 @@ lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
 		}
 	}
 
+	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
 	psb->pCmd = NULL;
 	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
+	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
 }
 
 static int
@@ -403,7 +409,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
 	struct lpfc_nodelist *pnode = rdata->pnode;
 	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
-	unsigned long iflag;
 
 	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
 	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
@@ -457,9 +462,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 
 	cmd->scsi_done(cmd);
 
-	spin_lock_irqsave(phba->host->host_lock, iflag);
 	lpfc_release_scsi_buf(phba, lpfc_cmd);
-	spin_unlock_irqrestore(phba->host->host_lock, iflag);
 }
 
 static void
@@ -707,6 +710,37 @@ lpfc_info(struct Scsi_Host *host)
 	return lpfcinfobuf;
 }
 
+static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
+{
+	unsigned long poll_tmo_expires =
+		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
+
+	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
+		mod_timer(&phba->fcp_poll_timer,
+			  poll_tmo_expires);
+}
+
+void lpfc_poll_start_timer(struct lpfc_hba * phba)
+{
+	lpfc_poll_rearm_timer(phba);
+}
+
+void lpfc_poll_timeout(unsigned long ptr)
+{
+	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
+	unsigned long iflag;
+
+	spin_lock_irqsave(phba->host->host_lock, iflag);
+
+	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
+		lpfc_sli_poll_fcp_ring (phba);
+		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
+			lpfc_poll_rearm_timer(phba);
+	}
+
+	spin_unlock_irqrestore(phba->host->host_lock, iflag);
+}
+
 static int
 lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 {
@@ -733,7 +767,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 		cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
 		goto out_fail_command;
 	}
-	lpfc_cmd = lpfc_sli_get_scsi_buf (phba);
+	lpfc_cmd = lpfc_get_scsi_buf (phba);
 	if (lpfc_cmd == NULL) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
 				"%d:0707 driver's buffer pool is empty, "
@@ -761,11 +795,17 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
 	if (err)
 		goto out_host_busy_free_buf;
+
+	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
+		lpfc_sli_poll_fcp_ring(phba);
+		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
+			lpfc_poll_rearm_timer(phba);
+	}
+
 	return 0;
 
  out_host_busy_free_buf:
 	lpfc_release_scsi_buf(phba, lpfc_cmd);
-	cmnd->host_scribble = NULL;
  out_host_busy:
 	return SCSI_MLQUEUE_HOST_BUSY;
 
@@ -839,9 +879,15 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 		goto out;
 	}
 
+	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
+		lpfc_sli_poll_fcp_ring (phba);
+
 	/* Wait for abort to complete */
 	while (lpfc_cmd->pCmd == cmnd)
 	{
+		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
+			lpfc_sli_poll_fcp_ring (phba);
+
 		spin_unlock_irq(phba->host->host_lock);
 		schedule_timeout_uninterruptible(LPFC_ABORT_WAIT*HZ);
 		spin_lock_irq(phba->host->host_lock);
@@ -905,7 +951,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
 			break;
 	}
 
-	lpfc_cmd = lpfc_sli_get_scsi_buf (phba);
+	lpfc_cmd = lpfc_get_scsi_buf (phba);
 	if (lpfc_cmd == NULL)
 		goto out;
 
@@ -1001,7 +1047,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
 	lpfc_block_requests(phba);
 	spin_lock_irq(shost->host_lock);
 
-	lpfc_cmd = lpfc_sli_get_scsi_buf (phba);
+	lpfc_cmd = lpfc_get_scsi_buf(phba);
 	if (lpfc_cmd == NULL)
 		goto out;
 
@@ -1136,10 +1182,10 @@ lpfc_slave_alloc(struct scsi_device *sdev)
 			break;
 		}
 
-		spin_lock_irqsave(phba->host->host_lock, flags);
+		spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
 		phba->total_scsi_bufs++;
 		list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
-		spin_unlock_irqrestore(phba->host->host_lock, flags);
+		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
 	}
 	return 0;
 }
@@ -1163,6 +1209,12 @@ lpfc_slave_configure(struct scsi_device *sdev)
 	 */
 	rport->dev_loss_tmo = phba->cfg_nodev_tmo + 5;
 
+	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
+		lpfc_sli_poll_fcp_ring(phba);
+		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
+			lpfc_poll_rearm_timer(phba);
+	}
+
 	return 0;
 }
 
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index e2c08c5d83fb..7b785ade8b07 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -886,6 +886,182 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
 	return rc;
 }
 
+static void lpfc_sli_rsp_pointers_error(struct lpfc_hba * phba,
+					struct lpfc_sli_ring * pring)
+{
+	struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
+	/*
+	 * Ring <ringno> handler: portRspPut <portRspPut> is bigger then
+	 * rsp ring <portRspMax>
+	 */
+	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"%d:0312 Ring %d handler: portRspPut %d "
+			"is bigger then rsp ring %d\n",
+			phba->brd_no, pring->ringno,
+			le32_to_cpu(pgp->rspPutInx),
+			pring->numRiocb);
+
+	phba->hba_state = LPFC_HBA_ERROR;
+
+	/*
+	 * All error attention handlers are posted to
+	 * worker thread
+	 */
+	phba->work_ha |= HA_ERATT;
+	phba->work_hs = HS_FFER3;
+	if (phba->work_wait)
+		wake_up(phba->work_wait);
+
+	return;
+}
+
+void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
+{
+	struct lpfc_sli * psli = &phba->sli;
+	struct lpfc_sli_ring * pring = &psli->ring[LPFC_FCP_RING];
+	IOCB_t *irsp = NULL;
+	IOCB_t *entry = NULL;
+	struct lpfc_iocbq *cmdiocbq = NULL;
+	struct lpfc_iocbq rspiocbq;
+	struct lpfc_pgp *pgp;
+	uint32_t status;
+	uint32_t portRspPut, portRspMax;
+	int type;
+	uint32_t rsp_cmpl = 0;
+	void __iomem *to_slim;
+	uint32_t ha_copy;
+
+	pring->stats.iocb_event++;
+
+	/* The driver assumes SLI-2 mode */
+	pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
+
+	/*
+	 * The next available response entry should never exceed the maximum
+	 * entries. If it does, treat it as an adapter hardware error.
+	 */
+	portRspMax = pring->numRiocb;
+	portRspPut = le32_to_cpu(pgp->rspPutInx);
+	if (unlikely(portRspPut >= portRspMax)) {
+		lpfc_sli_rsp_pointers_error(phba, pring);
+		return;
+	}
+
+	rmb();
+	while (pring->rspidx != portRspPut) {
+
+		entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
+
+		if (++pring->rspidx >= portRspMax)
+			pring->rspidx = 0;
+
+		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
+				      (uint32_t *) &rspiocbq.iocb,
+				      sizeof (IOCB_t));
+		irsp = &rspiocbq.iocb;
+		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
+		pring->stats.iocb_rsp++;
+		rsp_cmpl++;
+
+		if (unlikely(irsp->ulpStatus)) {
+			/* Rsp ring <ringno> error: IOCB */
+			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+					"%d:0326 Rsp Ring %d error: IOCB Data: "
+					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
+					phba->brd_no, pring->ringno,
+					irsp->un.ulpWord[0],
+					irsp->un.ulpWord[1],
+					irsp->un.ulpWord[2],
+					irsp->un.ulpWord[3],
+					irsp->un.ulpWord[4],
+					irsp->un.ulpWord[5],
+					*(((uint32_t *) irsp) + 6),
+					*(((uint32_t *) irsp) + 7));
+		}
+
+		switch (type) {
+		case LPFC_ABORT_IOCB:
+		case LPFC_SOL_IOCB:
+			/*
+			 * Idle exchange closed via ABTS from port. No iocb
+			 * resources need to be recovered.
+			 */
+			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
+				printk(KERN_INFO "%s: IOCB cmd 0x%x processed."
+				       " Skipping completion\n", __FUNCTION__,
+				       irsp->ulpCommand);
+				break;
+			}
+
+			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
+							 &rspiocbq);
+			if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
+				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
+						      &rspiocbq);
+			}
+			break;
+		default:
+			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
+				char adaptermsg[LPFC_MAX_ADPTMSG];
+				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
+				memcpy(&adaptermsg[0], (uint8_t *) irsp,
+				       MAX_MSG_DATA);
+				dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s",
+					 phba->brd_no, adaptermsg);
+			} else {
+				/* Unknown IOCB command */
+				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+						"%d:0321 Unknown IOCB command "
+						"Data: x%x, x%x x%x x%x x%x\n",
+						phba->brd_no, type,
+						irsp->ulpCommand,
+						irsp->ulpStatus,
+						irsp->ulpIoTag,
+						irsp->ulpContext);
+			}
+			break;
+		}
+
+		/*
+		 * The response IOCB has been processed. Update the ring
+		 * pointer in SLIM. If the port response put pointer has not
+		 * been updated, sync the pgp->rspPutInx and fetch the new port
+		 * response put pointer.
+		 */
+		to_slim = phba->MBslimaddr +
+			(SLIMOFF + (pring->ringno * 2) + 1) * 4;
+		writeb(pring->rspidx, to_slim);
+
+		if (pring->rspidx == portRspPut)
+			portRspPut = le32_to_cpu(pgp->rspPutInx);
+	}
+
+	ha_copy = readl(phba->HAregaddr);
+	ha_copy >>= (LPFC_FCP_RING * 4);
+
+	if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) {
+		pring->stats.iocb_rsp_full++;
+		status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4));
+		writel(status, phba->CAregaddr);
+		readl(phba->CAregaddr);
+	}
+	if ((ha_copy & HA_R0CE_RSP) &&
+	    (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
+		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
+		pring->stats.iocb_cmd_empty++;
+
+		/* Force update of the local copy of cmdGetInx */
+		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
+		lpfc_sli_resume_iocb(phba, pring);
+
+		if ((pring->lpfc_sli_cmd_available))
+			(pring->lpfc_sli_cmd_available) (phba, pring);
+
+	}
+
+	return;
+}
+
 /*
  * This routine presumes LPFC_FCP_RING handling and doesn't bother
  * to check it explicitly.
@@ -917,24 +1093,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
 	portRspMax = pring->numRiocb;
 	portRspPut = le32_to_cpu(pgp->rspPutInx);
 	if (unlikely(portRspPut >= portRspMax)) {
-		/*
-		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger then
-		 * rsp ring <portRspMax>
-		 */
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"%d:0312 Ring %d handler: portRspPut %d "
-				"is bigger then rsp ring %d\n",
-				phba->brd_no, pring->ringno, portRspPut,
-				portRspMax);
-
-		phba->hba_state = LPFC_HBA_ERROR;
-
-		/* All error attention handlers are posted to worker thread */
-		phba->work_ha |= HA_ERATT;
-		phba->work_hs = HS_FFER3;
-		if (phba->work_wait)
-			wake_up(phba->work_wait);
-
+		lpfc_sli_rsp_pointers_error(phba, pring);
 		spin_unlock_irqrestore(phba->host->host_lock, iflag);
 		return 1;
 	}
@@ -947,6 +1106,10 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
 		 * network byte order and pci byte orders are different.
 		 */
 		entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
+
+		if (++pring->rspidx >= portRspMax)
+			pring->rspidx = 0;
+
 		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
 				      (uint32_t *) &rspiocbq.iocb,
 				      sizeof (IOCB_t));
@@ -1020,9 +1183,6 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
 		 * been updated, sync the pgp->rspPutInx and fetch the new port
 		 * response put pointer.
 		 */
-		if (++pring->rspidx >= portRspMax)
-			pring->rspidx = 0;
-
 		to_slim = phba->MBslimaddr +
 			(SLIMOFF + (pring->ringno * 2) + 1) * 4;
 		writel(pring->rspidx, to_slim);
@@ -2615,6 +2775,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
 	DECLARE_WAIT_QUEUE_HEAD(done_q);
 	long timeleft, timeout_req = 0;
 	int retval = IOCB_SUCCESS;
+	uint32_t creg_val;
 
 	/*
 	 * If the caller has provided a response iocbq buffer, then context2
@@ -2630,6 +2791,13 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
 	piocb->context_un.wait_queue = &done_q;
 	piocb->iocb_flag &= ~LPFC_IO_WAKE;
 
+	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+		creg_val = readl(phba->HCregaddr);
+		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
+		writel(creg_val, phba->HCregaddr);
+		readl(phba->HCregaddr); /* flush */
+	}
+
 	retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0);
 	if (retval == IOCB_SUCCESS) {
 		timeout_req = timeout * HZ;
@@ -2663,6 +2831,13 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
 		retval = IOCB_ERROR;
 	}
 
+	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+		creg_val = readl(phba->HCregaddr);
+		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
+		writel(creg_val, phba->HCregaddr);
+		readl(phba->HCregaddr); /* flush */
+	}
+
 	if (prspiocbq)
 		piocb->context2 = NULL;
 