author    James Smart <james.smart@emulex.com>    2010-01-26 23:10:15 -0500
committer James Bottomley <James.Bottomley@suse.de>    2010-02-08 19:40:49 -0500
commit    3b5dd52aaffd291edea9f939ed46a960b240bb45 (patch)
tree      89fb81b8ac2cdab88a4f60f449d51990ad18b625
parent    4cc0e56e977f12e6f400cbab3df7cf1e11d6f58a (diff)
[SCSI] lpfc 8.3.8: (BSG4) Add new vendor specific BSG Commands
Add the following new vendor specific BSG commands:
- Add LPFC_BSG_VENDOR_GET_MGMT_REV command
- Add LPFC_BSG_VENDOR_MBOX command
- Add LPFC_BSG_VENDOR_DIAG_MODE command
- Add LPFC_BSG_VENDOR_DIAG_TEST command

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 1583
1 file changed, 1569 insertions(+), 14 deletions(-)
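Before the diff, a note on how userspace reaches these handlers: the new commands ride the FC transport bsg interface, so an application fills a struct sg_io_v4 whose request buffer begins with a struct fc_bsg_request (msgcode FC_BSG_HST_VENDOR) followed by the Emulex vendor command word, then issues SG_IO on the host's bsg node. The sketch below is illustrative only: the vendor command value, the two-word reply layout, and the /dev/bsg/fc_host0 node name are assumptions to be checked against the driver's lpfc_bsg.h and the uapi scsi/scsi_bsg_fc.h.

/* Hedged sketch: issue LPFC_BSG_VENDOR_GET_MGMT_REV from userspace.
 * The command value below is a placeholder that must match lpfc_bsg.h.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/bsg.h>          /* struct sg_io_v4, BSG_PROTOCOL_SCSI */
#include <scsi/sg.h>            /* SG_IO */
#include <scsi/scsi_bsg_fc.h>   /* struct fc_bsg_request, FC_BSG_HST_VENDOR */

#define LPFC_BSG_VENDOR_GET_MGMT_REV 6   /* assumed value, see lpfc_bsg.h */

int main(void)
{
	uint8_t rqst[sizeof(struct fc_bsg_request) + sizeof(uint32_t)];
	uint8_t resp[sizeof(struct fc_bsg_reply) + 64];
	struct fc_bsg_request *bsg_req = (struct fc_bsg_request *)rqst;
	struct fc_bsg_reply *bsg_rep = (struct fc_bsg_reply *)resp;
	struct sg_io_v4 io;
	int fd;

	fd = open("/dev/bsg/fc_host0", O_RDWR);  /* example node name */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(rqst, 0, sizeof(rqst));
	bsg_req->msgcode = FC_BSG_HST_VENDOR;
	/* lpfc_bsg_hst_vendor() dispatches on vendor_cmd[0] */
	bsg_req->rqst_data.h_vendor.vendor_cmd[0] = LPFC_BSG_VENDOR_GET_MGMT_REV;

	memset(&io, 0, sizeof(io));
	io.guard = 'Q';
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	io.request = (uintptr_t)rqst;
	io.request_len = sizeof(rqst);
	io.response = (uintptr_t)resp;     /* fc_bsg_reply + vendor words */
	io.max_response_len = sizeof(resp);
	io.timeout = 30 * 1000;            /* milliseconds */

	if (ioctl(fd, SG_IO, &io) < 0)
		perror("SG_IO");
	else
		/* lpfc_bsg_get_dfc_rev() fills a_Major/a_Minor; a two-word
		 * reply layout is assumed here */
		printf("mgmt rev %u.%u (result %d)\n",
		       bsg_rep->reply_data.vendor_reply.vendor_rsp[0],
		       bsg_rep->reply_data.vendor_reply.vendor_rsp[1],
		       (int)bsg_rep->result);

	close(fd);
	return 0;
}

The same pattern, with dout/din payloads attached to the sg_io_v4, drives the SEND_MGMT_RESP, MBOX and diagnostic commands handled in the diff below.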
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index a7e8921015eb..64ab075a3656 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -74,13 +74,23 @@ struct lpfc_bsg_iocb {
74 74 struct fc_bsg_job *set_job;
75 75 };
76 76
77struct lpfc_bsg_mbox {
78 LPFC_MBOXQ_t *pmboxq;
79 MAILBOX_t *mb;
80
81 /* job waiting for this mbox command to finish */
82 struct fc_bsg_job *set_job;
83};
84
77 85 #define TYPE_EVT 1
78 86 #define TYPE_IOCB 2
87#define TYPE_MBOX 3
79 88 struct bsg_job_data {
80 89 uint32_t type;
81 90 union {
82 91 struct lpfc_bsg_event *evt;
83 92 struct lpfc_bsg_iocb iocb;
93 struct lpfc_bsg_mbox mbox;
84 94 } context_un;
85 95 };
86 96
@@ -92,6 +102,7 @@ struct event_data {
92 102 uint32_t len;
93 103 };
94 104
105#define BUF_SZ_4K 4096
95 106 #define SLI_CT_ELX_LOOPBACK 0x10
96 107
97 108 enum ELX_LOOPBACK_CMD {
@@ -99,6 +110,9 @@ enum ELX_LOOPBACK_CMD {
99 110 ELX_LOOPBACK_DATA,
100 111 };
101 112
113#define ELX_LOOPBACK_HEADER_SZ \
114 (size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
115
102 116 struct lpfc_dmabufext {
103 117 struct lpfc_dmabuf dma;
104 118 uint32_t size;
@@ -201,7 +215,7 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
201 215 /**
202 216 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
203 217 * @job: fc_bsg_job to handle
204 */ 218 **/
205 219 static int
206 220 lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
207 221 {
@@ -380,7 +394,7 @@ no_dd_data:
380 394 * can be called from both worker thread context and interrupt
381 395 * context. This function also can be called from other thread which
382 396 * cleans up the SLI layer objects.
383 * This function copy the contents of the response iocb to the 397 * This function copies the contents of the response iocb to the
384 398 * response iocb memory object provided by the caller of
385 399 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
386 400 * sleeps for the iocb completion.
@@ -461,7 +475,7 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
461 475 /**
462 476 * lpfc_bsg_rport_els - send an ELS command from a bsg request
463 477 * @job: fc_bsg_job to handle
464 */ 478 **/
465 479 static int
466 480 lpfc_bsg_rport_els(struct fc_bsg_job *job)
467 481 {
@@ -527,8 +541,8 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
527 541 }
528 542
529 543 /* prep els iocb set context1 to the ndlp, context2 to the command
530 544 * dmabuf, context3 holds the data dmabuf
531 545 */
532 546 pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
533 547 prsp = (struct lpfc_dmabuf *) pcmd->list.next;
534 548 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
@@ -613,6 +627,14 @@ no_dd_data:
613 627 return rc;
614 628 }
615 629
630/**
631 * lpfc_bsg_event_free - frees an allocated event structure
632 * @kref: Pointer to a kref.
633 *
634 * Called from kref_put. Back cast the kref into an event structure address.
635 * Free any events to get, delete associated nodes, free any events to see,
636 * free any data, then free the event itself.
637 **/
616 638 static void
617 639 lpfc_bsg_event_free(struct kref *kref)
618 640 {
@@ -639,18 +661,32 @@ lpfc_bsg_event_free(struct kref *kref)
639 661 kfree(evt);
640 662 }
641 663
664/**
665 * lpfc_bsg_event_ref - increments the kref for an event
666 * @evt: Pointer to an event structure.
667 **/
642 668 static inline void
643 669 lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
644 670 {
645 671 kref_get(&evt->kref);
646 672 }
647 673
674/**
675 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
676 * @evt: Pointer to an event structure.
677 **/
648 678 static inline void
649 679 lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
650 680 {
651 681 kref_put(&evt->kref, lpfc_bsg_event_free);
652 682 }
653 683
684/**
685 * lpfc_bsg_event_new - allocate and initialize an event structure
686 * @ev_mask: Mask of events.
687 * @ev_reg_id: Event reg id.
688 * @ev_req_id: Event request id.
689 **/
654 690 static struct lpfc_bsg_event *
655 691 lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
656 692 {
@@ -670,8 +706,13 @@ lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
670 706 return evt;
671 707 }
672 708
709/**
710 * diag_cmd_data_free - Frees an lpfc dma buffer extension
711 * @phba: Pointer to HBA context object.
712 * @mlist: Pointer to an lpfc dma buffer extension.
713 **/
673 714 static int
674dfc_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist) 715diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
675 716 {
676 717 struct lpfc_dmabufext *mlast;
677 718 struct pci_dev *pcidev;
@@ -705,7 +746,7 @@ dfc_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
705 746 *
706 747 * This function is called when an unsolicited CT command is received. It
707 748 * forwards the event to any processes registered to receive CT events.
708 */ 749 **/
709 750 int
710 751 lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
711 752 struct lpfc_iocbq *piocbq)
@@ -857,7 +898,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
857 898 } else {
858 899 switch (cmd) {
859 900 case ELX_LOOPBACK_DATA:
860 dfc_cmd_data_free(phba, 901 diag_cmd_data_free(phba,
861 902 (struct lpfc_dmabufext *)
862 903 dmabuf);
863 904 break;
@@ -935,7 +976,7 @@ error_ct_unsol_exit:
935 976 /**
936 977 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
937 978 * @job: SET_EVENT fc_bsg_job
938 */ 979 **/
939 980 static int
940 981 lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
941 982 {
@@ -1018,7 +1059,7 @@ job_error:
1018 1059 /**
1019 1060 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
1020 1061 * @job: GET_EVENT fc_bsg_job
1021 */ 1062 **/
1022 1063 static int
1023 1064 lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
1024 1065 {
@@ -1107,9 +1148,1498 @@ job_error:
1107 1148 }
1108 1149
1109 1150 /**
1151 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
1152 * @phba: Pointer to HBA context object.
1153 * @cmdiocbq: Pointer to command iocb.
1154 * @rspiocbq: Pointer to response iocb.
1155 *
1156 * This function is the completion handler for iocbs issued using
1157 * lpfc_issue_ct_rsp function. This function is called by the
1158 * ring event handler function without any lock held. This function
1159 * can be called from both worker thread context and interrupt
1160 * context. This function also can be called from other thread which
1161 * cleans up the SLI layer objects.
1162 * This function copies the contents of the response iocb to the
1163 * response iocb memory object provided by the caller of
1164 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
1165 * sleeps for the iocb completion.
1166 **/
1167static void
1168lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
1169 struct lpfc_iocbq *cmdiocbq,
1170 struct lpfc_iocbq *rspiocbq)
1171{
1172 struct bsg_job_data *dd_data;
1173 struct fc_bsg_job *job;
1174 IOCB_t *rsp;
1175 struct lpfc_dmabuf *bmp;
1176 struct lpfc_nodelist *ndlp;
1177 unsigned long flags;
1178 int rc = 0;
1179
1180 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1181 dd_data = cmdiocbq->context1;
1182 /* normal completion and timeout crossed paths, already done */
1183 if (!dd_data) {
1184 spin_unlock_irqrestore(&phba->hbalock, flags);
1185 return;
1186 }
1187
1188 job = dd_data->context_un.iocb.set_job;
1189 bmp = dd_data->context_un.iocb.bmp;
1190 rsp = &rspiocbq->iocb;
1191 ndlp = dd_data->context_un.iocb.ndlp;
1192
1193 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
1194 job->request_payload.sg_cnt, DMA_TO_DEVICE);
1195
1196 if (rsp->ulpStatus) {
1197 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
1198 switch (rsp->un.ulpWord[4] & 0xff) {
1199 case IOERR_SEQUENCE_TIMEOUT:
1200 rc = -ETIMEDOUT;
1201 break;
1202 case IOERR_INVALID_RPI:
1203 rc = -EFAULT;
1204 break;
1205 default:
1206 rc = -EACCES;
1207 break;
1208 }
1209 } else
1210 rc = -EACCES;
1211 } else
1212 job->reply->reply_payload_rcv_len =
1213 rsp->un.genreq64.bdl.bdeSize;
1214
1215 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1216 lpfc_sli_release_iocbq(phba, cmdiocbq);
1217 lpfc_nlp_put(ndlp);
1218 kfree(bmp);
1219 kfree(dd_data);
1220 /* make error code available to userspace */
1221 job->reply->result = rc;
1222 job->dd_data = NULL;
1223 /* complete the job back to userspace */
1224 job->job_done(job);
1225 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1226 return;
1227}
1228
1229/**
1230 * lpfc_issue_ct_rsp - issue a ct response
1231 * @phba: Pointer to HBA context object.
1232 * @job: Pointer to the job object.
1233 * @tag: tag index value into the ports context exchange array.
1234 * @bmp: Pointer to a dma buffer descriptor.
1235 * @num_entry: Number of entries in the bde.
1236 **/
1237static int
1238lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1239 struct lpfc_dmabuf *bmp, int num_entry)
1240{
1241 IOCB_t *icmd;
1242 struct lpfc_iocbq *ctiocb = NULL;
1243 int rc = 0;
1244 struct lpfc_nodelist *ndlp = NULL;
1245 struct bsg_job_data *dd_data;
1246 uint32_t creg_val;
1247
1248 /* allocate our bsg tracking structure */
1249 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1250 if (!dd_data) {
1251 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1252 "2736 Failed allocation of dd_data\n");
1253 rc = -ENOMEM;
1254 goto no_dd_data;
1255 }
1256
1257 /* Allocate buffer for command iocb */
1258 ctiocb = lpfc_sli_get_iocbq(phba);
1259 if (!ctiocb) {
1260 rc = ENOMEM;
1261 goto no_ctiocb;
1262 }
1263
1264 icmd = &ctiocb->iocb;
1265 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
1266 icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
1267 icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
1268 icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
1269 icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
1270 icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
1271 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
1272 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
1273 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
1274
1275 /* Fill in rest of iocb */
1276 icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
1277 icmd->ulpBdeCount = 1;
1278 icmd->ulpLe = 1;
1279 icmd->ulpClass = CLASS3;
1280 if (phba->sli_rev == LPFC_SLI_REV4) {
1281 /* Do not issue unsol response if oxid not marked as valid */
1282 if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) {
1283 rc = IOCB_ERROR;
1284 goto issue_ct_rsp_exit;
1285 }
1286 icmd->ulpContext = phba->ct_ctx[tag].oxid;
1287 ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
1288 if (!ndlp) {
1289 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
1290 "2721 ndlp null for oxid %x SID %x\n",
1291 icmd->ulpContext,
1292 phba->ct_ctx[tag].SID);
1293 rc = IOCB_ERROR;
1294 goto issue_ct_rsp_exit;
1295 }
1296 icmd->un.ulpWord[3] = ndlp->nlp_rpi;
1297 /* The exchange is done, mark the entry as invalid */
1298 phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
1299 } else
1300 icmd->ulpContext = (ushort) tag;
1301
1302 icmd->ulpTimeout = phba->fc_ratov * 2;
1303
1304 /* Xmit CT response on exchange <xid> */
1305 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1306 "2722 Xmit CT response on exchange x%x Data: x%x x%x\n",
1307 icmd->ulpContext, icmd->ulpIoTag, phba->link_state);
1308
1309 ctiocb->iocb_cmpl = NULL;
1310 ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
1311 ctiocb->vport = phba->pport;
1312 ctiocb->context3 = bmp;
1313
1314 ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
1315 ctiocb->context1 = dd_data;
1316 ctiocb->context2 = NULL;
1317 dd_data->type = TYPE_IOCB;
1318 dd_data->context_un.iocb.cmdiocbq = ctiocb;
1319 dd_data->context_un.iocb.rspiocbq = NULL;
1320 dd_data->context_un.iocb.set_job = job;
1321 dd_data->context_un.iocb.bmp = bmp;
1322 dd_data->context_un.iocb.ndlp = ndlp;
1323
1324 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
1325 creg_val = readl(phba->HCregaddr);
1326 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
1327 writel(creg_val, phba->HCregaddr);
1328 readl(phba->HCregaddr); /* flush */
1329 }
1330
1331 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
1332
1333 if (rc == IOCB_SUCCESS)
1334 return 0; /* done for now */
1335
1336issue_ct_rsp_exit:
1337 lpfc_sli_release_iocbq(phba, ctiocb);
1338no_ctiocb:
1339 kfree(dd_data);
1340no_dd_data:
1341 return rc;
1342}
1343
1344/**
1345 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
1346 * @job: SEND_MGMT_RESP fc_bsg_job
1347 **/
1348static int
1349lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
1350{
1351 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1352 struct lpfc_hba *phba = vport->phba;
1353 struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
1354 job->request->rqst_data.h_vendor.vendor_cmd;
1355 struct ulp_bde64 *bpl;
1356 struct lpfc_dmabuf *bmp = NULL;
1357 struct scatterlist *sgel = NULL;
1358 int request_nseg;
1359 int numbde;
1360 dma_addr_t busaddr;
1361 uint32_t tag = mgmt_resp->tag;
1362 unsigned long reqbfrcnt =
1363 (unsigned long)job->request_payload.payload_len;
1364 int rc = 0;
1365
1366 /* in case no data is transferred */
1367 job->reply->reply_payload_rcv_len = 0;
1368
1369 if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
1370 rc = -ERANGE;
1371 goto send_mgmt_rsp_exit;
1372 }
1373
1374 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1375 if (!bmp) {
1376 rc = -ENOMEM;
1377 goto send_mgmt_rsp_exit;
1378 }
1379
1380 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
1381 if (!bmp->virt) {
1382 rc = -ENOMEM;
1383 goto send_mgmt_rsp_free_bmp;
1384 }
1385
1386 INIT_LIST_HEAD(&bmp->list);
1387 bpl = (struct ulp_bde64 *) bmp->virt;
1388 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
1389 job->request_payload.sg_cnt, DMA_TO_DEVICE);
1390 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
1391 busaddr = sg_dma_address(sgel);
1392 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1393 bpl->tus.f.bdeSize = sg_dma_len(sgel);
1394 bpl->tus.w = cpu_to_le32(bpl->tus.w);
1395 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
1396 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
1397 bpl++;
1398 }
1399
1400 rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg);
1401
1402 if (rc == IOCB_SUCCESS)
1403 return 0; /* done for now */
1404
1405 /* TBD need to handle a timeout */
1406 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
1407 job->request_payload.sg_cnt, DMA_TO_DEVICE);
1408 rc = -EACCES;
1409 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1410
1411send_mgmt_rsp_free_bmp:
1412 kfree(bmp);
1413send_mgmt_rsp_exit:
1414 /* make error code available to userspace */
1415 job->reply->result = rc;
1416 job->dd_data = NULL;
1417 return rc;
1418}
1419
1420/**
1421 * lpfc_bsg_diag_mode - process a LPFC_BSG_VENDOR_DIAG_MODE bsg vendor command
1422 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1423 *
1424 * This function is responsible for placing a port into diagnostic loopback
1425 * mode in order to perform a diagnostic loopback test.
1426 * All new scsi requests are blocked; a small delay is used to allow the
1427 * scsi requests to complete, then the link is brought down. If the link
1428 * is placed in loopback mode, scsi requests are again allowed
1429 * so the scsi mid-layer doesn't give up on the port.
1430 * All of this is done in-line.
1431 */
1432static int
1433lpfc_bsg_diag_mode(struct fc_bsg_job *job)
1434{
1435 struct Scsi_Host *shost = job->shost;
1436 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1437 struct lpfc_hba *phba = vport->phba;
1438 struct diag_mode_set *loopback_mode;
1439 struct lpfc_sli *psli = &phba->sli;
1440 struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
1441 uint32_t link_flags;
1442 uint32_t timeout;
1443 struct lpfc_vport **vports;
1444 LPFC_MBOXQ_t *pmboxq;
1445 int mbxstatus;
1446 int i = 0;
1447 int rc = 0;
1448
1449 /* no data to return just the return code */
1450 job->reply->reply_payload_rcv_len = 0;
1451
1452 if (job->request_len <
1453 sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) {
1454 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1455 "2738 Received DIAG MODE request below minimum "
1456 "size\n");
1457 rc = -EINVAL;
1458 goto job_error;
1459 }
1460
1461 loopback_mode = (struct diag_mode_set *)
1462 job->request->rqst_data.h_vendor.vendor_cmd;
1463 link_flags = loopback_mode->type;
1464 timeout = loopback_mode->timeout;
1465
1466 if ((phba->link_state == LPFC_HBA_ERROR) ||
1467 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
1468 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
1469 rc = -EACCES;
1470 goto job_error;
1471 }
1472
1473 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1474 if (!pmboxq) {
1475 rc = -ENOMEM;
1476 goto job_error;
1477 }
1478
1479 vports = lpfc_create_vport_work_array(phba);
1480 if (vports) {
1481 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1482 shost = lpfc_shost_from_vport(vports[i]);
1483 scsi_block_requests(shost);
1484 }
1485
1486 lpfc_destroy_vport_work_array(phba, vports);
1487 } else {
1488 shost = lpfc_shost_from_vport(phba->pport);
1489 scsi_block_requests(shost);
1490 }
1491
1492 while (pring->txcmplq_cnt) {
1493 if (i++ > 500) /* wait up to 5 seconds */
1494 break;
1495
1496 msleep(10);
1497 }
1498
1499 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1500 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
1501 pmboxq->u.mb.mbxOwner = OWN_HOST;
1502
1503 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1504
1505 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
1506 /* wait for link down before proceeding */
1507 i = 0;
1508 while (phba->link_state != LPFC_LINK_DOWN) {
1509 if (i++ > timeout) {
1510 rc = -ETIMEDOUT;
1511 goto loopback_mode_exit;
1512 }
1513
1514 msleep(10);
1515 }
1516
1517 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1518 if (link_flags == INTERNAL_LOOP_BACK)
1519 pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
1520 else
1521 pmboxq->u.mb.un.varInitLnk.link_flags =
1522 FLAGS_TOPOLOGY_MODE_LOOP;
1523
1524 pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
1525 pmboxq->u.mb.mbxOwner = OWN_HOST;
1526
1527 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
1528 LPFC_MBOX_TMO);
1529
1530 if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
1531 rc = -ENODEV;
1532 else {
1533 phba->link_flag |= LS_LOOPBACK_MODE;
1534 /* wait for the link attention interrupt */
1535 msleep(100);
1536
1537 i = 0;
1538 while (phba->link_state != LPFC_HBA_READY) {
1539 if (i++ > timeout) {
1540 rc = -ETIMEDOUT;
1541 break;
1542 }
1543
1544 msleep(10);
1545 }
1546 }
1547
1548 } else
1549 rc = -ENODEV;
1550
1551loopback_mode_exit:
1552 vports = lpfc_create_vport_work_array(phba);
1553 if (vports) {
1554 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1555 shost = lpfc_shost_from_vport(vports[i]);
1556 scsi_unblock_requests(shost);
1557 }
1558 lpfc_destroy_vport_work_array(phba, vports);
1559 } else {
1560 shost = lpfc_shost_from_vport(phba->pport);
1561 scsi_unblock_requests(shost);
1562 }
1563
1564 /*
1565 * Let SLI layer release mboxq if mbox command completed after timeout.
1566 */
1567 if (mbxstatus != MBX_TIMEOUT)
1568 mempool_free(pmboxq, phba->mbox_mem_pool);
1569
1570job_error:
1571 /* make error code available to userspace */
1572 job->reply->result = rc;
1573 /* complete the job back to userspace if no error */
1574 if (rc == 0)
1575 job->job_done(job);
1576 return rc;
1577}
1578
1579/**
1580 * lpfcdiag_loop_self_reg - obtains a remote port login id
1581 * @phba: Pointer to HBA context object
1582 * @rpi: Pointer to a remote port login id
1583 *
1584 * This function obtains a remote port login id so the diag loopback test
1585 * can send and receive its own unsolicited CT command.
1586 **/
1587static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t * rpi)
1588{
1589 LPFC_MBOXQ_t *mbox;
1590 struct lpfc_dmabuf *dmabuff;
1591 int status;
1592
1593 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1594 if (!mbox)
1595 return ENOMEM;
1596
1597 status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
1598 (uint8_t *)&phba->pport->fc_sparam, mbox, 0);
1599 if (status) {
1600 mempool_free(mbox, phba->mbox_mem_pool);
1601 return ENOMEM;
1602 }
1603
1604 dmabuff = (struct lpfc_dmabuf *) mbox->context1;
1605 mbox->context1 = NULL;
1606 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
1607
1608 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
1609 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
1610 kfree(dmabuff);
1611 if (status != MBX_TIMEOUT)
1612 mempool_free(mbox, phba->mbox_mem_pool);
1613 return ENODEV;
1614 }
1615
1616 *rpi = mbox->u.mb.un.varWords[0];
1617
1618 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
1619 kfree(dmabuff);
1620 mempool_free(mbox, phba->mbox_mem_pool);
1621 return 0;
1622}
1623
1624/**
1625 * lpfcdiag_loop_self_unreg - unregs from the rpi
1626 * @phba: Pointer to HBA context object
1627 * @rpi: Remote port login id
1628 *
1629 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
1630 **/
1631static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
1632{
1633 LPFC_MBOXQ_t *mbox;
1634 int status;
1635
1636 /* Allocate mboxq structure */
1637 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1638 if (mbox == NULL)
1639 return ENOMEM;
1640
1641 lpfc_unreg_login(phba, 0, rpi, mbox);
1642 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
1643
1644 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
1645 if (status != MBX_TIMEOUT)
1646 mempool_free(mbox, phba->mbox_mem_pool);
1647 return EIO;
1648 }
1649
1650 mempool_free(mbox, phba->mbox_mem_pool);
1651 return 0;
1652}
1653
1654/**
1655 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
1656 * @phba: Pointer to HBA context object
1657 * @rpi: Remote port login id
1658 * @txxri: Pointer to transmit exchange id
1659 * @rxxri: Pointer to response exchange id
1660 *
1661 * This function obtains the transmit and receive ids required to send
1662 * an unsolicited ct command with a payload. Special lpfc FsType and CmdRsp
1663 * flags are used so the unsolicited response handler is able to process
1664 * the ct command sent on the same port.
1665 **/
1666static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
1667 uint16_t *txxri, uint16_t * rxxri)
1668{
1669 struct lpfc_bsg_event *evt;
1670 struct lpfc_iocbq *cmdiocbq, *rspiocbq;
1671 IOCB_t *cmd, *rsp;
1672 struct lpfc_dmabuf *dmabuf;
1673 struct ulp_bde64 *bpl = NULL;
1674 struct lpfc_sli_ct_request *ctreq = NULL;
1675 int ret_val = 0;
1676 unsigned long flags;
1677
1678 *txxri = 0;
1679 *rxxri = 0;
1680 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
1681 SLI_CT_ELX_LOOPBACK);
1682 if (!evt)
1683 return ENOMEM;
1684
1685 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1686 list_add(&evt->node, &phba->ct_ev_waiters);
1687 lpfc_bsg_event_ref(evt);
1688 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1689
1690 cmdiocbq = lpfc_sli_get_iocbq(phba);
1691 rspiocbq = lpfc_sli_get_iocbq(phba);
1692
1693 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1694 if (dmabuf) {
1695 dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
1696 INIT_LIST_HEAD(&dmabuf->list);
1697 bpl = (struct ulp_bde64 *) dmabuf->virt;
1698 memset(bpl, 0, sizeof(*bpl));
1699 ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
1700 bpl->addrHigh =
1701 le32_to_cpu(putPaddrHigh(dmabuf->phys + sizeof(*bpl)));
1702 bpl->addrLow =
1703 le32_to_cpu(putPaddrLow(dmabuf->phys + sizeof(*bpl)));
1704 bpl->tus.f.bdeFlags = 0;
1705 bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
1706 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1707 }
1708
1709 if (cmdiocbq == NULL || rspiocbq == NULL ||
1710 dmabuf == NULL || bpl == NULL || ctreq == NULL) {
1711 ret_val = ENOMEM;
1712 goto err_get_xri_exit;
1713 }
1714
1715 cmd = &cmdiocbq->iocb;
1716 rsp = &rspiocbq->iocb;
1717
1718 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
1719
1720 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
1721 ctreq->RevisionId.bits.InId = 0;
1722 ctreq->FsType = SLI_CT_ELX_LOOPBACK;
1723 ctreq->FsSubType = 0;
1724 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
1725 ctreq->CommandResponse.bits.Size = 0;
1726
1727
1728 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
1729 cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
1730 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
1731 cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
1732
1733 cmd->un.xseq64.w5.hcsw.Fctl = LA;
1734 cmd->un.xseq64.w5.hcsw.Dfctl = 0;
1735 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
1736 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
1737
1738 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
1739 cmd->ulpBdeCount = 1;
1740 cmd->ulpLe = 1;
1741 cmd->ulpClass = CLASS3;
1742 cmd->ulpContext = rpi;
1743
1744 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
1745 cmdiocbq->vport = phba->pport;
1746
1747 ret_val = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
1748 rspiocbq,
1749 (phba->fc_ratov * 2)
1750 + LPFC_DRVR_TIMEOUT);
1751 if (ret_val)
1752 goto err_get_xri_exit;
1753
1754 *txxri = rsp->ulpContext;
1755
1756 evt->waiting = 1;
1757 evt->wait_time_stamp = jiffies;
1758 ret_val = wait_event_interruptible_timeout(
1759 evt->wq, !list_empty(&evt->events_to_see),
1760 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
1761 if (list_empty(&evt->events_to_see))
1762 ret_val = (ret_val) ? EINTR : ETIMEDOUT;
1763 else {
1764 ret_val = IOCB_SUCCESS;
1765 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1766 list_move(evt->events_to_see.prev, &evt->events_to_get);
1767 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1768 *rxxri = (list_entry(evt->events_to_get.prev,
1769 typeof(struct event_data),
1770 node))->immed_dat;
1771 }
1772 evt->waiting = 0;
1773
1774err_get_xri_exit:
1775 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1776 lpfc_bsg_event_unref(evt); /* release ref */
1777 lpfc_bsg_event_unref(evt); /* delete */
1778 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1779
1780 if (dmabuf) {
1781 if (dmabuf->virt)
1782 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
1783 kfree(dmabuf);
1784 }
1785
1786 if (cmdiocbq && (ret_val != IOCB_TIMEDOUT))
1787 lpfc_sli_release_iocbq(phba, cmdiocbq);
1788 if (rspiocbq)
1789 lpfc_sli_release_iocbq(phba, rspiocbq);
1790 return ret_val;
1791}
1792
1793/**
1794 * diag_cmd_data_alloc - fills in a bde struct with dma buffers
1795 * @phba: Pointer to HBA context object
1796 * @bpl: Pointer to 64 bit bde structure
1797 * @size: Number of bytes to process
1798 * @nocopydata: Flag to copy user data into the allocated buffer
1799 *
1800 * This function allocates page size buffers and populates an lpfc_dmabufext.
1801 * If allowed, the user data pointed to with indataptr is copied into the kernel
1802 * memory. The chained list of page size buffers is returned.
1803 **/
1804static struct lpfc_dmabufext *
1805diag_cmd_data_alloc(struct lpfc_hba *phba,
1806 struct ulp_bde64 *bpl, uint32_t size,
1807 int nocopydata)
1808{
1809 struct lpfc_dmabufext *mlist = NULL;
1810 struct lpfc_dmabufext *dmp;
1811 int cnt, offset = 0, i = 0;
1812 struct pci_dev *pcidev;
1813
1814 pcidev = phba->pcidev;
1815
1816 while (size) {
1817 /* We get chunks of 4K */
1818 if (size > BUF_SZ_4K)
1819 cnt = BUF_SZ_4K;
1820 else
1821 cnt = size;
1822
1823 /* allocate struct lpfc_dmabufext buffer header */
1824 dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
1825 if (!dmp)
1826 goto out;
1827
1828 INIT_LIST_HEAD(&dmp->dma.list);
1829
1830 /* Queue it to a linked list */
1831 if (mlist)
1832 list_add_tail(&dmp->dma.list, &mlist->dma.list);
1833 else
1834 mlist = dmp;
1835
1836 /* allocate buffer */
1837 dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
1838 cnt,
1839 &(dmp->dma.phys),
1840 GFP_KERNEL);
1841
1842 if (!dmp->dma.virt)
1843 goto out;
1844
1845 dmp->size = cnt;
1846
1847 if (nocopydata) {
1848 bpl->tus.f.bdeFlags = 0;
1849 pci_dma_sync_single_for_device(phba->pcidev,
1850 dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
1851
1852 } else {
1853 memset((uint8_t *)dmp->dma.virt, 0, cnt);
1854 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1855 }
1856
1857 /* build buffer ptr list for IOCB */
1858 bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
1859 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
1860 bpl->tus.f.bdeSize = (ushort) cnt;
1861 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1862 bpl++;
1863
1864 i++;
1865 offset += cnt;
1866 size -= cnt;
1867 }
1868
1869 mlist->flag = i;
1870 return mlist;
1871out:
1872 diag_cmd_data_free(phba, mlist);
1873 return NULL;
1874}
1875
1876/**
1877 * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
1878 * @phba: Pointer to HBA context object
1879 * @rxxri: Receive exchange id
1880 * @len: Number of data bytes
1881 *
1882 * This function allocates and posts a data buffer of sufficient size to receive
1883 * an unsolicited CT command.
1884 **/
1885static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
1886 size_t len)
1887{
1888 struct lpfc_sli *psli = &phba->sli;
1889 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
1890 struct lpfc_iocbq *cmdiocbq;
1891 IOCB_t *cmd = NULL;
1892 struct list_head head, *curr, *next;
1893 struct lpfc_dmabuf *rxbmp;
1894 struct lpfc_dmabuf *dmp;
1895 struct lpfc_dmabuf *mp[2] = {NULL, NULL};
1896 struct ulp_bde64 *rxbpl = NULL;
1897 uint32_t num_bde;
1898 struct lpfc_dmabufext *rxbuffer = NULL;
1899 int ret_val = 0;
1900 int i = 0;
1901
1902 cmdiocbq = lpfc_sli_get_iocbq(phba);
1903 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1904 if (rxbmp != NULL) {
1905 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
1906 INIT_LIST_HEAD(&rxbmp->list);
1907 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
1908 rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
1909 }
1910
1911 if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
1912 ret_val = ENOMEM;
1913 goto err_post_rxbufs_exit;
1914 }
1915
1916 /* Queue buffers for the receive exchange */
1917 num_bde = (uint32_t)rxbuffer->flag;
1918 dmp = &rxbuffer->dma;
1919
1920 cmd = &cmdiocbq->iocb;
1921 i = 0;
1922
1923 INIT_LIST_HEAD(&head);
1924 list_add_tail(&head, &dmp->list);
1925 list_for_each_safe(curr, next, &head) {
1926 mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
1927 list_del(curr);
1928
1929 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
1930 mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
1931 cmd->un.quexri64cx.buff.bde.addrHigh =
1932 putPaddrHigh(mp[i]->phys);
1933 cmd->un.quexri64cx.buff.bde.addrLow =
1934 putPaddrLow(mp[i]->phys);
1935 cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
1936 ((struct lpfc_dmabufext *)mp[i])->size;
1937 cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
1938 cmd->ulpCommand = CMD_QUE_XRI64_CX;
1939 cmd->ulpPU = 0;
1940 cmd->ulpLe = 1;
1941 cmd->ulpBdeCount = 1;
1942 cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
1943
1944 } else {
1945 cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
1946 cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
1947 cmd->un.cont64[i].tus.f.bdeSize =
1948 ((struct lpfc_dmabufext *)mp[i])->size;
1949 cmd->ulpBdeCount = ++i;
1950
1951 if ((--num_bde > 0) && (i < 2))
1952 continue;
1953
1954 cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
1955 cmd->ulpLe = 1;
1956 }
1957
1958 cmd->ulpClass = CLASS3;
1959 cmd->ulpContext = rxxri;
1960
1961 ret_val = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
1962
1963 if (ret_val == IOCB_ERROR) {
1964 diag_cmd_data_free(phba,
1965 (struct lpfc_dmabufext *)mp[0]);
1966 if (mp[1])
1967 diag_cmd_data_free(phba,
1968 (struct lpfc_dmabufext *)mp[1]);
1969 dmp = list_entry(next, struct lpfc_dmabuf, list);
1970 ret_val = EIO;
1971 goto err_post_rxbufs_exit;
1972 }
1973
1974 lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
1975 if (mp[1]) {
1976 lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
1977 mp[1] = NULL;
1978 }
1979
1980 /* The iocb was freed by lpfc_sli_issue_iocb */
1981 cmdiocbq = lpfc_sli_get_iocbq(phba);
1982 if (!cmdiocbq) {
1983 dmp = list_entry(next, struct lpfc_dmabuf, list);
1984 ret_val = EIO;
1985 goto err_post_rxbufs_exit;
1986 }
1987
1988 cmd = &cmdiocbq->iocb;
1989 i = 0;
1990 }
1991 list_del(&head);
1992
1993err_post_rxbufs_exit:
1994
1995 if (rxbmp) {
1996 if (rxbmp->virt)
1997 lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
1998 kfree(rxbmp);
1999 }
2000
2001 if (cmdiocbq)
2002 lpfc_sli_release_iocbq(phba, cmdiocbq);
2003 return ret_val;
2004}
2005
2006/**
2007 * lpfc_bsg_diag_test - with a port in loopback issues a Ct cmd to itself
2008 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
2009 *
2010 * This function receives a user data buffer to be transmitted and received on
2011 * the same port; the link must be up and in loopback mode prior
2012 * to being called.
2013 * 1. A kernel buffer is allocated to copy the user data into.
2014 * 2. The port registers with "itself".
2015 * 3. The transmit and receive exchange ids are obtained.
2016 * 4. The receive exchange id is posted.
2017 * 5. A new els loopback event is created.
2018 * 6. The command and response iocbs are allocated.
2019 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
2020 *
2021 * This function is meant to be called n times while the port is in loopback
2022 * so it is the app's responsibility to issue a reset to take the port out
2023 * of loopback mode.
2024 **/
2025static int
2026lpfc_bsg_diag_test(struct fc_bsg_job *job)
2027{
2028 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2029 struct lpfc_hba *phba = vport->phba;
2030 struct diag_mode_test *diag_mode;
2031 struct lpfc_bsg_event *evt;
2032 struct event_data *evdat;
2033 struct lpfc_sli *psli = &phba->sli;
2034 uint32_t size;
2035 uint32_t full_size;
2036 size_t segment_len = 0, segment_offset = 0, current_offset = 0;
2037 uint16_t rpi;
2038 struct lpfc_iocbq *cmdiocbq, *rspiocbq;
2039 IOCB_t *cmd, *rsp;
2040 struct lpfc_sli_ct_request *ctreq;
2041 struct lpfc_dmabuf *txbmp;
2042 struct ulp_bde64 *txbpl = NULL;
2043 struct lpfc_dmabufext *txbuffer = NULL;
2044 struct list_head head;
2045 struct lpfc_dmabuf *curr;
2046 uint16_t txxri, rxxri;
2047 uint32_t num_bde;
2048 uint8_t *ptr = NULL, *rx_databuf = NULL;
2049 int rc = 0;
2050 unsigned long flags;
2051 void *dataout = NULL;
2052 uint32_t total_mem;
2053
2054 /* in case no data is returned return just the return code */
2055 job->reply->reply_payload_rcv_len = 0;
2056
2057 if (job->request_len <
2058 sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
2059 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2060 "2739 Received DIAG TEST request below minimum "
2061 "size\n");
2062 rc = -EINVAL;
2063 goto loopback_test_exit;
2064 }
2065
2066 if (job->request_payload.payload_len !=
2067 job->reply_payload.payload_len) {
2068 rc = -EINVAL;
2069 goto loopback_test_exit;
2070 }
2071
2072 diag_mode = (struct diag_mode_test *)
2073 job->request->rqst_data.h_vendor.vendor_cmd;
2074
2075 if ((phba->link_state == LPFC_HBA_ERROR) ||
2076 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
2077 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
2078 rc = -EACCES;
2079 goto loopback_test_exit;
2080 }
2081
2082 if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
2083 rc = -EACCES;
2084 goto loopback_test_exit;
2085 }
2086
2087 size = job->request_payload.payload_len;
2088 full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
2089
2090 if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
2091 rc = -ERANGE;
2092 goto loopback_test_exit;
2093 }
2094
2095 if (size >= BUF_SZ_4K) {
2096 /*
2097 * Allocate memory for ioctl data. If buffer is bigger than 64k,
2098 * then we allocate 64k and re-use that buffer over and over to
2099 * xfer the whole block. This is because Linux kernel has a
2100 * problem allocating more than 120k of kernel space memory. Saw
2101 * problem with GET_FCPTARGETMAPPING...
2102 */
2103 if (size <= (64 * 1024))
2104 total_mem = size;
2105 else
2106 total_mem = 64 * 1024;
2107 } else
2108 /* Allocate memory for ioctl data */
2109 total_mem = BUF_SZ_4K;
2110
2111 dataout = kmalloc(total_mem, GFP_KERNEL);
2112 if (dataout == NULL) {
2113 rc = -ENOMEM;
2114 goto loopback_test_exit;
2115 }
2116
2117 ptr = dataout;
2118 ptr += ELX_LOOPBACK_HEADER_SZ;
2119 sg_copy_to_buffer(job->request_payload.sg_list,
2120 job->request_payload.sg_cnt,
2121 ptr, size);
2122
2123 rc = lpfcdiag_loop_self_reg(phba, &rpi);
2124 if (rc) {
2125 rc = -ENOMEM;
2126 goto loopback_test_exit;
2127 }
2128
2129 rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
2130 if (rc) {
2131 lpfcdiag_loop_self_unreg(phba, rpi);
2132 rc = -ENOMEM;
2133 goto loopback_test_exit;
2134 }
2135
2136 rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
2137 if (rc) {
2138 lpfcdiag_loop_self_unreg(phba, rpi);
2139 rc = -ENOMEM;
2140 goto loopback_test_exit;
2141 }
2142
2143 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
2144 SLI_CT_ELX_LOOPBACK);
2145 if (!evt) {
2146 lpfcdiag_loop_self_unreg(phba, rpi);
2147 rc = -ENOMEM;
2148 goto loopback_test_exit;
2149 }
2150
2151 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2152 list_add(&evt->node, &phba->ct_ev_waiters);
2153 lpfc_bsg_event_ref(evt);
2154 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2155
2156 cmdiocbq = lpfc_sli_get_iocbq(phba);
2157 rspiocbq = lpfc_sli_get_iocbq(phba);
2158 txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2159
2160 if (txbmp) {
2161 txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
2162 INIT_LIST_HEAD(&txbmp->list);
2163 txbpl = (struct ulp_bde64 *) txbmp->virt;
2164 if (txbpl)
2165 txbuffer = diag_cmd_data_alloc(phba,
2166 txbpl, full_size, 0);
2167 }
2168
2169 if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer) {
2170 rc = -ENOMEM;
2171 goto err_loopback_test_exit;
2172 }
2173
2174 cmd = &cmdiocbq->iocb;
2175 rsp = &rspiocbq->iocb;
2176
2177 INIT_LIST_HEAD(&head);
2178 list_add_tail(&head, &txbuffer->dma.list);
2179 list_for_each_entry(curr, &head, list) {
2180 segment_len = ((struct lpfc_dmabufext *)curr)->size;
2181 if (current_offset == 0) {
2182 ctreq = curr->virt;
2183 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
2184 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
2185 ctreq->RevisionId.bits.InId = 0;
2186 ctreq->FsType = SLI_CT_ELX_LOOPBACK;
2187 ctreq->FsSubType = 0;
2188 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
2189 ctreq->CommandResponse.bits.Size = size;
2190 segment_offset = ELX_LOOPBACK_HEADER_SZ;
2191 } else
2192 segment_offset = 0;
2193
2194 BUG_ON(segment_offset >= segment_len);
2195 memcpy(curr->virt + segment_offset,
2196 ptr + current_offset,
2197 segment_len - segment_offset);
2198
2199 current_offset += segment_len - segment_offset;
2200 BUG_ON(current_offset > size);
2201 }
2202 list_del(&head);
2203
2204 /* Build the XMIT_SEQUENCE iocb */
2205
2206 num_bde = (uint32_t)txbuffer->flag;
2207
2208 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
2209 cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
2210 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
2211 cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
2212
2213 cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
2214 cmd->un.xseq64.w5.hcsw.Dfctl = 0;
2215 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
2216 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
2217
2218 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
2219 cmd->ulpBdeCount = 1;
2220 cmd->ulpLe = 1;
2221 cmd->ulpClass = CLASS3;
2222 cmd->ulpContext = txxri;
2223
2224 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
2225 cmdiocbq->vport = phba->pport;
2226
2227 rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq,
2228 (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT);
2229
2230 if ((rc != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) {
2231 rc = -EIO;
2232 goto err_loopback_test_exit;
2233 }
2234
2235 evt->waiting = 1;
2236 rc = wait_event_interruptible_timeout(
2237 evt->wq, !list_empty(&evt->events_to_see),
2238 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
2239 evt->waiting = 0;
2240 if (list_empty(&evt->events_to_see))
2241 rc = (rc) ? -EINTR : -ETIMEDOUT;
2242 else {
2243 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2244 list_move(evt->events_to_see.prev, &evt->events_to_get);
2245 evdat = list_entry(evt->events_to_get.prev,
2246 typeof(*evdat), node);
2247 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2248 rx_databuf = evdat->data;
2249 if (evdat->len != full_size) {
2250 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2251 "1603 Loopback test did not receive expected "
2252 "data length. actual length 0x%x expected "
2253 "length 0x%x\n",
2254 evdat->len, full_size);
2255 rc = -EIO;
2256 } else if (rx_databuf == NULL)
2257 rc = -EIO;
2258 else {
2259 rc = IOCB_SUCCESS;
2260 /* skip over elx loopback header */
2261 rx_databuf += ELX_LOOPBACK_HEADER_SZ;
2262 job->reply->reply_payload_rcv_len =
2263 sg_copy_from_buffer(job->reply_payload.sg_list,
2264 job->reply_payload.sg_cnt,
2265 rx_databuf, size);
2266 job->reply->reply_payload_rcv_len = size;
2267 }
2268 }
2269
2270err_loopback_test_exit:
2271 lpfcdiag_loop_self_unreg(phba, rpi);
2272
2273 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2274 lpfc_bsg_event_unref(evt); /* release ref */
2275 lpfc_bsg_event_unref(evt); /* delete */
2276 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2277
2278 if (cmdiocbq != NULL)
2279 lpfc_sli_release_iocbq(phba, cmdiocbq);
2280
2281 if (rspiocbq != NULL)
2282 lpfc_sli_release_iocbq(phba, rspiocbq);
2283
2284 if (txbmp != NULL) {
2285 if (txbpl != NULL) {
2286 if (txbuffer != NULL)
2287 diag_cmd_data_free(phba, txbuffer);
2288 lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
2289 }
2290 kfree(txbmp);
2291 }
2292
2293loopback_test_exit:
2294 kfree(dataout);
2295 /* make error code available to userspace */
2296 job->reply->result = rc;
2297 job->dd_data = NULL;
2298 /* complete the job back to userspace if no error */
2299 if (rc == 0)
2300 job->job_done(job);
2301 return rc;
2302}
2303
2304/**
2305 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
2306 * @job: GET_DFC_REV fc_bsg_job
2307 **/
2308static int
2309lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
2310{
2311 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2312 struct lpfc_hba *phba = vport->phba;
2313 struct get_mgmt_rev *event_req;
2314 struct get_mgmt_rev_reply *event_reply;
2315 int rc = 0;
2316
2317 if (job->request_len <
2318 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
2319 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2320 "2740 Received GET_DFC_REV request below "
2321 "minimum size\n");
2322 rc = -EINVAL;
2323 goto job_error;
2324 }
2325
2326 event_req = (struct get_mgmt_rev *)
2327 job->request->rqst_data.h_vendor.vendor_cmd;
2328
2329 event_reply = (struct get_mgmt_rev_reply *)
2330 job->reply->reply_data.vendor_reply.vendor_rsp;
2331
2332 if (job->reply_len <
2333 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
2334 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2335 "2741 Received GET_DFC_REV reply below "
2336 "minimum size\n");
2337 rc = -EINVAL;
2338 goto job_error;
2339 }
2340
2341 event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
2342 event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
2343job_error:
2344 job->reply->result = rc;
2345 if (rc == 0)
2346 job->job_done(job);
2347 return rc;
2348}
2349
2350/**
2351 * lpfc_bsg_wake_mbox_wait - lpfc_bsg_issue_mbox mbox completion handler
2352 * @phba: Pointer to HBA context object.
2353 * @pmboxq: Pointer to mailbox command.
2354 *
2355 * This is the completion handler function for mailbox commands issued from
2356 * the lpfc_bsg_issue_mbox function. This function is called by the
2357 * mailbox event handler function with no lock held. This function
2358 * will wake up the thread waiting on the wait queue pointed to by context1
2359 * of the mailbox.
2360 **/
2361void
2362lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2363{
2364 struct bsg_job_data *dd_data;
2365 MAILBOX_t *pmb;
2366 MAILBOX_t *mb;
2367 struct fc_bsg_job *job;
2368 uint32_t size;
2369 unsigned long flags;
2370
2371 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2372 dd_data = pmboxq->context1;
2373 if (!dd_data) {
2374 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2375 return;
2376 }
2377
2378 pmb = &dd_data->context_un.mbox.pmboxq->u.mb;
2379 mb = dd_data->context_un.mbox.mb;
2380 job = dd_data->context_un.mbox.set_job;
2381 memcpy(mb, pmb, sizeof(*pmb));
2382 size = job->request_payload.payload_len;
2383 job->reply->reply_payload_rcv_len =
2384 sg_copy_from_buffer(job->reply_payload.sg_list,
2385 job->reply_payload.sg_cnt,
2386 mb, size);
2387 job->reply->result = 0;
2388 dd_data->context_un.mbox.set_job = NULL;
2389 job->dd_data = NULL;
2390 job->job_done(job);
2391 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2392 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
2393 kfree(mb);
2394 kfree(dd_data);
2395 return;
2396}
2397
2398/**
2399 * lpfc_bsg_check_cmd_access - test for a supported mailbox command
2400 * @phba: Pointer to HBA context object.
2401 * @mb: Pointer to a mailbox object.
2402 * @vport: Pointer to a vport object.
2403 *
2404 * Some commands require the port to be offline; some may not be called from
2405 * the application.
2406 **/
2407static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
2408 MAILBOX_t *mb, struct lpfc_vport *vport)
2409{
2410 /* return negative error values for bsg job */
2411 switch (mb->mbxCommand) {
2412 /* Offline only */
2413 case MBX_INIT_LINK:
2414 case MBX_DOWN_LINK:
2415 case MBX_CONFIG_LINK:
2416 case MBX_CONFIG_RING:
2417 case MBX_RESET_RING:
2418 case MBX_UNREG_LOGIN:
2419 case MBX_CLEAR_LA:
2420 case MBX_DUMP_CONTEXT:
2421 case MBX_RUN_DIAGS:
2422 case MBX_RESTART:
2423 case MBX_SET_MASK:
2424 if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
2425 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2426 "2743 Command 0x%x is illegal in on-line "
2427 "state\n",
2428 mb->mbxCommand);
2429 return -EPERM;
2430 }
2431 case MBX_WRITE_NV:
2432 case MBX_WRITE_VPARMS:
2433 case MBX_LOAD_SM:
2434 case MBX_READ_NV:
2435 case MBX_READ_CONFIG:
2436 case MBX_READ_RCONFIG:
2437 case MBX_READ_STATUS:
2438 case MBX_READ_XRI:
2439 case MBX_READ_REV:
2440 case MBX_READ_LNK_STAT:
2441 case MBX_DUMP_MEMORY:
2442 case MBX_DOWN_LOAD:
2443 case MBX_UPDATE_CFG:
2444 case MBX_KILL_BOARD:
2445 case MBX_LOAD_AREA:
2446 case MBX_LOAD_EXP_ROM:
2447 case MBX_BEACON:
2448 case MBX_DEL_LD_ENTRY:
2449 case MBX_SET_DEBUG:
2450 case MBX_WRITE_WWN:
2451 case MBX_SLI4_CONFIG:
2452 case MBX_READ_EVENT_LOG_STATUS:
2453 case MBX_WRITE_EVENT_LOG:
2454 case MBX_PORT_CAPABILITIES:
2455 case MBX_PORT_IOV_CONTROL:
2456 break;
2457 case MBX_SET_VARIABLE:
2458 case MBX_RUN_BIU_DIAG64:
2459 case MBX_READ_EVENT_LOG:
2460 case MBX_READ_SPARM64:
2461 case MBX_READ_LA:
2462 case MBX_READ_LA64:
2463 case MBX_REG_LOGIN:
2464 case MBX_REG_LOGIN64:
2465 case MBX_CONFIG_PORT:
2466 case MBX_RUN_BIU_DIAG:
2467 default:
2468 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2469 "2742 Unknown Command 0x%x\n",
2470 mb->mbxCommand);
2471 return -EPERM;
2472 }
2473
2474 return 0; /* ok */
2475}
2476
2477/**
2478 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
2479 * @phba: Pointer to HBA context object.
2480 * @mb: Pointer to a mailbox object.
2481 * @vport: Pointer to a vport object.
2482 *
2483 * Allocate a tracking object, mailbox command memory, get a mailbox
2484 * from the mailbox pool, copy the caller mailbox command.
2485 *
2486 * If offline and the sli is active we need to poll for the command (port is
2487 * being reset) and complete the job, otherwise issue the mailbox command and
2488 * let our completion handler finish the command.
2489 **/
2490static uint32_t
2491lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2492 struct lpfc_vport *vport)
2493{
2494 LPFC_MBOXQ_t *pmboxq;
2495 MAILBOX_t *pmb;
2496 MAILBOX_t *mb;
2497 struct bsg_job_data *dd_data;
2498 uint32_t size;
2499 int rc = 0;
2500
2501 /* allocate our bsg tracking structure */
2502 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
2503 if (!dd_data) {
2504 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2505 "2727 Failed allocation of dd_data\n");
2506 return -ENOMEM;
2507 }
2508
2509 mb = kzalloc(PAGE_SIZE, GFP_KERNEL);
2510 if (!mb) {
2511 kfree(dd_data);
2512 return -ENOMEM;
2513 }
2514
2515 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2516 if (!pmboxq) {
2517 kfree(dd_data);
2518 kfree(mb);
2519 return -ENOMEM;
2520 }
2521
2522 size = job->request_payload.payload_len;
2523 job->reply->reply_payload_rcv_len =
2524 sg_copy_to_buffer(job->request_payload.sg_list,
2525 job->request_payload.sg_cnt,
2526 mb, size);
2527
2528 rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
2529 if (rc != 0) {
2530 kfree(dd_data);
2531 kfree(mb);
2532 mempool_free(pmboxq, phba->mbox_mem_pool);
2533 return rc; /* must be negative */
2534 }
2535
2536 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
2537 pmb = &pmboxq->u.mb;
2538 memcpy(pmb, mb, sizeof(*pmb));
2539 pmb->mbxOwner = OWN_HOST;
2540 pmboxq->context1 = NULL;
2541 pmboxq->vport = vport;
2542
2543 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
2544 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
2545 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
2546 if (rc != MBX_SUCCESS) {
2547 if (rc != MBX_TIMEOUT) {
2548 kfree(dd_data);
2549 kfree(mb);
2550 mempool_free(pmboxq, phba->mbox_mem_pool);
2551 }
2552 return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
2553 }
2554
2555 memcpy(mb, pmb, sizeof(*pmb));
2556 job->reply->reply_payload_rcv_len =
2557 sg_copy_from_buffer(job->reply_payload.sg_list,
2558 job->reply_payload.sg_cnt,
2559 mb, size);
2560 kfree(dd_data);
2561 kfree(mb);
2562 mempool_free(pmboxq, phba->mbox_mem_pool);
2563 /* not waiting mbox already done */
2564 return 0;
2565 }
2566
2567 /* setup wake call as IOCB callback */
2568 pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;
2569 /* setup context field to pass wait_queue pointer to wake function */
2570 pmboxq->context1 = dd_data;
2571 dd_data->type = TYPE_MBOX;
2572 dd_data->context_un.mbox.pmboxq = pmboxq;
2573 dd_data->context_un.mbox.mb = mb;
2574 dd_data->context_un.mbox.set_job = job;
2575 job->dd_data = dd_data;
2576 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
2577 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
2578 kfree(dd_data);
2579 kfree(mb);
2580 mempool_free(pmboxq, phba->mbox_mem_pool);
2581 return -EIO;
2582 }
2583
2584 return 1;
2585}
2586
2587/**
2588 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
2589 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
2590 **/
2591static int
2592lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
2593{
2594 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2595 struct lpfc_hba *phba = vport->phba;
2596 int rc = 0;
2597
2598 /* in case no data is transferred */
2599 job->reply->reply_payload_rcv_len = 0;
2600 if (job->request_len <
2601 sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
2602 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2603 "2737 Received MBOX_REQ request below "
2604 "minimum size\n");
2605 rc = -EINVAL;
2606 goto job_error;
2607 }
2608
2609 if (job->request_payload.payload_len != PAGE_SIZE) {
2610 rc = -EINVAL;
2611 goto job_error;
2612 }
2613
2614 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
2615 rc = -EAGAIN;
2616 goto job_error;
2617 }
2618
2619 rc = lpfc_bsg_issue_mbox(phba, job, vport);
2620
2621job_error:
2622 if (rc == 0) {
2623 /* job done */
2624 job->reply->result = 0;
2625 job->dd_data = NULL;
2626 job->job_done(job);
2627 } else if (rc == 1)
2628 /* job submitted, will complete later*/
2629 rc = 0; /* return zero, no error */
2630 else {
2631 /* some error occurred */
2632 job->reply->result = rc;
2633 job->dd_data = NULL;
2634 }
2635
2636 return rc;
2637}
2638
2639/**
1110 2640 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
1111 2641 * @job: fc_bsg_job to handle
1112 */ 2642 **/
1113 2643 static int
1114 2644 lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
1115 2645 {
@@ -1120,10 +2650,24 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
1120 2650 case LPFC_BSG_VENDOR_SET_CT_EVENT:
1121 2651 rc = lpfc_bsg_hba_set_event(job);
1122 2652 break;
1123
1124 2653 case LPFC_BSG_VENDOR_GET_CT_EVENT:
1125 2654 rc = lpfc_bsg_hba_get_event(job);
1126 2655 break;
2656 case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
2657 rc = lpfc_bsg_send_mgmt_rsp(job);
2658 break;
2659 case LPFC_BSG_VENDOR_DIAG_MODE:
2660 rc = lpfc_bsg_diag_mode(job);
2661 break;
2662 case LPFC_BSG_VENDOR_DIAG_TEST:
2663 rc = lpfc_bsg_diag_test(job);
2664 break;
2665 case LPFC_BSG_VENDOR_GET_MGMT_REV:
2666 rc = lpfc_bsg_get_dfc_rev(job);
2667 break;
2668 case LPFC_BSG_VENDOR_MBOX:
2669 rc = lpfc_bsg_mbox_cmd(job);
2670 break;
1127 2671 default:
1128 2672 rc = -EINVAL;
1129 2673 job->reply->reply_payload_rcv_len = 0;
@@ -1138,7 +2682,7 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
1138 2682 /**
1139 2683 * lpfc_bsg_request - handle a bsg request from the FC transport
1140 2684 * @job: fc_bsg_job to handle
1141 */ 2685 **/
1142 2686 int
1143 2687 lpfc_bsg_request(struct fc_bsg_job *job)
1144 2688 {
@@ -1173,7 +2717,7 @@ lpfc_bsg_request(struct fc_bsg_job *job)
1173 2717 *
1174 2718 * This function just aborts the job's IOCB. The aborted IOCB will return to
1175 2719 * the waiting function which will handle passing the error back to userspace
1176 */ 2720 **/
1177 2721 int
1178 2722 lpfc_bsg_timeout(struct fc_bsg_job *job)
1179 2723 {
@@ -1182,6 +2726,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
1182 2726 struct lpfc_iocbq *cmdiocb;
1183 2727 struct lpfc_bsg_event *evt;
1184 2728 struct lpfc_bsg_iocb *iocb;
2729 struct lpfc_bsg_mbox *mbox;
1185 2730 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
1186 2731 struct bsg_job_data *dd_data;
1187 2732 unsigned long flags;
@@ -1219,6 +2764,16 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
1219 2764 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1220 2765 job->job_done(job);
1221 2766 break;
2767 case TYPE_MBOX:
2768 mbox = &dd_data->context_un.mbox;
2769 /* this mbox has no job anymore */
2770 mbox->set_job = NULL;
2771 job->dd_data = NULL;
2772 job->reply->reply_payload_rcv_len = 0;
2773 job->reply->result = -EAGAIN;
2774 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2775 job->job_done(job);
2776 break;
1222 2777 default:
1223 2778 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1224 2779 break;
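To round out the diagnostic path added above: lpfc_bsg_diag_mode() blocks scsi traffic, takes the link down and re-initializes it in loopback, and lpfc_bsg_diag_test() then echoes a payload through the looped-back port, insisting that the transmit and receive payload lengths match. A hedged userspace sketch of that sequence follows; the vendor command values, the diag_mode_set layout, and the minimal DIAG_TEST request sizing are assumptions to be verified against lpfc_bsg.h, and per the handler comments it remains the app's responsibility to reset the port out of loopback afterwards.

/* Hedged sketch: enter loopback (DIAG_MODE), then run one loopback
 * transfer (DIAG_TEST). Vendor command values and struct layouts are
 * placeholders that must match the driver's lpfc_bsg.h.
 */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/bsg.h>
#include <scsi/sg.h>
#include <scsi/scsi_bsg_fc.h>

#define LPFC_BSG_VENDOR_DIAG_MODE 4     /* assumed values, see lpfc_bsg.h */
#define LPFC_BSG_VENDOR_DIAG_TEST 5
#define INTERNAL_LOOP_BACK        1     /* assumed loopback type code */

struct diag_mode_set {                  /* assumed layout of vendor_cmd[] */
	uint32_t command;               /* LPFC_BSG_VENDOR_DIAG_MODE */
	uint32_t type;                  /* loopback type */
	uint32_t timeout;               /* link-state wait, seconds */
};

static int fc_vendor_cmd(int fd, void *cmd, size_t cmd_len,
			 void *dout, void *din, uint32_t len)
{
	uint8_t rqst[sizeof(struct fc_bsg_request) + 64];
	uint8_t resp[sizeof(struct fc_bsg_reply) + 64];
	struct fc_bsg_request *bsg_req = (struct fc_bsg_request *)rqst;
	struct sg_io_v4 io;

	memset(rqst, 0, sizeof(rqst));
	bsg_req->msgcode = FC_BSG_HST_VENDOR;
	memcpy(bsg_req->rqst_data.h_vendor.vendor_cmd, cmd, cmd_len);

	memset(&io, 0, sizeof(io));
	io.guard = 'Q';
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	io.request = (uintptr_t)rqst;
	io.request_len = sizeof(struct fc_bsg_request) + cmd_len;
	io.response = (uintptr_t)resp;
	io.max_response_len = sizeof(resp);
	io.dout_xferp = (uintptr_t)dout;   /* becomes the request payload */
	io.dout_xfer_len = dout ? len : 0;
	io.din_xferp = (uintptr_t)din;     /* becomes the reply payload */
	io.din_xfer_len = din ? len : 0;
	io.timeout = 60 * 1000;

	return ioctl(fd, SG_IO, &io);
}

int main(void)
{
	struct diag_mode_set mode = {
		.command = LPFC_BSG_VENDOR_DIAG_MODE,
		.type = INTERNAL_LOOP_BACK,
		.timeout = 60,
	};
	/* diag_mode_test may carry more fields than the command word;
	 * sized minimally here as an assumption */
	uint32_t test_cmd = LPFC_BSG_VENDOR_DIAG_TEST;
	uint8_t tx[1024], rx[1024];
	int fd = open("/dev/bsg/fc_host0", O_RDWR);  /* example node */

	if (fd < 0)
		return 1;

	memset(tx, 0xA5, sizeof(tx));
	/* place the port in loopback, then echo the pattern through it;
	 * lpfc_bsg_diag_test() insists the two payload lengths match */
	if (fc_vendor_cmd(fd, &mode, sizeof(mode), NULL, NULL, 0) == 0)
		fc_vendor_cmd(fd, &test_cmd, sizeof(test_cmd),
			      tx, rx, sizeof(tx));

	close(fd);
	return 0;
}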