author		Matthew R. Ochs <mrochs@linux.vnet.ibm.com>	2015-06-09 18:15:52 -0400
committer	James Bottomley <JBottomley@Odin.com>	2015-07-31 00:02:01 -0400
commit		c21e0bbfc48509a776ec4a39bd9a0fb45a9c315b (patch)
tree		ec2bc76bd16eb59c3efbd7af05c8364eb4a9ecf0 /drivers/scsi/cxlflash/main.c
parent		2dc127bb299d1c7436a08e79193bd0251068356e (diff)
cxlflash: Base support for IBM CXL Flash Adapter
SCSI device driver to support filesystem access on the IBM CXL Flash adapter.
Supported-by: Stephen Bates <stephen.bates@pmcs.com>
Reviewed-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>
Signed-off-by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>
Reviewed-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: James Bottomley <JBottomley@Odin.com>
Diffstat (limited to 'drivers/scsi/cxlflash/main.c')
-rw-r--r--	drivers/scsi/cxlflash/main.c	2294
1 file changed, 2294 insertions, 0 deletions
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
new file mode 100644
index 000000000000..0720d2f13c2a
--- /dev/null
+++ b/drivers/scsi/cxlflash/main.c
@@ -0,0 +1,2294 @@
/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <misc/cxl.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");


/**
 * cxlflash_cmd_checkout() - checks out an AFU command
 * @afu: AFU to checkout from.
 *
 * Commands are checked out in a round-robin fashion. Note that since
 * the command pool is larger than the hardware queue, the majority of
 * times we will only loop once or twice before getting a command. The
 * buffer and CDB within the command are initialized (zeroed) prior to
 * returning.
 *
 * Return: The checked out command or NULL when command pool is empty.
 */
struct afu_cmd *cxlflash_cmd_checkout(struct afu *afu)
{
        int k, dec = CXLFLASH_NUM_CMDS;
        struct afu_cmd *cmd;

        while (dec--) {
                k = (afu->cmd_couts++ & (CXLFLASH_NUM_CMDS - 1));

                cmd = &afu->cmd[k];

                if (!atomic_dec_if_positive(&cmd->free)) {
                        pr_debug("%s: returning found index=%d\n",
                                 __func__, cmd->slot);
                        memset(cmd->buf, 0, CMD_BUFSIZE);
                        memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
                        return cmd;
                }
        }

        return NULL;
}

/**
 * cxlflash_cmd_checkin() - checks in an AFU command
 * @cmd: AFU command to checkin.
 *
 * Safe to pass commands that have already been checked in. Several
 * internal tracking fields are reset as part of the checkin. Note
 * that these are intentionally reset prior to toggling the free bit
 * to avoid clobbering values in the event that the command is checked
 * out right away.
 */
void cxlflash_cmd_checkin(struct afu_cmd *cmd)
{
        cmd->rcb.scp = NULL;
        cmd->rcb.timeout = 0;
        cmd->sa.ioasc = 0;
        cmd->cmd_tmf = false;
        cmd->sa.host_use[0] = 0; /* clears both completion and retry bytes */

        if (unlikely(atomic_inc_return(&cmd->free) != 1)) {
                pr_err("%s: Freeing cmd (%d) that is not in use!\n",
                       __func__, cmd->slot);
                return;
        }

        pr_debug("%s: released cmd %p index=%d\n", __func__, cmd, cmd->slot);
}
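
/*
 * Editorial sketch (not part of this commit): how the checkout/checkin
 * pool primitives above are intended to pair up in a caller. Error
 * handling is elided and the surrounding context is hypothetical.
 *
 *      cmd = cxlflash_cmd_checkout(afu);
 *      if (unlikely(!cmd))
 *              return SCSI_MLQUEUE_HOST_BUSY;  (pool temporarily empty)
 *
 *      (populate cmd->rcb, send the command to the AFU, wait for it)
 *
 *      cxlflash_cmd_checkin(cmd);              (return the slot to the pool)
 */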

/**
 * process_cmd_err() - command error handler
 * @cmd: AFU command that experienced the error.
 * @scp: SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
        struct sisl_ioarcb *ioarcb;
        struct sisl_ioasa *ioasa;

        if (unlikely(!cmd))
                return;

        ioarcb = &(cmd->rcb);
        ioasa = &(cmd->sa);

        if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
                pr_debug("%s: cmd underrun cmd = %p scp = %p\n",
                         __func__, cmd, scp);
                scp->result = (DID_ERROR << 16);
        }

        if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
                pr_debug("%s: cmd overrun cmd = %p scp = %p\n",
                         __func__, cmd, scp);
                scp->result = (DID_ERROR << 16);
        }

        pr_debug("%s: cmd failed afu_rc=%d scsi_rc=%d fc_rc=%d "
                 "afu_extra=0x%X, scsi_extra=0x%X, fc_extra=0x%X\n",
                 __func__, ioasa->rc.afu_rc, ioasa->rc.scsi_rc,
                 ioasa->rc.fc_rc, ioasa->afu_extra, ioasa->scsi_extra,
                 ioasa->fc_extra);

        if (ioasa->rc.scsi_rc) {
                /* We have a SCSI status */
                if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
                        memcpy(scp->sense_buffer, ioasa->sense_data,
                               SISL_SENSE_DATA_LEN);
                        scp->result = ioasa->rc.scsi_rc;
                } else
                        scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
        }

        /*
         * We encountered an error. Set scp->result based on nature
         * of error.
         */
        if (ioasa->rc.fc_rc) {
                /* We have an FC status */
                switch (ioasa->rc.fc_rc) {
                case SISL_FC_RC_LINKDOWN:
                        scp->result = (DID_REQUEUE << 16);
                        break;
                case SISL_FC_RC_RESID:
                        /* This indicates an FCP resid underrun */
                        if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
                                /* If the SISL_RC_FLAGS_OVERRUN flag was set,
                                 * then this error is handled elsewhere.
                                 * If not, we must handle it here.
                                 * This is probably an AFU bug. We will
                                 * attempt a retry to see if that resolves it.
                                 */
                                scp->result = (DID_ERROR << 16);
                        }
                        break;
                case SISL_FC_RC_RESIDERR:
                        /* Resid mismatch between adapter and device */
                case SISL_FC_RC_TGTABORT:
                case SISL_FC_RC_ABORTOK:
                case SISL_FC_RC_ABORTFAIL:
                case SISL_FC_RC_NOLOGI:
                case SISL_FC_RC_ABORTPEND:
                case SISL_FC_RC_WRABORTPEND:
                case SISL_FC_RC_NOEXP:
                case SISL_FC_RC_INUSE:
                        scp->result = (DID_ERROR << 16);
                        break;
                }
        }

        if (ioasa->rc.afu_rc) {
                /* We have an AFU error */
                switch (ioasa->rc.afu_rc) {
                case SISL_AFU_RC_NO_CHANNELS:
                        scp->result = (DID_MEDIUM_ERROR << 16);
                        break;
                case SISL_AFU_RC_DATA_DMA_ERR:
                        switch (ioasa->afu_extra) {
                        case SISL_AFU_DMA_ERR_PAGE_IN:
                                /* Retry */
                                scp->result = (DID_IMM_RETRY << 16);
                                break;
                        case SISL_AFU_DMA_ERR_INVALID_EA:
                        default:
                                scp->result = (DID_ERROR << 16);
                        }
                        break;
                case SISL_AFU_RC_OUT_OF_DATA_BUFS:
                        /* Retry */
                        scp->result = (DID_ALLOC_FAILURE << 16);
                        break;
                default:
                        scp->result = (DID_ERROR << 16);
                }
        }
}

/**
 * cmd_complete() - command completion handler
 * @cmd: AFU command that has completed.
 *
 * Prepares and submits command that has either completed or timed out to
 * the SCSI stack. Checks AFU command back into command pool for non-internal
 * (rcb.scp populated) commands.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
        struct scsi_cmnd *scp;
        u32 resid;
        ulong lock_flags;
        struct afu *afu = cmd->parent;
        struct cxlflash_cfg *cfg = afu->parent;
        bool cmd_is_tmf;

        spin_lock_irqsave(&cmd->slock, lock_flags);
        cmd->sa.host_use_b[0] |= B_DONE;
        spin_unlock_irqrestore(&cmd->slock, lock_flags);

        if (cmd->rcb.scp) {
                scp = cmd->rcb.scp;
                if (unlikely(cmd->sa.rc.afu_rc ||
                             cmd->sa.rc.scsi_rc ||
                             cmd->sa.rc.fc_rc))
                        process_cmd_err(cmd, scp);
                else
                        scp->result = (DID_OK << 16);

                resid = cmd->sa.resid;
                cmd_is_tmf = cmd->cmd_tmf;
                cxlflash_cmd_checkin(cmd); /* Don't use cmd after here */

                pr_debug("%s: calling scsi_set_resid, scp=%p "
                         "result=%X resid=%d\n", __func__,
                         scp, scp->result, resid);

                scsi_set_resid(scp, resid);
                scsi_dma_unmap(scp);
                scp->scsi_done(scp);

                if (cmd_is_tmf) {
                        spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
                        cfg->tmf_active = false;
                        wake_up_all_locked(&cfg->tmf_waitq);
                        spin_unlock_irqrestore(&cfg->tmf_waitq.lock,
                                               lock_flags);
                }
        } else
                complete(&cmd->cevent);
}

/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @afu: AFU to checkout from.
 * @scp: SCSI command from stack.
 * @tmfcmd: TMF command to send.
 *
 * Return:
 *      0 on success
 *      SCSI_MLQUEUE_HOST_BUSY when host is busy
 */
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
        struct afu_cmd *cmd;

        u32 port_sel = scp->device->channel + 1;
        short lflag = 0;
        struct Scsi_Host *host = scp->device->host;
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
        ulong lock_flags;
        int rc = 0;

        cmd = cxlflash_cmd_checkout(afu);
        if (unlikely(!cmd)) {
                pr_err("%s: could not get a free command\n", __func__);
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto out;
        }

        /* If a Task Management Function is active, do not send one more.
         */
        spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
        if (cfg->tmf_active)
                wait_event_interruptible_locked_irq(cfg->tmf_waitq,
                                                    !cfg->tmf_active);
        cfg->tmf_active = true;
        cmd->cmd_tmf = true;
        spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);

        cmd->rcb.ctx_id = afu->ctx_hndl;
        cmd->rcb.port_sel = port_sel;
        cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

        lflag = SISL_REQ_FLAGS_TMF_CMD;

        cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
                              SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);

        /* Stash the scp in the reserved field, for reuse during interrupt */
        cmd->rcb.scp = scp;

        /* Copy the CDB from the cmd passed in */
        memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

        /* Send the command */
        rc = cxlflash_send_cmd(afu, cmd);
        if (unlikely(rc)) {
                cxlflash_cmd_checkin(cmd);
                spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
                cfg->tmf_active = false;
                spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
                goto out;
        }

        spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
        wait_event_interruptible_locked_irq(cfg->tmf_waitq, !cfg->tmf_active);
        spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
out:
        return rc;
}
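
/*
 * Editorial note (not part of this commit): send_tmf() serializes task
 * management with regular I/O through the tmf_active flag. A minimal
 * sketch of the wait/wake protocol, using the locked waitq helpers
 * seen above:
 *
 *      issuer:                         completion (cmd_complete):
 *        lock(tmf_waitq.lock)            lock(tmf_waitq.lock)
 *        wait until !tmf_active          tmf_active = false
 *        tmf_active = true               wake_up_all_locked(&tmf_waitq)
 *        unlock                          unlock
 */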

/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host: SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
        return CXLFLASH_ADAPTER_NAME;
}

/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host: SCSI host associated with device.
 * @scp: SCSI command to send.
 *
 * Return:
 *      0 on success
 *      SCSI_MLQUEUE_HOST_BUSY when host is busy
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
        struct afu *afu = cfg->afu;
        struct pci_dev *pdev = cfg->dev;
        struct afu_cmd *cmd;
        u32 port_sel = scp->device->channel + 1;
        int nseg, i, ncount;
        struct scatterlist *sg;
        ulong lock_flags;
        short lflag = 0;
        int rc = 0;

        pr_debug("%s: (scp=%p) %d/%d/%d/%llu cdb=(%08X-%08X-%08X-%08X)\n",
                 __func__, scp, host->host_no, scp->device->channel,
                 scp->device->id, scp->device->lun,
                 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
                 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
                 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
                 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

        /* If a Task Management Function is active, wait for it to complete
         * before continuing with regular commands.
         */
        spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
        if (cfg->tmf_active) {
                spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto out;
        }
        spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);

        cmd = cxlflash_cmd_checkout(afu);
        if (unlikely(!cmd)) {
                pr_err("%s: could not get a free command\n", __func__);
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto out;
        }

        cmd->rcb.ctx_id = afu->ctx_hndl;
        cmd->rcb.port_sel = port_sel;
        cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

        if (scp->sc_data_direction == DMA_TO_DEVICE)
                lflag = SISL_REQ_FLAGS_HOST_WRITE;
        else
                lflag = SISL_REQ_FLAGS_HOST_READ;

        cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
                              SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);

        /* Stash the scp in the reserved field, for reuse during interrupt */
        cmd->rcb.scp = scp;

        nseg = scsi_dma_map(scp);
        if (unlikely(nseg < 0)) {
                dev_err(&pdev->dev, "%s: Fail DMA map! nseg=%d\n",
                        __func__, nseg);
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto out;
        }

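        /*
         * Editorial note: the host template below sets sg_tablesize to
         * SG_NONE, so at most one scatter-gather element is expected
         * here; the loop leaves the last (only) segment in the RCB.
         */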
        ncount = scsi_sg_count(scp);
        scsi_for_each_sg(scp, sg, ncount, i) {
                cmd->rcb.data_len = sg_dma_len(sg);
                cmd->rcb.data_ea = sg_dma_address(sg);
        }

        /* Copy the CDB from the scsi_cmnd passed in */
        memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

        /* Send the command */
        rc = cxlflash_send_cmd(afu, cmd);
        if (unlikely(rc)) {
                cxlflash_cmd_checkin(cmd);
                scsi_dma_unmap(scp);
        }

out:
        return rc;
}

/**
 * cxlflash_eh_device_reset_handler() - reset a single LUN
 * @scp: SCSI command to send.
 *
 * Return:
 *      SUCCESS as defined in scsi/scsi.h
 *      FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
{
        int rc = SUCCESS;
        struct Scsi_Host *host = scp->device->host;
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
        struct afu *afu = cfg->afu;
        int rcr = 0;

        pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
                 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
                 host->host_no, scp->device->channel,
                 scp->device->id, scp->device->lun,
                 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
                 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
                 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
                 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

        rcr = send_tmf(afu, scp, TMF_LUN_RESET);
        if (unlikely(rcr))
                rc = FAILED;

        pr_debug("%s: returning rc=%d\n", __func__, rc);
        return rc;
}

/**
 * cxlflash_eh_host_reset_handler() - reset the host adapter
 * @scp: SCSI command from stack identifying host.
 *
 * Return:
 *      SUCCESS as defined in scsi/scsi.h
 *      FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
{
        int rc = SUCCESS;
        int rcr = 0;
        struct Scsi_Host *host = scp->device->host;
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;

        pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
                 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
                 host->host_no, scp->device->channel,
                 scp->device->id, scp->device->lun,
                 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
                 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
                 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
                 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

        rcr = cxlflash_afu_reset(cfg);
        if (rcr == 0)
                rc = SUCCESS;
        else
                rc = FAILED;

        pr_debug("%s: returning rc=%d\n", __func__, rc);
        return rc;
}

/**
 * cxlflash_change_queue_depth() - change the queue depth for the device
 * @sdev: SCSI device destined for queue depth change.
 * @qdepth: Requested queue depth value to set.
 *
 * The requested queue depth is capped to the maximum supported value.
 *
 * Return: The actual queue depth set.
 */
static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
{

        if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
                qdepth = CXLFLASH_MAX_CMDS_PER_LUN;

        scsi_change_queue_depth(sdev, qdepth);
        return sdev->queue_depth;
}

/**
 * cxlflash_show_port_status() - queries and presents the current port status
 * @dev: Generic device associated with the host owning the port.
 * @attr: Device attribute representing the port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t cxlflash_show_port_status(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        struct Scsi_Host *shost = class_to_shost(dev);
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
        struct afu *afu = cfg->afu;

        char *disp_status;
        int rc;
        u32 port;
        u64 status;
        u64 *fc_regs;

        rc = kstrtouint((attr->attr.name + 4), 10, &port);
        if (rc || (port > NUM_FC_PORTS))
                return 0;

        fc_regs = &afu->afu_map->global.fc_regs[port][0];
        status =
            (readq_be(&fc_regs[FC_MTIP_STATUS / 8]) & FC_MTIP_STATUS_MASK);

        if (status == FC_MTIP_STATUS_ONLINE)
                disp_status = "online";
        else if (status == FC_MTIP_STATUS_OFFLINE)
                disp_status = "offline";
        else
                disp_status = "unknown";

        return snprintf(buf, PAGE_SIZE, "%s\n", disp_status);
}

/**
 * cxlflash_show_lun_mode() - presents the current LUN mode of the host
 * @dev: Generic device associated with the host.
 * @attr: Device attribute representing the lun mode.
 * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t cxlflash_show_lun_mode(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct Scsi_Host *shost = class_to_shost(dev);
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
        struct afu *afu = cfg->afu;

        return snprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
}

/**
 * cxlflash_store_lun_mode() - sets the LUN mode of the host
 * @dev: Generic device associated with the host.
 * @attr: Device attribute representing the lun mode.
 * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
 * @count: Length of data residing in @buf.
 *
 * The CXL Flash AFU supports a dummy LUN mode where the external
 * links and storage are not required. Space on the FPGA is used
 * to create 1 or 2 small LUNs which are presented to the system
 * as if they were a normal storage device. This feature is useful
 * during development and also provides manufacturing with a way
 * to test the AFU without an actual device.
 *
 * 0 = external LUN[s] (default)
 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
 *
 * Return: The number of bytes consumed from @buf (i.e. @count).
 */
static ssize_t cxlflash_store_lun_mode(struct device *dev,
                                       struct device_attribute *attr,
                                       const char *buf, size_t count)
{
        struct Scsi_Host *shost = class_to_shost(dev);
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
        struct afu *afu = cfg->afu;
        int rc;
        u32 lun_mode;

        rc = kstrtouint(buf, 10, &lun_mode);
        if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
                afu->internal_lun = lun_mode;
                cxlflash_afu_reset(cfg);
                scsi_scan_host(cfg->host);
        }

        return count;
}
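
/*
 * Editorial sketch (not part of this commit): selecting an internal
 * LUN mode from user space, assuming the standard SCSI host sysfs
 * path (the host number shown is hypothetical):
 *
 *      echo 2 > /sys/class/scsi_host/host0/lun_mode
 *
 * Writing a new mode triggers an AFU reset and host rescan as shown
 * above; writing 0 returns the card to external LUNs.
 */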

/**
 * cxlflash_show_dev_mode() - presents the current mode of the device
 * @dev: Generic device associated with the device.
 * @attr: Device attribute representing the device mode.
 * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t cxlflash_show_dev_mode(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct scsi_device *sdev = to_scsi_device(dev);

        return snprintf(buf, PAGE_SIZE, "%s\n",
                        sdev->hostdata ? "superpipe" : "legacy");
}

/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg: Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
        struct pci_dev *pdev = cfg->dev;

        if (pci_channel_offline(pdev))
                wait_event_timeout(cfg->eeh_waitq,
                                   !pci_channel_offline(pdev),
                                   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}

/*
 * Host attributes
 */
static DEVICE_ATTR(port0, S_IRUGO, cxlflash_show_port_status, NULL);
static DEVICE_ATTR(port1, S_IRUGO, cxlflash_show_port_status, NULL);
static DEVICE_ATTR(lun_mode, S_IRUGO | S_IWUSR, cxlflash_show_lun_mode,
                   cxlflash_store_lun_mode);

static struct device_attribute *cxlflash_host_attrs[] = {
        &dev_attr_port0,
        &dev_attr_port1,
        &dev_attr_lun_mode,
        NULL
};

/*
 * Device attributes
 */
static DEVICE_ATTR(mode, S_IRUGO, cxlflash_show_dev_mode, NULL);

static struct device_attribute *cxlflash_dev_attrs[] = {
        &dev_attr_mode,
        NULL
};

/*
 * Host template
 */
static struct scsi_host_template driver_template = {
        .module = THIS_MODULE,
        .name = CXLFLASH_ADAPTER_NAME,
        .info = cxlflash_driver_info,
        .proc_name = CXLFLASH_NAME,
        .queuecommand = cxlflash_queuecommand,
        .eh_device_reset_handler = cxlflash_eh_device_reset_handler,
        .eh_host_reset_handler = cxlflash_eh_host_reset_handler,
        .change_queue_depth = cxlflash_change_queue_depth,
        .cmd_per_lun = 16,
        .can_queue = CXLFLASH_MAX_CMDS,
        .this_id = -1,
        .sg_tablesize = SG_NONE,        /* No scatter gather support. */
        .max_sectors = CXLFLASH_MAX_SECTORS,
        .use_clustering = ENABLE_CLUSTERING,
        .shost_attrs = cxlflash_host_attrs,
        .sdev_attrs = cxlflash_dev_attrs,
};

/*
 * Device dependent values
 */
static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS };

/*
 * PCI device binding table
 */
static struct pci_device_id cxlflash_pci_table[] = {
        {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
         PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
        {}
};

MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);

/**
 * free_mem() - free memory associated with the AFU
 * @cfg: Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
        int i;
        char *buf = NULL;
        struct afu *afu = cfg->afu;

        if (cfg->afu) {
                for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
                        buf = afu->cmd[i].buf;
                        if (!((u64)buf & (PAGE_SIZE - 1)))
                                free_page((ulong)buf);
                }

                free_pages((ulong)afu, get_order(sizeof(struct afu)));
                cfg->afu = NULL;
        }
}

/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg: Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
        int i;
        struct afu *afu = cfg->afu;

        if (likely(afu)) {
                for (i = 0; i < CXLFLASH_NUM_CMDS; i++)
                        complete(&afu->cmd[i].cevent);

                if (likely(afu->afu_map)) {
                        cxl_psa_unmap((void *)afu->afu_map);
                        afu->afu_map = NULL;
                }
        }
}

/**
 * term_mc() - terminates the master context
 * @cfg: Internal structure associated with the host.
 * @level: Depth of allocation, where to begin waterfall tear down.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
{
        int rc = 0;
        struct afu *afu = cfg->afu;

        if (!afu || !cfg->mcctx) {
                pr_err("%s: returning from term_mc with NULL afu or MC\n",
                       __func__);
                return;
        }

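        /* Intentional fall-through: teardown cascades from @level down */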
        switch (level) {
        case UNDO_START:
                rc = cxl_stop_context(cfg->mcctx);
                BUG_ON(rc);
        case UNMAP_THREE:
                cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
        case UNMAP_TWO:
                cxl_unmap_afu_irq(cfg->mcctx, 2, afu);
        case UNMAP_ONE:
                cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
        case FREE_IRQ:
                cxl_free_afu_irqs(cfg->mcctx);
        case RELEASE_CONTEXT:
                cfg->mcctx = NULL;
        }
}

/**
 * term_afu() - terminates the AFU
 * @cfg: Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
        term_mc(cfg, UNDO_START);

        if (cfg->afu)
                stop_afu(cfg);

        pr_debug("%s: returning\n", __func__);
}

/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev: PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
        struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
        ulong lock_flags;

        /* If a Task Management Function is active, wait for it to complete
         * before continuing with remove.
         */
        spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
        if (cfg->tmf_active)
                wait_event_interruptible_locked_irq(cfg->tmf_waitq,
                                                    !cfg->tmf_active);
        spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);

        switch (cfg->init_state) {
        case INIT_STATE_SCSI:
                scsi_remove_host(cfg->host);
                scsi_host_put(cfg->host);
                /* Fall through */
        case INIT_STATE_AFU:
                term_afu(cfg);
        case INIT_STATE_PCI:
                pci_release_regions(cfg->dev);
                pci_disable_device(pdev);
        case INIT_STATE_NONE:
                flush_work(&cfg->work_q);
                free_mem(cfg);
                break;
        }

        pr_debug("%s: returning\n", __func__);
}

/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg: Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *      0 on success
 *      -ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
        int rc = 0;
        int i;
        char *buf = NULL;

        /* This allocation is about 12K, i.e. only 1 64k page
         * and up to 4 4k pages
         */
        cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                            get_order(sizeof(struct afu)));
        if (unlikely(!cfg->afu)) {
                pr_err("%s: cannot get %d free pages\n",
                       __func__, get_order(sizeof(struct afu)));
                rc = -ENOMEM;
                goto out;
        }
        cfg->afu->parent = cfg;
        cfg->afu->afu_map = NULL;

        for (i = 0; i < CXLFLASH_NUM_CMDS; buf += CMD_BUFSIZE, i++) {
                if (!((u64)buf & (PAGE_SIZE - 1))) {
                        buf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
                        if (unlikely(!buf)) {
                                pr_err("%s: Allocate command buffers fail!\n",
                                       __func__);
                                rc = -ENOMEM;
                                free_mem(cfg);
                                goto out;
                        }
                }

                cfg->afu->cmd[i].buf = buf;
                atomic_set(&cfg->afu->cmd[i].free, 1);
                cfg->afu->cmd[i].slot = i;
        }

out:
        return rc;
}

/**
 * init_pci() - initializes the host as a PCI device
 * @cfg: Internal structure associated with the host.
 *
 * Return:
 *      0 on success
 *      -EIO on unable to communicate with device
 *      A return code from the PCI sub-routines
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
        struct pci_dev *pdev = cfg->dev;
        int rc = 0;

        cfg->cxlflash_regs_pci = pci_resource_start(pdev, 0);
        rc = pci_request_regions(pdev, CXLFLASH_NAME);
        if (rc < 0) {
                dev_err(&pdev->dev,
                        "%s: Couldn't register memory range of registers\n",
                        __func__);
                goto out;
        }

        rc = pci_enable_device(pdev);
        if (rc || pci_channel_offline(pdev)) {
                if (pci_channel_offline(pdev)) {
                        cxlflash_wait_for_pci_err_recovery(cfg);
                        rc = pci_enable_device(pdev);
                }

                if (rc) {
                        dev_err(&pdev->dev, "%s: Cannot enable adapter\n",
                                __func__);
                        cxlflash_wait_for_pci_err_recovery(cfg);
                        goto out_release_regions;
                }
        }

        rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (rc < 0) {
                dev_dbg(&pdev->dev, "%s: Failed to set 64 bit PCI DMA mask\n",
                        __func__);
                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        }

        if (rc < 0) {
                dev_err(&pdev->dev, "%s: Failed to set PCI DMA mask\n",
                        __func__);
                goto out_disable;
        }

        pci_set_master(pdev);

        if (pci_channel_offline(pdev)) {
                cxlflash_wait_for_pci_err_recovery(cfg);
                if (pci_channel_offline(pdev)) {
                        rc = -EIO;
                        goto out_msi_disable;
                }
        }

        rc = pci_save_state(pdev);

        if (rc != PCIBIOS_SUCCESSFUL) {
                dev_err(&pdev->dev, "%s: Failed to save PCI config space\n",
                        __func__);
                rc = -EIO;
                goto cleanup_nolog;
        }

out:
        pr_debug("%s: returning rc=%d\n", __func__, rc);
        return rc;

cleanup_nolog:
out_msi_disable:
        cxlflash_wait_for_pci_err_recovery(cfg);
out_disable:
        pci_disable_device(pdev);
out_release_regions:
        pci_release_regions(pdev);
        goto out;

}

/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg: Internal structure associated with the host.
 *
 * Return:
 *      0 on success
 *      A return code from adding the host
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
        struct pci_dev *pdev = cfg->dev;
        int rc = 0;

        rc = scsi_add_host(cfg->host, &pdev->dev);
        if (rc) {
                dev_err(&pdev->dev, "%s: scsi_add_host failed (rc=%d)\n",
                        __func__, rc);
                goto out;
        }

        scsi_scan_host(cfg->host);

out:
        pr_debug("%s: returning rc=%d\n", __func__, rc);
        return rc;
}

/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs: Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(u64 *fc_regs)
{
        u64 cmdcfg;

        cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
        cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE); /* clear OFF_LINE */
        cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);   /* set ON_LINE */
        writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs: Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(u64 *fc_regs)
{
        u64 cmdcfg;

        cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
        cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);  /* clear ON_LINE */
        cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);  /* set OFF_LINE */
        writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * wait_port_online() - waits for the specified host FC port to come online
 * @fc_regs: Top of MMIO region defined for specified port.
 * @delay_us: Number of microseconds to delay between reading port status.
 * @nretry: Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *      TRUE (1) when the specified port is online
 *      FALSE (0) when the specified port fails to come online after timeout
 *      -EINVAL when @delay_us is less than 1000
 */
static int wait_port_online(u64 *fc_regs, u32 delay_us, u32 nretry)
{
        u64 status;

        if (delay_us < 1000) {
                pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
                return -EINVAL;
        }

        do {
                msleep(delay_us / 1000);
                status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
        } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
                 nretry--);

        return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}
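
/*
 * Editorial note (not part of this commit): the worst-case wait above
 * is roughly nretry * delay_us. For example, with the hypothetical
 * values delay_us = 100000 and nretry = 500, the port is polled every
 * 100ms for up to ~50 seconds before giving up.
 */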

/**
 * wait_port_offline() - waits for the specified host FC port to go offline
 * @fc_regs: Top of MMIO region defined for specified port.
 * @delay_us: Number of microseconds to delay between reading port status.
 * @nretry: Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *      TRUE (1) when the specified port is offline
 *      FALSE (0) when the specified port fails to go offline after timeout
 *      -EINVAL when @delay_us is less than 1000
 */
static int wait_port_offline(u64 *fc_regs, u32 delay_us, u32 nretry)
{
        u64 status;

        if (delay_us < 1000) {
                pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
                return -EINVAL;
        }

        do {
                msleep(delay_us / 1000);
                status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
        } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
                 nretry--);

        return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}

/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu: AFU associated with the host that owns the specified FC port.
 * @port: Port number being configured.
 * @fc_regs: Top of MMIO region defined for specified port.
 * @wwpn: The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 *
 * Return:
 *      0 when the WWPN is successfully written and the port comes back online
 *      -1 when the port fails to go offline or come back up online
 */
static int afu_set_wwpn(struct afu *afu, int port, u64 *fc_regs, u64 wwpn)
{
        int ret = 0;

        set_port_offline(fc_regs);

        if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
                               FC_PORT_STATUS_RETRY_CNT)) {
                pr_debug("%s: wait on port %d to go offline timed out\n",
                         __func__, port);
                ret = -1; /* but continue on to leave the port back online */
        }

        if (ret == 0)
                writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

        set_port_online(fc_regs);

        if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
                              FC_PORT_STATUS_RETRY_CNT)) {
                pr_debug("%s: wait on port %d to go online timed out\n",
                         __func__, port);
                ret = -1;

                /*
                 * Override for internal lun!!!
                 */
                if (afu->internal_lun) {
                        pr_debug("%s: Overriding port %d online timeout!!!\n",
                                 __func__, port);
                        ret = 0;
                }
        }

        pr_debug("%s: returning rc=%d\n", __func__, ret);

        return ret;
}

/**
 * afu_link_reset() - resets the specified host FC port
 * @afu: AFU associated with the host that owns the specified FC port.
 * @port: Port number being configured.
 * @fc_regs: Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain link with the device by switching the host to use
 * the alternate port exclusively while the reset takes place.
 */
static void afu_link_reset(struct afu *afu, int port, u64 *fc_regs)
{
        u64 port_sel;

        /* first switch the AFU to the other links, if any */
        port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
        port_sel &= ~(1 << port);
        writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
        cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

        set_port_offline(fc_regs);
        if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
                               FC_PORT_STATUS_RETRY_CNT))
                pr_err("%s: wait on port %d to go offline timed out\n",
                       __func__, port);

        set_port_online(fc_regs);
        if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
                              FC_PORT_STATUS_RETRY_CNT))
                pr_err("%s: wait on port %d to go online timed out\n",
                       __func__, port);

        /* switch back to include this port */
        port_sel |= (1 << port);
        writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
        cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

        pr_debug("%s: returning port_sel=%lld\n", __func__, port_sel);
}

/*
 * Asynchronous interrupt information table
 */
static const struct asyc_intr_info ainfo[] = {
        {SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
        {SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
        {SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
        {SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, 0},
        {SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
        {SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, 0},
        {SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
        {SISL_ASTATUS_FC0_LINK_UP, "link up", 0, 0},
        {SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
        {SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
        {SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
        {SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, 0},
        {SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
        {SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, 0},
        {SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
        {SISL_ASTATUS_FC1_LINK_UP, "link up", 1, 0},
        {0x0, "", 0, 0}         /* terminator */
};

/**
 * find_ainfo() - locates and returns asynchronous interrupt information
 * @status: Status code set by AFU on error.
 *
 * Return: The located information or NULL when the status code is invalid.
 */
static const struct asyc_intr_info *find_ainfo(u64 status)
{
        const struct asyc_intr_info *info;

        for (info = &ainfo[0]; info->status; info++)
                if (info->status == status)
                        return info;

        return NULL;
}

/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu: AFU associated with the host.
 */
static void afu_err_intr_init(struct afu *afu)
{
        int i;
        u64 reg;

        /* global async interrupts: AFU clears afu_ctrl on context exit
         * if async interrupts were sent to that context. This prevents
         * the AFU from sending further async interrupts when there is
         * nobody to receive them.
         */

        /* mask all */
        writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
        /* set LISN# to send and point to master context */
        reg = ((u64) (((afu->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);

        if (afu->internal_lun)
                reg |= 1;       /* Bit 63 indicates local lun */
        writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
        /* clear all */
        writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
        /* unmask bits that are of interest */
        /* note: afu can send an interrupt after this step */
        writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
        /* clear again in case a bit came on after previous clear but before */
        /* unmask */
        writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);

        /* Clear/Set internal lun bits */
        reg = readq_be(&afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
        reg &= SISL_FC_INTERNAL_MASK;
        if (afu->internal_lun)
                reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
        writeq_be(reg, &afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);

        /* now clear FC errors */
        for (i = 0; i < NUM_FC_PORTS; i++) {
                writeq_be(0xFFFFFFFFU,
                          &afu->afu_map->global.fc_regs[i][FC_ERROR / 8]);
                writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRCAP / 8]);
        }

        /* sync interrupts for master's IOARRIN write */
        /* note that unlike asyncs, there can be no pending sync interrupts */
        /* at this time (this is a fresh context and master has not written */
        /* IOARRIN yet), so there is nothing to clear. */

        /* set LISN#, it is always sent to the context that wrote IOARRIN */
        writeq_be(SISL_MSI_SYNC_ERROR, &afu->host_map->ctx_ctrl);
        writeq_be(SISL_ISTATUS_MASK, &afu->host_map->intr_mask);
}
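
/*
 * Editorial note (not part of this commit): per the shift/mask in
 * afu_err_intr_init() above, the afu_ctrl value packs the master
 * context handle into bits 48 and up, the async-error LISN into bits
 * 40-47, and uses the low bit to flag internal-LUN mode:
 *
 *      reg = (((ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR) << 40) | local_lun
 */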

/**
 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
 * @irq: Interrupt number.
 * @data: Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
        struct afu *afu = (struct afu *)data;
        u64 reg;
        u64 reg_unmasked;

        reg = readq_be(&afu->host_map->intr_status);
        reg_unmasked = (reg & SISL_ISTATUS_UNMASK);

        if (reg_unmasked == 0UL) {
                pr_err("%s: %llX: spurious interrupt, intr_status %016llX\n",
                       __func__, (u64)afu, reg);
                goto cxlflash_sync_err_irq_exit;
        }

        pr_err("%s: %llX: unexpected interrupt, intr_status %016llX\n",
               __func__, (u64)afu, reg);

        writeq_be(reg_unmasked, &afu->host_map->intr_clear);

cxlflash_sync_err_irq_exit:
        pr_debug("%s: returning rc=%d\n", __func__, IRQ_HANDLED);
        return IRQ_HANDLED;
}

/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq: Interrupt number.
 * @data: Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
        struct afu *afu = (struct afu *)data;
        struct afu_cmd *cmd;
        bool toggle = afu->toggle;
        u64 entry,
            *hrrq_start = afu->hrrq_start,
            *hrrq_end = afu->hrrq_end,
            *hrrq_curr = afu->hrrq_curr;

        /* Process however many RRQ entries that are ready */
        while (true) {
                entry = *hrrq_curr;

                if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
                        break;

                cmd = (struct afu_cmd *)(entry & ~SISL_RESP_HANDLE_T_BIT);
                cmd_complete(cmd);

                /* Advance to next entry or wrap and flip the toggle bit */
                if (hrrq_curr < hrrq_end)
                        hrrq_curr++;
                else {
                        hrrq_curr = hrrq_start;
                        toggle ^= SISL_RESP_HANDLE_T_BIT;
                }
        }

        afu->hrrq_curr = hrrq_curr;
        afu->toggle = toggle;

        return IRQ_HANDLED;
}
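
/*
 * Editorial note (not part of this commit): a sketch of the RRQ toggle
 * protocol used above, for an imagined small queue. The AFU writes
 * pass N entries with the toggle bit set and pass N+1 entries with it
 * clear; the host consumes while (entry & SISL_RESP_HANDLE_T_BIT)
 * matches its own toggle and flips that toggle on wrap. A stale entry
 * from the previous pass therefore fails the comparison and cleanly
 * stops the loop until the AFU overwrites it.
 */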

/**
 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
 * @irq: Interrupt number.
 * @data: Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
{
        struct afu *afu = (struct afu *)data;
        struct cxlflash_cfg *cfg;
        u64 reg_unmasked;
        const struct asyc_intr_info *info;
        struct sisl_global_map *global = &afu->afu_map->global;
        u64 reg;
        u8 port;
        int i;

        cfg = afu->parent;

        reg = readq_be(&global->regs.aintr_status);
        reg_unmasked = (reg & SISL_ASTATUS_UNMASK);

        if (reg_unmasked == 0) {
                pr_err("%s: spurious interrupt, aintr_status 0x%016llX\n",
                       __func__, reg);
                goto out;
        }

        /* it is OK to clear AFU status before FC_ERROR */
        writeq_be(reg_unmasked, &global->regs.aintr_clear);

        /* check each bit that is on */
        for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
                info = find_ainfo(1ULL << i);
                if (((reg_unmasked & 0x1) == 0) || !info)
1413 | continue; | ||
1414 | |||
1415 | port = info->port; | ||
1416 | |||
1417 | pr_err("%s: FC Port %d -> %s, fc_status 0x%08llX\n", | ||
1418 | __func__, port, info->desc, | ||
1419 | readq_be(&global->fc_regs[port][FC_STATUS / 8])); | ||
1420 | |||
1421 | /* | ||
1422 | * do link reset first, some OTHER errors will set FC_ERROR | ||
1423 | * again if cleared before or w/o a reset | ||
1424 | */ | ||
1425 | if (info->action & LINK_RESET) { | ||
1426 | pr_err("%s: FC Port %d: resetting link\n", | ||
1427 | __func__, port); | ||
1428 | cfg->lr_state = LINK_RESET_REQUIRED; | ||
1429 | cfg->lr_port = port; | ||
1430 | schedule_work(&cfg->work_q); | ||
1431 | } | ||
1432 | |||
1433 | if (info->action & CLR_FC_ERROR) { | ||
1434 | reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]); | ||
1435 | |||
1436 | /* | ||
1437 | * since all errors are unmasked, FC_ERROR and FC_ERRCAP | ||
1438 | * should be the same and tracing one is sufficient. | ||
1439 | */ | ||
1440 | |||
1441 | pr_err("%s: fc %d: clearing fc_error 0x%08llX\n", | ||
1442 | __func__, port, reg); | ||
1443 | |||
1444 | writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]); | ||
1445 | writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]); | ||
1446 | } | ||
1447 | } | ||
1448 | |||
1449 | out: | ||
1450 | pr_debug("%s: returning rc=%d, afu=%p\n", __func__, IRQ_HANDLED, afu); | ||
1451 | return IRQ_HANDLED; | ||
1452 | } | ||
1453 | |||
1454 | /** | ||
1455 | * start_context() - starts the master context | ||
1456 | * @cxlflash: Internal structure associated with the host. | ||
1457 | * | ||
1458 | * Return: A success or failure value from CXL services. | ||
1459 | */ | ||
1460 | static int start_context(struct cxlflash_cfg *cfg) | ||
1461 | { | ||
1462 | int rc = 0; | ||
1463 | |||
1464 | rc = cxl_start_context(cfg->mcctx, | ||
1465 | cfg->afu->work.work_element_descriptor, | ||
1466 | NULL); | ||
1467 | |||
1468 | pr_debug("%s: returning rc=%d\n", __func__, rc); | ||
1469 | return rc; | ||
1470 | } | ||
1471 | |||
1472 | /** | ||
1473 | * read_vpd() - obtains the WWPNs from VPD | ||
1474 | * @cxlflash: Internal structure associated with the host. | ||
1475 | * @wwpn: Array of size NUM_FC_PORTS to pass back WWPNs | ||
1476 | * | ||
1477 | * Return: | ||
1478 | * 0 on success | ||
1479 | * -ENODEV when VPD or WWPN keywords not found | ||
1480 | */ | ||
1481 | static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[]) | ||
1482 | { | ||
1483 | struct pci_dev *dev = cfg->parent_dev; | ||
1484 | int rc = 0; | ||
1485 | int ro_start, ro_size, i, j, k; | ||
1486 | ssize_t vpd_size; | ||
1487 | char vpd_data[CXLFLASH_VPD_LEN]; | ||
1488 | char tmp_buf[WWPN_BUF_LEN] = { 0 }; | ||
1489 | char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" }; | ||
1490 | |||
1491 | /* Get the VPD data from the device */ | ||
1492 | vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data); | ||
1493 | if (unlikely(vpd_size <= 0)) { | ||
1494 | pr_err("%s: Unable to read VPD (size = %ld)\n", | ||
1495 | __func__, vpd_size); | ||
1496 | rc = -ENODEV; | ||
1497 | goto out; | ||
1498 | } | ||
1499 | |||
1500 | /* Get the read only section offset */ | ||
1501 | ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, | ||
1502 | PCI_VPD_LRDT_RO_DATA); | ||
1503 | if (unlikely(ro_start < 0)) { | ||
1504 | pr_err("%s: VPD Read-only data not found\n", __func__); | ||
1505 | rc = -ENODEV; | ||
1506 | goto out; | ||
1507 | } | ||
1508 | |||
1509 | /* Get the read-only section size, cap it if it extends beyond the data read */ | ||
1510 | ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]); | ||
1511 | j = ro_size; | ||
1512 | i = ro_start + PCI_VPD_LRDT_TAG_SIZE; | ||
1513 | if (unlikely((i + j) > vpd_size)) { | ||
1514 | pr_debug("%s: Might need to read more VPD (%d > %ld)\n", | ||
1515 | __func__, (i + j), vpd_size); | ||
1516 | ro_size = vpd_size - i; | ||
1517 | } | ||
1518 | |||
1519 | /* | ||
1520 | * Find the offset of the WWPN tag within the read only VPD data | ||
1521 | * and validate the found field (partials are no good to us). | ||
1522 | * Convert the ASCII data to an integer value. Note that we must | ||
1523 | * copy to a temporary buffer because the conversion service | ||
1524 | * requires that the ASCII string be terminated. Also note that | ||
1525 | * WWPN_LEN (16) doubles as the hex base passed to kstrtoul(). | ||
1526 | */ | ||
1527 | for (k = 0; k < NUM_FC_PORTS; k++) { | ||
1528 | j = ro_size; | ||
1529 | i = ro_start + PCI_VPD_LRDT_TAG_SIZE; | ||
1530 | |||
1531 | i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]); | ||
1532 | if (unlikely(i < 0)) { | ||
1533 | pr_err("%s: Port %d WWPN not found in VPD\n", | ||
1534 | __func__, k); | ||
1535 | rc = -ENODEV; | ||
1536 | goto out; | ||
1537 | } | ||
1538 | |||
1539 | j = pci_vpd_info_field_size(&vpd_data[i]); | ||
1540 | i += PCI_VPD_INFO_FLD_HDR_SIZE; | ||
1541 | if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) { | ||
1542 | pr_err("%s: Port %d WWPN incomplete or VPD corrupt\n", | ||
1543 | __func__, k); | ||
1544 | rc = -ENODEV; | ||
1545 | goto out; | ||
1546 | } | ||
1547 | |||
1548 | memcpy(tmp_buf, &vpd_data[i], WWPN_LEN); | ||
1549 | rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]); | ||
1550 | if (unlikely(rc)) { | ||
1551 | pr_err("%s: Fail to convert port %d WWPN to integer\n", | ||
1552 | __func__, k); | ||
1553 | rc = -ENODEV; | ||
1554 | goto out; | ||
1555 | } | ||
1556 | } | ||
1557 | |||
1558 | out: | ||
1559 | pr_debug("%s: returning rc=%d\n", __func__, rc); | ||
1560 | return rc; | ||
1561 | } | ||
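
The copy-then-terminate step is the subtle part of the routine above: the WWPN keyword payload is a fixed-width, non-terminated ASCII hex field, so it must be staged in a zeroed buffer before numeric conversion. A userspace sketch of just that step, using strtoull in place of kstrtoul and a made-up WWPN value:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define WWPN_LEN 16	/* 16 hex chars == one 64-bit WWPN */

	int main(void)
	{
		const char vpd_field[] = "500507680B21ACFA";	/* made-up WWPN */
		char tmp_buf[WWPN_LEN + 1] = { 0 };		/* keeps a NUL */
		unsigned long long wwpn;

		/* Copy the fixed-width field so strtoull sees a terminated string */
		memcpy(tmp_buf, vpd_field, WWPN_LEN);
		wwpn = strtoull(tmp_buf, NULL, 16);
		printf("wwpn = 0x%016llX\n", wwpn);
		return 0;
	}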
1562 | |||
1563 | /** | ||
1564 | * cxlflash_context_reset() - timeout handler for AFU commands | ||
1565 | * @cmd: AFU command that timed out. | ||
1566 | * | ||
1567 | * Sends a reset to the AFU. | ||
1568 | */ | ||
1569 | void cxlflash_context_reset(struct afu_cmd *cmd) | ||
1570 | { | ||
1571 | int nretry = 0; | ||
1572 | u64 rrin = 0x1; | ||
1573 | u64 room = 0; | ||
1574 | struct afu *afu = cmd->parent; | ||
1575 | ulong lock_flags; | ||
1576 | |||
1577 | pr_debug("%s: cmd=%p\n", __func__, cmd); | ||
1578 | |||
1579 | spin_lock_irqsave(&cmd->slock, lock_flags); | ||
1580 | |||
1581 | /* Already completed? */ | ||
1582 | if (cmd->sa.host_use_b[0] & B_DONE) { | ||
1583 | spin_unlock_irqrestore(&cmd->slock, lock_flags); | ||
1584 | return; | ||
1585 | } | ||
1586 | |||
1587 | cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT); | ||
1588 | spin_unlock_irqrestore(&cmd->slock, lock_flags); | ||
1589 | |||
1590 | /* | ||
1591 | * We really want to send this reset at all costs, so spread | ||
1592 | * out wait time on successive retries for available room. | ||
1593 | */ | ||
1594 | do { | ||
1595 | room = readq_be(&afu->host_map->cmd_room); | ||
1596 | atomic64_set(&afu->room, room); | ||
1597 | if (room) | ||
1598 | goto write_rrin; | ||
1599 | udelay(nretry); | ||
1600 | } while (nretry++ < MC_ROOM_RETRY_CNT); | ||
1601 | |||
1602 | pr_err("%s: no cmd_room to send reset\n", __func__); | ||
1603 | return; | ||
1604 | |||
1605 | write_rrin: | ||
1606 | nretry = 0; | ||
1607 | writeq_be(rrin, &afu->host_map->ioarrin); | ||
1608 | do { | ||
1609 | rrin = readq_be(&afu->host_map->ioarrin); | ||
1610 | if (rrin != 0x1) | ||
1611 | break; | ||
1612 | /* Double delay each time */ | ||
1613 | udelay(1 << nretry); | ||
1614 | } while (nretry++ < MC_ROOM_RETRY_CNT); | ||
1615 | } | ||
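
With the XOR typo corrected above (`2 ^ nretry` is exclusive-or in C, not a power), the second retry loop genuinely doubles its delay on every pass. The arithmetic is easy to sanity-check in isolation; MC_ROOM_RETRY_CNT below is an assumed value for illustration only:

	#include <stdio.h>

	#define MC_ROOM_RETRY_CNT 10	/* assumed value for illustration */

	int main(void)
	{
		int nretry = 0;
		unsigned long total = 0;

		/* 1, 2, 4, ... microseconds: a left shift doubles each time */
		do {
			unsigned long us = 1UL << nretry;

			total += us;
			printf("retry %d: wait %lu us (cumulative %lu us)\n",
			       nretry, us, total);
		} while (nretry++ < MC_ROOM_RETRY_CNT);
		return 0;
	}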
1616 | |||
1617 | /** | ||
1618 | * init_pcr() - initialize the provisioning and control registers | ||
1619 | * @cfg: Internal structure associated with the host. | ||
1620 | * | ||
1621 | * Also sets up fast access to the mapped registers and initializes AFU | ||
1622 | * command fields that never change. | ||
1623 | */ | ||
1624 | void init_pcr(struct cxlflash_cfg *cfg) | ||
1625 | { | ||
1626 | struct afu *afu = cfg->afu; | ||
1627 | struct sisl_ctrl_map *ctrl_map; | ||
1628 | int i; | ||
1629 | |||
1630 | for (i = 0; i < MAX_CONTEXT; i++) { | ||
1631 | ctrl_map = &afu->afu_map->ctrls[i].ctrl; | ||
1632 | /* disrupt any clients that could be running, */ | ||
1633 | /* e.g. clients that survived a master restart */ | ||
1634 | writeq_be(0, &ctrl_map->rht_start); | ||
1635 | writeq_be(0, &ctrl_map->rht_cnt_id); | ||
1636 | writeq_be(0, &ctrl_map->ctx_cap); | ||
1637 | } | ||
1638 | |||
1639 | /* copy frequently used fields into afu */ | ||
1640 | afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx); | ||
1641 | /* ctx_hndl is 16 bits in CAIA */ | ||
1642 | afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host; | ||
1643 | afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl; | ||
1644 | |||
1645 | /* Program the Endian Control for the master context */ | ||
1646 | writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl); | ||
1647 | |||
1648 | /* initialize cmd fields that never change */ | ||
1649 | for (i = 0; i < CXLFLASH_NUM_CMDS; i++) { | ||
1650 | afu->cmd[i].rcb.ctx_id = afu->ctx_hndl; | ||
1651 | afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED; | ||
1652 | afu->cmd[i].rcb.rrq = 0x0; | ||
1653 | } | ||
1654 | } | ||
1655 | |||
1656 | /** | ||
1657 | * init_global() - initialize AFU global registers | ||
1658 | * @cfg: Internal structure associated with the host. | ||
1659 | */ | ||
1660 | int init_global(struct cxlflash_cfg *cfg) | ||
1661 | { | ||
1662 | struct afu *afu = cfg->afu; | ||
1663 | u64 wwpn[NUM_FC_PORTS]; /* wwpn of AFU ports */ | ||
1664 | int i = 0, num_ports = 0; | ||
1665 | int rc = 0; | ||
1666 | u64 reg; | ||
1667 | |||
1668 | rc = read_vpd(cfg, &wwpn[0]); | ||
1669 | if (rc) { | ||
1670 | pr_err("%s: could not read vpd rc=%d\n", __func__, rc); | ||
1671 | goto out; | ||
1672 | } | ||
1673 | |||
1674 | pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]); | ||
1675 | |||
1676 | /* set up RRQ in AFU for master issued cmds */ | ||
1677 | writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start); | ||
1678 | writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end); | ||
1679 | |||
1680 | /* AFU configuration */ | ||
1681 | reg = readq_be(&afu->afu_map->global.regs.afu_config); | ||
1682 | reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN; | ||
1683 | /* enable all auto retry options and control endianness */ | ||
1684 | /* leave others at default: */ | ||
1685 | /* CTX_CAP write protected, mbox_r does not clear on read and */ | ||
1686 | /* checker enabled if dual AFU */ | ||
1687 | writeq_be(reg, &afu->afu_map->global.regs.afu_config); | ||
1688 | |||
1689 | /* global port select: select either port */ | ||
1690 | if (afu->internal_lun) { | ||
1691 | /* only use port 0 */ | ||
1692 | writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel); | ||
1693 | num_ports = NUM_FC_PORTS - 1; | ||
1694 | } else { | ||
1695 | writeq_be(BOTH_PORTS, &afu->afu_map->global.regs.afu_port_sel); | ||
1696 | num_ports = NUM_FC_PORTS; | ||
1697 | } | ||
1698 | |||
1699 | for (i = 0; i < num_ports; i++) { | ||
1700 | /* unmask all errors (but they are still masked at AFU) */ | ||
1701 | writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]); | ||
1702 | /* clear CRC error cnt & set a threshold */ | ||
1703 | (void)readq_be(&afu->afu_map->global. | ||
1704 | fc_regs[i][FC_CNT_CRCERR / 8]); | ||
1705 | writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i] | ||
1706 | [FC_CRC_THRESH / 8]); | ||
1707 | |||
1708 | /* set WWPNs. If already programmed, wwpn[i] is 0 */ | ||
1709 | if (wwpn[i] != 0 && | ||
1710 | afu_set_wwpn(afu, i, | ||
1711 | &afu->afu_map->global.fc_regs[i][0], | ||
1712 | wwpn[i])) { | ||
1713 | pr_err("%s: failed to set WWPN on port %d\n", | ||
1714 | __func__, i); | ||
1715 | rc = -EIO; | ||
1716 | goto out; | ||
1717 | } | ||
1718 | /* Programming WWPN back to back causes additional | ||
1719 | * offline/online transitions and a PLOGI | ||
1720 | */ | ||
1721 | msleep(100); | ||
1722 | |||
1723 | } | ||
1724 | |||
1725 | /* set up master's own CTX_CAP to allow real mode, host translation */ | ||
1726 | /* tbls, afu cmds and read/write GSCSI cmds. */ | ||
1727 | /* First, unlock ctx_cap write by reading mbox */ | ||
1728 | (void)readq_be(&afu->ctrl_map->mbox_r); /* unlock ctx_cap */ | ||
1729 | writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE | | ||
1730 | SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD | | ||
1731 | SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD), | ||
1732 | &afu->ctrl_map->ctx_cap); | ||
1733 | /* init heartbeat */ | ||
1734 | afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb); | ||
1735 | |||
1736 | out: | ||
1737 | return rc; | ||
1738 | } | ||
1739 | |||
1740 | /** | ||
1741 | * start_afu() - initializes and starts the AFU | ||
1742 | * @cfg: Internal structure associated with the host. | ||
1743 | */ | ||
1744 | static int start_afu(struct cxlflash_cfg *cfg) | ||
1745 | { | ||
1746 | struct afu *afu = cfg->afu; | ||
1747 | struct afu_cmd *cmd; | ||
1748 | |||
1749 | int i = 0; | ||
1750 | int rc = 0; | ||
1751 | |||
1752 | for (i = 0; i < CXLFLASH_NUM_CMDS; i++) { | ||
1753 | cmd = &afu->cmd[i]; | ||
1754 | |||
1755 | init_completion(&cmd->cevent); | ||
1756 | spin_lock_init(&cmd->slock); | ||
1757 | cmd->parent = afu; | ||
1758 | } | ||
1759 | |||
1760 | init_pcr(cfg); | ||
1761 | |||
1762 | /* initialize RRQ pointers */ | ||
1763 | afu->hrrq_start = &afu->rrq_entry[0]; | ||
1764 | afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1]; | ||
1765 | afu->hrrq_curr = afu->hrrq_start; | ||
1766 | afu->toggle = 1; | ||
1767 | |||
1768 | rc = init_global(cfg); | ||
1769 | |||
1770 | pr_debug("%s: returning rc=%d\n", __func__, rc); | ||
1771 | return rc; | ||
1772 | } | ||
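
The four RRQ fields initialized above imply a toggle-bit ring protocol: the producer stamps each completed entry with its current toggle, flipping it on wrap, and the consumer (the RRQ interrupt handler earlier in this file) owns an entry only while the entry's toggle bit matches its own, flipping likewise when it wraps from hrrq_end back to hrrq_start. A self-contained sketch of that protocol; the entry layout (payload shifted left, toggle in bit 0) is an assumption for illustration, not the SIS-lite definition:

	#include <stdio.h>
	#include <stdint.h>

	#define NUM_RRQ_ENTRY 4		/* assumed ring size for illustration */
	#define T_BIT 0x1ULL		/* low bit carries the toggle */

	static uint64_t rrq[NUM_RRQ_ENTRY];
	static uint64_t *hrrq_start = &rrq[0];
	static uint64_t *hrrq_end = &rrq[NUM_RRQ_ENTRY - 1];
	static uint64_t *hrrq_curr = &rrq[0];
	static uint64_t toggle = 1;

	static int pidx;
	static uint64_t ptoggle = 1;

	/* Producer side: post one completion, flipping its toggle on wrap */
	static void produce(uint64_t payload)
	{
		rrq[pidx] = (payload << 1) | ptoggle;
		if (++pidx == NUM_RRQ_ENTRY) {
			pidx = 0;
			ptoggle ^= T_BIT;
		}
	}

	/* Consumer side: drain entries whose T bit matches our toggle */
	static void drain(void)
	{
		while ((*hrrq_curr & T_BIT) == toggle) {
			printf("completion 0x%llX\n",
			       (unsigned long long)(*hrrq_curr >> 1));
			if (hrrq_curr < hrrq_end) {
				hrrq_curr++;
			} else {
				hrrq_curr = hrrq_start;
				toggle ^= T_BIT;
			}
		}
	}

	int main(void)
	{
		produce(0xA0);
		produce(0xA1);
		drain();		/* consumes two entries, toggle still 1 */
		produce(0xA2);
		produce(0xA3);
		produce(0xA4);		/* wraps; producer toggle flips to 0 */
		drain();		/* consumer follows the wrap and flips too */
		return 0;
	}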
1773 | |||
1774 | /** | ||
1775 | * init_mc() - create and register as the master context | ||
1776 | * @cfg: Internal structure associated with the host. | ||
1777 | * | ||
1778 | * Return: | ||
1779 | * 0 on success | ||
1780 | * -ENOMEM when unable to obtain a context from CXL services | ||
1781 | * A failure value from CXL services. | ||
1782 | */ | ||
1783 | static int init_mc(struct cxlflash_cfg *cfg) | ||
1784 | { | ||
1785 | struct cxl_context *ctx; | ||
1786 | struct device *dev = &cfg->dev->dev; | ||
1787 | struct afu *afu = cfg->afu; | ||
1788 | int rc = 0; | ||
1789 | enum undo_level level; | ||
1790 | |||
1791 | ctx = cxl_get_context(cfg->dev); | ||
1792 | if (unlikely(!ctx)) | ||
1793 | return -ENOMEM; | ||
1794 | cfg->mcctx = ctx; | ||
1795 | |||
1796 | /* Set it up as a master with the CXL */ | ||
1797 | cxl_set_master(ctx); | ||
1798 | |||
1799 | /* During initialization reset the AFU to start from a clean slate */ | ||
1800 | rc = cxl_afu_reset(cfg->mcctx); | ||
1801 | if (unlikely(rc)) { | ||
1802 | dev_err(dev, "%s: initial AFU reset failed rc=%d\n", | ||
1803 | __func__, rc); | ||
1804 | level = RELEASE_CONTEXT; | ||
1805 | goto out; | ||
1806 | } | ||
1807 | |||
1808 | rc = cxl_allocate_afu_irqs(ctx, 3); | ||
1809 | if (unlikely(rc)) { | ||
1810 | dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n", | ||
1811 | __func__, rc); | ||
1812 | level = RELEASE_CONTEXT; | ||
1813 | goto out; | ||
1814 | } | ||
1815 | |||
1816 | rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu, | ||
1817 | "SISL_MSI_SYNC_ERROR"); | ||
1818 | if (unlikely(rc <= 0)) { | ||
1819 | dev_err(dev, "%s: IRQ 1 (SISL_MSI_SYNC_ERROR) map failed!\n", | ||
1820 | __func__); | ||
1821 | level = FREE_IRQ; | ||
1822 | goto out; | ||
1823 | } | ||
1824 | |||
1825 | rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu, | ||
1826 | "SISL_MSI_RRQ_UPDATED"); | ||
1827 | if (unlikely(rc <= 0)) { | ||
1828 | dev_err(dev, "%s: IRQ 2 (SISL_MSI_RRQ_UPDATED) map failed!\n", | ||
1829 | __func__); | ||
1830 | level = UNMAP_ONE; | ||
1831 | goto out; | ||
1832 | } | ||
1833 | |||
1834 | rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu, | ||
1835 | "SISL_MSI_ASYNC_ERROR"); | ||
1836 | if (unlikely(rc <= 0)) { | ||
1837 | dev_err(dev, "%s: IRQ 3 (SISL_MSI_ASYNC_ERROR) map failed!\n", | ||
1838 | __func__); | ||
1839 | level = UNMAP_TWO; | ||
1840 | goto out; | ||
1841 | } | ||
1842 | |||
1843 | rc = 0; | ||
1844 | |||
1845 | /* This performs the equivalent of the CXL_IOCTL_START_WORK. | ||
1846 | * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process | ||
1847 | * element (pe) that is embedded in the context (ctx) | ||
1848 | */ | ||
1849 | rc = start_context(cfg); | ||
1850 | if (unlikely(rc)) { | ||
1851 | dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc); | ||
1852 | level = UNMAP_THREE; | ||
1853 | goto out; | ||
1854 | } | ||
1855 | ret: | ||
1856 | pr_debug("%s: returning rc=%d\n", __func__, rc); | ||
1857 | return rc; | ||
1858 | out: /* error path: undo partial setup before the common exit */ | ||
1859 | term_mc(cfg, level); | ||
1860 | goto ret; | ||
1861 | } | ||
1862 | |||
1863 | /** | ||
1864 | * init_afu() - setup as master context and start AFU | ||
1865 | * @cfg: Internal structure associated with the host. | ||
1866 | * | ||
1867 | * This routine is a higher level of control for configuring the | ||
1868 | * AFU on probe and reset paths. | ||
1869 | * | ||
1870 | * Return: | ||
1871 | * 0 on success | ||
1872 | * -ENOMEM when unable to map the AFU MMIO space | ||
1873 | * A failure value from internal services. | ||
1874 | */ | ||
1875 | static int init_afu(struct cxlflash_cfg *cfg) | ||
1876 | { | ||
1877 | u64 reg; | ||
1878 | int rc = 0; | ||
1879 | struct afu *afu = cfg->afu; | ||
1880 | struct device *dev = &cfg->dev->dev; | ||
1881 | |||
1882 | rc = init_mc(cfg); | ||
1883 | if (rc) { | ||
1884 | dev_err(dev, "%s: call to init_mc failed, rc=%d!\n", | ||
1885 | __func__, rc); | ||
1886 | goto err1; | ||
1887 | } | ||
1888 | |||
1889 | /* Map the entire MMIO space of the AFU. | ||
1890 | */ | ||
1891 | afu->afu_map = cxl_psa_map(cfg->mcctx); | ||
1892 | if (!afu->afu_map) { | ||
1893 | rc = -ENOMEM; | ||
1894 | term_mc(cfg, UNDO_START); | ||
1895 | dev_err(dev, "%s: call to cxl_psa_map failed!\n", __func__); | ||
1896 | goto err1; | ||
1897 | } | ||
1898 | |||
1899 | /* don't byte reverse on reading afu_version, else the string form */ | ||
1900 | /* will be backwards */ | ||
1901 | reg = afu->afu_map->global.regs.afu_version; | ||
1902 | memcpy(afu->version, ®, 8); | ||
1903 | afu->interface_version = | ||
1904 | readq_be(&afu->afu_map->global.regs.interface_version); | ||
1905 | pr_debug("%s: afu version %s, interface version 0x%llX\n", | ||
1906 | __func__, afu->version, afu->interface_version); | ||
1907 | |||
1908 | rc = start_afu(cfg); | ||
1909 | if (rc) { | ||
1910 | dev_err(dev, "%s: call to start_afu failed, rc=%d!\n", | ||
1911 | __func__, rc); | ||
1912 | term_mc(cfg, UNDO_START); | ||
1913 | cxl_psa_unmap((void *)afu->afu_map); | ||
1914 | afu->afu_map = NULL; | ||
1915 | goto err1; | ||
1916 | } | ||
1917 | |||
1918 | afu_err_intr_init(cfg->afu); | ||
1919 | atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room)); | ||
1920 | |||
1921 | err1: | ||
1922 | pr_debug("%s: returning rc=%d\n", __func__, rc); | ||
1923 | return rc; | ||
1924 | } | ||
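
The byte-order note inside init_afu() deserves a demonstration: afu_version holds eight ASCII bytes, so a byte-swapping read such as readq_be() would hand the string back reversed. A userspace illustration with a made-up register value ("SISLITE1" is not the real version string):

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>

	/* Byte-swap a 64-bit value, as a big-endian-aware read would */
	static uint64_t bswap64(uint64_t v)
	{
		uint64_t r = 0;
		int i;

		for (i = 0; i < 8; i++)
			r = (r << 8) | ((v >> (8 * i)) & 0xff);
		return r;
	}

	int main(void)
	{
		uint64_t reg;
		char version[9] = { 0 };

		memcpy(&reg, "SISLITE1", 8);	/* pretend register contents */

		memcpy(version, &reg, 8);	/* raw copy: string intact */
		printf("raw:     %s\n", version);

		reg = bswap64(reg);		/* what a swapping read would do */
		memcpy(version, &reg, 8);
		printf("swapped: %s\n", version);	/* prints it reversed */
		return 0;
	}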
1925 | |||
1926 | /** | ||
1927 | * cxlflash_send_cmd() - sends an AFU command | ||
1928 | * @afu: AFU associated with the host. | ||
1929 | * @cmd: AFU command to send. | ||
1930 | * | ||
1931 | * Return: | ||
1932 | * 0 on success | ||
1933 | * SCSI_MLQUEUE_HOST_BUSY when no command room is available | ||
1934 | */ | ||
1935 | int cxlflash_send_cmd(struct afu *afu, struct afu_cmd *cmd) | ||
1936 | { | ||
1937 | struct cxlflash_cfg *cfg = afu->parent; | ||
1938 | int nretry = 0; | ||
1939 | int rc = 0; | ||
1940 | u64 room; | ||
1941 | long newval; | ||
1942 | |||
1943 | /* | ||
1944 | * This routine is used by critical users such as AFU sync and to | ||
1945 | * send a task management function (TMF). Thus we want to retry a | ||
1946 | * bit before returning an error. To avoid the performance penalty | ||
1947 | * of MMIO, we spread the update of 'room' over multiple commands. | ||
1948 | */ | ||
1949 | retry: | ||
1950 | newval = atomic64_dec_if_positive(&afu->room); | ||
1951 | if (!newval) { | ||
1952 | do { | ||
1953 | room = readq_be(&afu->host_map->cmd_room); | ||
1954 | atomic64_set(&afu->room, room); | ||
1955 | if (room) | ||
1956 | goto write_ioarrin; | ||
1957 | udelay(nretry); | ||
1958 | } while (nretry++ < MC_ROOM_RETRY_CNT); | ||
1959 | |||
1960 | pr_err("%s: no cmd_room to send 0x%X\n", | ||
1961 | __func__, cmd->rcb.cdb[0]); | ||
1962 | |||
1963 | goto no_room; | ||
1964 | } else if (unlikely(newval < 0)) { | ||
1965 | /* This should be rare, i.e. only when two threads race and | ||
1966 | * decrement before the MMIO read is done. In this case | ||
1967 | * just benefit from the other thread having updated | ||
1968 | * afu->room. | ||
1969 | */ | ||
1970 | if (nretry++ < MC_ROOM_RETRY_CNT) { | ||
1971 | udelay(nretry); | ||
1972 | goto retry; | ||
1973 | } | ||
1974 | |||
1975 | goto no_room; | ||
1976 | } | ||
1977 | |||
1978 | write_ioarrin: | ||
1979 | writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin); | ||
1980 | out: | ||
1981 | pr_debug("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd, | ||
1982 | cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc); | ||
1983 | return rc; | ||
1984 | |||
1985 | no_room: | ||
1986 | afu->read_room = true; | ||
1987 | schedule_work(&cfg->work_q); | ||
1988 | rc = SCSI_MLQUEUE_HOST_BUSY; | ||
1989 | goto out; | ||
1990 | } | ||
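
The room accounting above amortizes one expensive MMIO read across many sends by caching the credit count in afu->room and taking credits with an atomic decrement. A userspace model of that scheme using C11 atomics; hw_cmd_room() is a stand-in for the cmd_room register read, and the udelay backoff/retry details are deliberately omitted:

	#include <stdio.h>
	#include <stdatomic.h>

	static atomic_long room;	/* cached credit count, starts at 0 */

	/* Stand-in for readq_be(&afu->host_map->cmd_room) */
	static long hw_cmd_room(void)
	{
		static int calls;

		return calls++ ? 0 : 4;	/* 4 slots first time, then "full" */
	}

	/* Returns 1 if a send credit was obtained, 0 if the queue is full */
	static int get_credit(void)
	{
		long prev = atomic_fetch_sub(&room, 1);

		if (prev > 0)
			return 1;	/* cheap path: cached credit available */

		/* Cache exhausted: refresh from "hardware" once, then retry */
		atomic_store(&room, hw_cmd_room());
		prev = atomic_fetch_sub(&room, 1);
		return prev > 0;
	}

	int main(void)
	{
		int i;

		/* First four sends consume the refreshed credits, then busy */
		for (i = 0; i < 6; i++)
			printf("cmd %d: %s\n", i,
			       get_credit() ? "sent" : "HOST_BUSY");
		return 0;
	}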
1991 | |||
1992 | /** | ||
1993 | * cxlflash_wait_resp() - polls for a response or timeout to a sent AFU command | ||
1994 | * @afu: AFU associated with the host. | ||
1995 | * @cmd: AFU command that was sent. | ||
1996 | */ | ||
1997 | void cxlflash_wait_resp(struct afu *afu, struct afu_cmd *cmd) | ||
1998 | { | ||
1999 | ulong timeout = cmd->rcb.timeout * 2 * HZ; /* relative jiffies */ | ||
2000 | |||
2001 | timeout = wait_for_completion_timeout(&cmd->cevent, timeout); | ||
2002 | if (!timeout) | ||
2003 | cxlflash_context_reset(cmd); | ||
2004 | |||
2005 | if (unlikely(cmd->sa.ioasc != 0)) | ||
2006 | pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, " | ||
2007 | "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0], | ||
2008 | cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc, | ||
2009 | cmd->sa.rc.fc_rc); | ||
2010 | } | ||
2011 | |||
2012 | /** | ||
2013 | * cxlflash_afu_sync() - builds and sends an AFU sync command | ||
2014 | * @afu: AFU associated with the host. | ||
2015 | * @ctx_hndl_u: Identifies context requesting sync. | ||
2016 | * @res_hndl_u: Identifies resource requesting sync. | ||
2017 | * @mode: Type of sync to issue (lightweight, heavyweight, global). | ||
2018 | * | ||
2019 | * The AFU can only take one sync command at a time. This routine enforces | ||
2020 | * this limitation by using a mutex to provide exclusive access to the AFU | ||
2021 | * during the sync. Since the sync can sleep when operations are issued | ||
2022 | * concurrently, callers must not be in interrupt context. | ||
2023 | * | ||
2024 | * Return: | ||
2025 | * 0 on success | ||
2026 | * -1 on failure | ||
2027 | */ | ||
2028 | int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u, | ||
2029 | res_hndl_t res_hndl_u, u8 mode) | ||
2030 | { | ||
2031 | struct afu_cmd *cmd = NULL; | ||
2032 | int rc = 0; | ||
2033 | int retry_cnt = 0; | ||
2034 | static DEFINE_MUTEX(sync_active); | ||
2035 | |||
2036 | mutex_lock(&sync_active); | ||
2037 | retry: | ||
2038 | cmd = cxlflash_cmd_checkout(afu); | ||
2039 | if (unlikely(!cmd)) { | ||
2040 | retry_cnt++; | ||
2041 | udelay(1000 * retry_cnt); | ||
2042 | if (retry_cnt < MC_RETRY_CNT) | ||
2043 | goto retry; | ||
2044 | pr_err("%s: could not get a free command\n", __func__); | ||
2045 | rc = -1; | ||
2046 | goto out; | ||
2047 | } | ||
2048 | |||
2049 | pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u); | ||
2050 | |||
2051 | memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb)); | ||
2052 | |||
2053 | cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD; | ||
2054 | cmd->rcb.port_sel = 0x0; /* NA */ | ||
2055 | cmd->rcb.lun_id = 0x0; /* NA */ | ||
2056 | cmd->rcb.data_len = 0x0; | ||
2057 | cmd->rcb.data_ea = 0x0; | ||
2058 | cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT; | ||
2059 | |||
2060 | cmd->rcb.cdb[0] = 0xC0; /* AFU Sync */ | ||
2061 | cmd->rcb.cdb[1] = mode; | ||
2062 | |||
2063 | /* The cdb is aligned, no unaligned accessors required */ | ||
2064 | *((u16 *)&cmd->rcb.cdb[2]) = swab16(ctx_hndl_u); | ||
2065 | *((u32 *)&cmd->rcb.cdb[4]) = swab32(res_hndl_u); | ||
2066 | |||
2067 | rc = cxlflash_send_cmd(afu, cmd); | ||
2068 | if (unlikely(rc)) | ||
2069 | goto out; | ||
2070 | |||
2071 | cxlflash_wait_resp(afu, cmd); | ||
2072 | |||
2073 | /* B_ERROR is set on timeout by the context reset handler */ | ||
2074 | if (unlikely((cmd->sa.ioasc != 0) || | ||
2075 | (cmd->sa.host_use_b[0] & B_ERROR))) | ||
2076 | rc = -1; | ||
2077 | out: | ||
2078 | mutex_unlock(&sync_active); | ||
2079 | if (cmd) | ||
2080 | cxlflash_cmd_checkin(cmd); | ||
2081 | pr_debug("%s: returning rc=%d\n", __func__, rc); | ||
2082 | return rc; | ||
2083 | } | ||
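
From a caller's perspective the contract is simple: invoke from process context only, and the internal mutex serializes concurrent syncs. A hypothetical caller sketch; AFU_LW_SYNC is assumed here to be the lightweight mode constant from sislite.h:

	/*
	 * Hypothetical caller: quiesce AFU state for a context after its
	 * resource handle goes away. Must be process context, since
	 * cxlflash_afu_sync() can sleep.
	 */
	static int example_sync_after_teardown(struct afu *afu,
					       ctx_hndl_t ctxid,
					       res_hndl_t rhndl)
	{
		int rc;

		rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
		if (unlikely(rc))
			pr_err("%s: AFU sync failed rc=%d\n", __func__, rc);

		return rc;
	}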
2084 | |||
2085 | /** | ||
2086 | * cxlflash_afu_reset() - resets the AFU | ||
2087 | * @cxlflash: Internal structure associated with the host. | ||
2088 | * | ||
2089 | * Return: | ||
2090 | * 0 on success | ||
2091 | * A failure value from internal services. | ||
2092 | */ | ||
2093 | int cxlflash_afu_reset(struct cxlflash_cfg *cfg) | ||
2094 | { | ||
2095 | int rc = 0; | ||
2096 | /* Stop the context before the reset. Since the context is torn | ||
2097 | * down by the reset, restart it once the reset is complete. | ||
2098 | */ | ||
2099 | |||
2100 | term_afu(cfg); | ||
2101 | |||
2102 | rc = init_afu(cfg); | ||
2103 | |||
2104 | pr_debug("%s: returning rc=%d\n", __func__, rc); | ||
2105 | return rc; | ||
2106 | } | ||
2107 | |||
2108 | /** | ||
2109 | * cxlflash_worker_thread() - work thread handler for the AFU | ||
2110 | * @work: Work structure contained within cxlflash associated with host. | ||
2111 | * | ||
2112 | * Handles the following events: | ||
2113 | * - Link reset which cannot be performed on interrupt context due to | ||
2114 | * blocking up to a few seconds | ||
2115 | * - Read AFU command room | ||
2116 | */ | ||
2117 | static void cxlflash_worker_thread(struct work_struct *work) | ||
2118 | { | ||
2119 | struct cxlflash_cfg *cfg = | ||
2120 | container_of(work, struct cxlflash_cfg, work_q); | ||
2121 | struct afu *afu = cfg->afu; | ||
2122 | int port; | ||
2123 | ulong lock_flags; | ||
2124 | |||
2125 | spin_lock_irqsave(cfg->host->host_lock, lock_flags); | ||
2126 | |||
2127 | if (cfg->lr_state == LINK_RESET_REQUIRED) { | ||
2128 | port = cfg->lr_port; | ||
2129 | if (port < 0) | ||
2130 | pr_err("%s: invalid port index %d\n", __func__, port); | ||
2131 | else { | ||
2132 | spin_unlock_irqrestore(cfg->host->host_lock, | ||
2133 | lock_flags); | ||
2134 | |||
2135 | /* The reset can block... */ | ||
2136 | afu_link_reset(afu, port, | ||
2137 | &afu->afu_map-> | ||
2138 | global.fc_regs[port][0]); | ||
2139 | spin_lock_irqsave(cfg->host->host_lock, lock_flags); | ||
2140 | } | ||
2141 | |||
2142 | cfg->lr_state = LINK_RESET_COMPLETE; | ||
2143 | } | ||
2144 | |||
2145 | if (afu->read_room) { | ||
2146 | atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room)); | ||
2147 | afu->read_room = false; | ||
2148 | } | ||
2149 | |||
2150 | spin_unlock_irqrestore(cfg->host->host_lock, lock_flags); | ||
2151 | } | ||
2152 | |||
2153 | /** | ||
2154 | * cxlflash_probe() - PCI entry point to add host | ||
2155 | * @pdev: PCI device associated with the host. | ||
2156 | * @dev_id: PCI device id associated with device. | ||
2157 | * | ||
2158 | * Return: 0 on success / non-zero on failure | ||
2159 | */ | ||
2160 | static int cxlflash_probe(struct pci_dev *pdev, | ||
2161 | const struct pci_device_id *dev_id) | ||
2162 | { | ||
2163 | struct Scsi_Host *host; | ||
2164 | struct cxlflash_cfg *cfg = NULL; | ||
2165 | struct device *phys_dev; | ||
2166 | struct dev_dependent_vals *ddv; | ||
2167 | int rc = 0; | ||
2168 | |||
2169 | dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n", | ||
2170 | __func__, pdev->irq); | ||
2171 | |||
2172 | ddv = (struct dev_dependent_vals *)dev_id->driver_data; | ||
2173 | driver_template.max_sectors = ddv->max_sectors; | ||
2174 | |||
2175 | host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg)); | ||
2176 | if (!host) { | ||
2177 | dev_err(&pdev->dev, "%s: call to scsi_host_alloc failed!\n", | ||
2178 | __func__); | ||
2179 | rc = -ENOMEM; | ||
2180 | goto out; | ||
2181 | } | ||
2182 | |||
2183 | host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS; | ||
2184 | host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET; | ||
2185 | host->max_channel = NUM_FC_PORTS - 1; | ||
2186 | host->unique_id = host->host_no; | ||
2187 | host->max_cmd_len = CXLFLASH_MAX_CDB_LEN; | ||
2188 | |||
2189 | cfg = (struct cxlflash_cfg *)host->hostdata; | ||
2190 | cfg->host = host; | ||
2191 | rc = alloc_mem(cfg); | ||
2192 | if (rc) { | ||
2193 | dev_err(&pdev->dev, "%s: call to alloc_mem failed!\n", | ||
2194 | __func__); | ||
2195 | rc = -ENOMEM; | ||
2196 | goto out; | ||
2197 | } | ||
2198 | |||
2199 | cfg->init_state = INIT_STATE_NONE; | ||
2200 | cfg->dev = pdev; | ||
2201 | cfg->dev_id = (struct pci_device_id *)dev_id; | ||
2202 | cfg->mcctx = NULL; | ||
2203 | cfg->err_recovery_active = 0; | ||
2204 | |||
2205 | init_waitqueue_head(&cfg->tmf_waitq); | ||
2206 | init_waitqueue_head(&cfg->eeh_waitq); | ||
2207 | |||
2208 | INIT_WORK(&cfg->work_q, cxlflash_worker_thread); | ||
2209 | cfg->lr_state = LINK_RESET_INVALID; | ||
2210 | cfg->lr_port = -1; | ||
2211 | |||
2212 | pci_set_drvdata(pdev, cfg); | ||
2213 | |||
2214 | /* Use the special service provided to look up the physical | ||
2215 | * PCI device, since we are called on the probe of the virtual | ||
2216 | * PCI host bus (vphb) | ||
2217 | */ | ||
2218 | phys_dev = cxl_get_phys_dev(pdev); | ||
2219 | if (!dev_is_pci(phys_dev)) { | ||
2220 | pr_err("%s: not a pci dev\n", __func__); | ||
2221 | rc = -ENODEV; | ||
2222 | goto out_remove; | ||
2223 | } | ||
2224 | cfg->parent_dev = to_pci_dev(phys_dev); | ||
2225 | |||
2226 | cfg->cxl_afu = cxl_pci_to_afu(pdev); | ||
2227 | |||
2228 | rc = init_pci(cfg); | ||
2229 | if (rc) { | ||
2230 | dev_err(&pdev->dev, "%s: call to init_pci " | ||
2231 | "failed rc=%d!\n", __func__, rc); | ||
2232 | goto out_remove; | ||
2233 | } | ||
2234 | cfg->init_state = INIT_STATE_PCI; | ||
2235 | |||
2236 | rc = init_afu(cfg); | ||
2237 | if (rc) { | ||
2238 | dev_err(&pdev->dev, "%s: call to init_afu " | ||
2239 | "failed rc=%d!\n", __func__, rc); | ||
2240 | goto out_remove; | ||
2241 | } | ||
2242 | cfg->init_state = INIT_STATE_AFU; | ||
2243 | |||
2245 | rc = init_scsi(cfg); | ||
2246 | if (rc) { | ||
2247 | dev_err(&pdev->dev, "%s: call to init_scsi " | ||
2248 | "failed rc=%d!\n", __func__, rc); | ||
2249 | goto out_remove; | ||
2250 | } | ||
2251 | cfg->init_state = INIT_STATE_SCSI; | ||
2252 | |||
2253 | out: | ||
2254 | pr_debug("%s: returning rc=%d\n", __func__, rc); | ||
2255 | return rc; | ||
2256 | |||
2257 | out_remove: | ||
2258 | cxlflash_remove(pdev); | ||
2259 | goto out; | ||
2260 | } | ||
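
The init_state ladder in probe() exists so a single teardown routine can unwind exactly as much as was built, whether it is reached via the out_remove error path or a normal remove. A sketch of that unwind pattern; it mirrors what cxlflash_remove() presumably does, with term_scsi() and term_pci() as hypothetical helper names (term_afu() is the real routine used elsewhere in this file):

	/*
	 * Illustrative unwind keyed off init_state: each case falls
	 * through so later stages are undone before earlier ones.
	 */
	static void example_unwind(struct cxlflash_cfg *cfg)
	{
		switch (cfg->init_state) {
		case INIT_STATE_SCSI:
			term_scsi(cfg);		/* hypothetical helper */
			/* fall through */
		case INIT_STATE_AFU:
			term_afu(cfg);
			/* fall through */
		case INIT_STATE_PCI:
			term_pci(cfg);		/* hypothetical helper */
			/* fall through */
		case INIT_STATE_NONE:
			break;
		}
	}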
2261 | |||
2262 | /* | ||
2263 | * PCI driver structure | ||
2264 | */ | ||
2265 | static struct pci_driver cxlflash_driver = { | ||
2266 | .name = CXLFLASH_NAME, | ||
2267 | .id_table = cxlflash_pci_table, | ||
2268 | .probe = cxlflash_probe, | ||
2269 | .remove = cxlflash_remove, | ||
2270 | }; | ||
2271 | |||
2272 | /** | ||
2273 | * init_cxlflash() - module entry point | ||
2274 | * | ||
2275 | * Return: 0 on success / non-zero on failure | ||
2276 | */ | ||
2277 | static int __init init_cxlflash(void) | ||
2278 | { | ||
2279 | pr_info("%s: IBM Power CXL Flash Adapter: %s\n", | ||
2280 | __func__, CXLFLASH_DRIVER_DATE); | ||
2281 | |||
2282 | return pci_register_driver(&cxlflash_driver); | ||
2283 | } | ||
2284 | |||
2285 | /** | ||
2286 | * exit_cxlflash() - module exit point | ||
2287 | */ | ||
2288 | static void __exit exit_cxlflash(void) | ||
2289 | { | ||
2290 | pci_unregister_driver(&cxlflash_driver); | ||
2291 | } | ||
2292 | |||
2293 | module_init(init_cxlflash); | ||
2294 | module_exit(exit_cxlflash); | ||