Diffstat (limited to 'drivers/scsi')
57 files changed, 4247 insertions(+), 938 deletions(-)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index a06e608789e3..29684c8142b0 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -619,6 +619,7 @@ config SCSI_ARCMSR
619 | 619 | ||
620 | source "drivers/scsi/megaraid/Kconfig.megaraid" | 620 | source "drivers/scsi/megaraid/Kconfig.megaraid" |
621 | source "drivers/scsi/mpt2sas/Kconfig" | 621 | source "drivers/scsi/mpt2sas/Kconfig" |
622 | source "drivers/scsi/ufs/Kconfig" | ||
622 | 623 | ||
623 | config SCSI_HPTIOP | 624 | config SCSI_HPTIOP |
624 | tristate "HighPoint RocketRAID 3xxx/4xxx Controller support" | 625 | tristate "HighPoint RocketRAID 3xxx/4xxx Controller support" |
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index ad24e065b1e5..8deedeaf5608 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -108,6 +108,7 @@ obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
108 | obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/ | 108 | obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/ |
109 | obj-$(CONFIG_MEGARAID_SAS) += megaraid/ | 109 | obj-$(CONFIG_MEGARAID_SAS) += megaraid/ |
110 | obj-$(CONFIG_SCSI_MPT2SAS) += mpt2sas/ | 110 | obj-$(CONFIG_SCSI_MPT2SAS) += mpt2sas/ |
111 | obj-$(CONFIG_SCSI_UFSHCD) += ufs/ | ||
111 | obj-$(CONFIG_SCSI_ACARD) += atp870u.o | 112 | obj-$(CONFIG_SCSI_ACARD) += atp870u.o |
112 | obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o | 113 | obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o |
113 | obj-$(CONFIG_SCSI_GDTH) += gdth.o | 114 | obj-$(CONFIG_SCSI_GDTH) += gdth.o |
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index f29d5121d5ed..68ce08552f69 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -2582,7 +2582,7 @@ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2582 | * this than via the PCI device table | 2582 | * this than via the PCI device table |
2583 | */ | 2583 | */ |
2584 | if (ent->device == PCI_DEVICE_ID_ARTOP_AEC7610) { | 2584 | if (ent->device == PCI_DEVICE_ID_ARTOP_AEC7610) { |
2585 | error = pci_read_config_byte(pdev, PCI_CLASS_REVISION, &atpdev->chip_ver); | 2585 | atpdev->chip_ver = pdev->revision; |
2586 | if (atpdev->chip_ver < 2) | 2586 | if (atpdev->chip_ver < 2) |
2587 | goto err_eio; | 2587 | goto err_eio; |
2588 | } | 2588 | } |
@@ -2601,7 +2601,7 @@ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2601 | base_io &= 0xfffffff8; | 2601 | base_io &= 0xfffffff8; |
2602 | 2602 | ||
2603 | if ((ent->device == ATP880_DEVID1)||(ent->device == ATP880_DEVID2)) { | 2603 | if ((ent->device == ATP880_DEVID1)||(ent->device == ATP880_DEVID2)) { |
2604 | error = pci_read_config_byte(pdev, PCI_CLASS_REVISION, &atpdev->chip_ver); | 2604 | atpdev->chip_ver = pdev->revision; |
2605 | pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);//JCC082803 | 2605 | pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);//JCC082803 |
2606 | 2606 | ||
2607 | host_id = inb(base_io + 0x39); | 2607 | host_id = inb(base_io + 0x39); |
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
index a796de935054..4ad7e368bbc2 100644
--- a/drivers/scsi/bfa/bfa.h
+++ b/drivers/scsi/bfa/bfa.h
@@ -225,9 +225,9 @@ struct bfa_faa_args_s {
225 | }; | 225 | }; |
226 | 226 | ||
227 | struct bfa_iocfc_s { | 227 | struct bfa_iocfc_s { |
228 | bfa_fsm_t fsm; | ||
228 | struct bfa_s *bfa; | 229 | struct bfa_s *bfa; |
229 | struct bfa_iocfc_cfg_s cfg; | 230 | struct bfa_iocfc_cfg_s cfg; |
230 | int action; | ||
231 | u32 req_cq_pi[BFI_IOC_MAX_CQS]; | 231 | u32 req_cq_pi[BFI_IOC_MAX_CQS]; |
232 | u32 rsp_cq_ci[BFI_IOC_MAX_CQS]; | 232 | u32 rsp_cq_ci[BFI_IOC_MAX_CQS]; |
233 | u8 hw_qid[BFI_IOC_MAX_CQS]; | 233 | u8 hw_qid[BFI_IOC_MAX_CQS]; |
@@ -236,7 +236,9 @@ struct bfa_iocfc_s {
236 | struct bfa_cb_qe_s dis_hcb_qe; | 236 | struct bfa_cb_qe_s dis_hcb_qe; |
237 | struct bfa_cb_qe_s en_hcb_qe; | 237 | struct bfa_cb_qe_s en_hcb_qe; |
238 | struct bfa_cb_qe_s stats_hcb_qe; | 238 | struct bfa_cb_qe_s stats_hcb_qe; |
239 | bfa_boolean_t cfgdone; | 239 | bfa_boolean_t submod_enabled; |
240 | bfa_boolean_t cb_reqd; /* Driver call back reqd */ | ||
241 | bfa_status_t op_status; /* Status of bfa iocfc op */ | ||
240 | 242 | ||
241 | struct bfa_dma_s cfg_info; | 243 | struct bfa_dma_s cfg_info; |
242 | struct bfi_iocfc_cfg_s *cfginfo; | 244 | struct bfi_iocfc_cfg_s *cfginfo; |
@@ -341,8 +343,6 @@ void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
341 | void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start, | 343 | void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start, |
342 | u32 *end); | 344 | u32 *end); |
343 | void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns); | 345 | void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns); |
344 | wwn_t bfa_iocfc_get_pwwn(struct bfa_s *bfa); | ||
345 | wwn_t bfa_iocfc_get_nwwn(struct bfa_s *bfa); | ||
346 | int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, | 346 | int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, |
347 | struct bfi_pbc_vport_s *pbc_vport); | 347 | struct bfi_pbc_vport_s *pbc_vport); |
348 | 348 | ||
@@ -428,7 +428,6 @@ bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa,
428 | 428 | ||
429 | void bfa_iocfc_enable(struct bfa_s *bfa); | 429 | void bfa_iocfc_enable(struct bfa_s *bfa); |
430 | void bfa_iocfc_disable(struct bfa_s *bfa); | 430 | void bfa_iocfc_disable(struct bfa_s *bfa); |
431 | void bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status); | ||
432 | #define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \ | 431 | #define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \ |
433 | bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout) | 432 | bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout) |
434 | 433 | ||
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 4bd546bcc240..456e5762977d 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -200,13 +200,431 @@ enum {
200 | #define DEF_CFG_NUM_SBOOT_LUNS 16 | 200 | #define DEF_CFG_NUM_SBOOT_LUNS 16 |
201 | 201 | ||
202 | /* | 202 | /* |
203 | * IOCFC state machine definitions/declarations | ||
204 | */ | ||
205 | bfa_fsm_state_decl(bfa_iocfc, stopped, struct bfa_iocfc_s, enum iocfc_event); | ||
206 | bfa_fsm_state_decl(bfa_iocfc, initing, struct bfa_iocfc_s, enum iocfc_event); | ||
207 | bfa_fsm_state_decl(bfa_iocfc, dconf_read, struct bfa_iocfc_s, enum iocfc_event); | ||
208 | bfa_fsm_state_decl(bfa_iocfc, init_cfg_wait, | ||
209 | struct bfa_iocfc_s, enum iocfc_event); | ||
210 | bfa_fsm_state_decl(bfa_iocfc, init_cfg_done, | ||
211 | struct bfa_iocfc_s, enum iocfc_event); | ||
212 | bfa_fsm_state_decl(bfa_iocfc, operational, | ||
213 | struct bfa_iocfc_s, enum iocfc_event); | ||
214 | bfa_fsm_state_decl(bfa_iocfc, dconf_write, | ||
215 | struct bfa_iocfc_s, enum iocfc_event); | ||
216 | bfa_fsm_state_decl(bfa_iocfc, stopping, struct bfa_iocfc_s, enum iocfc_event); | ||
217 | bfa_fsm_state_decl(bfa_iocfc, enabling, struct bfa_iocfc_s, enum iocfc_event); | ||
218 | bfa_fsm_state_decl(bfa_iocfc, cfg_wait, struct bfa_iocfc_s, enum iocfc_event); | ||
219 | bfa_fsm_state_decl(bfa_iocfc, disabling, struct bfa_iocfc_s, enum iocfc_event); | ||
220 | bfa_fsm_state_decl(bfa_iocfc, disabled, struct bfa_iocfc_s, enum iocfc_event); | ||
221 | bfa_fsm_state_decl(bfa_iocfc, failed, struct bfa_iocfc_s, enum iocfc_event); | ||
222 | bfa_fsm_state_decl(bfa_iocfc, init_failed, | ||
223 | struct bfa_iocfc_s, enum iocfc_event); | ||
224 | |||
225 | /* | ||
203 | * forward declaration for IOC FC functions | 226 | * forward declaration for IOC FC functions |
204 | */ | 227 | */ |
228 | static void bfa_iocfc_start_submod(struct bfa_s *bfa); | ||
229 | static void bfa_iocfc_disable_submod(struct bfa_s *bfa); | ||
230 | static void bfa_iocfc_send_cfg(void *bfa_arg); | ||
205 | static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status); | 231 | static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status); |
206 | static void bfa_iocfc_disable_cbfn(void *bfa_arg); | 232 | static void bfa_iocfc_disable_cbfn(void *bfa_arg); |
207 | static void bfa_iocfc_hbfail_cbfn(void *bfa_arg); | 233 | static void bfa_iocfc_hbfail_cbfn(void *bfa_arg); |
208 | static void bfa_iocfc_reset_cbfn(void *bfa_arg); | 234 | static void bfa_iocfc_reset_cbfn(void *bfa_arg); |
209 | static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn; | 235 | static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn; |
236 | static void bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete); | ||
237 | static void bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl); | ||
238 | static void bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl); | ||
239 | static void bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl); | ||
240 | |||
241 | static void | ||
242 | bfa_iocfc_sm_stopped_entry(struct bfa_iocfc_s *iocfc) | ||
243 | { | ||
244 | } | ||
245 | |||
246 | static void | ||
247 | bfa_iocfc_sm_stopped(struct bfa_iocfc_s *iocfc, enum iocfc_event event) | ||
248 | { | ||
249 | bfa_trc(iocfc->bfa, event); | ||
250 | |||
251 | switch (event) { | ||
252 | case IOCFC_E_INIT: | ||
253 | case IOCFC_E_ENABLE: | ||
254 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_initing); | ||
255 | break; | ||
256 | default: | ||
257 | bfa_sm_fault(iocfc->bfa, event); | ||
258 | break; | ||
259 | } | ||
260 | } | ||
261 | |||
262 | static void | ||
263 | bfa_iocfc_sm_initing_entry(struct bfa_iocfc_s *iocfc) | ||
264 | { | ||
265 | bfa_ioc_enable(&iocfc->bfa->ioc); | ||
266 | } | ||
267 | |||
268 | static void | ||
269 | bfa_iocfc_sm_initing(struct bfa_iocfc_s *iocfc, enum iocfc_event event) | ||
270 | { | ||
271 | bfa_trc(iocfc->bfa, event); | ||
272 | |||
273 | switch (event) { | ||
274 | case IOCFC_E_IOC_ENABLED: | ||
275 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read); | ||
276 | break; | ||
277 | case IOCFC_E_IOC_FAILED: | ||
278 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed); | ||
279 | break; | ||
280 | default: | ||
281 | bfa_sm_fault(iocfc->bfa, event); | ||
282 | break; | ||
283 | } | ||
284 | } | ||
285 | |||
286 | static void | ||
287 | bfa_iocfc_sm_dconf_read_entry(struct bfa_iocfc_s *iocfc) | ||
288 | { | ||
289 | bfa_dconf_modinit(iocfc->bfa); | ||
290 | } | ||
291 | |||
292 | static void | ||
293 | bfa_iocfc_sm_dconf_read(struct bfa_iocfc_s *iocfc, enum iocfc_event event) | ||
294 | { | ||
295 | bfa_trc(iocfc->bfa, event); | ||
296 | |||
297 | switch (event) { | ||
298 | case IOCFC_E_DCONF_DONE: | ||
299 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_wait); | ||
300 | break; | ||
301 | case IOCFC_E_IOC_FAILED: | ||
302 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed); | ||
303 | break; | ||
304 | default: | ||
305 | bfa_sm_fault(iocfc->bfa, event); | ||
306 | break; | ||
307 | } | ||
308 | } | ||
309 | |||
310 | static void | ||
311 | bfa_iocfc_sm_init_cfg_wait_entry(struct bfa_iocfc_s *iocfc) | ||
312 | { | ||
313 | bfa_iocfc_send_cfg(iocfc->bfa); | ||
314 | } | ||
315 | |||
316 | static void | ||
317 | bfa_iocfc_sm_init_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event) | ||
318 | { | ||
319 | bfa_trc(iocfc->bfa, event); | ||
320 | |||
321 | switch (event) { | ||
322 | case IOCFC_E_CFG_DONE: | ||
323 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_done); | ||
324 | break; | ||
325 | case IOCFC_E_IOC_FAILED: | ||
326 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed); | ||
327 | break; | ||
328 | default: | ||
329 | bfa_sm_fault(iocfc->bfa, event); | ||
330 | break; | ||
331 | } | ||
332 | } | ||
333 | |||
334 | static void | ||
335 | bfa_iocfc_sm_init_cfg_done_entry(struct bfa_iocfc_s *iocfc) | ||
336 | { | ||
337 | iocfc->bfa->iocfc.op_status = BFA_STATUS_OK; | ||
338 | bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe, | ||
339 | bfa_iocfc_init_cb, iocfc->bfa); | ||
340 | } | ||
341 | |||
342 | static void | ||
343 | bfa_iocfc_sm_init_cfg_done(struct bfa_iocfc_s *iocfc, enum iocfc_event event) | ||
344 | { | ||
345 | bfa_trc(iocfc->bfa, event); | ||
346 | |||
347 | switch (event) { | ||
348 | case IOCFC_E_START: | ||
349 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational); | ||
350 | break; | ||
351 | case IOCFC_E_STOP: | ||
352 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping); | ||
353 | break; | ||
354 | case IOCFC_E_DISABLE: | ||
355 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); | ||
356 | break; | ||
357 | case IOCFC_E_IOC_FAILED: | ||
358 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed); | ||
359 | break; | ||
360 | default: | ||
361 | bfa_sm_fault(iocfc->bfa, event); | ||
362 | break; | ||
363 | } | ||
364 | } | ||
365 | |||
366 | static void | ||
367 | bfa_iocfc_sm_operational_entry(struct bfa_iocfc_s *iocfc) | ||
368 | { | ||
369 | bfa_fcport_init(iocfc->bfa); | ||
370 | bfa_iocfc_start_submod(iocfc->bfa); | ||
371 | } | ||
372 | |||
373 | static void | ||
374 | bfa_iocfc_sm_operational(struct bfa_iocfc_s *iocfc, enum iocfc_event event) | ||
375 | { | ||
376 | bfa_trc(iocfc->bfa, event); | ||
377 | |||
378 | switch (event) { | ||
379 | case IOCFC_E_STOP: | ||
380 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write); | ||
381 | break; | ||
382 | case IOCFC_E_DISABLE: | ||
383 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); | ||
384 | break; | ||
385 | case IOCFC_E_IOC_FAILED: | ||
386 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed); | ||
387 | break; | ||
388 | default: | ||
389 | bfa_sm_fault(iocfc->bfa, event); | ||
390 | break; | ||
391 | } | ||
392 | } | ||
393 | |||
394 | static void | ||
395 | bfa_iocfc_sm_dconf_write_entry(struct bfa_iocfc_s *iocfc) | ||
396 | { | ||
397 | bfa_dconf_modexit(iocfc->bfa); | ||
398 | } | ||
399 | |||
400 | static void | ||
401 | bfa_iocfc_sm_dconf_write(struct bfa_iocfc_s *iocfc, enum iocfc_event event) | ||
402 | { | ||
403 | bfa_trc(iocfc->bfa, event); | ||
404 | |||
405 | switch (event) { | ||
406 | case IOCFC_E_DCONF_DONE: | ||
407 | case IOCFC_E_IOC_FAILED: | ||
408 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping); | ||
409 | break; | ||
410 | default: | ||
411 | bfa_sm_fault(iocfc->bfa, event); | ||
412 | break; | ||
413 | } | ||
414 | } | ||
415 | |||
416 | static void | ||
417 | bfa_iocfc_sm_stopping_entry(struct bfa_iocfc_s *iocfc) | ||
418 | { | ||
419 | bfa_ioc_disable(&iocfc->bfa->ioc); | ||
420 | } | ||
421 | |||
422 | static void | ||
423 | bfa_iocfc_sm_stopping(struct bfa_iocfc_s *iocfc, enum iocfc_event event) | ||
424 | { | ||
425 | bfa_trc(iocfc->bfa, event); | ||
426 | |||
427 | switch (event) { | ||
428 | case IOCFC_E_IOC_DISABLED: | ||
429 | bfa_isr_disable(iocfc->bfa); | ||
430 | bfa_iocfc_disable_submod(iocfc->bfa); | ||
431 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped); | ||
432 | iocfc->bfa->iocfc.op_status = BFA_STATUS_OK; | ||
433 | bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.stop_hcb_qe, | ||
434 | bfa_iocfc_stop_cb, iocfc->bfa); | ||
435 | break; | ||
436 | default: | ||
437 | bfa_sm_fault(iocfc->bfa, event); | ||
438 | break; | ||
439 | } | ||
440 | } | ||
441 | |||
442 | static void | ||
443 | bfa_iocfc_sm_enabling_entry(struct bfa_iocfc_s *iocfc) | ||
444 | { | ||
445 | bfa_ioc_enable(&iocfc->bfa->ioc); | ||
446 | } | ||
447 | |||
448 | static void | ||
449 | bfa_iocfc_sm_enabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event) | ||
450 | { | ||
451 | bfa_trc(iocfc->bfa, event); | ||
452 | |||
453 | switch (event) { | ||
454 | case IOCFC_E_IOC_ENABLED: | ||
455 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait); | ||
456 | break; | ||
457 | case IOCFC_E_IOC_FAILED: | ||
458 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed); | ||
459 | |||
460 | if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE) | ||
461 | break; | ||
462 | |||
463 | iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED; | ||
464 | bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe, | ||
465 | bfa_iocfc_enable_cb, iocfc->bfa); | ||
466 | iocfc->bfa->iocfc.cb_reqd = BFA_FALSE; | ||
467 | break; | ||
468 | default: | ||
469 | bfa_sm_fault(iocfc->bfa, event); | ||
470 | break; | ||
471 | } | ||
472 | } | ||
473 | |||
474 | static void | ||
475 | bfa_iocfc_sm_cfg_wait_entry(struct bfa_iocfc_s *iocfc) | ||
476 | { | ||
477 | bfa_iocfc_send_cfg(iocfc->bfa); | ||
478 | } | ||
479 | |||
480 | static void | ||
481 | bfa_iocfc_sm_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event) | ||
482 | { | ||
483 | bfa_trc(iocfc->bfa, event); | ||
484 | |||
485 | switch (event) { | ||
486 | case IOCFC_E_CFG_DONE: | ||
487 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational); | ||
488 | if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE) | ||
489 | break; | ||
490 | |||
491 | iocfc->bfa->iocfc.op_status = BFA_STATUS_OK; | ||
492 | bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe, | ||
493 | bfa_iocfc_enable_cb, iocfc->bfa); | ||
494 | iocfc->bfa->iocfc.cb_reqd = BFA_FALSE; | ||
495 | break; | ||
496 | case IOCFC_E_IOC_FAILED: | ||
497 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed); | ||
498 | if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE) | ||
499 | break; | ||
500 | |||
501 | iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED; | ||
502 | bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe, | ||
503 | bfa_iocfc_enable_cb, iocfc->bfa); | ||
504 | iocfc->bfa->iocfc.cb_reqd = BFA_FALSE; | ||
505 | break; | ||
506 | default: | ||
507 | bfa_sm_fault(iocfc->bfa, event); | ||
508 | break; | ||
509 | } | ||
510 | } | ||
511 | |||
512 | static void | ||
513 | bfa_iocfc_sm_disabling_entry(struct bfa_iocfc_s *iocfc) | ||
514 | { | ||
515 | bfa_ioc_disable(&iocfc->bfa->ioc); | ||
516 | } | ||
517 | |||
518 | static void | ||
519 | bfa_iocfc_sm_disabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event) | ||
520 | { | ||
521 | bfa_trc(iocfc->bfa, event); | ||
522 | |||
523 | switch (event) { | ||
524 | case IOCFC_E_IOC_DISABLED: | ||
525 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabled); | ||
526 | break; | ||
527 | default: | ||
528 | bfa_sm_fault(iocfc->bfa, event); | ||
529 | break; | ||
530 | } | ||
531 | } | ||
532 | |||
533 | static void | ||
534 | bfa_iocfc_sm_disabled_entry(struct bfa_iocfc_s *iocfc) | ||
535 | { | ||
536 | bfa_isr_disable(iocfc->bfa); | ||
537 | bfa_iocfc_disable_submod(iocfc->bfa); | ||
538 | iocfc->bfa->iocfc.op_status = BFA_STATUS_OK; | ||
539 | bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe, | ||
540 | bfa_iocfc_disable_cb, iocfc->bfa); | ||
541 | } | ||
542 | |||
543 | static void | ||
544 | bfa_iocfc_sm_disabled(struct bfa_iocfc_s *iocfc, enum iocfc_event event) | ||
545 | { | ||
546 | bfa_trc(iocfc->bfa, event); | ||
547 | |||
548 | switch (event) { | ||
549 | case IOCFC_E_STOP: | ||
550 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write); | ||
551 | break; | ||
552 | case IOCFC_E_ENABLE: | ||
553 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_enabling); | ||
554 | break; | ||
555 | default: | ||
556 | bfa_sm_fault(iocfc->bfa, event); | ||
557 | break; | ||
558 | } | ||
559 | } | ||
560 | |||
561 | static void | ||
562 | bfa_iocfc_sm_failed_entry(struct bfa_iocfc_s *iocfc) | ||
563 | { | ||
564 | bfa_isr_disable(iocfc->bfa); | ||
565 | bfa_iocfc_disable_submod(iocfc->bfa); | ||
566 | } | ||
567 | |||
568 | static void | ||
569 | bfa_iocfc_sm_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event) | ||
570 | { | ||
571 | bfa_trc(iocfc->bfa, event); | ||
572 | |||
573 | switch (event) { | ||
574 | case IOCFC_E_STOP: | ||
575 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write); | ||
576 | break; | ||
577 | case IOCFC_E_DISABLE: | ||
578 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling); | ||
579 | break; | ||
580 | case IOCFC_E_IOC_ENABLED: | ||
581 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait); | ||
582 | break; | ||
583 | case IOCFC_E_IOC_FAILED: | ||
584 | break; | ||
585 | default: | ||
586 | bfa_sm_fault(iocfc->bfa, event); | ||
587 | break; | ||
588 | } | ||
589 | } | ||
590 | |||
591 | static void | ||
592 | bfa_iocfc_sm_init_failed_entry(struct bfa_iocfc_s *iocfc) | ||
593 | { | ||
594 | bfa_isr_disable(iocfc->bfa); | ||
595 | iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED; | ||
596 | bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe, | ||
597 | bfa_iocfc_init_cb, iocfc->bfa); | ||
598 | } | ||
599 | |||
600 | static void | ||
601 | bfa_iocfc_sm_init_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event) | ||
602 | { | ||
603 | bfa_trc(iocfc->bfa, event); | ||
604 | |||
605 | switch (event) { | ||
606 | case IOCFC_E_STOP: | ||
607 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping); | ||
608 | break; | ||
609 | case IOCFC_E_DISABLE: | ||
610 | bfa_ioc_disable(&iocfc->bfa->ioc); | ||
611 | break; | ||
612 | case IOCFC_E_IOC_ENABLED: | ||
613 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read); | ||
614 | break; | ||
615 | case IOCFC_E_IOC_DISABLED: | ||
616 | bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped); | ||
617 | iocfc->bfa->iocfc.op_status = BFA_STATUS_OK; | ||
618 | bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe, | ||
619 | bfa_iocfc_disable_cb, iocfc->bfa); | ||
620 | break; | ||
621 | case IOCFC_E_IOC_FAILED: | ||
622 | break; | ||
623 | default: | ||
624 | bfa_sm_fault(iocfc->bfa, event); | ||
625 | break; | ||
626 | } | ||
627 | } | ||
210 | 628 | ||
211 | /* | 629 | /* |
212 | * BFA Interrupt handling functions | 630 | * BFA Interrupt handling functions |
@@ -231,16 +649,19 @@ bfa_reqq_resume(struct bfa_s *bfa, int qid)
231 | } | 649 | } |
232 | } | 650 | } |
233 | 651 | ||
234 | static inline void | 652 | bfa_boolean_t |
235 | bfa_isr_rspq(struct bfa_s *bfa, int qid) | 653 | bfa_isr_rspq(struct bfa_s *bfa, int qid) |
236 | { | 654 | { |
237 | struct bfi_msg_s *m; | 655 | struct bfi_msg_s *m; |
238 | u32 pi, ci; | 656 | u32 pi, ci; |
239 | struct list_head *waitq; | 657 | struct list_head *waitq; |
658 | bfa_boolean_t ret; | ||
240 | 659 | ||
241 | ci = bfa_rspq_ci(bfa, qid); | 660 | ci = bfa_rspq_ci(bfa, qid); |
242 | pi = bfa_rspq_pi(bfa, qid); | 661 | pi = bfa_rspq_pi(bfa, qid); |
243 | 662 | ||
663 | ret = (ci != pi); | ||
664 | |||
244 | while (ci != pi) { | 665 | while (ci != pi) { |
245 | m = bfa_rspq_elem(bfa, qid, ci); | 666 | m = bfa_rspq_elem(bfa, qid, ci); |
246 | WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX); | 667 | WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX); |
@@ -260,6 +681,8 @@ bfa_isr_rspq(struct bfa_s *bfa, int qid)
260 | waitq = bfa_reqq(bfa, qid); | 681 | waitq = bfa_reqq(bfa, qid); |
261 | if (!list_empty(waitq)) | 682 | if (!list_empty(waitq)) |
262 | bfa_reqq_resume(bfa, qid); | 683 | bfa_reqq_resume(bfa, qid); |
684 | |||
685 | return ret; | ||
263 | } | 686 | } |
264 | 687 | ||
265 | static inline void | 688 | static inline void |
@@ -320,6 +743,7 @@ bfa_intx(struct bfa_s *bfa)
320 | { | 743 | { |
321 | u32 intr, qintr; | 744 | u32 intr, qintr; |
322 | int queue; | 745 | int queue; |
746 | bfa_boolean_t rspq_comp = BFA_FALSE; | ||
323 | 747 | ||
324 | intr = readl(bfa->iocfc.bfa_regs.intr_status); | 748 | intr = readl(bfa->iocfc.bfa_regs.intr_status); |
325 | 749 | ||
@@ -332,11 +756,12 @@ bfa_intx(struct bfa_s *bfa)
332 | */ | 756 | */ |
333 | if (bfa->queue_process) { | 757 | if (bfa->queue_process) { |
334 | for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++) | 758 | for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++) |
335 | bfa_isr_rspq(bfa, queue); | 759 | if (bfa_isr_rspq(bfa, queue)) |
760 | rspq_comp = BFA_TRUE; | ||
336 | } | 761 | } |
337 | 762 | ||
338 | if (!intr) | 763 | if (!intr) |
339 | return BFA_TRUE; | 764 | return (qintr | rspq_comp) ? BFA_TRUE : BFA_FALSE; |
340 | 765 | ||
341 | /* | 766 | /* |
342 | * CPE completion queue interrupt | 767 | * CPE completion queue interrupt |
@@ -525,11 +950,9 @@ bfa_iocfc_send_cfg(void *bfa_arg)
525 | * Enable interrupt coalescing if it is driver init path | 950 | * Enable interrupt coalescing if it is driver init path |
526 | * and not ioc disable/enable path. | 951 | * and not ioc disable/enable path. |
527 | */ | 952 | */ |
528 | if (!iocfc->cfgdone) | 953 | if (bfa_fsm_cmp_state(iocfc, bfa_iocfc_sm_init_cfg_wait)) |
529 | cfg_info->intr_attr.coalesce = BFA_TRUE; | 954 | cfg_info->intr_attr.coalesce = BFA_TRUE; |
530 | 955 | ||
531 | iocfc->cfgdone = BFA_FALSE; | ||
532 | |||
533 | /* | 956 | /* |
534 | * dma map IOC configuration itself | 957 | * dma map IOC configuration itself |
535 | */ | 958 | */ |
@@ -549,8 +972,6 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
549 | 972 | ||
550 | bfa->bfad = bfad; | 973 | bfa->bfad = bfad; |
551 | iocfc->bfa = bfa; | 974 | iocfc->bfa = bfa; |
552 | iocfc->action = BFA_IOCFC_ACT_NONE; | ||
553 | |||
554 | iocfc->cfg = *cfg; | 975 | iocfc->cfg = *cfg; |
555 | 976 | ||
556 | /* | 977 | /* |
@@ -683,6 +1104,8 @@ bfa_iocfc_start_submod(struct bfa_s *bfa)
683 | 1104 | ||
684 | for (i = 0; hal_mods[i]; i++) | 1105 | for (i = 0; hal_mods[i]; i++) |
685 | hal_mods[i]->start(bfa); | 1106 | hal_mods[i]->start(bfa); |
1107 | |||
1108 | bfa->iocfc.submod_enabled = BFA_TRUE; | ||
686 | } | 1109 | } |
687 | 1110 | ||
688 | /* | 1111 | /* |
@@ -693,8 +1116,13 @@ bfa_iocfc_disable_submod(struct bfa_s *bfa)
693 | { | 1116 | { |
694 | int i; | 1117 | int i; |
695 | 1118 | ||
1119 | if (bfa->iocfc.submod_enabled == BFA_FALSE) | ||
1120 | return; | ||
1121 | |||
696 | for (i = 0; hal_mods[i]; i++) | 1122 | for (i = 0; hal_mods[i]; i++) |
697 | hal_mods[i]->iocdisable(bfa); | 1123 | hal_mods[i]->iocdisable(bfa); |
1124 | |||
1125 | bfa->iocfc.submod_enabled = BFA_FALSE; | ||
698 | } | 1126 | } |
699 | 1127 | ||
700 | static void | 1128 | static void |
@@ -702,15 +1130,8 @@ bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
702 | { | 1130 | { |
703 | struct bfa_s *bfa = bfa_arg; | 1131 | struct bfa_s *bfa = bfa_arg; |
704 | 1132 | ||
705 | if (complete) { | 1133 | if (complete) |
706 | if (bfa->iocfc.cfgdone && BFA_DCONF_MOD(bfa)->flashdone) | 1134 | bfa_cb_init(bfa->bfad, bfa->iocfc.op_status); |
707 | bfa_cb_init(bfa->bfad, BFA_STATUS_OK); | ||
708 | else | ||
709 | bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED); | ||
710 | } else { | ||
711 | if (bfa->iocfc.cfgdone) | ||
712 | bfa->iocfc.action = BFA_IOCFC_ACT_NONE; | ||
713 | } | ||
714 | } | 1135 | } |
715 | 1136 | ||
716 | static void | 1137 | static void |
@@ -721,8 +1142,6 @@ bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
721 | 1142 | ||
722 | if (compl) | 1143 | if (compl) |
723 | complete(&bfad->comp); | 1144 | complete(&bfad->comp); |
724 | else | ||
725 | bfa->iocfc.action = BFA_IOCFC_ACT_NONE; | ||
726 | } | 1145 | } |
727 | 1146 | ||
728 | static void | 1147 | static void |
@@ -794,8 +1213,6 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
794 | fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs); | 1213 | fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs); |
795 | fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports); | 1214 | fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports); |
796 | 1215 | ||
797 | iocfc->cfgdone = BFA_TRUE; | ||
798 | |||
799 | /* | 1216 | /* |
800 | * configure queue register offsets as learnt from firmware | 1217 | * configure queue register offsets as learnt from firmware |
801 | */ | 1218 | */ |
@@ -811,22 +1228,13 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
811 | */ | 1228 | */ |
812 | bfa_msix_queue_install(bfa); | 1229 | bfa_msix_queue_install(bfa); |
813 | 1230 | ||
814 | /* | 1231 | if (bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn != 0) { |
815 | * Configuration is complete - initialize/start submodules | 1232 | bfa->ioc.attr->pwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn; |
816 | */ | 1233 | bfa->ioc.attr->nwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_nwwn; |
817 | bfa_fcport_init(bfa); | 1234 | bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE); |
818 | |||
819 | if (iocfc->action == BFA_IOCFC_ACT_INIT) { | ||
820 | if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE) | ||
821 | bfa_cb_queue(bfa, &iocfc->init_hcb_qe, | ||
822 | bfa_iocfc_init_cb, bfa); | ||
823 | } else { | ||
824 | if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE) | ||
825 | bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe, | ||
826 | bfa_iocfc_enable_cb, bfa); | ||
827 | bfa_iocfc_start_submod(bfa); | ||
828 | } | 1235 | } |
829 | } | 1236 | } |
1237 | |||
830 | void | 1238 | void |
831 | bfa_iocfc_reset_queues(struct bfa_s *bfa) | 1239 | bfa_iocfc_reset_queues(struct bfa_s *bfa) |
832 | { | 1240 | { |
@@ -840,6 +1248,23 @@ bfa_iocfc_reset_queues(struct bfa_s *bfa)
840 | } | 1248 | } |
841 | } | 1249 | } |
842 | 1250 | ||
1251 | /* | ||
1252 | * Process FAA pwwn msg from fw. | ||
1253 | */ | ||
1254 | static void | ||
1255 | bfa_iocfc_process_faa_addr(struct bfa_s *bfa, struct bfi_faa_addr_msg_s *msg) | ||
1256 | { | ||
1257 | struct bfa_iocfc_s *iocfc = &bfa->iocfc; | ||
1258 | struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; | ||
1259 | |||
1260 | cfgrsp->pbc_cfg.pbc_pwwn = msg->pwwn; | ||
1261 | cfgrsp->pbc_cfg.pbc_nwwn = msg->nwwn; | ||
1262 | |||
1263 | bfa->ioc.attr->pwwn = msg->pwwn; | ||
1264 | bfa->ioc.attr->nwwn = msg->nwwn; | ||
1265 | bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE); | ||
1266 | } | ||
1267 | |||
843 | /* Fabric Assigned Address specific functions */ | 1268 | /* Fabric Assigned Address specific functions */ |
844 | 1269 | ||
845 | /* | 1270 | /* |
@@ -855,84 +1280,13 @@ bfa_faa_validate_request(struct bfa_s *bfa)
855 | if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type)) | 1280 | if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type)) |
856 | return BFA_STATUS_FEATURE_NOT_SUPPORTED; | 1281 | return BFA_STATUS_FEATURE_NOT_SUPPORTED; |
857 | } else { | 1282 | } else { |
858 | if (!bfa_ioc_is_acq_addr(&bfa->ioc)) | 1283 | return BFA_STATUS_IOC_NON_OP; |
859 | return BFA_STATUS_IOC_NON_OP; | ||
860 | } | 1284 | } |
861 | 1285 | ||
862 | return BFA_STATUS_OK; | 1286 | return BFA_STATUS_OK; |
863 | } | 1287 | } |
864 | 1288 | ||
865 | bfa_status_t | 1289 | bfa_status_t |
866 | bfa_faa_enable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn, void *cbarg) | ||
867 | { | ||
868 | struct bfi_faa_en_dis_s faa_enable_req; | ||
869 | struct bfa_iocfc_s *iocfc = &bfa->iocfc; | ||
870 | bfa_status_t status; | ||
871 | |||
872 | iocfc->faa_args.faa_cb.faa_cbfn = cbfn; | ||
873 | iocfc->faa_args.faa_cb.faa_cbarg = cbarg; | ||
874 | |||
875 | status = bfa_faa_validate_request(bfa); | ||
876 | if (status != BFA_STATUS_OK) | ||
877 | return status; | ||
878 | |||
879 | if (iocfc->faa_args.busy == BFA_TRUE) | ||
880 | return BFA_STATUS_DEVBUSY; | ||
881 | |||
882 | if (iocfc->faa_args.faa_state == BFA_FAA_ENABLED) | ||
883 | return BFA_STATUS_FAA_ENABLED; | ||
884 | |||
885 | if (bfa_fcport_is_trunk_enabled(bfa)) | ||
886 | return BFA_STATUS_ERROR_TRUNK_ENABLED; | ||
887 | |||
888 | bfa_fcport_cfg_faa(bfa, BFA_FAA_ENABLED); | ||
889 | iocfc->faa_args.busy = BFA_TRUE; | ||
890 | |||
891 | memset(&faa_enable_req, 0, sizeof(struct bfi_faa_en_dis_s)); | ||
892 | bfi_h2i_set(faa_enable_req.mh, BFI_MC_IOCFC, | ||
893 | BFI_IOCFC_H2I_FAA_ENABLE_REQ, bfa_fn_lpu(bfa)); | ||
894 | |||
895 | bfa_ioc_mbox_send(&bfa->ioc, &faa_enable_req, | ||
896 | sizeof(struct bfi_faa_en_dis_s)); | ||
897 | |||
898 | return BFA_STATUS_OK; | ||
899 | } | ||
900 | |||
901 | bfa_status_t | ||
902 | bfa_faa_disable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn, | ||
903 | void *cbarg) | ||
904 | { | ||
905 | struct bfi_faa_en_dis_s faa_disable_req; | ||
906 | struct bfa_iocfc_s *iocfc = &bfa->iocfc; | ||
907 | bfa_status_t status; | ||
908 | |||
909 | iocfc->faa_args.faa_cb.faa_cbfn = cbfn; | ||
910 | iocfc->faa_args.faa_cb.faa_cbarg = cbarg; | ||
911 | |||
912 | status = bfa_faa_validate_request(bfa); | ||
913 | if (status != BFA_STATUS_OK) | ||
914 | return status; | ||
915 | |||
916 | if (iocfc->faa_args.busy == BFA_TRUE) | ||
917 | return BFA_STATUS_DEVBUSY; | ||
918 | |||
919 | if (iocfc->faa_args.faa_state == BFA_FAA_DISABLED) | ||
920 | return BFA_STATUS_FAA_DISABLED; | ||
921 | |||
922 | bfa_fcport_cfg_faa(bfa, BFA_FAA_DISABLED); | ||
923 | iocfc->faa_args.busy = BFA_TRUE; | ||
924 | |||
925 | memset(&faa_disable_req, 0, sizeof(struct bfi_faa_en_dis_s)); | ||
926 | bfi_h2i_set(faa_disable_req.mh, BFI_MC_IOCFC, | ||
927 | BFI_IOCFC_H2I_FAA_DISABLE_REQ, bfa_fn_lpu(bfa)); | ||
928 | |||
929 | bfa_ioc_mbox_send(&bfa->ioc, &faa_disable_req, | ||
930 | sizeof(struct bfi_faa_en_dis_s)); | ||
931 | |||
932 | return BFA_STATUS_OK; | ||
933 | } | ||
934 | |||
935 | bfa_status_t | ||
936 | bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr, | 1290 | bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr, |
937 | bfa_cb_iocfc_t cbfn, void *cbarg) | 1291 | bfa_cb_iocfc_t cbfn, void *cbarg) |
938 | { | 1292 | { |
@@ -963,38 +1317,6 @@ bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
963 | } | 1317 | } |
964 | 1318 | ||
965 | /* | 1319 | /* |
966 | * FAA enable response | ||
967 | */ | ||
968 | static void | ||
969 | bfa_faa_enable_reply(struct bfa_iocfc_s *iocfc, | ||
970 | struct bfi_faa_en_dis_rsp_s *rsp) | ||
971 | { | ||
972 | void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg; | ||
973 | bfa_status_t status = rsp->status; | ||
974 | |||
975 | WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn); | ||
976 | |||
977 | iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status); | ||
978 | iocfc->faa_args.busy = BFA_FALSE; | ||
979 | } | ||
980 | |||
981 | /* | ||
982 | * FAA disable response | ||
983 | */ | ||
984 | static void | ||
985 | bfa_faa_disable_reply(struct bfa_iocfc_s *iocfc, | ||
986 | struct bfi_faa_en_dis_rsp_s *rsp) | ||
987 | { | ||
988 | void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg; | ||
989 | bfa_status_t status = rsp->status; | ||
990 | |||
991 | WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn); | ||
992 | |||
993 | iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status); | ||
994 | iocfc->faa_args.busy = BFA_FALSE; | ||
995 | } | ||
996 | |||
997 | /* | ||
998 | * FAA query response | 1320 | * FAA query response |
999 | */ | 1321 | */ |
1000 | static void | 1322 | static void |
@@ -1023,25 +1345,10 @@ bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
1023 | { | 1345 | { |
1024 | struct bfa_s *bfa = bfa_arg; | 1346 | struct bfa_s *bfa = bfa_arg; |
1025 | 1347 | ||
1026 | if (status == BFA_STATUS_FAA_ACQ_ADDR) { | 1348 | if (status == BFA_STATUS_OK) |
1027 | bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, | 1349 | bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_ENABLED); |
1028 | bfa_iocfc_init_cb, bfa); | 1350 | else |
1029 | return; | 1351 | bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED); |
1030 | } | ||
1031 | |||
1032 | if (status != BFA_STATUS_OK) { | ||
1033 | bfa_isr_disable(bfa); | ||
1034 | if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) | ||
1035 | bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, | ||
1036 | bfa_iocfc_init_cb, bfa); | ||
1037 | else if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE) | ||
1038 | bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe, | ||
1039 | bfa_iocfc_enable_cb, bfa); | ||
1040 | return; | ||
1041 | } | ||
1042 | |||
1043 | bfa_iocfc_send_cfg(bfa); | ||
1044 | bfa_dconf_modinit(bfa); | ||
1045 | } | 1352 | } |
1046 | 1353 | ||
1047 | /* | 1354 | /* |
@@ -1052,17 +1359,7 @@ bfa_iocfc_disable_cbfn(void *bfa_arg)
1052 | { | 1359 | { |
1053 | struct bfa_s *bfa = bfa_arg; | 1360 | struct bfa_s *bfa = bfa_arg; |
1054 | 1361 | ||
1055 | bfa_isr_disable(bfa); | 1362 | bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_DISABLED); |
1056 | bfa_iocfc_disable_submod(bfa); | ||
1057 | |||
1058 | if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP) | ||
1059 | bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb, | ||
1060 | bfa); | ||
1061 | else { | ||
1062 | WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE); | ||
1063 | bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb, | ||
1064 | bfa); | ||
1065 | } | ||
1066 | } | 1363 | } |
1067 | 1364 | ||
1068 | /* | 1365 | /* |
@@ -1074,13 +1371,7 @@ bfa_iocfc_hbfail_cbfn(void *bfa_arg)
1074 | struct bfa_s *bfa = bfa_arg; | 1371 | struct bfa_s *bfa = bfa_arg; |
1075 | 1372 | ||
1076 | bfa->queue_process = BFA_FALSE; | 1373 | bfa->queue_process = BFA_FALSE; |
1077 | 1374 | bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED); | |
1078 | bfa_isr_disable(bfa); | ||
1079 | bfa_iocfc_disable_submod(bfa); | ||
1080 | |||
1081 | if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) | ||
1082 | bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb, | ||
1083 | bfa); | ||
1084 | } | 1375 | } |
1085 | 1376 | ||
1086 | /* | 1377 | /* |
@@ -1095,7 +1386,6 @@ bfa_iocfc_reset_cbfn(void *bfa_arg)
1095 | bfa_isr_enable(bfa); | 1386 | bfa_isr_enable(bfa); |
1096 | } | 1387 | } |
1097 | 1388 | ||
1098 | |||
1099 | /* | 1389 | /* |
1100 | * Query IOC memory requirement information. | 1390 | * Query IOC memory requirement information. |
1101 | */ | 1391 | */ |
@@ -1171,6 +1461,12 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1171 | INIT_LIST_HEAD(&bfa->comp_q); | 1461 | INIT_LIST_HEAD(&bfa->comp_q); |
1172 | for (i = 0; i < BFI_IOC_MAX_CQS; i++) | 1462 | for (i = 0; i < BFI_IOC_MAX_CQS; i++) |
1173 | INIT_LIST_HEAD(&bfa->reqq_waitq[i]); | 1463 | INIT_LIST_HEAD(&bfa->reqq_waitq[i]); |
1464 | |||
1465 | bfa->iocfc.cb_reqd = BFA_FALSE; | ||
1466 | bfa->iocfc.op_status = BFA_STATUS_OK; | ||
1467 | bfa->iocfc.submod_enabled = BFA_FALSE; | ||
1468 | |||
1469 | bfa_fsm_set_state(&bfa->iocfc, bfa_iocfc_sm_stopped); | ||
1174 | } | 1470 | } |
1175 | 1471 | ||
1176 | /* | 1472 | /* |
@@ -1179,8 +1475,7 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1179 | void | 1475 | void |
1180 | bfa_iocfc_init(struct bfa_s *bfa) | 1476 | bfa_iocfc_init(struct bfa_s *bfa) |
1181 | { | 1477 | { |
1182 | bfa->iocfc.action = BFA_IOCFC_ACT_INIT; | 1478 | bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_INIT); |
1183 | bfa_ioc_enable(&bfa->ioc); | ||
1184 | } | 1479 | } |
1185 | 1480 | ||
1186 | /* | 1481 | /* |
@@ -1190,8 +1485,7 @@ bfa_iocfc_init(struct bfa_s *bfa)
1190 | void | 1485 | void |
1191 | bfa_iocfc_start(struct bfa_s *bfa) | 1486 | bfa_iocfc_start(struct bfa_s *bfa) |
1192 | { | 1487 | { |
1193 | if (bfa->iocfc.cfgdone) | 1488 | bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_START); |
1194 | bfa_iocfc_start_submod(bfa); | ||
1195 | } | 1489 | } |
1196 | 1490 | ||
1197 | /* | 1491 | /* |
@@ -1201,12 +1495,8 @@ bfa_iocfc_start(struct bfa_s *bfa)
1201 | void | 1495 | void |
1202 | bfa_iocfc_stop(struct bfa_s *bfa) | 1496 | bfa_iocfc_stop(struct bfa_s *bfa) |
1203 | { | 1497 | { |
1204 | bfa->iocfc.action = BFA_IOCFC_ACT_STOP; | ||
1205 | |||
1206 | bfa->queue_process = BFA_FALSE; | 1498 | bfa->queue_process = BFA_FALSE; |
1207 | bfa_dconf_modexit(bfa); | 1499 | bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_STOP); |
1208 | if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE) | ||
1209 | bfa_ioc_disable(&bfa->ioc); | ||
1210 | } | 1500 | } |
1211 | 1501 | ||
1212 | void | 1502 | void |
@@ -1226,13 +1516,9 @@ bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
1226 | case BFI_IOCFC_I2H_UPDATEQ_RSP: | 1516 | case BFI_IOCFC_I2H_UPDATEQ_RSP: |
1227 | iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK); | 1517 | iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK); |
1228 | break; | 1518 | break; |
1229 | case BFI_IOCFC_I2H_FAA_ENABLE_RSP: | 1519 | case BFI_IOCFC_I2H_ADDR_MSG: |
1230 | bfa_faa_enable_reply(iocfc, | 1520 | bfa_iocfc_process_faa_addr(bfa, |
1231 | (struct bfi_faa_en_dis_rsp_s *)msg); | 1521 | (struct bfi_faa_addr_msg_s *)msg); |
1232 | break; | ||
1233 | case BFI_IOCFC_I2H_FAA_DISABLE_RSP: | ||
1234 | bfa_faa_disable_reply(iocfc, | ||
1235 | (struct bfi_faa_en_dis_rsp_s *)msg); | ||
1236 | break; | 1522 | break; |
1237 | case BFI_IOCFC_I2H_FAA_QUERY_RSP: | 1523 | case BFI_IOCFC_I2H_FAA_QUERY_RSP: |
1238 | bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg); | 1524 | bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg); |
@@ -1306,8 +1592,8 @@ bfa_iocfc_enable(struct bfa_s *bfa)
1306 | { | 1592 | { |
1307 | bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, | 1593 | bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, |
1308 | "IOC Enable"); | 1594 | "IOC Enable"); |
1309 | bfa->iocfc.action = BFA_IOCFC_ACT_ENABLE; | 1595 | bfa->iocfc.cb_reqd = BFA_TRUE; |
1310 | bfa_ioc_enable(&bfa->ioc); | 1596 | bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_ENABLE); |
1311 | } | 1597 | } |
1312 | 1598 | ||
1313 | void | 1599 | void |
@@ -1315,17 +1601,16 @@ bfa_iocfc_disable(struct bfa_s *bfa)
1315 | { | 1601 | { |
1316 | bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, | 1602 | bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, |
1317 | "IOC Disable"); | 1603 | "IOC Disable"); |
1318 | bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE; | ||
1319 | 1604 | ||
1320 | bfa->queue_process = BFA_FALSE; | 1605 | bfa->queue_process = BFA_FALSE; |
1321 | bfa_ioc_disable(&bfa->ioc); | 1606 | bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DISABLE); |
1322 | } | 1607 | } |
1323 | 1608 | ||
1324 | |||
1325 | bfa_boolean_t | 1609 | bfa_boolean_t |
1326 | bfa_iocfc_is_operational(struct bfa_s *bfa) | 1610 | bfa_iocfc_is_operational(struct bfa_s *bfa) |
1327 | { | 1611 | { |
1328 | return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone; | 1612 | return bfa_ioc_is_operational(&bfa->ioc) && |
1613 | bfa_fsm_cmp_state(&bfa->iocfc, bfa_iocfc_sm_operational); | ||
1329 | } | 1614 | } |
1330 | 1615 | ||
1331 | /* | 1616 | /* |
@@ -1567,16 +1852,6 @@ bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
1567 | } | 1852 | } |
1568 | } | 1853 | } |
1569 | 1854 | ||
1570 | void | ||
1571 | bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status) | ||
1572 | { | ||
1573 | if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) { | ||
1574 | if (bfa->iocfc.cfgdone == BFA_TRUE) | ||
1575 | bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, | ||
1576 | bfa_iocfc_init_cb, bfa); | ||
1577 | } | ||
1578 | } | ||
1579 | |||
1580 | /* | 1855 | /* |
1581 | * Return the list of PCI vendor/device id lists supported by this | 1856 | * Return the list of PCI vendor/device id lists supported by this |
1582 | * BFA instance. | 1857 | * BFA instance. |
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index cb07c628b2f1..36756ce0e58f 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -52,7 +52,7 @@ struct bfa_iocfc_fwcfg_s {
52 | u16 num_uf_bufs; /* unsolicited recv buffers */ | 52 | u16 num_uf_bufs; /* unsolicited recv buffers */ |
53 | u8 num_cqs; | 53 | u8 num_cqs; |
54 | u8 fw_tick_res; /* FW clock resolution in ms */ | 54 | u8 fw_tick_res; /* FW clock resolution in ms */ |
55 | u8 rsvd[2]; | 55 | u8 rsvd[6]; |
56 | }; | 56 | }; |
57 | #pragma pack() | 57 | #pragma pack() |
58 | 58 | ||
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index d4f951fe753e..5d2a1307e5ce 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -5717,6 +5717,8 @@ bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport)
5717 | 5717 | ||
5718 | if (vport_drv->comp_del) | 5718 | if (vport_drv->comp_del) |
5719 | complete(vport_drv->comp_del); | 5719 | complete(vport_drv->comp_del); |
5720 | else | ||
5721 | kfree(vport_drv); | ||
5720 | 5722 | ||
5721 | bfa_lps_delete(vport->lps); | 5723 | bfa_lps_delete(vport->lps); |
5722 | } | 5724 | } |
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index 52628d5d3c9b..fe0463a1db04 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -2169,7 +2169,10 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
2169 | * - MAX receive frame size | 2169 | * - MAX receive frame size |
2170 | */ | 2170 | */ |
2171 | rport->cisc = plogi->csp.cisc; | 2171 | rport->cisc = plogi->csp.cisc; |
2172 | rport->maxfrsize = be16_to_cpu(plogi->class3.rxsz); | 2172 | if (be16_to_cpu(plogi->class3.rxsz) < be16_to_cpu(plogi->csp.rxsz)) |
2173 | rport->maxfrsize = be16_to_cpu(plogi->class3.rxsz); | ||
2174 | else | ||
2175 | rport->maxfrsize = be16_to_cpu(plogi->csp.rxsz); | ||
2173 | 2176 | ||
2174 | bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred)); | 2177 | bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred)); |
2175 | bfa_trc(port->fcs, port->fabric->bb_credit); | 2178 | bfa_trc(port->fcs, port->fabric->bb_credit); |
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index eca7ab78085b..14e6284e48e4 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -88,7 +88,6 @@ static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
88 | static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc); | 88 | static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc); |
89 | static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc); | 89 | static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc); |
90 | static void bfa_ioc_recover(struct bfa_ioc_s *ioc); | 90 | static void bfa_ioc_recover(struct bfa_ioc_s *ioc); |
91 | static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc); | ||
92 | static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc , | 91 | static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc , |
93 | enum bfa_ioc_event_e event); | 92 | enum bfa_ioc_event_e event); |
94 | static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc); | 93 | static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc); |
@@ -97,7 +96,6 @@ static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
97 | static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc); | 96 | static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc); |
98 | static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc); | 97 | static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc); |
99 | 98 | ||
100 | |||
101 | /* | 99 | /* |
102 | * IOC state machine definitions/declarations | 100 | * IOC state machine definitions/declarations |
103 | */ | 101 | */ |
@@ -114,7 +112,6 @@ enum ioc_event {
114 | IOC_E_HWERROR = 10, /* hardware error interrupt */ | 112 | IOC_E_HWERROR = 10, /* hardware error interrupt */ |
115 | IOC_E_TIMEOUT = 11, /* timeout */ | 113 | IOC_E_TIMEOUT = 11, /* timeout */ |
116 | IOC_E_HWFAILED = 12, /* PCI mapping failure notice */ | 114 | IOC_E_HWFAILED = 12, /* PCI mapping failure notice */ |
117 | IOC_E_FWRSP_ACQ_ADDR = 13, /* Acquiring address */ | ||
118 | }; | 115 | }; |
119 | 116 | ||
120 | bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event); | 117 | bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event); |
@@ -127,7 +124,6 @@ bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
127 | bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event); | 124 | bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event); |
128 | bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event); | 125 | bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event); |
129 | bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event); | 126 | bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event); |
130 | bfa_fsm_state_decl(bfa_ioc, acq_addr, struct bfa_ioc_s, enum ioc_event); | ||
131 | 127 | ||
132 | static struct bfa_sm_table_s ioc_sm_table[] = { | 128 | static struct bfa_sm_table_s ioc_sm_table[] = { |
133 | {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT}, | 129 | {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT}, |
@@ -140,7 +136,6 @@ static struct bfa_sm_table_s ioc_sm_table[] = {
140 | {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING}, | 136 | {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING}, |
141 | {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, | 137 | {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, |
142 | {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL}, | 138 | {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL}, |
143 | {BFA_SM(bfa_ioc_sm_acq_addr), BFA_IOC_ACQ_ADDR}, | ||
144 | }; | 139 | }; |
145 | 140 | ||
146 | /* | 141 | /* |
@@ -371,17 +366,9 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
371 | switch (event) { | 366 | switch (event) { |
372 | case IOC_E_FWRSP_GETATTR: | 367 | case IOC_E_FWRSP_GETATTR: |
373 | bfa_ioc_timer_stop(ioc); | 368 | bfa_ioc_timer_stop(ioc); |
374 | bfa_ioc_check_attr_wwns(ioc); | ||
375 | bfa_ioc_hb_monitor(ioc); | ||
376 | bfa_fsm_set_state(ioc, bfa_ioc_sm_op); | 369 | bfa_fsm_set_state(ioc, bfa_ioc_sm_op); |
377 | break; | 370 | break; |
378 | 371 | ||
379 | case IOC_E_FWRSP_ACQ_ADDR: | ||
380 | bfa_ioc_timer_stop(ioc); | ||
381 | bfa_ioc_hb_monitor(ioc); | ||
382 | bfa_fsm_set_state(ioc, bfa_ioc_sm_acq_addr); | ||
383 | break; | ||
384 | |||
385 | case IOC_E_PFFAILED: | 372 | case IOC_E_PFFAILED: |
386 | case IOC_E_HWERROR: | 373 | case IOC_E_HWERROR: |
387 | bfa_ioc_timer_stop(ioc); | 374 | bfa_ioc_timer_stop(ioc); |
@@ -406,51 +393,6 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
406 | } | 393 | } |
407 | } | 394 | } |
408 | 395 | ||
409 | /* | ||
410 | * Acquiring address from fabric (entry function) | ||
411 | */ | ||
412 | static void | ||
413 | bfa_ioc_sm_acq_addr_entry(struct bfa_ioc_s *ioc) | ||
414 | { | ||
415 | } | ||
416 | |||
417 | /* | ||
418 | * Acquiring address from the fabric | ||
419 | */ | ||
420 | static void | ||
421 | bfa_ioc_sm_acq_addr(struct bfa_ioc_s *ioc, enum ioc_event event) | ||
422 | { | ||
423 | bfa_trc(ioc, event); | ||
424 | |||
425 | switch (event) { | ||
426 | case IOC_E_FWRSP_GETATTR: | ||
427 | bfa_ioc_check_attr_wwns(ioc); | ||
428 | bfa_fsm_set_state(ioc, bfa_ioc_sm_op); | ||
429 | break; | ||
430 | |||
431 | case IOC_E_PFFAILED: | ||
432 | case IOC_E_HWERROR: | ||
433 | bfa_hb_timer_stop(ioc); | ||
434 | case IOC_E_HBFAIL: | ||
435 | ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); | ||
436 | bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); | ||
437 | if (event != IOC_E_PFFAILED) | ||
438 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL); | ||
439 | break; | ||
440 | |||
441 | case IOC_E_DISABLE: | ||
442 | bfa_hb_timer_stop(ioc); | ||
443 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); | ||
444 | break; | ||
445 | |||
446 | case IOC_E_ENABLE: | ||
447 | break; | ||
448 | |||
449 | default: | ||
450 | bfa_sm_fault(ioc, event); | ||
451 | } | ||
452 | } | ||
453 | |||
454 | static void | 396 | static void |
455 | bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc) | 397 | bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc) |
456 | { | 398 | { |
@@ -458,6 +400,7 @@ bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
458 | 400 | ||
459 | ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); | 401 | ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); |
460 | bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED); | 402 | bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED); |
403 | bfa_ioc_hb_monitor(ioc); | ||
461 | BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n"); | 404 | BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n"); |
462 | bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE); | 405 | bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE); |
463 | } | 406 | } |
@@ -738,26 +681,60 @@ static void
738 | bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf) | 681 | bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf) |
739 | { | 682 | { |
740 | struct bfi_ioc_image_hdr_s fwhdr; | 683 | struct bfi_ioc_image_hdr_s fwhdr; |
741 | u32 fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate); | 684 | u32 r32, fwstate, pgnum, pgoff, loff = 0; |
685 | int i; | ||
686 | |||
687 | /* | ||
688 | * Spin on init semaphore to serialize. | ||
689 | */ | ||
690 | r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg); | ||
691 | while (r32 & 0x1) { | ||
692 | udelay(20); | ||
693 | r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg); | ||
694 | } | ||
742 | 695 | ||
743 | /* h/w sem init */ | 696 | /* h/w sem init */ |
744 | if (fwstate == BFI_IOC_UNINIT) | 697 | fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate); |
698 | if (fwstate == BFI_IOC_UNINIT) { | ||
699 | writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg); | ||
745 | goto sem_get; | 700 | goto sem_get; |
701 | } | ||
746 | 702 | ||
747 | bfa_ioc_fwver_get(iocpf->ioc, &fwhdr); | 703 | bfa_ioc_fwver_get(iocpf->ioc, &fwhdr); |
748 | 704 | ||
749 | if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) | 705 | if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) { |
706 | writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg); | ||
750 | goto sem_get; | 707 | goto sem_get; |
708 | } | ||
709 | |||
710 | /* | ||
711 | * Clear fwver hdr | ||
712 | */ | ||
713 | pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff); | ||
714 | pgoff = PSS_SMEM_PGOFF(loff); | ||
715 | writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn); | ||
716 | |||
717 | for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) { | ||
718 | bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0); | ||
719 | loff += sizeof(u32); | ||
720 | } | ||
751 | 721 | ||
752 | bfa_trc(iocpf->ioc, fwstate); | 722 | bfa_trc(iocpf->ioc, fwstate); |
753 | bfa_trc(iocpf->ioc, fwhdr.exec); | 723 | bfa_trc(iocpf->ioc, swab32(fwhdr.exec)); |
754 | writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate); | 724 | writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate); |
725 | writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.alt_ioc_fwstate); | ||
755 | 726 | ||
756 | /* | 727 | /* |
757 | * Try to lock and then unlock the semaphore. | 728 | * Unlock the hw semaphore. Should be here only once per boot. |
758 | */ | 729 | */ |
759 | readl(iocpf->ioc->ioc_regs.ioc_sem_reg); | 730 | readl(iocpf->ioc->ioc_regs.ioc_sem_reg); |
760 | writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg); | 731 | writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg); |
732 | |||
733 | /* | ||
734 | * unlock init semaphore. | ||
735 | */ | ||
736 | writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg); | ||
737 | |||
761 | sem_get: | 738 | sem_get: |
762 | bfa_ioc_hw_sem_get(iocpf->ioc); | 739 | bfa_ioc_hw_sem_get(iocpf->ioc); |
763 | } | 740 | } |
@@ -1707,11 +1684,6 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1707 | u32 i; | 1684 | u32 i; |
1708 | u32 asicmode; | 1685 | u32 asicmode; |
1709 | 1686 | ||
1710 | /* | ||
1711 | * Initialize LMEM first before code download | ||
1712 | */ | ||
1713 | bfa_ioc_lmem_init(ioc); | ||
1714 | |||
1715 | bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc))); | 1687 | bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc))); |
1716 | fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno); | 1688 | fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno); |
1717 | 1689 | ||
@@ -1999,6 +1971,12 @@ bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
1999 | bfa_ioc_pll_init_asic(ioc); | 1971 | bfa_ioc_pll_init_asic(ioc); |
2000 | 1972 | ||
2001 | ioc->pllinit = BFA_TRUE; | 1973 | ioc->pllinit = BFA_TRUE; |
1974 | |||
1975 | /* | ||
1976 | * Initialize LMEM | ||
1977 | */ | ||
1978 | bfa_ioc_lmem_init(ioc); | ||
1979 | |||
2002 | /* | 1980 | /* |
2003 | * release semaphore. | 1981 | * release semaphore. |
2004 | */ | 1982 | */ |
@@ -2122,10 +2100,6 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
2122 | bfa_ioc_getattr_reply(ioc); | 2100 | bfa_ioc_getattr_reply(ioc); |
2123 | break; | 2101 | break; |
2124 | 2102 | ||
2125 | case BFI_IOC_I2H_ACQ_ADDR_REPLY: | ||
2126 | bfa_fsm_send_event(ioc, IOC_E_FWRSP_ACQ_ADDR); | ||
2127 | break; | ||
2128 | |||
2129 | default: | 2103 | default: |
2130 | bfa_trc(ioc, msg->mh.msg_id); | 2104 | bfa_trc(ioc, msg->mh.msg_id); |
2131 | WARN_ON(1); | 2105 | WARN_ON(1); |
@@ -2416,15 +2390,6 @@ bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
2416 | } | 2390 | } |
2417 | 2391 | ||
2418 | /* | 2392 | /* |
2419 | * Return TRUE if IOC is in acquiring address state | ||
2420 | */ | ||
2421 | bfa_boolean_t | ||
2422 | bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc) | ||
2423 | { | ||
2424 | return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_acq_addr); | ||
2425 | } | ||
2426 | |||
2427 | /* | ||
2428 | * return true if IOC firmware is different. | 2393 | * return true if IOC firmware is different. |
2429 | */ | 2394 | */ |
2430 | bfa_boolean_t | 2395 | bfa_boolean_t |
@@ -2916,17 +2881,6 @@ bfa_ioc_recover(struct bfa_ioc_s *ioc)
2916 | bfa_fsm_send_event(ioc, IOC_E_HBFAIL); | 2881 | bfa_fsm_send_event(ioc, IOC_E_HBFAIL); |
2917 | } | 2882 | } |
2918 | 2883 | ||
2919 | static void | ||
2920 | bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc) | ||
2921 | { | ||
2922 | if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL) | ||
2923 | return; | ||
2924 | if (ioc->attr->nwwn == 0) | ||
2925 | bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN); | ||
2926 | if (ioc->attr->pwwn == 0) | ||
2927 | bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN); | ||
2928 | } | ||
2929 | |||
2930 | /* | 2884 | /* |
2931 | * BFA IOC PF private functions | 2885 | * BFA IOC PF private functions |
2932 | */ | 2886 | */ |
@@ -4495,7 +4449,7 @@ bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4495 | */ | 4449 | */ |
4496 | 4450 | ||
4497 | #define BFA_DIAG_MEMTEST_TOV 50000 /* memtest timeout in msec */ | 4451 | #define BFA_DIAG_MEMTEST_TOV 50000 /* memtest timeout in msec */ |
4498 | #define BFA_DIAG_FWPING_TOV 1000 /* msec */ | 4452 | #define CT2_BFA_DIAG_MEMTEST_TOV (9*30*1000) /* 4.5 min */ |
4499 | 4453 | ||
4500 | /* IOC event handler */ | 4454 | /* IOC event handler */ |
4501 | static void | 4455 | static void |
@@ -4772,7 +4726,7 @@ diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest) | |||
4772 | } | 4726 | } |
4773 | 4727 | ||
4774 | static void | 4728 | static void |
4775 | diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s * msg) | 4729 | diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg) |
4776 | { | 4730 | { |
4777 | bfa_trc(diag, diag->ledtest.lock); | 4731 | bfa_trc(diag, diag->ledtest.lock); |
4778 | diag->ledtest.lock = BFA_FALSE; | 4732 | diag->ledtest.lock = BFA_FALSE; |
@@ -4850,6 +4804,8 @@ bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest, | |||
4850 | u32 pattern, struct bfa_diag_memtest_result *result, | 4804 | u32 pattern, struct bfa_diag_memtest_result *result, |
4851 | bfa_cb_diag_t cbfn, void *cbarg) | 4805 | bfa_cb_diag_t cbfn, void *cbarg) |
4852 | { | 4806 | { |
4807 | u32 memtest_tov; | ||
4808 | |||
4853 | bfa_trc(diag, pattern); | 4809 | bfa_trc(diag, pattern); |
4854 | 4810 | ||
4855 | if (!bfa_ioc_adapter_is_disabled(diag->ioc)) | 4811 | if (!bfa_ioc_adapter_is_disabled(diag->ioc)) |
@@ -4869,8 +4825,10 @@ bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest, | |||
4869 | /* download memtest code and take LPU0 out of reset */ | 4825 | /* download memtest code and take LPU0 out of reset */ |
4870 | bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS); | 4826 | bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS); |
4871 | 4827 | ||
4828 | memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ? | ||
4829 | CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV; | ||
4872 | bfa_timer_begin(diag->ioc->timer_mod, &diag->timer, | 4830 | bfa_timer_begin(diag->ioc->timer_mod, &diag->timer, |
4873 | bfa_diag_memtest_done, diag, BFA_DIAG_MEMTEST_TOV); | 4831 | bfa_diag_memtest_done, diag, memtest_tov); |
4874 | diag->timer_active = 1; | 4832 | diag->timer_active = 1; |
4875 | return BFA_STATUS_OK; | 4833 | return BFA_STATUS_OK; |
4876 | } | 4834 | } |
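
A brief aside on the memtest hunk above: the timer value is now chosen at run time from the ASIC generation instead of a single constant, because CT2 parts need a much longer memtest budget. The following is a stand-alone illustrative sketch of that selection (names such as ASIC_GEN_CT2 are stand-ins for the driver's BFI_ASIC_GEN_* values, not driver code):

#include <stdio.h>

enum asic_gen { ASIC_GEN_CB = 1, ASIC_GEN_CT, ASIC_GEN_CT2 };

#define MEMTEST_TOV	50000		/* msec, older ASICs */
#define CT2_MEMTEST_TOV	(9 * 30 * 1000)	/* msec, roughly 4.5 minutes */

static unsigned int memtest_tov(enum asic_gen gen)
{
	/* pick the timeout from the chip generation, as the patch does */
	return (gen == ASIC_GEN_CT2) ? CT2_MEMTEST_TOV : MEMTEST_TOV;
}

int main(void)
{
	printf("CT : %u msec\n", memtest_tov(ASIC_GEN_CT));
	printf("CT2: %u msec\n", memtest_tov(ASIC_GEN_CT2));
	return 0;
}
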
@@ -5641,24 +5599,27 @@ bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event) | |||
5641 | case BFA_DCONF_SM_INIT: | 5599 | case BFA_DCONF_SM_INIT: |
5642 | if (dconf->min_cfg) { | 5600 | if (dconf->min_cfg) { |
5643 | bfa_trc(dconf->bfa, dconf->min_cfg); | 5601 | bfa_trc(dconf->bfa, dconf->min_cfg); |
5602 | bfa_fsm_send_event(&dconf->bfa->iocfc, | ||
5603 | IOCFC_E_DCONF_DONE); | ||
5644 | return; | 5604 | return; |
5645 | } | 5605 | } |
5646 | bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read); | 5606 | bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read); |
5647 | dconf->flashdone = BFA_FALSE; | 5607 | bfa_timer_start(dconf->bfa, &dconf->timer, |
5648 | bfa_trc(dconf->bfa, dconf->flashdone); | 5608 | bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV); |
5649 | bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa), | 5609 | bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa), |
5650 | BFA_FLASH_PART_DRV, dconf->instance, | 5610 | BFA_FLASH_PART_DRV, dconf->instance, |
5651 | dconf->dconf, | 5611 | dconf->dconf, |
5652 | sizeof(struct bfa_dconf_s), 0, | 5612 | sizeof(struct bfa_dconf_s), 0, |
5653 | bfa_dconf_init_cb, dconf->bfa); | 5613 | bfa_dconf_init_cb, dconf->bfa); |
5654 | if (bfa_status != BFA_STATUS_OK) { | 5614 | if (bfa_status != BFA_STATUS_OK) { |
5615 | bfa_timer_stop(&dconf->timer); | ||
5655 | bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED); | 5616 | bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED); |
5656 | bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); | 5617 | bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); |
5657 | return; | 5618 | return; |
5658 | } | 5619 | } |
5659 | break; | 5620 | break; |
5660 | case BFA_DCONF_SM_EXIT: | 5621 | case BFA_DCONF_SM_EXIT: |
5661 | dconf->flashdone = BFA_TRUE; | 5622 | bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); |
5662 | case BFA_DCONF_SM_IOCDISABLE: | 5623 | case BFA_DCONF_SM_IOCDISABLE: |
5663 | case BFA_DCONF_SM_WR: | 5624 | case BFA_DCONF_SM_WR: |
5664 | case BFA_DCONF_SM_FLASH_COMP: | 5625 | case BFA_DCONF_SM_FLASH_COMP: |
@@ -5679,15 +5640,20 @@ bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf, | |||
5679 | 5640 | ||
5680 | switch (event) { | 5641 | switch (event) { |
5681 | case BFA_DCONF_SM_FLASH_COMP: | 5642 | case BFA_DCONF_SM_FLASH_COMP: |
5643 | bfa_timer_stop(&dconf->timer); | ||
5682 | bfa_sm_set_state(dconf, bfa_dconf_sm_ready); | 5644 | bfa_sm_set_state(dconf, bfa_dconf_sm_ready); |
5683 | break; | 5645 | break; |
5684 | case BFA_DCONF_SM_TIMEOUT: | 5646 | case BFA_DCONF_SM_TIMEOUT: |
5685 | bfa_sm_set_state(dconf, bfa_dconf_sm_ready); | 5647 | bfa_sm_set_state(dconf, bfa_dconf_sm_ready); |
5648 | bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_IOC_FAILED); | ||
5686 | break; | 5649 | break; |
5687 | case BFA_DCONF_SM_EXIT: | 5650 | case BFA_DCONF_SM_EXIT: |
5688 | dconf->flashdone = BFA_TRUE; | 5651 | bfa_timer_stop(&dconf->timer); |
5689 | bfa_trc(dconf->bfa, dconf->flashdone); | 5652 | bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); |
5653 | bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); | ||
5654 | break; | ||
5690 | case BFA_DCONF_SM_IOCDISABLE: | 5655 | case BFA_DCONF_SM_IOCDISABLE: |
5656 | bfa_timer_stop(&dconf->timer); | ||
5691 | bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); | 5657 | bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); |
5692 | break; | 5658 | break; |
5693 | default: | 5659 | default: |
@@ -5710,9 +5676,8 @@ bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event) | |||
5710 | bfa_sm_set_state(dconf, bfa_dconf_sm_dirty); | 5676 | bfa_sm_set_state(dconf, bfa_dconf_sm_dirty); |
5711 | break; | 5677 | break; |
5712 | case BFA_DCONF_SM_EXIT: | 5678 | case BFA_DCONF_SM_EXIT: |
5713 | dconf->flashdone = BFA_TRUE; | ||
5714 | bfa_trc(dconf->bfa, dconf->flashdone); | ||
5715 | bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); | 5679 | bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); |
5680 | bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); | ||
5716 | break; | 5681 | break; |
5717 | case BFA_DCONF_SM_INIT: | 5682 | case BFA_DCONF_SM_INIT: |
5718 | case BFA_DCONF_SM_IOCDISABLE: | 5683 | case BFA_DCONF_SM_IOCDISABLE: |
@@ -5774,9 +5739,7 @@ bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf, | |||
5774 | bfa_timer_stop(&dconf->timer); | 5739 | bfa_timer_stop(&dconf->timer); |
5775 | case BFA_DCONF_SM_TIMEOUT: | 5740 | case BFA_DCONF_SM_TIMEOUT: |
5776 | bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); | 5741 | bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); |
5777 | dconf->flashdone = BFA_TRUE; | 5742 | bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); |
5778 | bfa_trc(dconf->bfa, dconf->flashdone); | ||
5779 | bfa_ioc_disable(&dconf->bfa->ioc); | ||
5780 | break; | 5743 | break; |
5781 | default: | 5744 | default: |
5782 | bfa_sm_fault(dconf->bfa, event); | 5745 | bfa_sm_fault(dconf->bfa, event); |
@@ -5823,8 +5786,8 @@ bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf, | |||
5823 | bfa_sm_set_state(dconf, bfa_dconf_sm_dirty); | 5786 | bfa_sm_set_state(dconf, bfa_dconf_sm_dirty); |
5824 | break; | 5787 | break; |
5825 | case BFA_DCONF_SM_EXIT: | 5788 | case BFA_DCONF_SM_EXIT: |
5826 | dconf->flashdone = BFA_TRUE; | ||
5827 | bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); | 5789 | bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); |
5790 | bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); | ||
5828 | break; | 5791 | break; |
5829 | case BFA_DCONF_SM_IOCDISABLE: | 5792 | case BFA_DCONF_SM_IOCDISABLE: |
5830 | break; | 5793 | break; |
@@ -5865,11 +5828,6 @@ bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, | |||
5865 | if (cfg->drvcfg.min_cfg) { | 5828 | if (cfg->drvcfg.min_cfg) { |
5866 | bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s); | 5829 | bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s); |
5867 | dconf->min_cfg = BFA_TRUE; | 5830 | dconf->min_cfg = BFA_TRUE; |
5868 | /* | ||
5869 | * Set the flashdone flag to TRUE explicitly as no flash | ||
5870 | * write will happen in min_cfg mode. | ||
5871 | */ | ||
5872 | dconf->flashdone = BFA_TRUE; | ||
5873 | } else { | 5831 | } else { |
5874 | dconf->min_cfg = BFA_FALSE; | 5832 | dconf->min_cfg = BFA_FALSE; |
5875 | bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s); | 5833 | bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s); |
@@ -5885,9 +5843,7 @@ bfa_dconf_init_cb(void *arg, bfa_status_t status) | |||
5885 | struct bfa_s *bfa = arg; | 5843 | struct bfa_s *bfa = arg; |
5886 | struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); | 5844 | struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); |
5887 | 5845 | ||
5888 | dconf->flashdone = BFA_TRUE; | 5846 | bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP); |
5889 | bfa_trc(bfa, dconf->flashdone); | ||
5890 | bfa_iocfc_cb_dconf_modinit(bfa, status); | ||
5891 | if (status == BFA_STATUS_OK) { | 5847 | if (status == BFA_STATUS_OK) { |
5892 | bfa_dconf_read_data_valid(bfa) = BFA_TRUE; | 5848 | bfa_dconf_read_data_valid(bfa) = BFA_TRUE; |
5893 | if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE) | 5849 | if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE) |
@@ -5895,7 +5851,7 @@ bfa_dconf_init_cb(void *arg, bfa_status_t status) | |||
5895 | if (dconf->dconf->hdr.version != BFI_DCONF_VERSION) | 5851 | if (dconf->dconf->hdr.version != BFI_DCONF_VERSION) |
5896 | dconf->dconf->hdr.version = BFI_DCONF_VERSION; | 5852 | dconf->dconf->hdr.version = BFI_DCONF_VERSION; |
5897 | } | 5853 | } |
5898 | bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP); | 5854 | bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE); |
5899 | } | 5855 | } |
5900 | 5856 | ||
5901 | void | 5857 | void |
@@ -5977,7 +5933,5 @@ void | |||
5977 | bfa_dconf_modexit(struct bfa_s *bfa) | 5933 | bfa_dconf_modexit(struct bfa_s *bfa) |
5978 | { | 5934 | { |
5979 | struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); | 5935 | struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); |
5980 | BFA_DCONF_MOD(bfa)->flashdone = BFA_FALSE; | ||
5981 | bfa_trc(bfa, BFA_DCONF_MOD(bfa)->flashdone); | ||
5982 | bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT); | 5936 | bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT); |
5983 | } | 5937 | } |
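
The bfa_ioc.c changes above drop the shared dconf "flashdone" boolean and instead have every dconf completion/exit path post an explicit IOCFC_E_DCONF_DONE event to the iocfc state machine. Below is a minimal stand-alone sketch of that notification pattern, assuming a simplified single-handler FSM reduced to a function pointer; it is not the driver's plumbing.

#include <stdio.h>

enum iocfc_event { IOCFC_E_DCONF_DONE = 9 };

struct iocfc {
	void (*fsm)(struct iocfc *iocfc, enum iocfc_event event);
};

static void iocfc_sm_stopping(struct iocfc *iocfc, enum iocfc_event event)
{
	if (event == IOCFC_E_DCONF_DONE)
		printf("iocfc: dconf flush done, completing stop\n");
}

/* dconf exit path: post an event instead of flipping a shared flag */
static void dconf_sm_exit(struct iocfc *iocfc)
{
	iocfc->fsm(iocfc, IOCFC_E_DCONF_DONE);
}

int main(void)
{
	struct iocfc iocfc = { .fsm = iocfc_sm_stopping };

	dconf_sm_exit(&iocfc);
	return 0;
}
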
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h index 546d46b37101..1a99d4b5b50f 100644 --- a/drivers/scsi/bfa/bfa_ioc.h +++ b/drivers/scsi/bfa/bfa_ioc.h | |||
@@ -373,6 +373,22 @@ struct bfa_cb_qe_s { | |||
373 | }; | 373 | }; |
374 | 374 | ||
375 | /* | 375 | /* |
376 | * IOCFC state machine definitions/declarations | ||
377 | */ | ||
378 | enum iocfc_event { | ||
379 | IOCFC_E_INIT = 1, /* IOCFC init request */ | ||
380 | IOCFC_E_START = 2, /* IOCFC mod start request */ | ||
381 | IOCFC_E_STOP = 3, /* IOCFC stop request */ | ||
382 | IOCFC_E_ENABLE = 4, /* IOCFC enable request */ | ||
383 | IOCFC_E_DISABLE = 5, /* IOCFC disable request */ | ||
384 | IOCFC_E_IOC_ENABLED = 6, /* IOC enabled message */ | ||
385 | IOCFC_E_IOC_DISABLED = 7, /* IOC disabled message */ | ||
386 | IOCFC_E_IOC_FAILED = 8, /* failure notice by IOC sm */ | ||
387 | IOCFC_E_DCONF_DONE = 9, /* dconf read/write done */ | ||
388 | IOCFC_E_CFG_DONE = 10, /* IOCFC config complete */ | ||
389 | }; | ||
390 | |||
391 | /* | ||
376 | * ASIC block configurtion related | 392 | * ASIC block configurtion related |
377 | */ | 393 | */ |
378 | 394 | ||
@@ -706,7 +722,6 @@ struct bfa_dconf_s { | |||
706 | struct bfa_dconf_mod_s { | 722 | struct bfa_dconf_mod_s { |
707 | bfa_sm_t sm; | 723 | bfa_sm_t sm; |
708 | u8 instance; | 724 | u8 instance; |
709 | bfa_boolean_t flashdone; | ||
710 | bfa_boolean_t read_data_valid; | 725 | bfa_boolean_t read_data_valid; |
711 | bfa_boolean_t min_cfg; | 726 | bfa_boolean_t min_cfg; |
712 | struct bfa_timer_s timer; | 727 | struct bfa_timer_s timer; |

diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c index d1b8f0caaa79..2eb0c6a2938d 100644 --- a/drivers/scsi/bfa/bfa_ioc_ct.c +++ b/drivers/scsi/bfa/bfa_ioc_ct.c | |||
@@ -786,17 +786,73 @@ bfa_ioc_ct2_mac_reset(void __iomem *rb) | |||
786 | } | 786 | } |
787 | 787 | ||
788 | #define CT2_NFC_MAX_DELAY 1000 | 788 | #define CT2_NFC_MAX_DELAY 1000 |
789 | #define CT2_NFC_VER_VALID 0x143 | ||
790 | #define BFA_IOC_PLL_POLL 1000000 | ||
791 | |||
792 | static bfa_boolean_t | ||
793 | bfa_ioc_ct2_nfc_halted(void __iomem *rb) | ||
794 | { | ||
795 | u32 r32; | ||
796 | |||
797 | r32 = readl(rb + CT2_NFC_CSR_SET_REG); | ||
798 | if (r32 & __NFC_CONTROLLER_HALTED) | ||
799 | return BFA_TRUE; | ||
800 | |||
801 | return BFA_FALSE; | ||
802 | } | ||
803 | |||
804 | static void | ||
805 | bfa_ioc_ct2_nfc_resume(void __iomem *rb) | ||
806 | { | ||
807 | u32 r32; | ||
808 | int i; | ||
809 | |||
810 | writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG); | ||
811 | for (i = 0; i < CT2_NFC_MAX_DELAY; i++) { | ||
812 | r32 = readl(rb + CT2_NFC_CSR_SET_REG); | ||
813 | if (!(r32 & __NFC_CONTROLLER_HALTED)) | ||
814 | return; | ||
815 | udelay(1000); | ||
816 | } | ||
817 | WARN_ON(1); | ||
818 | } | ||
819 | |||
789 | bfa_status_t | 820 | bfa_status_t |
790 | bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode) | 821 | bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode) |
791 | { | 822 | { |
792 | u32 wgn, r32; | 823 | u32 wgn, r32, nfc_ver, i; |
793 | int i; | ||
794 | 824 | ||
795 | /* | ||
796 | * Initialize PLL if not already done by NFC | ||
797 | */ | ||
798 | wgn = readl(rb + CT2_WGN_STATUS); | 825 | wgn = readl(rb + CT2_WGN_STATUS); |
799 | if (!(wgn & __GLBL_PF_VF_CFG_RDY)) { | 826 | nfc_ver = readl(rb + CT2_RSC_GPR15_REG); |
827 | |||
828 | if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) && | ||
829 | (nfc_ver >= CT2_NFC_VER_VALID)) { | ||
830 | if (bfa_ioc_ct2_nfc_halted(rb)) | ||
831 | bfa_ioc_ct2_nfc_resume(rb); | ||
832 | |||
833 | writel(__RESET_AND_START_SCLK_LCLK_PLLS, | ||
834 | rb + CT2_CSI_FW_CTL_SET_REG); | ||
835 | |||
836 | for (i = 0; i < BFA_IOC_PLL_POLL; i++) { | ||
837 | r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); | ||
838 | if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS) | ||
839 | break; | ||
840 | } | ||
841 | |||
842 | WARN_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS)); | ||
843 | |||
844 | for (i = 0; i < BFA_IOC_PLL_POLL; i++) { | ||
845 | r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); | ||
846 | if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS)) | ||
847 | break; | ||
848 | } | ||
849 | |||
850 | WARN_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS); | ||
851 | udelay(1000); | ||
852 | |||
853 | r32 = readl(rb + CT2_CSI_FW_CTL_REG); | ||
854 | WARN_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS); | ||
855 | } else { | ||
800 | writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG); | 856 | writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG); |
801 | for (i = 0; i < CT2_NFC_MAX_DELAY; i++) { | 857 | for (i = 0; i < CT2_NFC_MAX_DELAY; i++) { |
802 | r32 = readl(rb + CT2_NFC_CSR_SET_REG); | 858 | r32 = readl(rb + CT2_NFC_CSR_SET_REG); |
@@ -804,57 +860,62 @@ bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode) | |||
804 | break; | 860 | break; |
805 | udelay(1000); | 861 | udelay(1000); |
806 | } | 862 | } |
807 | } | ||
808 | 863 | ||
809 | /* | 864 | bfa_ioc_ct2_mac_reset(rb); |
810 | * Mask the interrupts and clear any | 865 | bfa_ioc_ct2_sclk_init(rb); |
811 | * pending interrupts. | 866 | bfa_ioc_ct2_lclk_init(rb); |
812 | */ | 867 | |
813 | writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK)); | 868 | /* |
814 | writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK)); | 869 | * release soft reset on s_clk & l_clk |
815 | 870 | */ | |
816 | r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); | 871 | r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG); |
817 | if (r32 == 1) { | 872 | writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET, |
818 | writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT)); | 873 | (rb + CT2_APP_PLL_SCLK_CTL_REG)); |
819 | readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); | 874 | |
875 | /* | ||
876 | * release soft reset on s_clk & l_clk | ||
877 | */ | ||
878 | r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); | ||
879 | writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET, | ||
880 | (rb + CT2_APP_PLL_LCLK_CTL_REG)); | ||
820 | } | 881 | } |
821 | r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); | ||
822 | if (r32 == 1) { | ||
823 | writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT)); | ||
824 | readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); | ||
825 | } | ||
826 | |||
827 | bfa_ioc_ct2_mac_reset(rb); | ||
828 | bfa_ioc_ct2_sclk_init(rb); | ||
829 | bfa_ioc_ct2_lclk_init(rb); | ||
830 | |||
831 | /* | ||
832 | * release soft reset on s_clk & l_clk | ||
833 | */ | ||
834 | r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); | ||
835 | writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET, | ||
836 | (rb + CT2_APP_PLL_SCLK_CTL_REG)); | ||
837 | |||
838 | /* | ||
839 | * release soft reset on s_clk & l_clk | ||
840 | */ | ||
841 | r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); | ||
842 | writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET, | ||
843 | (rb + CT2_APP_PLL_LCLK_CTL_REG)); | ||
844 | 882 | ||
845 | /* | 883 | /* |
846 | * Announce flash device presence, if flash was corrupted. | 884 | * Announce flash device presence, if flash was corrupted. |
847 | */ | 885 | */ |
848 | if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) { | 886 | if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) { |
849 | r32 = readl((rb + PSS_GPIO_OUT_REG)); | 887 | r32 = readl(rb + PSS_GPIO_OUT_REG); |
850 | writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG)); | 888 | writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG)); |
851 | r32 = readl((rb + PSS_GPIO_OE_REG)); | 889 | r32 = readl(rb + PSS_GPIO_OE_REG); |
852 | writel(r32 | 1, (rb + PSS_GPIO_OE_REG)); | 890 | writel(r32 | 1, (rb + PSS_GPIO_OE_REG)); |
853 | } | 891 | } |
854 | 892 | ||
893 | /* | ||
894 | * Mask the interrupts and clear any | ||
895 | * pending interrupts. | ||
896 | */ | ||
897 | writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK)); | ||
898 | writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK)); | ||
899 | |||
900 | /* For first time initialization, no need to clear interrupts */ | ||
901 | r32 = readl(rb + HOST_SEM5_REG); | ||
902 | if (r32 & 0x1) { | ||
903 | r32 = readl(rb + CT2_LPU0_HOSTFN_CMD_STAT); | ||
904 | if (r32 == 1) { | ||
905 | writel(1, rb + CT2_LPU0_HOSTFN_CMD_STAT); | ||
906 | readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); | ||
907 | } | ||
908 | r32 = readl(rb + CT2_LPU1_HOSTFN_CMD_STAT); | ||
909 | if (r32 == 1) { | ||
910 | writel(1, rb + CT2_LPU1_HOSTFN_CMD_STAT); | ||
911 | readl(rb + CT2_LPU1_HOSTFN_CMD_STAT); | ||
912 | } | ||
913 | } | ||
914 | |||
855 | bfa_ioc_ct2_mem_init(rb); | 915 | bfa_ioc_ct2_mem_init(rb); |
856 | 916 | ||
857 | writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG)); | 917 | writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC0_STATE_REG); |
858 | writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG)); | 918 | writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC1_STATE_REG); |
919 | |||
859 | return BFA_STATUS_OK; | 920 | return BFA_STATUS_OK; |
860 | } | 921 | } |
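
The CT2 PLL bring-up above repeatedly reads CT2_APP_PLL_LCLK_CTL_REG until __RESET_AND_START_SCLK_LCLK_PLLS changes state, bounded by BFA_IOC_PLL_POLL iterations. Here is a hedged, stand-alone sketch of that bounded-poll idiom; reg_read() stands in for readl(), the bit name is illustrative, and no real hardware is touched.

#include <stdio.h>
#include <stdint.h>

#define POLL_BUDGET	1000		/* the driver uses 1000000 with udelay() */
#define PLL_READY	0x00010000u	/* stand-in for the PLL control bit */

static uint32_t fake_reg;		/* stands in for a memory-mapped register */

static uint32_t reg_read(void)		/* stands in for readl() */
{
	return fake_reg;
}

static int wait_for_bit_set(uint32_t mask)
{
	int i;

	for (i = 0; i < POLL_BUDGET; i++) {
		if (reg_read() & mask)
			return 0;	/* observed within the budget */
		/* the driver delays between reads here */
	}
	return -1;			/* budget exhausted: warn or fail */
}

int main(void)
{
	fake_reg = PLL_READY;
	printf("poll result: %d\n", wait_for_bit_set(PLL_READY));
	return 0;
}
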
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c index aa8a0eaf91f9..2e856e6710f7 100644 --- a/drivers/scsi/bfa/bfa_svc.c +++ b/drivers/scsi/bfa/bfa_svc.c | |||
@@ -1280,6 +1280,7 @@ bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event) | |||
1280 | switch (event) { | 1280 | switch (event) { |
1281 | case BFA_LPS_SM_RESUME: | 1281 | case BFA_LPS_SM_RESUME: |
1282 | bfa_sm_set_state(lps, bfa_lps_sm_login); | 1282 | bfa_sm_set_state(lps, bfa_lps_sm_login); |
1283 | bfa_lps_send_login(lps); | ||
1283 | break; | 1284 | break; |
1284 | 1285 | ||
1285 | case BFA_LPS_SM_OFFLINE: | 1286 | case BFA_LPS_SM_OFFLINE: |
@@ -1578,7 +1579,7 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp) | |||
1578 | break; | 1579 | break; |
1579 | 1580 | ||
1580 | case BFA_STATUS_VPORT_MAX: | 1581 | case BFA_STATUS_VPORT_MAX: |
1581 | if (!rsp->ext_status) | 1582 | if (rsp->ext_status) |
1582 | bfa_lps_no_res(lps, rsp->ext_status); | 1583 | bfa_lps_no_res(lps, rsp->ext_status); |
1583 | break; | 1584 | break; |
1584 | 1585 | ||
@@ -3084,33 +3085,6 @@ bfa_fcport_set_wwns(struct bfa_fcport_s *fcport) | |||
3084 | } | 3085 | } |
3085 | 3086 | ||
3086 | static void | 3087 | static void |
3087 | bfa_fcport_send_txcredit(void *port_cbarg) | ||
3088 | { | ||
3089 | |||
3090 | struct bfa_fcport_s *fcport = port_cbarg; | ||
3091 | struct bfi_fcport_set_svc_params_req_s *m; | ||
3092 | |||
3093 | /* | ||
3094 | * check for room in queue to send request now | ||
3095 | */ | ||
3096 | m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); | ||
3097 | if (!m) { | ||
3098 | bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit); | ||
3099 | return; | ||
3100 | } | ||
3101 | |||
3102 | bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ, | ||
3103 | bfa_fn_lpu(fcport->bfa)); | ||
3104 | m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit); | ||
3105 | m->bb_scn = fcport->cfg.bb_scn; | ||
3106 | |||
3107 | /* | ||
3108 | * queue I/O message to firmware | ||
3109 | */ | ||
3110 | bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh); | ||
3111 | } | ||
3112 | |||
3113 | static void | ||
3114 | bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d, | 3088 | bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d, |
3115 | struct bfa_qos_stats_s *s) | 3089 | struct bfa_qos_stats_s *s) |
3116 | { | 3090 | { |
@@ -3602,26 +3576,24 @@ bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed) | |||
3602 | return BFA_STATUS_UNSUPP_SPEED; | 3576 | return BFA_STATUS_UNSUPP_SPEED; |
3603 | } | 3577 | } |
3604 | 3578 | ||
3605 | /* For Mezz card, port speed entered needs to be checked */ | 3579 | /* Port speed entered needs to be checked */ |
3606 | if (bfa_mfg_is_mezz(fcport->bfa->ioc.attr->card_type)) { | 3580 | if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) { |
3607 | if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) { | 3581 | /* For CT2, 1G is not supported */ |
3608 | /* For CT2, 1G is not supported */ | 3582 | if ((speed == BFA_PORT_SPEED_1GBPS) && |
3609 | if ((speed == BFA_PORT_SPEED_1GBPS) && | 3583 | (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) |
3610 | (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) | 3584 | return BFA_STATUS_UNSUPP_SPEED; |
3611 | return BFA_STATUS_UNSUPP_SPEED; | ||
3612 | 3585 | ||
3613 | /* Already checked for Auto Speed and Max Speed supp */ | 3586 | /* Already checked for Auto Speed and Max Speed supp */ |
3614 | if (!(speed == BFA_PORT_SPEED_1GBPS || | 3587 | if (!(speed == BFA_PORT_SPEED_1GBPS || |
3615 | speed == BFA_PORT_SPEED_2GBPS || | 3588 | speed == BFA_PORT_SPEED_2GBPS || |
3616 | speed == BFA_PORT_SPEED_4GBPS || | 3589 | speed == BFA_PORT_SPEED_4GBPS || |
3617 | speed == BFA_PORT_SPEED_8GBPS || | 3590 | speed == BFA_PORT_SPEED_8GBPS || |
3618 | speed == BFA_PORT_SPEED_16GBPS || | 3591 | speed == BFA_PORT_SPEED_16GBPS || |
3619 | speed == BFA_PORT_SPEED_AUTO)) | 3592 | speed == BFA_PORT_SPEED_AUTO)) |
3620 | return BFA_STATUS_UNSUPP_SPEED; | 3593 | return BFA_STATUS_UNSUPP_SPEED; |
3621 | } else { | 3594 | } else { |
3622 | if (speed != BFA_PORT_SPEED_10GBPS) | 3595 | if (speed != BFA_PORT_SPEED_10GBPS) |
3623 | return BFA_STATUS_UNSUPP_SPEED; | 3596 | return BFA_STATUS_UNSUPP_SPEED; |
3624 | } | ||
3625 | } | 3597 | } |
3626 | 3598 | ||
3627 | fcport->cfg.speed = speed; | 3599 | fcport->cfg.speed = speed; |
@@ -3765,7 +3737,6 @@ bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn) | |||
3765 | fcport->cfg.bb_scn = bb_scn; | 3737 | fcport->cfg.bb_scn = bb_scn; |
3766 | if (bb_scn) | 3738 | if (bb_scn) |
3767 | fcport->bbsc_op_state = BFA_TRUE; | 3739 | fcport->bbsc_op_state = BFA_TRUE; |
3768 | bfa_fcport_send_txcredit(fcport); | ||
3769 | } | 3740 | } |
3770 | 3741 | ||
3771 | /* | 3742 | /* |
@@ -3825,8 +3796,6 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr) | |||
3825 | attr->port_state = BFA_PORT_ST_IOCDIS; | 3796 | attr->port_state = BFA_PORT_ST_IOCDIS; |
3826 | else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc)) | 3797 | else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc)) |
3827 | attr->port_state = BFA_PORT_ST_FWMISMATCH; | 3798 | attr->port_state = BFA_PORT_ST_FWMISMATCH; |
3828 | else if (bfa_ioc_is_acq_addr(&fcport->bfa->ioc)) | ||
3829 | attr->port_state = BFA_PORT_ST_ACQ_ADDR; | ||
3830 | } | 3799 | } |
3831 | 3800 | ||
3832 | /* FCoE vlan */ | 3801 | /* FCoE vlan */ |
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h index b52cbb6bcd5a..f30067564639 100644 --- a/drivers/scsi/bfa/bfa_svc.h +++ b/drivers/scsi/bfa/bfa_svc.h | |||
@@ -663,10 +663,6 @@ void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg); | |||
663 | void bfa_cb_lps_cvl_event(void *bfad, void *uarg); | 663 | void bfa_cb_lps_cvl_event(void *bfad, void *uarg); |
664 | 664 | ||
665 | /* FAA specific APIs */ | 665 | /* FAA specific APIs */ |
666 | bfa_status_t bfa_faa_enable(struct bfa_s *bfa, | ||
667 | bfa_cb_iocfc_t cbfn, void *cbarg); | ||
668 | bfa_status_t bfa_faa_disable(struct bfa_s *bfa, | ||
669 | bfa_cb_iocfc_t cbfn, void *cbarg); | ||
670 | bfa_status_t bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr, | 666 | bfa_status_t bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr, |
671 | bfa_cb_iocfc_t cbfn, void *cbarg); | 667 | bfa_cb_iocfc_t cbfn, void *cbarg); |
672 | 668 | ||
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c index 1938fe0473e9..7b1ecd2b3ffe 100644 --- a/drivers/scsi/bfa/bfad_attr.c +++ b/drivers/scsi/bfa/bfad_attr.c | |||
@@ -442,6 +442,43 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable) | |||
442 | return status; | 442 | return status; |
443 | } | 443 | } |
444 | 444 | ||
445 | int | ||
446 | bfad_im_issue_fc_host_lip(struct Scsi_Host *shost) | ||
447 | { | ||
448 | struct bfad_im_port_s *im_port = | ||
449 | (struct bfad_im_port_s *) shost->hostdata[0]; | ||
450 | struct bfad_s *bfad = im_port->bfad; | ||
451 | struct bfad_hal_comp fcomp; | ||
452 | unsigned long flags; | ||
453 | uint32_t status; | ||
454 | |||
455 | init_completion(&fcomp.comp); | ||
456 | spin_lock_irqsave(&bfad->bfad_lock, flags); | ||
457 | status = bfa_port_disable(&bfad->bfa.modules.port, | ||
458 | bfad_hcb_comp, &fcomp); | ||
459 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | ||
460 | |||
461 | if (status != BFA_STATUS_OK) | ||
462 | return -EIO; | ||
463 | |||
464 | wait_for_completion(&fcomp.comp); | ||
465 | if (fcomp.status != BFA_STATUS_OK) | ||
466 | return -EIO; | ||
467 | |||
468 | spin_lock_irqsave(&bfad->bfad_lock, flags); | ||
469 | status = bfa_port_enable(&bfad->bfa.modules.port, | ||
470 | bfad_hcb_comp, &fcomp); | ||
471 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | ||
472 | if (status != BFA_STATUS_OK) | ||
473 | return -EIO; | ||
474 | |||
475 | wait_for_completion(&fcomp.comp); | ||
476 | if (fcomp.status != BFA_STATUS_OK) | ||
477 | return -EIO; | ||
478 | |||
479 | return 0; | ||
480 | } | ||
481 | |||
445 | static int | 482 | static int |
446 | bfad_im_vport_delete(struct fc_vport *fc_vport) | 483 | bfad_im_vport_delete(struct fc_vport *fc_vport) |
447 | { | 484 | { |
@@ -457,8 +494,11 @@ bfad_im_vport_delete(struct fc_vport *fc_vport) | |||
457 | unsigned long flags; | 494 | unsigned long flags; |
458 | struct completion fcomp; | 495 | struct completion fcomp; |
459 | 496 | ||
460 | if (im_port->flags & BFAD_PORT_DELETE) | 497 | if (im_port->flags & BFAD_PORT_DELETE) { |
461 | goto free_scsi_host; | 498 | bfad_scsi_host_free(bfad, im_port); |
499 | list_del(&vport->list_entry); | ||
500 | return 0; | ||
501 | } | ||
462 | 502 | ||
463 | port = im_port->port; | 503 | port = im_port->port; |
464 | 504 | ||
@@ -489,7 +529,6 @@ bfad_im_vport_delete(struct fc_vport *fc_vport) | |||
489 | 529 | ||
490 | wait_for_completion(vport->comp_del); | 530 | wait_for_completion(vport->comp_del); |
491 | 531 | ||
492 | free_scsi_host: | ||
493 | bfad_scsi_host_free(bfad, im_port); | 532 | bfad_scsi_host_free(bfad, im_port); |
494 | list_del(&vport->list_entry); | 533 | list_del(&vport->list_entry); |
495 | kfree(vport); | 534 | kfree(vport); |
@@ -579,7 +618,7 @@ struct fc_function_template bfad_im_fc_function_template = { | |||
579 | .show_rport_dev_loss_tmo = 1, | 618 | .show_rport_dev_loss_tmo = 1, |
580 | .get_rport_dev_loss_tmo = bfad_im_get_rport_loss_tmo, | 619 | .get_rport_dev_loss_tmo = bfad_im_get_rport_loss_tmo, |
581 | .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo, | 620 | .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo, |
582 | 621 | .issue_fc_host_lip = bfad_im_issue_fc_host_lip, | |
583 | .vport_create = bfad_im_vport_create, | 622 | .vport_create = bfad_im_vport_create, |
584 | .vport_delete = bfad_im_vport_delete, | 623 | .vport_delete = bfad_im_vport_delete, |
585 | .vport_disable = bfad_im_vport_disable, | 624 | .vport_disable = bfad_im_vport_disable, |
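
The new bfad_im_issue_fc_host_lip() above wires the fc_host "issue LIP" hook to a port disable followed by a port enable, waiting on a completion for each firmware request and mapping any failure to -EIO. A compressed, stand-alone sketch of that control flow follows; the asynchronous request/completion machinery is collapsed into plain calls and the helpers are hypothetical.

#include <stdio.h>
#include <errno.h>

/* hypothetical stand-ins for "issue async request, then wait for it" */
static int port_disable(void) { return 0; }
static int port_enable(void)  { return 0; }

static int issue_lip(void)
{
	if (port_disable())
		return -EIO;		/* request or completion failed */
	if (port_enable())
		return -EIO;
	return 0;			/* link bounced successfully */
}

int main(void)
{
	printf("issue_lip: %d\n", issue_lip());
	return 0;
}
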
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c index 8005c6c5a080..e1f4b10df42a 100644 --- a/drivers/scsi/bfa/bfad_bsg.c +++ b/drivers/scsi/bfa/bfad_bsg.c | |||
@@ -1288,50 +1288,6 @@ out: | |||
1288 | } | 1288 | } |
1289 | 1289 | ||
1290 | int | 1290 | int |
1291 | bfad_iocmd_faa_enable(struct bfad_s *bfad, void *cmd) | ||
1292 | { | ||
1293 | struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; | ||
1294 | unsigned long flags; | ||
1295 | struct bfad_hal_comp fcomp; | ||
1296 | |||
1297 | init_completion(&fcomp.comp); | ||
1298 | iocmd->status = BFA_STATUS_OK; | ||
1299 | spin_lock_irqsave(&bfad->bfad_lock, flags); | ||
1300 | iocmd->status = bfa_faa_enable(&bfad->bfa, bfad_hcb_comp, &fcomp); | ||
1301 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | ||
1302 | |||
1303 | if (iocmd->status != BFA_STATUS_OK) | ||
1304 | goto out; | ||
1305 | |||
1306 | wait_for_completion(&fcomp.comp); | ||
1307 | iocmd->status = fcomp.status; | ||
1308 | out: | ||
1309 | return 0; | ||
1310 | } | ||
1311 | |||
1312 | int | ||
1313 | bfad_iocmd_faa_disable(struct bfad_s *bfad, void *cmd) | ||
1314 | { | ||
1315 | struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; | ||
1316 | unsigned long flags; | ||
1317 | struct bfad_hal_comp fcomp; | ||
1318 | |||
1319 | init_completion(&fcomp.comp); | ||
1320 | iocmd->status = BFA_STATUS_OK; | ||
1321 | spin_lock_irqsave(&bfad->bfad_lock, flags); | ||
1322 | iocmd->status = bfa_faa_disable(&bfad->bfa, bfad_hcb_comp, &fcomp); | ||
1323 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | ||
1324 | |||
1325 | if (iocmd->status != BFA_STATUS_OK) | ||
1326 | goto out; | ||
1327 | |||
1328 | wait_for_completion(&fcomp.comp); | ||
1329 | iocmd->status = fcomp.status; | ||
1330 | out: | ||
1331 | return 0; | ||
1332 | } | ||
1333 | |||
1334 | int | ||
1335 | bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd) | 1291 | bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd) |
1336 | { | 1292 | { |
1337 | struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd; | 1293 | struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd; |
@@ -1918,6 +1874,7 @@ bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd, | |||
1918 | struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd; | 1874 | struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd; |
1919 | void *iocmd_bufptr; | 1875 | void *iocmd_bufptr; |
1920 | unsigned long flags; | 1876 | unsigned long flags; |
1877 | u32 offset; | ||
1921 | 1878 | ||
1922 | if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s), | 1879 | if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s), |
1923 | BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) { | 1880 | BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) { |
@@ -1935,8 +1892,10 @@ bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd, | |||
1935 | 1892 | ||
1936 | iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s); | 1893 | iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s); |
1937 | spin_lock_irqsave(&bfad->bfad_lock, flags); | 1894 | spin_lock_irqsave(&bfad->bfad_lock, flags); |
1895 | offset = iocmd->offset; | ||
1938 | iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr, | 1896 | iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr, |
1939 | (u32 *)&iocmd->offset, &iocmd->bufsz); | 1897 | &offset, &iocmd->bufsz); |
1898 | iocmd->offset = offset; | ||
1940 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 1899 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
1941 | out: | 1900 | out: |
1942 | return 0; | 1901 | return 0; |
@@ -2633,12 +2592,6 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd, | |||
2633 | case IOCMD_FLASH_DISABLE_OPTROM: | 2592 | case IOCMD_FLASH_DISABLE_OPTROM: |
2634 | rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd); | 2593 | rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd); |
2635 | break; | 2594 | break; |
2636 | case IOCMD_FAA_ENABLE: | ||
2637 | rc = bfad_iocmd_faa_enable(bfad, iocmd); | ||
2638 | break; | ||
2639 | case IOCMD_FAA_DISABLE: | ||
2640 | rc = bfad_iocmd_faa_disable(bfad, iocmd); | ||
2641 | break; | ||
2642 | case IOCMD_FAA_QUERY: | 2595 | case IOCMD_FAA_QUERY: |
2643 | rc = bfad_iocmd_faa_query(bfad, iocmd); | 2596 | rc = bfad_iocmd_faa_query(bfad, iocmd); |
2644 | break; | 2597 | break; |
@@ -2809,9 +2762,16 @@ bfad_im_bsg_vendor_request(struct fc_bsg_job *job) | |||
2809 | struct bfad_im_port_s *im_port = | 2762 | struct bfad_im_port_s *im_port = |
2810 | (struct bfad_im_port_s *) job->shost->hostdata[0]; | 2763 | (struct bfad_im_port_s *) job->shost->hostdata[0]; |
2811 | struct bfad_s *bfad = im_port->bfad; | 2764 | struct bfad_s *bfad = im_port->bfad; |
2765 | struct request_queue *request_q = job->req->q; | ||
2812 | void *payload_kbuf; | 2766 | void *payload_kbuf; |
2813 | int rc = -EINVAL; | 2767 | int rc = -EINVAL; |
2814 | 2768 | ||
2769 | /* | ||
2770 | * Set the BSG device request_queue size to 256 to support | ||
2771 | * payloads larger than 512*1024K bytes. | ||
2772 | */ | ||
2773 | blk_queue_max_segments(request_q, 256); | ||
2774 | |||
2815 | /* Allocate a temp buffer to hold the passed in user space command */ | 2775 | /* Allocate a temp buffer to hold the passed in user space command */ |
2816 | payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL); | 2776 | payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL); |
2817 | if (!payload_kbuf) { | 2777 | if (!payload_kbuf) { |
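
In the bfad_iocmd_debug_fw_core() hunk above, the driver now copies iocmd->offset into a properly typed local u32 before passing its address to bfa_ioc_debug_fwcore(), instead of casting a pointer to the struct field. The stand-alone sketch below shows why handing a casted pointer to a field of a different width is fragile and how the local-copy pattern avoids it; the 64-bit field width used here is an illustrative assumption, not taken from the driver headers.

#include <stdio.h>
#include <stdint.h>

/* helper that genuinely works on a 32-bit offset, by analogy with
 * bfa_ioc_debug_fwcore() above */
static void read_chunk(uint32_t *offset, uint32_t len)
{
	*offset += len;			/* pretend we copied 'len' bytes */
}

int main(void)
{
	uint64_t field_offset = 0;	/* wider field in the ioctl structure */
	uint32_t offset = (uint32_t)field_offset;

	read_chunk(&offset, 4096);	/* never (uint32_t *)&field_offset */
	field_offset = offset;		/* write the result back afterwards */
	printf("offset is now %llu\n", (unsigned long long)field_offset);
	return 0;
}
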
diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h index e859adb9aa9e..17ad67283130 100644 --- a/drivers/scsi/bfa/bfad_bsg.h +++ b/drivers/scsi/bfa/bfad_bsg.h | |||
@@ -83,8 +83,6 @@ enum { | |||
83 | IOCMD_PORT_CFG_MODE, | 83 | IOCMD_PORT_CFG_MODE, |
84 | IOCMD_FLASH_ENABLE_OPTROM, | 84 | IOCMD_FLASH_ENABLE_OPTROM, |
85 | IOCMD_FLASH_DISABLE_OPTROM, | 85 | IOCMD_FLASH_DISABLE_OPTROM, |
86 | IOCMD_FAA_ENABLE, | ||
87 | IOCMD_FAA_DISABLE, | ||
88 | IOCMD_FAA_QUERY, | 86 | IOCMD_FAA_QUERY, |
89 | IOCMD_CEE_GET_ATTR, | 87 | IOCMD_CEE_GET_ATTR, |
90 | IOCMD_CEE_GET_STATS, | 88 | IOCMD_CEE_GET_STATS, |
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h index dc5b9d99c450..7f74f1d19124 100644 --- a/drivers/scsi/bfa/bfad_drv.h +++ b/drivers/scsi/bfa/bfad_drv.h | |||
@@ -56,7 +56,7 @@ | |||
56 | #ifdef BFA_DRIVER_VERSION | 56 | #ifdef BFA_DRIVER_VERSION |
57 | #define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION | 57 | #define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION |
58 | #else | 58 | #else |
59 | #define BFAD_DRIVER_VERSION "3.0.2.2" | 59 | #define BFAD_DRIVER_VERSION "3.0.23.0" |
60 | #endif | 60 | #endif |
61 | 61 | ||
62 | #define BFAD_PROTO_NAME FCPI_NAME | 62 | #define BFAD_PROTO_NAME FCPI_NAME |
diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h index 0d9f1fb50db0..d4220e13cafa 100644 --- a/drivers/scsi/bfa/bfi_ms.h +++ b/drivers/scsi/bfa/bfi_ms.h | |||
@@ -28,17 +28,15 @@ enum bfi_iocfc_h2i_msgs { | |||
28 | BFI_IOCFC_H2I_CFG_REQ = 1, | 28 | BFI_IOCFC_H2I_CFG_REQ = 1, |
29 | BFI_IOCFC_H2I_SET_INTR_REQ = 2, | 29 | BFI_IOCFC_H2I_SET_INTR_REQ = 2, |
30 | BFI_IOCFC_H2I_UPDATEQ_REQ = 3, | 30 | BFI_IOCFC_H2I_UPDATEQ_REQ = 3, |
31 | BFI_IOCFC_H2I_FAA_ENABLE_REQ = 4, | 31 | BFI_IOCFC_H2I_FAA_QUERY_REQ = 4, |
32 | BFI_IOCFC_H2I_FAA_DISABLE_REQ = 5, | 32 | BFI_IOCFC_H2I_ADDR_REQ = 5, |
33 | BFI_IOCFC_H2I_FAA_QUERY_REQ = 6, | ||
34 | }; | 33 | }; |
35 | 34 | ||
36 | enum bfi_iocfc_i2h_msgs { | 35 | enum bfi_iocfc_i2h_msgs { |
37 | BFI_IOCFC_I2H_CFG_REPLY = BFA_I2HM(1), | 36 | BFI_IOCFC_I2H_CFG_REPLY = BFA_I2HM(1), |
38 | BFI_IOCFC_I2H_UPDATEQ_RSP = BFA_I2HM(3), | 37 | BFI_IOCFC_I2H_UPDATEQ_RSP = BFA_I2HM(3), |
39 | BFI_IOCFC_I2H_FAA_ENABLE_RSP = BFA_I2HM(4), | 38 | BFI_IOCFC_I2H_FAA_QUERY_RSP = BFA_I2HM(4), |
40 | BFI_IOCFC_I2H_FAA_DISABLE_RSP = BFA_I2HM(5), | 39 | BFI_IOCFC_I2H_ADDR_MSG = BFA_I2HM(5), |
41 | BFI_IOCFC_I2H_FAA_QUERY_RSP = BFA_I2HM(6), | ||
42 | }; | 40 | }; |
43 | 41 | ||
44 | struct bfi_iocfc_cfg_s { | 42 | struct bfi_iocfc_cfg_s { |
@@ -184,6 +182,13 @@ struct bfi_faa_en_dis_s { | |||
184 | struct bfi_mhdr_s mh; /* common msg header */ | 182 | struct bfi_mhdr_s mh; /* common msg header */ |
185 | }; | 183 | }; |
186 | 184 | ||
185 | struct bfi_faa_addr_msg_s { | ||
186 | struct bfi_mhdr_s mh; /* common msg header */ | ||
187 | u8 rsvd[4]; | ||
188 | wwn_t pwwn; /* Fabric acquired PWWN */ | ||
189 | wwn_t nwwn; /* Fabric acquired NWWN */ ||
190 | }; | ||
191 | |||
187 | /* | 192 | /* |
188 | * BFI_IOCFC_H2I_FAA_QUERY_REQ message | 193 | * BFI_IOCFC_H2I_FAA_QUERY_REQ message |
189 | */ | 194 | */ |
diff --git a/drivers/scsi/bfa/bfi_reg.h b/drivers/scsi/bfa/bfi_reg.h index d892064b64a8..ed5f159e1867 100644 --- a/drivers/scsi/bfa/bfi_reg.h +++ b/drivers/scsi/bfa/bfi_reg.h | |||
@@ -335,11 +335,17 @@ enum { | |||
335 | #define __PMM_1T_PNDB_P 0x00000002 | 335 | #define __PMM_1T_PNDB_P 0x00000002 |
336 | #define CT2_PMM_1T_CONTROL_REG_P1 0x00023c1c | 336 | #define CT2_PMM_1T_CONTROL_REG_P1 0x00023c1c |
337 | #define CT2_WGN_STATUS 0x00014990 | 337 | #define CT2_WGN_STATUS 0x00014990 |
338 | #define __A2T_AHB_LOAD 0x00000800 | ||
338 | #define __WGN_READY 0x00000400 | 339 | #define __WGN_READY 0x00000400 |
339 | #define __GLBL_PF_VF_CFG_RDY 0x00000200 | 340 | #define __GLBL_PF_VF_CFG_RDY 0x00000200 |
341 | #define CT2_NFC_CSR_CLR_REG 0x00027420 | ||
340 | #define CT2_NFC_CSR_SET_REG 0x00027424 | 342 | #define CT2_NFC_CSR_SET_REG 0x00027424 |
341 | #define __HALT_NFC_CONTROLLER 0x00000002 | 343 | #define __HALT_NFC_CONTROLLER 0x00000002 |
342 | #define __NFC_CONTROLLER_HALTED 0x00001000 | 344 | #define __NFC_CONTROLLER_HALTED 0x00001000 |
345 | #define CT2_RSC_GPR15_REG 0x0002765c | ||
346 | #define CT2_CSI_FW_CTL_REG 0x00027080 | ||
347 | #define CT2_CSI_FW_CTL_SET_REG 0x00027088 | ||
348 | #define __RESET_AND_START_SCLK_LCLK_PLLS 0x00010000 | ||
343 | 349 | ||
344 | #define CT2_CSI_MAC0_CONTROL_REG 0x000270d0 | 350 | #define CT2_CSI_MAC0_CONTROL_REG 0x000270d0 |
345 | #define __CSI_MAC_RESET 0x00000010 | 351 | #define __CSI_MAC_RESET 0x00000010 |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index abd72a01856d..c1c6a92a0b98 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c | |||
@@ -439,13 +439,13 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev, | |||
439 | fr->fr_dev = lport; | 439 | fr->fr_dev = lport; |
440 | 440 | ||
441 | bg = &bnx2fc_global; | 441 | bg = &bnx2fc_global; |
442 | spin_lock_bh(&bg->fcoe_rx_list.lock); | 442 | spin_lock(&bg->fcoe_rx_list.lock); |
443 | 443 | ||
444 | __skb_queue_tail(&bg->fcoe_rx_list, skb); | 444 | __skb_queue_tail(&bg->fcoe_rx_list, skb); |
445 | if (bg->fcoe_rx_list.qlen == 1) | 445 | if (bg->fcoe_rx_list.qlen == 1) |
446 | wake_up_process(bg->thread); | 446 | wake_up_process(bg->thread); |
447 | 447 | ||
448 | spin_unlock_bh(&bg->fcoe_rx_list.lock); | 448 | spin_unlock(&bg->fcoe_rx_list.lock); |
449 | 449 | ||
450 | return 0; | 450 | return 0; |
451 | err: | 451 | err: |
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index ae7d15c44e2a..335e85192807 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c | |||
@@ -1436,7 +1436,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, | |||
1436 | goto err; | 1436 | goto err; |
1437 | 1437 | ||
1438 | fps = &per_cpu(fcoe_percpu, cpu); | 1438 | fps = &per_cpu(fcoe_percpu, cpu); |
1439 | spin_lock_bh(&fps->fcoe_rx_list.lock); | 1439 | spin_lock(&fps->fcoe_rx_list.lock); |
1440 | if (unlikely(!fps->thread)) { | 1440 | if (unlikely(!fps->thread)) { |
1441 | /* | 1441 | /* |
1442 | * The targeted CPU is not ready, let's target | 1442 | * The targeted CPU is not ready, let's target |
@@ -1447,12 +1447,12 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, | |||
1447 | "ready for incoming skb- using first online " | 1447 | "ready for incoming skb- using first online " |
1448 | "CPU.\n"); | 1448 | "CPU.\n"); |
1449 | 1449 | ||
1450 | spin_unlock_bh(&fps->fcoe_rx_list.lock); | 1450 | spin_unlock(&fps->fcoe_rx_list.lock); |
1451 | cpu = cpumask_first(cpu_online_mask); | 1451 | cpu = cpumask_first(cpu_online_mask); |
1452 | fps = &per_cpu(fcoe_percpu, cpu); | 1452 | fps = &per_cpu(fcoe_percpu, cpu); |
1453 | spin_lock_bh(&fps->fcoe_rx_list.lock); | 1453 | spin_lock(&fps->fcoe_rx_list.lock); |
1454 | if (!fps->thread) { | 1454 | if (!fps->thread) { |
1455 | spin_unlock_bh(&fps->fcoe_rx_list.lock); | 1455 | spin_unlock(&fps->fcoe_rx_list.lock); |
1456 | goto err; | 1456 | goto err; |
1457 | } | 1457 | } |
1458 | } | 1458 | } |
@@ -1463,24 +1463,17 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, | |||
1463 | * so we're free to queue skbs into it's queue. | 1463 | * so we're free to queue skbs into it's queue. |
1464 | */ | 1464 | */ |
1465 | 1465 | ||
1466 | /* If this is a SCSI-FCP frame, and this is already executing on the | 1466 | /* |
1467 | * correct CPU, and the queue for this CPU is empty, then go ahead | 1467 | * Note: We used to have a set of conditions under which we would |
1468 | * and process the frame directly in the softirq context. | 1468 | * call fcoe_recv_frame directly, rather than queuing to the rx list |
1469 | * This lets us process completions without context switching from the | 1469 | * as it could save a few cycles, but doing so is prohibited, as |
1470 | * NET_RX softirq, to our receive processing thread, and then back to | 1470 | * fcoe_recv_frame has several paths that may sleep, which is forbidden |
1471 | * BLOCK softirq context. | 1471 | * in softirq context. |
1472 | */ | 1472 | */ |
1473 | if (fh->fh_type == FC_TYPE_FCP && | 1473 | __skb_queue_tail(&fps->fcoe_rx_list, skb); |
1474 | cpu == smp_processor_id() && | 1474 | if (fps->thread->state == TASK_INTERRUPTIBLE) |
1475 | skb_queue_empty(&fps->fcoe_rx_list)) { | 1475 | wake_up_process(fps->thread); |
1476 | spin_unlock_bh(&fps->fcoe_rx_list.lock); | 1476 | spin_unlock(&fps->fcoe_rx_list.lock); |
1477 | fcoe_recv_frame(skb); | ||
1478 | } else { | ||
1479 | __skb_queue_tail(&fps->fcoe_rx_list, skb); | ||
1480 | if (fps->fcoe_rx_list.qlen == 1) | ||
1481 | wake_up_process(fps->thread); | ||
1482 | spin_unlock_bh(&fps->fcoe_rx_list.lock); | ||
1483 | } | ||
1484 | 1477 | ||
1485 | return 0; | 1478 | return 0; |
1486 | err: | 1479 | err: |
@@ -1797,23 +1790,29 @@ static int fcoe_percpu_receive_thread(void *arg) | |||
1797 | { | 1790 | { |
1798 | struct fcoe_percpu_s *p = arg; | 1791 | struct fcoe_percpu_s *p = arg; |
1799 | struct sk_buff *skb; | 1792 | struct sk_buff *skb; |
1793 | struct sk_buff_head tmp; | ||
1794 | |||
1795 | skb_queue_head_init(&tmp); | ||
1800 | 1796 | ||
1801 | set_user_nice(current, -20); | 1797 | set_user_nice(current, -20); |
1802 | 1798 | ||
1803 | while (!kthread_should_stop()) { | 1799 | while (!kthread_should_stop()) { |
1804 | 1800 | ||
1805 | spin_lock_bh(&p->fcoe_rx_list.lock); | 1801 | spin_lock_bh(&p->fcoe_rx_list.lock); |
1806 | while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) { | 1802 | skb_queue_splice_init(&p->fcoe_rx_list, &tmp); |
1803 | spin_unlock_bh(&p->fcoe_rx_list.lock); | ||
1804 | |||
1805 | while ((skb = __skb_dequeue(&tmp)) != NULL) | ||
1806 | fcoe_recv_frame(skb); | ||
1807 | |||
1808 | spin_lock_bh(&p->fcoe_rx_list.lock); | ||
1809 | if (!skb_queue_len(&p->fcoe_rx_list)) { | ||
1807 | set_current_state(TASK_INTERRUPTIBLE); | 1810 | set_current_state(TASK_INTERRUPTIBLE); |
1808 | spin_unlock_bh(&p->fcoe_rx_list.lock); | 1811 | spin_unlock_bh(&p->fcoe_rx_list.lock); |
1809 | schedule(); | 1812 | schedule(); |
1810 | set_current_state(TASK_RUNNING); | 1813 | set_current_state(TASK_RUNNING); |
1811 | if (kthread_should_stop()) | 1814 | } else |
1812 | return 0; | 1815 | spin_unlock_bh(&p->fcoe_rx_list.lock); |
1813 | spin_lock_bh(&p->fcoe_rx_list.lock); | ||
1814 | } | ||
1815 | spin_unlock_bh(&p->fcoe_rx_list.lock); | ||
1816 | fcoe_recv_frame(skb); | ||
1817 | } | 1816 | } |
1818 | return 0; | 1817 | return 0; |
1819 | } | 1818 | } |
@@ -2187,8 +2186,12 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode) | |||
2187 | /* start FIP Discovery and FLOGI */ | 2186 | /* start FIP Discovery and FLOGI */ |
2188 | lport->boot_time = jiffies; | 2187 | lport->boot_time = jiffies; |
2189 | fc_fabric_login(lport); | 2188 | fc_fabric_login(lport); |
2190 | if (!fcoe_link_ok(lport)) | 2189 | if (!fcoe_link_ok(lport)) { |
2190 | rtnl_unlock(); | ||
2191 | fcoe_ctlr_link_up(&fcoe->ctlr); | 2191 | fcoe_ctlr_link_up(&fcoe->ctlr); |
2192 | mutex_unlock(&fcoe_config_mutex); | ||
2193 | return rc; | ||
2194 | } | ||
2192 | 2195 | ||
2193 | out_nodev: | 2196 | out_nodev: |
2194 | rtnl_unlock(); | 2197 | rtnl_unlock(); |
@@ -2261,31 +2264,14 @@ static int fcoe_link_ok(struct fc_lport *lport) | |||
2261 | static void fcoe_percpu_clean(struct fc_lport *lport) | 2264 | static void fcoe_percpu_clean(struct fc_lport *lport) |
2262 | { | 2265 | { |
2263 | struct fcoe_percpu_s *pp; | 2266 | struct fcoe_percpu_s *pp; |
2264 | struct fcoe_rcv_info *fr; | 2267 | struct sk_buff *skb; |
2265 | struct sk_buff_head *list; | ||
2266 | struct sk_buff *skb, *next; | ||
2267 | struct sk_buff *head; | ||
2268 | unsigned int cpu; | 2268 | unsigned int cpu; |
2269 | 2269 | ||
2270 | for_each_possible_cpu(cpu) { | 2270 | for_each_possible_cpu(cpu) { |
2271 | pp = &per_cpu(fcoe_percpu, cpu); | 2271 | pp = &per_cpu(fcoe_percpu, cpu); |
2272 | spin_lock_bh(&pp->fcoe_rx_list.lock); | ||
2273 | list = &pp->fcoe_rx_list; | ||
2274 | head = list->next; | ||
2275 | for (skb = head; skb != (struct sk_buff *)list; | ||
2276 | skb = next) { | ||
2277 | next = skb->next; | ||
2278 | fr = fcoe_dev_from_skb(skb); | ||
2279 | if (fr->fr_dev == lport) { | ||
2280 | __skb_unlink(skb, list); | ||
2281 | kfree_skb(skb); | ||
2282 | } | ||
2283 | } | ||
2284 | 2272 | ||
2285 | if (!pp->thread || !cpu_online(cpu)) { | 2273 | if (!pp->thread || !cpu_online(cpu)) |
2286 | spin_unlock_bh(&pp->fcoe_rx_list.lock); | ||
2287 | continue; | 2274 | continue; |
2288 | } | ||
2289 | 2275 | ||
2290 | skb = dev_alloc_skb(0); | 2276 | skb = dev_alloc_skb(0); |
2291 | if (!skb) { | 2277 | if (!skb) { |
@@ -2294,6 +2280,7 @@ static void fcoe_percpu_clean(struct fc_lport *lport) | |||
2294 | } | 2280 | } |
2295 | skb->destructor = fcoe_percpu_flush_done; | 2281 | skb->destructor = fcoe_percpu_flush_done; |
2296 | 2282 | ||
2283 | spin_lock_bh(&pp->fcoe_rx_list.lock); | ||
2297 | __skb_queue_tail(&pp->fcoe_rx_list, skb); | 2284 | __skb_queue_tail(&pp->fcoe_rx_list, skb); |
2298 | if (pp->fcoe_rx_list.qlen == 1) | 2285 | if (pp->fcoe_rx_list.qlen == 1) |
2299 | wake_up_process(pp->thread); | 2286 | wake_up_process(pp->thread); |
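
The fcoe.c receive-thread rework above drains the per-CPU rx list by splicing it onto a private queue while holding the lock (skb_queue_splice_init) and then processing the frames with the lock dropped. Here is a minimal single-threaded sketch of that splice-then-process idiom in plain C; the spinlock is elided and the list type is a stand-in for struct sk_buff_head.

#include <stdio.h>

struct frame { struct frame *next; int id; };

struct rx_list { struct frame *head; /* spinlock elided in this demo */ };

/* move everything onto a private list and leave the shared one empty */
static struct frame *splice_init(struct rx_list *src)
{
	struct frame *all = src->head;

	src->head = NULL;		/* producers may queue again at once */
	return all;
}

int main(void)
{
	struct frame f3 = { NULL, 3 }, f2 = { &f3, 2 }, f1 = { &f2, 1 };
	struct rx_list rx = { &f1 };
	struct frame *f;

	/* lock, splice, unlock ... then process with the lock dropped */
	for (f = splice_init(&rx); f; f = f->next)
		printf("handling frame %d\n", f->id);
	return 0;
}
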
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index e7522dcc296e..249a106888d9 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c | |||
@@ -242,7 +242,7 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip) | |||
242 | printk(KERN_INFO "libfcoe: host%d: FIP selected " | 242 | printk(KERN_INFO "libfcoe: host%d: FIP selected " |
243 | "Fibre-Channel Forwarder MAC %pM\n", | 243 | "Fibre-Channel Forwarder MAC %pM\n", |
244 | fip->lp->host->host_no, sel->fcf_mac); | 244 | fip->lp->host->host_no, sel->fcf_mac); |
245 | memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN); | 245 | memcpy(fip->dest_addr, sel->fcoe_mac, ETH_ALEN); |
246 | fip->map_dest = 0; | 246 | fip->map_dest = 0; |
247 | } | 247 | } |
248 | unlock: | 248 | unlock: |
@@ -824,6 +824,7 @@ static int fcoe_ctlr_parse_adv(struct fcoe_ctlr *fip, | |||
824 | memcpy(fcf->fcf_mac, | 824 | memcpy(fcf->fcf_mac, |
825 | ((struct fip_mac_desc *)desc)->fd_mac, | 825 | ((struct fip_mac_desc *)desc)->fd_mac, |
826 | ETH_ALEN); | 826 | ETH_ALEN); |
827 | memcpy(fcf->fcoe_mac, fcf->fcf_mac, ETH_ALEN); | ||
827 | if (!is_valid_ether_addr(fcf->fcf_mac)) { | 828 | if (!is_valid_ether_addr(fcf->fcf_mac)) { |
828 | LIBFCOE_FIP_DBG(fip, | 829 | LIBFCOE_FIP_DBG(fip, |
829 | "Invalid MAC addr %pM in FIP adv\n", | 830 | "Invalid MAC addr %pM in FIP adv\n", |
@@ -1013,6 +1014,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
1013 | struct fip_desc *desc; | 1014 | struct fip_desc *desc; |
1014 | struct fip_encaps *els; | 1015 | struct fip_encaps *els; |
1015 | struct fcoe_dev_stats *stats; | 1016 | struct fcoe_dev_stats *stats; |
1017 | struct fcoe_fcf *sel; | ||
1016 | enum fip_desc_type els_dtype = 0; | 1018 | enum fip_desc_type els_dtype = 0; |
1017 | u8 els_op; | 1019 | u8 els_op; |
1018 | u8 sub; | 1020 | u8 sub; |
@@ -1040,7 +1042,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
1040 | goto drop; | 1042 | goto drop; |
1041 | /* Drop ELS if there are duplicate critical descriptors */ | 1043 | /* Drop ELS if there are duplicate critical descriptors */ |
1042 | if (desc->fip_dtype < 32) { | 1044 | if (desc->fip_dtype < 32) { |
1043 | if (desc_mask & 1U << desc->fip_dtype) { | 1045 | if ((desc->fip_dtype != FIP_DT_MAC) && |
1046 | (desc_mask & 1U << desc->fip_dtype)) { | ||
1044 | LIBFCOE_FIP_DBG(fip, "Duplicate Critical " | 1047 | LIBFCOE_FIP_DBG(fip, "Duplicate Critical " |
1045 | "Descriptors in FIP ELS\n"); | 1048 | "Descriptors in FIP ELS\n"); |
1046 | goto drop; | 1049 | goto drop; |
@@ -1049,17 +1052,32 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
1049 | } | 1052 | } |
1050 | switch (desc->fip_dtype) { | 1053 | switch (desc->fip_dtype) { |
1051 | case FIP_DT_MAC: | 1054 | case FIP_DT_MAC: |
1055 | sel = fip->sel_fcf; | ||
1052 | if (desc_cnt == 1) { | 1056 | if (desc_cnt == 1) { |
1053 | LIBFCOE_FIP_DBG(fip, "FIP descriptors " | 1057 | LIBFCOE_FIP_DBG(fip, "FIP descriptors " |
1054 | "received out of order\n"); | 1058 | "received out of order\n"); |
1055 | goto drop; | 1059 | goto drop; |
1056 | } | 1060 | } |
1061 | /* | ||
1062 | * Some switch implementations send two MAC descriptors, | ||
1063 | * with first MAC(granted_mac) being the FPMA, and the | ||
1064 | * second one(fcoe_mac) is used as destination address | ||
1065 | * for sending/receiving FCoE packets. FIP traffic is | ||
1066 | * sent using fip_mac. For regular switches, both | ||
1067 | * fip_mac and fcoe_mac would be the same. | ||
1068 | */ | ||
1069 | if (desc_cnt == 2) | ||
1070 | memcpy(granted_mac, | ||
1071 | ((struct fip_mac_desc *)desc)->fd_mac, | ||
1072 | ETH_ALEN); | ||
1057 | 1073 | ||
1058 | if (dlen != sizeof(struct fip_mac_desc)) | 1074 | if (dlen != sizeof(struct fip_mac_desc)) |
1059 | goto len_err; | 1075 | goto len_err; |
1060 | memcpy(granted_mac, | 1076 | |
1061 | ((struct fip_mac_desc *)desc)->fd_mac, | 1077 | if ((desc_cnt == 3) && (sel)) |
1062 | ETH_ALEN); | 1078 | memcpy(sel->fcoe_mac, |
1079 | ((struct fip_mac_desc *)desc)->fd_mac, | ||
1080 | ETH_ALEN); | ||
1063 | break; | 1081 | break; |
1064 | case FIP_DT_FLOGI: | 1082 | case FIP_DT_FLOGI: |
1065 | case FIP_DT_FDISC: | 1083 | case FIP_DT_FDISC: |
@@ -1273,11 +1291,6 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, | |||
1273 | * No Vx_Port description. Clear all NPIV ports, | 1291 | * No Vx_Port description. Clear all NPIV ports, |
1274 | * followed by physical port | 1292 | * followed by physical port |
1275 | */ | 1293 | */ |
1276 | mutex_lock(&lport->lp_mutex); | ||
1277 | list_for_each_entry(vn_port, &lport->vports, list) | ||
1278 | fc_lport_reset(vn_port); | ||
1279 | mutex_unlock(&lport->lp_mutex); | ||
1280 | |||
1281 | mutex_lock(&fip->ctlr_mutex); | 1294 | mutex_lock(&fip->ctlr_mutex); |
1282 | per_cpu_ptr(lport->dev_stats, | 1295 | per_cpu_ptr(lport->dev_stats, |
1283 | get_cpu())->VLinkFailureCount++; | 1296 | get_cpu())->VLinkFailureCount++; |
@@ -1285,6 +1298,11 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, | |||
1285 | fcoe_ctlr_reset(fip); | 1298 | fcoe_ctlr_reset(fip); |
1286 | mutex_unlock(&fip->ctlr_mutex); | 1299 | mutex_unlock(&fip->ctlr_mutex); |
1287 | 1300 | ||
1301 | mutex_lock(&lport->lp_mutex); | ||
1302 | list_for_each_entry(vn_port, &lport->vports, list) | ||
1303 | fc_lport_reset(vn_port); | ||
1304 | mutex_unlock(&lport->lp_mutex); | ||
1305 | |||
1288 | fc_lport_reset(fip->lp); | 1306 | fc_lport_reset(fip->lp); |
1289 | fcoe_ctlr_solicit(fip, NULL); | 1307 | fcoe_ctlr_solicit(fip, NULL); |
1290 | } else { | 1308 | } else { |
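
The fcoe_ctlr.c changes above teach the FIP code that some FCFs return two MAC descriptors in the FLOGI response: the first carries the granted FPMA address and the second the MAC to use as the destination for FCoE frames, while single-descriptor FCFs keep the fcoe_mac copied from the advertisement. A stand-alone sketch of that selection follows; the types and constants are stand-ins, not libfcoe definitions.

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct mac_desc { unsigned char mac[ETH_ALEN]; };

/* descs[] are the MAC descriptors from the FIP FLOGI reply, in order */
static void handle_mac_descs(const struct mac_desc *descs, int n,
			     unsigned char *granted_mac,
			     unsigned char *fcoe_mac)
{
	if (n >= 1)
		memcpy(granted_mac, descs[0].mac, ETH_ALEN);	/* FPMA */
	if (n >= 2)
		memcpy(fcoe_mac, descs[1].mac, ETH_ALEN);	/* FCoE dest */
	/* with one descriptor, fcoe_mac keeps the value copied from the
	 * FCF advertisement, as fcoe_ctlr_parse_adv() does above */
}

int main(void)
{
	unsigned char granted[ETH_ALEN];
	unsigned char fcoe[ETH_ALEN] = { 0x00, 0x05, 0x73, 0, 0, 1 }; /* from adv */
	struct mac_desc d[2] = {
		{ { 0x0e, 0xfc, 0x00, 0x01, 0x02, 0x03 } },
		{ { 0x00, 0x05, 0x73, 0xaa, 0xbb, 0xcc } },
	};

	handle_mac_descs(d, 2, granted, fcoe);
	printf("granted %02x:%02x..., fcoe dest %02x:%02x...\n",
	       granted[0], granted[1], fcoe[0], fcoe[1]);
	return 0;
}
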
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index cdfe5a16de2a..e002cd466e9a 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
@@ -104,7 +104,9 @@ static DEFINE_SPINLOCK(ipr_driver_lock); | |||
104 | static const struct ipr_chip_cfg_t ipr_chip_cfg[] = { | 104 | static const struct ipr_chip_cfg_t ipr_chip_cfg[] = { |
105 | { /* Gemstone, Citrine, Obsidian, and Obsidian-E */ | 105 | { /* Gemstone, Citrine, Obsidian, and Obsidian-E */ |
106 | .mailbox = 0x0042C, | 106 | .mailbox = 0x0042C, |
107 | .max_cmds = 100, | ||
107 | .cache_line_size = 0x20, | 108 | .cache_line_size = 0x20, |
109 | .clear_isr = 1, | ||
108 | { | 110 | { |
109 | .set_interrupt_mask_reg = 0x0022C, | 111 | .set_interrupt_mask_reg = 0x0022C, |
110 | .clr_interrupt_mask_reg = 0x00230, | 112 | .clr_interrupt_mask_reg = 0x00230, |
@@ -126,7 +128,9 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = { | |||
126 | }, | 128 | }, |
127 | { /* Snipe and Scamp */ | 129 | { /* Snipe and Scamp */ |
128 | .mailbox = 0x0052C, | 130 | .mailbox = 0x0052C, |
131 | .max_cmds = 100, | ||
129 | .cache_line_size = 0x20, | 132 | .cache_line_size = 0x20, |
133 | .clear_isr = 1, | ||
130 | { | 134 | { |
131 | .set_interrupt_mask_reg = 0x00288, | 135 | .set_interrupt_mask_reg = 0x00288, |
132 | .clr_interrupt_mask_reg = 0x0028C, | 136 | .clr_interrupt_mask_reg = 0x0028C, |
@@ -148,7 +152,9 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = { | |||
148 | }, | 152 | }, |
149 | { /* CRoC */ | 153 | { /* CRoC */ |
150 | .mailbox = 0x00044, | 154 | .mailbox = 0x00044, |
155 | .max_cmds = 1000, | ||
151 | .cache_line_size = 0x20, | 156 | .cache_line_size = 0x20, |
157 | .clear_isr = 0, | ||
152 | { | 158 | { |
153 | .set_interrupt_mask_reg = 0x00010, | 159 | .set_interrupt_mask_reg = 0x00010, |
154 | .clr_interrupt_mask_reg = 0x00018, | 160 | .clr_interrupt_mask_reg = 0x00018, |
@@ -847,8 +853,6 @@ static void ipr_do_req(struct ipr_cmnd *ipr_cmd, | |||
847 | 853 | ||
848 | ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0); | 854 | ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0); |
849 | 855 | ||
850 | mb(); | ||
851 | |||
852 | ipr_send_command(ipr_cmd); | 856 | ipr_send_command(ipr_cmd); |
853 | } | 857 | } |
854 | 858 | ||
@@ -982,8 +986,6 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type, | |||
982 | 986 | ||
983 | ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR); | 987 | ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR); |
984 | 988 | ||
985 | mb(); | ||
986 | |||
987 | ipr_send_command(ipr_cmd); | 989 | ipr_send_command(ipr_cmd); |
988 | } else { | 990 | } else { |
989 | list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); | 991 | list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); |
@@ -4339,8 +4341,7 @@ static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget) | |||
4339 | 4341 | ||
4340 | list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { | 4342 | list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { |
4341 | if ((res->bus == starget->channel) && | 4343 | if ((res->bus == starget->channel) && |
4342 | (res->target == starget->id) && | 4344 | (res->target == starget->id)) { |
4343 | (res->lun == 0)) { | ||
4344 | return res; | 4345 | return res; |
4345 | } | 4346 | } |
4346 | } | 4347 | } |
@@ -4414,12 +4415,14 @@ static void ipr_target_destroy(struct scsi_target *starget) | |||
4414 | struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; | 4415 | struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; |
4415 | 4416 | ||
4416 | if (ioa_cfg->sis64) { | 4417 | if (ioa_cfg->sis64) { |
4417 | if (starget->channel == IPR_ARRAY_VIRTUAL_BUS) | 4418 | if (!ipr_find_starget(starget)) { |
4418 | clear_bit(starget->id, ioa_cfg->array_ids); | 4419 | if (starget->channel == IPR_ARRAY_VIRTUAL_BUS) |
4419 | else if (starget->channel == IPR_VSET_VIRTUAL_BUS) | 4420 | clear_bit(starget->id, ioa_cfg->array_ids); |
4420 | clear_bit(starget->id, ioa_cfg->vset_ids); | 4421 | else if (starget->channel == IPR_VSET_VIRTUAL_BUS) |
4421 | else if (starget->channel == 0) | 4422 | clear_bit(starget->id, ioa_cfg->vset_ids); |
4422 | clear_bit(starget->id, ioa_cfg->target_ids); | 4423 | else if (starget->channel == 0) |
4424 | clear_bit(starget->id, ioa_cfg->target_ids); | ||
4425 | } | ||
4423 | } | 4426 | } |
4424 | 4427 | ||
4425 | if (sata_port) { | 4428 | if (sata_port) { |
@@ -5048,12 +5051,14 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg, | |||
5048 | del_timer(&ioa_cfg->reset_cmd->timer); | 5051 | del_timer(&ioa_cfg->reset_cmd->timer); |
5049 | ipr_reset_ioa_job(ioa_cfg->reset_cmd); | 5052 | ipr_reset_ioa_job(ioa_cfg->reset_cmd); |
5050 | } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) { | 5053 | } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) { |
5051 | if (ipr_debug && printk_ratelimit()) | 5054 | if (ioa_cfg->clear_isr) { |
5052 | dev_err(&ioa_cfg->pdev->dev, | 5055 | if (ipr_debug && printk_ratelimit()) |
5053 | "Spurious interrupt detected. 0x%08X\n", int_reg); | 5056 | dev_err(&ioa_cfg->pdev->dev, |
5054 | writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); | 5057 | "Spurious interrupt detected. 0x%08X\n", int_reg); |
5055 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); | 5058 | writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); |
5056 | return IRQ_NONE; | 5059 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); |
5060 | return IRQ_NONE; | ||
5061 | } | ||
5057 | } else { | 5062 | } else { |
5058 | if (int_reg & IPR_PCII_IOA_UNIT_CHECKED) | 5063 | if (int_reg & IPR_PCII_IOA_UNIT_CHECKED) |
5059 | ioa_cfg->ioa_unit_checked = 1; | 5064 | ioa_cfg->ioa_unit_checked = 1; |
@@ -5153,6 +5158,9 @@ static irqreturn_t ipr_isr(int irq, void *devp) | |||
5153 | } | 5158 | } |
5154 | } | 5159 | } |
5155 | 5160 | ||
5161 | if (ipr_cmd && !ioa_cfg->clear_isr) | ||
5162 | break; | ||
5163 | |||
5156 | if (ipr_cmd != NULL) { | 5164 | if (ipr_cmd != NULL) { |
5157 | /* Clear the PCI interrupt */ | 5165 | /* Clear the PCI interrupt */ |
5158 | num_hrrq = 0; | 5166 | num_hrrq = 0; |
@@ -5854,14 +5862,12 @@ static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd, | |||
5854 | rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); | 5862 | rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); |
5855 | } | 5863 | } |
5856 | 5864 | ||
5857 | if (likely(rc == 0)) { | 5865 | if (unlikely(rc != 0)) { |
5858 | mb(); | 5866 | list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q); |
5859 | ipr_send_command(ipr_cmd); | 5867 | return SCSI_MLQUEUE_HOST_BUSY; |
5860 | } else { | ||
5861 | list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q); | ||
5862 | return SCSI_MLQUEUE_HOST_BUSY; | ||
5863 | } | 5868 | } |
5864 | 5869 | ||
5870 | ipr_send_command(ipr_cmd); | ||
5865 | return 0; | 5871 | return 0; |
5866 | } | 5872 | } |
5867 | 5873 | ||
@@ -6239,8 +6245,6 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc) | |||
6239 | return AC_ERR_INVALID; | 6245 | return AC_ERR_INVALID; |
6240 | } | 6246 | } |
6241 | 6247 | ||
6242 | mb(); | ||
6243 | |||
6244 | ipr_send_command(ipr_cmd); | 6248 | ipr_send_command(ipr_cmd); |
6245 | 6249 | ||
6246 | return 0; | 6250 | return 0; |
@@ -8277,6 +8281,10 @@ static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) | |||
8277 | if (ioa_cfg->ipr_cmd_pool) | 8281 | if (ioa_cfg->ipr_cmd_pool) |
8278 | pci_pool_destroy (ioa_cfg->ipr_cmd_pool); | 8282 | pci_pool_destroy (ioa_cfg->ipr_cmd_pool); |
8279 | 8283 | ||
8284 | kfree(ioa_cfg->ipr_cmnd_list); | ||
8285 | kfree(ioa_cfg->ipr_cmnd_list_dma); | ||
8286 | ioa_cfg->ipr_cmnd_list = NULL; | ||
8287 | ioa_cfg->ipr_cmnd_list_dma = NULL; | ||
8280 | ioa_cfg->ipr_cmd_pool = NULL; | 8288 | ioa_cfg->ipr_cmd_pool = NULL; |
8281 | } | 8289 | } |
8282 | 8290 | ||
@@ -8352,11 +8360,19 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) | |||
8352 | int i; | 8360 | int i; |
8353 | 8361 | ||
8354 | ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev, | 8362 | ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev, |
8355 | sizeof(struct ipr_cmnd), 16, 0); | 8363 | sizeof(struct ipr_cmnd), 512, 0); |
8356 | 8364 | ||
8357 | if (!ioa_cfg->ipr_cmd_pool) | 8365 | if (!ioa_cfg->ipr_cmd_pool) |
8358 | return -ENOMEM; | 8366 | return -ENOMEM; |
8359 | 8367 | ||
8368 | ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL); | ||
8369 | ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL); | ||
8370 | |||
8371 | if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) { | ||
8372 | ipr_free_cmd_blks(ioa_cfg); | ||
8373 | return -ENOMEM; | ||
8374 | } | ||
8375 | |||
8360 | for (i = 0; i < IPR_NUM_CMD_BLKS; i++) { | 8376 | for (i = 0; i < IPR_NUM_CMD_BLKS; i++) { |
8361 | ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr); | 8377 | ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr); |
8362 | 8378 | ||
@@ -8584,6 +8600,7 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, | |||
8584 | host->max_channel = IPR_MAX_BUS_TO_SCAN; | 8600 | host->max_channel = IPR_MAX_BUS_TO_SCAN; |
8585 | host->unique_id = host->host_no; | 8601 | host->unique_id = host->host_no; |
8586 | host->max_cmd_len = IPR_MAX_CDB_LEN; | 8602 | host->max_cmd_len = IPR_MAX_CDB_LEN; |
8603 | host->can_queue = ioa_cfg->max_cmds; | ||
8587 | pci_set_drvdata(pdev, ioa_cfg); | 8604 | pci_set_drvdata(pdev, ioa_cfg); |
8588 | 8605 | ||
8589 | p = &ioa_cfg->chip_cfg->regs; | 8606 | p = &ioa_cfg->chip_cfg->regs; |
@@ -8768,6 +8785,8 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev, | |||
8768 | /* set SIS 32 or SIS 64 */ | 8785 | /* set SIS 32 or SIS 64 */ |
8769 | ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0; | 8786 | ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0; |
8770 | ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg; | 8787 | ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg; |
8788 | ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr; | ||
8789 | ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds; | ||
8771 | 8790 | ||
8772 | if (ipr_transop_timeout) | 8791 | if (ipr_transop_timeout) |
8773 | ioa_cfg->transop_timeout = ipr_transop_timeout; | 8792 | ioa_cfg->transop_timeout = ipr_transop_timeout; |
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index f94eaee2ff16..153b8bd91d1e 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h | |||
@@ -38,8 +38,8 @@ | |||
38 | /* | 38 | /* |
39 | * Literals | 39 | * Literals |
40 | */ | 40 | */ |
41 | #define IPR_DRIVER_VERSION "2.5.2" | 41 | #define IPR_DRIVER_VERSION "2.5.3" |
42 | #define IPR_DRIVER_DATE "(April 27, 2011)" | 42 | #define IPR_DRIVER_DATE "(March 10, 2012)" |
43 | 43 | ||
44 | /* | 44 | /* |
45 | * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding | 45 | * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding |
@@ -53,7 +53,7 @@ | |||
53 | * IPR_NUM_BASE_CMD_BLKS: This defines the maximum number of | 53 | * IPR_NUM_BASE_CMD_BLKS: This defines the maximum number of |
54 | * ops the mid-layer can send to the adapter. | 54 | * ops the mid-layer can send to the adapter. |
55 | */ | 55 | */ |
56 | #define IPR_NUM_BASE_CMD_BLKS 100 | 56 | #define IPR_NUM_BASE_CMD_BLKS (ioa_cfg->max_cmds) |
57 | 57 | ||
58 | #define PCI_DEVICE_ID_IBM_OBSIDIAN_E 0x0339 | 58 | #define PCI_DEVICE_ID_IBM_OBSIDIAN_E 0x0339 |
59 | 59 | ||
@@ -153,7 +153,7 @@ | |||
153 | #define IPR_NUM_INTERNAL_CMD_BLKS (IPR_NUM_HCAMS + \ | 153 | #define IPR_NUM_INTERNAL_CMD_BLKS (IPR_NUM_HCAMS + \ |
154 | ((IPR_NUM_RESET_RELOAD_RETRIES + 1) * 2) + 4) | 154 | ((IPR_NUM_RESET_RELOAD_RETRIES + 1) * 2) + 4) |
155 | 155 | ||
156 | #define IPR_MAX_COMMANDS IPR_NUM_BASE_CMD_BLKS | 156 | #define IPR_MAX_COMMANDS 100 |
157 | #define IPR_NUM_CMD_BLKS (IPR_NUM_BASE_CMD_BLKS + \ | 157 | #define IPR_NUM_CMD_BLKS (IPR_NUM_BASE_CMD_BLKS + \ |
158 | IPR_NUM_INTERNAL_CMD_BLKS) | 158 | IPR_NUM_INTERNAL_CMD_BLKS) |
159 | 159 | ||
@@ -1305,7 +1305,9 @@ struct ipr_interrupts { | |||
1305 | 1305 | ||
1306 | struct ipr_chip_cfg_t { | 1306 | struct ipr_chip_cfg_t { |
1307 | u32 mailbox; | 1307 | u32 mailbox; |
1308 | u16 max_cmds; | ||
1308 | u8 cache_line_size; | 1309 | u8 cache_line_size; |
1310 | u8 clear_isr; | ||
1309 | struct ipr_interrupt_offsets regs; | 1311 | struct ipr_interrupt_offsets regs; |
1310 | }; | 1312 | }; |
1311 | 1313 | ||
@@ -1388,6 +1390,7 @@ struct ipr_ioa_cfg { | |||
1388 | u8 sis64:1; | 1390 | u8 sis64:1; |
1389 | u8 dump_timeout:1; | 1391 | u8 dump_timeout:1; |
1390 | u8 cfg_locked:1; | 1392 | u8 cfg_locked:1; |
1393 | u8 clear_isr:1; | ||
1391 | 1394 | ||
1392 | u8 revid; | 1395 | u8 revid; |
1393 | 1396 | ||
@@ -1501,8 +1504,9 @@ struct ipr_ioa_cfg { | |||
1501 | struct ata_host ata_host; | 1504 | struct ata_host ata_host; |
1502 | char ipr_cmd_label[8]; | 1505 | char ipr_cmd_label[8]; |
1503 | #define IPR_CMD_LABEL "ipr_cmd" | 1506 | #define IPR_CMD_LABEL "ipr_cmd" |
1504 | struct ipr_cmnd *ipr_cmnd_list[IPR_NUM_CMD_BLKS]; | 1507 | u32 max_cmds; |
1505 | dma_addr_t ipr_cmnd_list_dma[IPR_NUM_CMD_BLKS]; | 1508 | struct ipr_cmnd **ipr_cmnd_list; |
1509 | dma_addr_t *ipr_cmnd_list_dma; | ||
1506 | }; /* struct ipr_ioa_cfg */ | 1510 | }; /* struct ipr_ioa_cfg */ |
1507 | 1511 | ||
1508 | struct ipr_cmnd { | 1512 | struct ipr_cmnd { |
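The ipr hunks above turn the fixed-size ipr_cmnd_list[]/ipr_cmnd_list_dma[] arrays into pointers sized at probe time from the chip's max_cmds (100 for the older chips, 1000 for CRoC). Below is a minimal standalone sketch of that allocation pattern; the struct, sizes and names are illustrative stand-ins rather than the driver's exact code, and in the driver the DMA addresses come from pci_pool_alloc() rather than a plain array fill.

    #include <stdint.h>
    #include <stdlib.h>

    struct cmd_blk { uint8_t payload[512]; };   /* stand-in for struct ipr_cmnd */

    struct ioa_cfg_sketch {
            unsigned int max_cmds;              /* per-chip limit, e.g. 100 or 1000 */
            struct cmd_blk **cmnd_list;         /* was a fixed-size pointer array */
            uint64_t *cmnd_list_dma;            /* parallel DMA-address array */
    };

    /* Allocate both parallel arrays; undo everything if either allocation fails. */
    int alloc_cmd_blks(struct ioa_cfg_sketch *cfg, unsigned int internal_blks)
    {
            unsigned int num = cfg->max_cmds + internal_blks;

            cfg->cmnd_list = calloc(num, sizeof(*cfg->cmnd_list));
            cfg->cmnd_list_dma = calloc(num, sizeof(*cfg->cmnd_list_dma));
            if (!cfg->cmnd_list || !cfg->cmnd_list_dma) {
                    free(cfg->cmnd_list);
                    free(cfg->cmnd_list_dma);
                    cfg->cmnd_list = NULL;
                    cfg->cmnd_list_dma = NULL;
                    return -1;
            }
            return 0;
    }

Sizing the arrays from max_cmds is what lets host->can_queue follow the chip limit instead of compiling a single IPR_NUM_CMD_BLKS bound into the structure.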
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 630291f01826..aceffadb21c7 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c | |||
@@ -2263,7 +2263,18 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport, | |||
2263 | mp->class = class; | 2263 | mp->class = class; |
2264 | /* adjust em exch xid range for offload */ | 2264 | /* adjust em exch xid range for offload */ |
2265 | mp->min_xid = min_xid; | 2265 | mp->min_xid = min_xid; |
2266 | mp->max_xid = max_xid; | 2266 | |
2267 | /* reduce range so per cpu pool fits into PCPU_MIN_UNIT_SIZE pool */ | ||
2268 | pool_exch_range = (PCPU_MIN_UNIT_SIZE - sizeof(*pool)) / | ||
2269 | sizeof(struct fc_exch *); | ||
2270 | if ((max_xid - min_xid + 1) / (fc_cpu_mask + 1) > pool_exch_range) { | ||
2271 | mp->max_xid = pool_exch_range * (fc_cpu_mask + 1) + | ||
2272 | min_xid - 1; | ||
2273 | } else { | ||
2274 | mp->max_xid = max_xid; | ||
2275 | pool_exch_range = (mp->max_xid - mp->min_xid + 1) / | ||
2276 | (fc_cpu_mask + 1); | ||
2277 | } | ||
2267 | 2278 | ||
2268 | mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep); | 2279 | mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep); |
2269 | if (!mp->ep_pool) | 2280 | if (!mp->ep_pool) |
@@ -2274,7 +2285,6 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport, | |||
2274 | * divided across all cpus. The exch pointers array memory is | 2285 | * divided across all cpus. The exch pointers array memory is |
2275 | * allocated for exch range per pool. | 2286 | * allocated for exch range per pool. |
2276 | */ | 2287 | */ |
2277 | pool_exch_range = (mp->max_xid - mp->min_xid + 1) / (fc_cpu_mask + 1); | ||
2278 | mp->pool_max_index = pool_exch_range - 1; | 2288 | mp->pool_max_index = pool_exch_range - 1; |
2279 | 2289 | ||
2280 | /* | 2290 | /* |
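The fc_exch_mgr_alloc() change above caps the exchange ID range so that each per-CPU pool, i.e. the struct fc_exch_pool header plus its array of exchange pointers, still fits in a PCPU_MIN_UNIT_SIZE per-cpu allocation. A standalone sketch of the arithmetic follows; the unit size and header size are made-up stand-ins for the kernel values, and only the shape of the calculation matches the hunk.

    #include <stdio.h>

    /* Illustrative stand-ins; the kernel values come from percpu.h and fc_exch.c. */
    #define PCPU_MIN_UNIT_SIZE_SKETCH (32 * 1024)
    #define POOL_HEADER_SIZE_SKETCH   64
    #define PTR_SIZE                  sizeof(void *)

    int main(void)
    {
            unsigned int min_xid = 0x0001, max_xid = 0xFFF7;  /* example XID range */
            unsigned int nr_pools = 8;                        /* fc_cpu_mask + 1 */
            unsigned int pool_exch_range =
                    (PCPU_MIN_UNIT_SIZE_SKETCH - POOL_HEADER_SIZE_SKETCH) / PTR_SIZE;

            if ((max_xid - min_xid + 1) / nr_pools > pool_exch_range)
                    max_xid = pool_exch_range * nr_pools + min_xid - 1;  /* shrink range */
            else
                    pool_exch_range = (max_xid - min_xid + 1) / nr_pools;

            printf("per-pool exchanges: %u, capped max_xid: 0x%x\n",
                   pool_exch_range, max_xid);
            return 0;
    }

On a 64-bit build with the 32 KiB stand-in, the example shrinks a 64 K XID range to 8 pools of 4088 exchanges each, which is the same capping behaviour the hunk applies before sizing mp->pool_max_index.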
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index bd5d31d022d9..ef9560dff295 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c | |||
@@ -1743,8 +1743,16 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
1743 | mfs = ntohs(flp->fl_csp.sp_bb_data) & | 1743 | mfs = ntohs(flp->fl_csp.sp_bb_data) & |
1744 | FC_SP_BB_DATA_MASK; | 1744 | FC_SP_BB_DATA_MASK; |
1745 | if (mfs >= FC_SP_MIN_MAX_PAYLOAD && | 1745 | if (mfs >= FC_SP_MIN_MAX_PAYLOAD && |
1746 | mfs < lport->mfs) | 1746 | mfs <= lport->mfs) { |
1747 | lport->mfs = mfs; | 1747 | lport->mfs = mfs; |
1748 | fc_host_maxframe_size(lport->host) = mfs; | ||
1749 | } else { | ||
1750 | FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, " | ||
1751 | "lport->mfs:%hu\n", mfs, lport->mfs); | ||
1752 | fc_lport_error(lport, fp); | ||
1753 | goto err; | ||
1754 | } | ||
1755 | |||
1748 | csp_flags = ntohs(flp->fl_csp.sp_features); | 1756 | csp_flags = ntohs(flp->fl_csp.sp_features); |
1749 | r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov); | 1757 | r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov); |
1750 | e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov); | 1758 | e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov); |
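The fc_lport.c hunk now treats an FLOGI response whose advertised maximum frame size falls outside [FC_SP_MIN_MAX_PAYLOAD, lport->mfs] as an error instead of quietly keeping the old value. A tiny validation sketch, with a placeholder constant standing in for the libfc minimum:

    #include <stdbool.h>

    #define SP_MIN_MAX_PAYLOAD 256   /* placeholder for FC_SP_MIN_MAX_PAYLOAD */

    /* Accept the peer's mfs only if it is at least the spec minimum and
     * no larger than what this lport advertised in its own FLOGI. */
    bool flogi_mfs_ok(unsigned short peer_mfs, unsigned short local_mfs)
    {
            return peer_mfs >= SP_MIN_MAX_PAYLOAD && peer_mfs <= local_mfs;
    }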
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile index 88928f00aa2d..fe5d396aca73 100644 --- a/drivers/scsi/lpfc/Makefile +++ b/drivers/scsi/lpfc/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | #/******************************************************************* | 1 | #/******************************************************************* |
2 | # * This file is part of the Emulex Linux Device Driver for * | 2 | # * This file is part of the Emulex Linux Device Driver for * |
3 | # * Fibre Channel Host Bus Adapters. * | 3 | # * Fibre Channel Host Bus Adapters. * |
4 | # * Copyright (C) 2004-2011 Emulex. All rights reserved. * | 4 | # * Copyright (C) 2004-2012 Emulex. All rights reserved. * |
5 | # * EMULEX and SLI are trademarks of Emulex. * | 5 | # * EMULEX and SLI are trademarks of Emulex. * |
6 | # * www.emulex.com * | 6 | # * www.emulex.com * |
7 | # * * | 7 | # * * |
@@ -22,6 +22,8 @@ | |||
22 | ccflags-$(GCOV) := -fprofile-arcs -ftest-coverage | 22 | ccflags-$(GCOV) := -fprofile-arcs -ftest-coverage |
23 | ccflags-$(GCOV) += -O0 | 23 | ccflags-$(GCOV) += -O0 |
24 | 24 | ||
25 | ccflags-y += -Werror | ||
26 | |||
25 | obj-$(CONFIG_SCSI_LPFC) := lpfc.o | 27 | obj-$(CONFIG_SCSI_LPFC) := lpfc.o |
26 | 28 | ||
27 | lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \ | 29 | lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \ |
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 5fc044ff656e..3a1ffdd6d831 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2011 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2012 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -840,6 +840,8 @@ struct lpfc_hba { | |||
840 | struct dentry *debug_dumpData; /* BlockGuard BPL */ | 840 | struct dentry *debug_dumpData; /* BlockGuard BPL */ |
841 | struct dentry *debug_dumpDif; /* BlockGuard BPL */ | 841 | struct dentry *debug_dumpDif; /* BlockGuard BPL */ |
842 | struct dentry *debug_InjErrLBA; /* LBA to inject errors at */ | 842 | struct dentry *debug_InjErrLBA; /* LBA to inject errors at */ |
843 | struct dentry *debug_InjErrNPortID; /* NPortID to inject errors at */ | ||
844 | struct dentry *debug_InjErrWWPN; /* WWPN to inject errors at */ | ||
843 | struct dentry *debug_writeGuard; /* inject write guard_tag errors */ | 845 | struct dentry *debug_writeGuard; /* inject write guard_tag errors */ |
844 | struct dentry *debug_writeApp; /* inject write app_tag errors */ | 846 | struct dentry *debug_writeApp; /* inject write app_tag errors */ |
845 | struct dentry *debug_writeRef; /* inject write ref_tag errors */ | 847 | struct dentry *debug_writeRef; /* inject write ref_tag errors */ |
@@ -854,6 +856,8 @@ struct lpfc_hba { | |||
854 | uint32_t lpfc_injerr_rgrd_cnt; | 856 | uint32_t lpfc_injerr_rgrd_cnt; |
855 | uint32_t lpfc_injerr_rapp_cnt; | 857 | uint32_t lpfc_injerr_rapp_cnt; |
856 | uint32_t lpfc_injerr_rref_cnt; | 858 | uint32_t lpfc_injerr_rref_cnt; |
859 | uint32_t lpfc_injerr_nportid; | ||
860 | struct lpfc_name lpfc_injerr_wwpn; | ||
857 | sector_t lpfc_injerr_lba; | 861 | sector_t lpfc_injerr_lba; |
858 | #define LPFC_INJERR_LBA_OFF (sector_t)(-1) | 862 | #define LPFC_INJERR_LBA_OFF (sector_t)(-1) |
859 | 863 | ||
@@ -908,6 +912,8 @@ struct lpfc_hba { | |||
908 | atomic_t fast_event_count; | 912 | atomic_t fast_event_count; |
909 | uint32_t fcoe_eventtag; | 913 | uint32_t fcoe_eventtag; |
910 | uint32_t fcoe_eventtag_at_fcf_scan; | 914 | uint32_t fcoe_eventtag_at_fcf_scan; |
915 | uint32_t fcoe_cvl_eventtag; | ||
916 | uint32_t fcoe_cvl_eventtag_attn; | ||
911 | struct lpfc_fcf fcf; | 917 | struct lpfc_fcf fcf; |
912 | uint8_t fc_map[3]; | 918 | uint8_t fc_map[3]; |
913 | uint8_t valid_vlan; | 919 | uint8_t valid_vlan; |
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 296ad5bc4240..5eb2bc116183 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2011 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2012 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -2575,7 +2575,7 @@ LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff, | |||
2575 | # lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters | 2575 | # lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters |
2576 | # objects that have been registered with the nameserver after login. | 2576 | # objects that have been registered with the nameserver after login. |
2577 | */ | 2577 | */ |
2578 | LPFC_VPORT_ATTR_R(enable_da_id, 0, 0, 1, | 2578 | LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1, |
2579 | "Deregister nameserver objects before LOGO"); | 2579 | "Deregister nameserver objects before LOGO"); |
2580 | 2580 | ||
2581 | /* | 2581 | /* |
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 22e17be04d8a..5bdf2eecb178 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2007-2011 Emulex. All rights reserved. * | 4 | * Copyright (C) 2007-2012 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -1010,25 +1010,35 @@ lpfc_debugfs_dif_err_read(struct file *file, char __user *buf, | |||
1010 | { | 1010 | { |
1011 | struct dentry *dent = file->f_dentry; | 1011 | struct dentry *dent = file->f_dentry; |
1012 | struct lpfc_hba *phba = file->private_data; | 1012 | struct lpfc_hba *phba = file->private_data; |
1013 | char cbuf[16]; | 1013 | char cbuf[32]; |
1014 | uint64_t tmp = 0; | ||
1014 | int cnt = 0; | 1015 | int cnt = 0; |
1015 | 1016 | ||
1016 | if (dent == phba->debug_writeGuard) | 1017 | if (dent == phba->debug_writeGuard) |
1017 | cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wgrd_cnt); | 1018 | cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wgrd_cnt); |
1018 | else if (dent == phba->debug_writeApp) | 1019 | else if (dent == phba->debug_writeApp) |
1019 | cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wapp_cnt); | 1020 | cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wapp_cnt); |
1020 | else if (dent == phba->debug_writeRef) | 1021 | else if (dent == phba->debug_writeRef) |
1021 | cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wref_cnt); | 1022 | cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wref_cnt); |
1022 | else if (dent == phba->debug_readGuard) | 1023 | else if (dent == phba->debug_readGuard) |
1023 | cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rgrd_cnt); | 1024 | cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rgrd_cnt); |
1024 | else if (dent == phba->debug_readApp) | 1025 | else if (dent == phba->debug_readApp) |
1025 | cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rapp_cnt); | 1026 | cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rapp_cnt); |
1026 | else if (dent == phba->debug_readRef) | 1027 | else if (dent == phba->debug_readRef) |
1027 | cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rref_cnt); | 1028 | cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rref_cnt); |
1028 | else if (dent == phba->debug_InjErrLBA) | 1029 | else if (dent == phba->debug_InjErrNPortID) |
1029 | cnt = snprintf(cbuf, 16, "0x%lx\n", | 1030 | cnt = snprintf(cbuf, 32, "0x%06x\n", phba->lpfc_injerr_nportid); |
1030 | (unsigned long) phba->lpfc_injerr_lba); | 1031 | else if (dent == phba->debug_InjErrWWPN) { |
1031 | else | 1032 | memcpy(&tmp, &phba->lpfc_injerr_wwpn, sizeof(struct lpfc_name)); |
1033 | tmp = cpu_to_be64(tmp); | ||
1034 | cnt = snprintf(cbuf, 32, "0x%016llx\n", tmp); | ||
1035 | } else if (dent == phba->debug_InjErrLBA) { | ||
1036 | if (phba->lpfc_injerr_lba == (sector_t)(-1)) | ||
1037 | cnt = snprintf(cbuf, 32, "off\n"); | ||
1038 | else | ||
1039 | cnt = snprintf(cbuf, 32, "0x%llx\n", | ||
1040 | (uint64_t) phba->lpfc_injerr_lba); | ||
1041 | } else | ||
1032 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 1042 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
1033 | "0547 Unknown debugfs error injection entry\n"); | 1043 | "0547 Unknown debugfs error injection entry\n"); |
1034 | 1044 | ||
@@ -1042,7 +1052,7 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf, | |||
1042 | struct dentry *dent = file->f_dentry; | 1052 | struct dentry *dent = file->f_dentry; |
1043 | struct lpfc_hba *phba = file->private_data; | 1053 | struct lpfc_hba *phba = file->private_data; |
1044 | char dstbuf[32]; | 1054 | char dstbuf[32]; |
1045 | unsigned long tmp; | 1055 | uint64_t tmp = 0; |
1046 | int size; | 1056 | int size; |
1047 | 1057 | ||
1048 | memset(dstbuf, 0, 32); | 1058 | memset(dstbuf, 0, 32); |
@@ -1050,7 +1060,12 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf, | |||
1050 | if (copy_from_user(dstbuf, buf, size)) | 1060 | if (copy_from_user(dstbuf, buf, size)) |
1051 | return 0; | 1061 | return 0; |
1052 | 1062 | ||
1053 | if (strict_strtoul(dstbuf, 0, &tmp)) | 1063 | if (dent == phba->debug_InjErrLBA) { |
1064 | if ((buf[0] == 'o') && (buf[1] == 'f') && (buf[2] == 'f')) | ||
1065 | tmp = (uint64_t)(-1); | ||
1066 | } | ||
1067 | |||
1068 | if ((tmp == 0) && (kstrtoull(dstbuf, 0, &tmp))) | ||
1054 | return 0; | 1069 | return 0; |
1055 | 1070 | ||
1056 | if (dent == phba->debug_writeGuard) | 1071 | if (dent == phba->debug_writeGuard) |
@@ -1067,7 +1082,12 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf, | |||
1067 | phba->lpfc_injerr_rref_cnt = (uint32_t)tmp; | 1082 | phba->lpfc_injerr_rref_cnt = (uint32_t)tmp; |
1068 | else if (dent == phba->debug_InjErrLBA) | 1083 | else if (dent == phba->debug_InjErrLBA) |
1069 | phba->lpfc_injerr_lba = (sector_t)tmp; | 1084 | phba->lpfc_injerr_lba = (sector_t)tmp; |
1070 | else | 1085 | else if (dent == phba->debug_InjErrNPortID) |
1086 | phba->lpfc_injerr_nportid = (uint32_t)(tmp & Mask_DID); | ||
1087 | else if (dent == phba->debug_InjErrWWPN) { | ||
1088 | tmp = cpu_to_be64(tmp); | ||
1089 | memcpy(&phba->lpfc_injerr_wwpn, &tmp, sizeof(struct lpfc_name)); | ||
1090 | } else | ||
1071 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 1091 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
1072 | "0548 Unknown debugfs error injection entry\n"); | 1092 | "0548 Unknown debugfs error injection entry\n"); |
1073 | 1093 | ||
@@ -3949,6 +3969,28 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) | |||
3949 | } | 3969 | } |
3950 | phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; | 3970 | phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; |
3951 | 3971 | ||
3972 | snprintf(name, sizeof(name), "InjErrNPortID"); | ||
3973 | phba->debug_InjErrNPortID = | ||
3974 | debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, | ||
3975 | phba->hba_debugfs_root, | ||
3976 | phba, &lpfc_debugfs_op_dif_err); | ||
3977 | if (!phba->debug_InjErrNPortID) { | ||
3978 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, | ||
3979 | "0809 Cannot create debugfs InjErrNPortID\n"); | ||
3980 | goto debug_failed; | ||
3981 | } | ||
3982 | |||
3983 | snprintf(name, sizeof(name), "InjErrWWPN"); | ||
3984 | phba->debug_InjErrWWPN = | ||
3985 | debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, | ||
3986 | phba->hba_debugfs_root, | ||
3987 | phba, &lpfc_debugfs_op_dif_err); | ||
3988 | if (!phba->debug_InjErrWWPN) { | ||
3989 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, | ||
3990 | "0810 Cannot create debugfs InjErrWWPN\n"); | ||
3991 | goto debug_failed; | ||
3992 | } | ||
3993 | |||
3952 | snprintf(name, sizeof(name), "writeGuardInjErr"); | 3994 | snprintf(name, sizeof(name), "writeGuardInjErr"); |
3953 | phba->debug_writeGuard = | 3995 | phba->debug_writeGuard = |
3954 | debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, | 3996 | debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, |
@@ -4321,6 +4363,14 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) | |||
4321 | debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */ | 4363 | debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */ |
4322 | phba->debug_InjErrLBA = NULL; | 4364 | phba->debug_InjErrLBA = NULL; |
4323 | } | 4365 | } |
4366 | if (phba->debug_InjErrNPortID) { /* InjErrNPortID */ | ||
4367 | debugfs_remove(phba->debug_InjErrNPortID); | ||
4368 | phba->debug_InjErrNPortID = NULL; | ||
4369 | } | ||
4370 | if (phba->debug_InjErrWWPN) { | ||
4371 | debugfs_remove(phba->debug_InjErrWWPN); /* InjErrWWPN */ | ||
4372 | phba->debug_InjErrWWPN = NULL; | ||
4373 | } | ||
4324 | if (phba->debug_writeGuard) { | 4374 | if (phba->debug_writeGuard) { |
4325 | debugfs_remove(phba->debug_writeGuard); /* writeGuard */ | 4375 | debugfs_remove(phba->debug_writeGuard); /* writeGuard */ |
4326 | phba->debug_writeGuard = NULL; | 4376 | phba->debug_writeGuard = NULL; |
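The new InjErrWWPN entry stores the value written through debugfs as the big-endian byte layout of struct lpfc_name and converts it back on the read path. A self-contained sketch of that round trip is below; the struct and the to_be64() helper are simplified stand-ins for lpfc_name and cpu_to_be64().

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct wwpn_name { uint8_t wwn[8]; };   /* stand-in for struct lpfc_name */

    /* Stand-in for cpu_to_be64(): returns a value whose bytes are big-endian. */
    uint64_t to_be64(uint64_t v)
    {
            uint8_t b[8];
            uint64_t out;
            for (int i = 0; i < 8; i++)
                    b[i] = (uint8_t)(v >> (56 - 8 * i));
            memcpy(&out, b, sizeof(out));
            return out;
    }

    int main(void)
    {
            struct wwpn_name name;
            uint64_t in = 0x10000000c9abcdefULL;   /* value parsed from the write */
            uint64_t tmp;

            tmp = to_be64(in);                          /* write path: host -> BE bytes */
            memcpy(&name.wwn, &tmp, sizeof(name.wwn));

            memcpy(&tmp, &name.wwn, sizeof(name.wwn));  /* read path reverses it */
            tmp = to_be64(tmp);
            printf("stored wwn[0]=0x%02x, read back 0x%016" PRIx64 "\n",
                   (unsigned)name.wwn[0], tmp);
            return 0;
    }

The non-zero leading byte (the NAA nibble) is what the error-injection path later uses to decide whether a WWPN filter has been configured at all.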
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 8db2fb3b45ec..3407b39e0a3f 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2011 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2012 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -925,9 +925,17 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
925 | * due to new FCF discovery | 925 | * due to new FCF discovery |
926 | */ | 926 | */ |
927 | if ((phba->hba_flag & HBA_FIP_SUPPORT) && | 927 | if ((phba->hba_flag & HBA_FIP_SUPPORT) && |
928 | (phba->fcf.fcf_flag & FCF_DISCOVERY) && | 928 | (phba->fcf.fcf_flag & FCF_DISCOVERY)) { |
929 | !((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && | 929 | if (phba->link_state < LPFC_LINK_UP) |
930 | (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))) { | 930 | goto stop_rr_fcf_flogi; |
931 | if ((phba->fcoe_cvl_eventtag_attn == | ||
932 | phba->fcoe_cvl_eventtag) && | ||
933 | (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && | ||
934 | (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED)) | ||
935 | goto stop_rr_fcf_flogi; | ||
936 | else | ||
937 | phba->fcoe_cvl_eventtag_attn = | ||
938 | phba->fcoe_cvl_eventtag; | ||
931 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, | 939 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, |
932 | "2611 FLOGI failed on FCF (x%x), " | 940 | "2611 FLOGI failed on FCF (x%x), " |
933 | "status:x%x/x%x, tmo:x%x, perform " | 941 | "status:x%x/x%x, tmo:x%x, perform " |
@@ -943,6 +951,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
943 | goto out; | 951 | goto out; |
944 | } | 952 | } |
945 | 953 | ||
954 | stop_rr_fcf_flogi: | ||
946 | /* FLOGI failure */ | 955 | /* FLOGI failure */ |
947 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, | 956 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
948 | "2858 FLOGI failure Status:x%x/x%x TMO:x%x\n", | 957 | "2858 FLOGI failure Status:x%x/x%x TMO:x%x\n", |
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 343d87ba4df8..b507536dc5b5 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2011 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2012 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -2843,7 +2843,14 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
2843 | struct lpfc_vport *vport = mboxq->vport; | 2843 | struct lpfc_vport *vport = mboxq->vport; |
2844 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | 2844 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
2845 | 2845 | ||
2846 | if (mboxq->u.mb.mbxStatus) { | 2846 | /* |
2847 | * VFI not supported for interface type 0, so ignore any mailbox | ||
2848 | * error (except VFI in use) and continue with the discovery. | ||
2849 | */ | ||
2850 | if (mboxq->u.mb.mbxStatus && | ||
2851 | (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != | ||
2852 | LPFC_SLI_INTF_IF_TYPE_0) && | ||
2853 | mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) { | ||
2847 | lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, | 2854 | lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, |
2848 | "2018 REG_VFI mbxStatus error x%x " | 2855 | "2018 REG_VFI mbxStatus error x%x " |
2849 | "HBA state x%x\n", | 2856 | "HBA state x%x\n", |
@@ -5673,14 +5680,13 @@ lpfc_fcf_inuse(struct lpfc_hba *phba) | |||
5673 | ret = 1; | 5680 | ret = 1; |
5674 | spin_unlock_irq(shost->host_lock); | 5681 | spin_unlock_irq(shost->host_lock); |
5675 | goto out; | 5682 | goto out; |
5676 | } else { | 5683 | } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { |
5684 | ret = 1; | ||
5677 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, | 5685 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, |
5678 | "2624 RPI %x DID %x flg %x still " | 5686 | "2624 RPI %x DID %x flag %x " |
5679 | "logged in\n", | 5687 | "still logged in\n", |
5680 | ndlp->nlp_rpi, ndlp->nlp_DID, | 5688 | ndlp->nlp_rpi, ndlp->nlp_DID, |
5681 | ndlp->nlp_flag); | 5689 | ndlp->nlp_flag); |
5682 | if (ndlp->nlp_flag & NLP_RPI_REGISTERED) | ||
5683 | ret = 1; | ||
5684 | } | 5690 | } |
5685 | } | 5691 | } |
5686 | spin_unlock_irq(shost->host_lock); | 5692 | spin_unlock_irq(shost->host_lock); |
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 9e2b9b227e1a..91f09761bd32 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2009 Emulex. All rights reserved. * | 4 | * Copyright (C) 2009-2012 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -338,6 +338,12 @@ struct lpfc_cqe { | |||
338 | #define CQE_CODE_XRI_ABORTED 0x5 | 338 | #define CQE_CODE_XRI_ABORTED 0x5 |
339 | #define CQE_CODE_RECEIVE_V1 0x9 | 339 | #define CQE_CODE_RECEIVE_V1 0x9 |
340 | 340 | ||
341 | /* | ||
342 | * Define mask value for xri_aborted and wcqe completed CQE extended status. | ||
343 | * Currently, extended status is limited to 9 bits (0x0 -> 0x103) . | ||
344 | */ | ||
345 | #define WCQE_PARAM_MASK 0x1FF; | ||
346 | |||
341 | /* completion queue entry for wqe completions */ | 347 | /* completion queue entry for wqe completions */ |
342 | struct lpfc_wcqe_complete { | 348 | struct lpfc_wcqe_complete { |
343 | uint32_t word0; | 349 | uint32_t word0; |
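WCQE_PARAM_MASK above records that the extended status carried in a completion entry only occupies the low 9 bits, so consumers mask the parameter word before interpreting it. A generic masking sketch; the constant and helper names here are illustrative, not lpfc's own accessors.

    #include <stdint.h>

    #define EXT_STATUS_MASK 0x1FF   /* low 9 bits, values 0x000 through 0x1FF */

    /* Strip everything above the 9-bit extended status in a completion word. */
    uint32_t wcqe_ext_status(uint32_t parameter)
    {
            return parameter & EXT_STATUS_MASK;
    }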
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index b38f99f3be32..9598fdcb08ab 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2011 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2012 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -2704,16 +2704,14 @@ lpfc_offline_prep(struct lpfc_hba * phba) | |||
2704 | } | 2704 | } |
2705 | spin_lock_irq(shost->host_lock); | 2705 | spin_lock_irq(shost->host_lock); |
2706 | ndlp->nlp_flag &= ~NLP_NPR_ADISC; | 2706 | ndlp->nlp_flag &= ~NLP_NPR_ADISC; |
2707 | 2707 | spin_unlock_irq(shost->host_lock); | |
2708 | /* | 2708 | /* |
2709 | * Whenever an SLI4 port goes offline, free the | 2709 | * Whenever an SLI4 port goes offline, free the |
2710 | * RPI. A new RPI when the adapter port comes | 2710 | * RPI. Get a new RPI when the adapter port |
2711 | * back online. | 2711 | * comes back online. |
2712 | */ | 2712 | */ |
2713 | if (phba->sli_rev == LPFC_SLI_REV4) | 2713 | if (phba->sli_rev == LPFC_SLI_REV4) |
2714 | lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); | 2714 | lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); |
2715 | |||
2716 | spin_unlock_irq(shost->host_lock); | ||
2717 | lpfc_unreg_rpi(vports[i], ndlp); | 2715 | lpfc_unreg_rpi(vports[i], ndlp); |
2718 | } | 2716 | } |
2719 | } | 2717 | } |
@@ -2786,9 +2784,13 @@ lpfc_scsi_buf_update(struct lpfc_hba *phba) | |||
2786 | 2784 | ||
2787 | spin_lock_irq(&phba->hbalock); | 2785 | spin_lock_irq(&phba->hbalock); |
2788 | spin_lock(&phba->scsi_buf_list_lock); | 2786 | spin_lock(&phba->scsi_buf_list_lock); |
2789 | list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) | 2787 | list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { |
2790 | sb->cur_iocbq.sli4_xritag = | 2788 | sb->cur_iocbq.sli4_xritag = |
2791 | phba->sli4_hba.xri_ids[sb->cur_iocbq.sli4_lxritag]; | 2789 | phba->sli4_hba.xri_ids[sb->cur_iocbq.sli4_lxritag]; |
2790 | set_bit(sb->cur_iocbq.sli4_lxritag, phba->sli4_hba.xri_bmask); | ||
2791 | phba->sli4_hba.max_cfg_param.xri_used++; | ||
2792 | phba->sli4_hba.xri_count++; | ||
2793 | } | ||
2792 | spin_unlock(&phba->scsi_buf_list_lock); | 2794 | spin_unlock(&phba->scsi_buf_list_lock); |
2793 | spin_unlock_irq(&phba->hbalock); | 2795 | spin_unlock_irq(&phba->hbalock); |
2794 | return 0; | 2796 | return 0; |
@@ -3723,6 +3725,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, | |||
3723 | break; | 3725 | break; |
3724 | 3726 | ||
3725 | case LPFC_FIP_EVENT_TYPE_FCF_DEAD: | 3727 | case LPFC_FIP_EVENT_TYPE_FCF_DEAD: |
3728 | phba->fcoe_cvl_eventtag = acqe_fip->event_tag; | ||
3726 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, | 3729 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, |
3727 | "2549 FCF (x%x) disconnected from network, " | 3730 | "2549 FCF (x%x) disconnected from network, " |
3728 | "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag); | 3731 | "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag); |
@@ -3784,6 +3787,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, | |||
3784 | } | 3787 | } |
3785 | break; | 3788 | break; |
3786 | case LPFC_FIP_EVENT_TYPE_CVL: | 3789 | case LPFC_FIP_EVENT_TYPE_CVL: |
3790 | phba->fcoe_cvl_eventtag = acqe_fip->event_tag; | ||
3787 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, | 3791 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, |
3788 | "2718 Clear Virtual Link Received for VPI 0x%x" | 3792 | "2718 Clear Virtual Link Received for VPI 0x%x" |
3789 | " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); | 3793 | " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); |
@@ -5226,8 +5230,7 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) | |||
5226 | * rpi is normalized to a zero base because the physical rpi is | 5230 | * rpi is normalized to a zero base because the physical rpi is |
5227 | * port based. | 5231 | * port based. |
5228 | */ | 5232 | */ |
5229 | curr_rpi_range = phba->sli4_hba.next_rpi - | 5233 | curr_rpi_range = phba->sli4_hba.next_rpi; |
5230 | phba->sli4_hba.max_cfg_param.rpi_base; | ||
5231 | spin_unlock_irq(&phba->hbalock); | 5234 | spin_unlock_irq(&phba->hbalock); |
5232 | 5235 | ||
5233 | /* | 5236 | /* |
@@ -5818,10 +5821,9 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba) | |||
5818 | readl(phba->sli4_hba.u.if_type2. | 5821 | readl(phba->sli4_hba.u.if_type2. |
5819 | ERR2regaddr); | 5822 | ERR2regaddr); |
5820 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 5823 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
5821 | "2888 Port Error Detected " | 5824 | "2888 Unrecoverable port error " |
5822 | "during POST: " | 5825 | "following POST: port status reg " |
5823 | "port status reg 0x%x, " | 5826 | "0x%x, port_smphr reg 0x%x, " |
5824 | "port_smphr reg 0x%x, " | ||
5825 | "error 1=0x%x, error 2=0x%x\n", | 5827 | "error 1=0x%x, error 2=0x%x\n", |
5826 | reg_data.word0, | 5828 | reg_data.word0, |
5827 | portsmphr_reg.word0, | 5829 | portsmphr_reg.word0, |
@@ -6142,7 +6144,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) | |||
6142 | phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; | 6144 | phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; |
6143 | phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; | 6145 | phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; |
6144 | phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; | 6146 | phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; |
6145 | phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; | ||
6146 | phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? | 6147 | phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? |
6147 | (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; | 6148 | (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; |
6148 | phba->max_vports = phba->max_vpi; | 6149 | phba->max_vports = phba->max_vpi; |
@@ -7231,6 +7232,7 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) | |||
7231 | uint32_t rdy_chk, num_resets = 0, reset_again = 0; | 7232 | uint32_t rdy_chk, num_resets = 0, reset_again = 0; |
7232 | union lpfc_sli4_cfg_shdr *shdr; | 7233 | union lpfc_sli4_cfg_shdr *shdr; |
7233 | struct lpfc_register reg_data; | 7234 | struct lpfc_register reg_data; |
7235 | uint16_t devid; | ||
7234 | 7236 | ||
7235 | if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); | 7237 | if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); |
7236 | switch (if_type) { | 7238 | switch (if_type) { |
@@ -7277,7 +7279,9 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) | |||
7277 | LPFC_SLIPORT_INIT_PORT); | 7279 | LPFC_SLIPORT_INIT_PORT); |
7278 | writel(reg_data.word0, phba->sli4_hba.u.if_type2. | 7280 | writel(reg_data.word0, phba->sli4_hba.u.if_type2. |
7279 | CTRLregaddr); | 7281 | CTRLregaddr); |
7280 | 7282 | /* flush */ | |
7283 | pci_read_config_word(phba->pcidev, | ||
7284 | PCI_DEVICE_ID, &devid); | ||
7281 | /* | 7285 | /* |
7282 | * Poll the Port Status Register and wait for RDY for | 7286 | * Poll the Port Status Register and wait for RDY for |
7283 | * up to 10 seconds. If the port doesn't respond, treat | 7287 | * up to 10 seconds. If the port doesn't respond, treat |
@@ -7315,11 +7319,10 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) | |||
7315 | phba->work_status[1] = readl( | 7319 | phba->work_status[1] = readl( |
7316 | phba->sli4_hba.u.if_type2.ERR2regaddr); | 7320 | phba->sli4_hba.u.if_type2.ERR2regaddr); |
7317 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 7321 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
7318 | "2890 Port Error Detected " | 7322 | "2890 Port error detected during port " |
7319 | "during Port Reset: " | 7323 | "reset(%d): port status reg 0x%x, " |
7320 | "port status reg 0x%x, " | ||
7321 | "error 1=0x%x, error 2=0x%x\n", | 7324 | "error 1=0x%x, error 2=0x%x\n", |
7322 | reg_data.word0, | 7325 | num_resets, reg_data.word0, |
7323 | phba->work_status[0], | 7326 | phba->work_status[0], |
7324 | phba->work_status[1]); | 7327 | phba->work_status[1]); |
7325 | rc = -ENODEV; | 7328 | rc = -ENODEV; |
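Besides refreshing each buffer's XRI, the lpfc_scsi_buf_update() hunk now marks the XRI as taken in xri_bmask and bumps the usage counters so the allocator cannot hand the same XRI out twice. A standalone sketch of keeping a bitmap and counter in step; the table layout here is illustrative, not the driver's sli4_hba bookkeeping.

    #include <stdbool.h>

    #define MAX_XRI 256

    struct xri_table {
            unsigned long bmask[MAX_XRI / (8 * sizeof(unsigned long))];
            unsigned int used;
    };

    /* Mark an XRI as in use; returns false if it was already taken. */
    bool xri_reserve(struct xri_table *t, unsigned int xri)
    {
            unsigned long bit = 1UL << (xri % (8 * sizeof(unsigned long)));
            unsigned long *word = &t->bmask[xri / (8 * sizeof(unsigned long))];

            if (*word & bit)
                    return false;
            *word |= bit;
            t->used++;
            return true;
    }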
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 7b6b2aa5795a..15ca2a9a0cdd 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2009 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2012 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -440,11 +440,15 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
440 | spin_unlock_irq(shost->host_lock); | 440 | spin_unlock_irq(shost->host_lock); |
441 | stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD; | 441 | stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD; |
442 | stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; | 442 | stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; |
443 | lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, | 443 | rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, |
444 | ndlp, mbox); | 444 | ndlp, mbox); |
445 | if (rc) | ||
446 | mempool_free(mbox, phba->mbox_mem_pool); | ||
445 | return 1; | 447 | return 1; |
446 | } | 448 | } |
447 | lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox); | 449 | rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox); |
450 | if (rc) | ||
451 | mempool_free(mbox, phba->mbox_mem_pool); | ||
448 | return 1; | 452 | return 1; |
449 | out: | 453 | out: |
450 | stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; | 454 | stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; |
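The lpfc_rcv_plogi() change above frees the mailbox it allocated whenever lpfc_els_rsp_reject()/lpfc_els_rsp_acc() reports failure, since ownership only passes to the response path on success. A generic sketch of that ownership rule, with hypothetical names:

    #include <stdlib.h>

    struct mbox { int cmd; };

    /* Pretend submit: takes ownership of 'm' only when it returns 0. */
    int submit_response(struct mbox *m)
    {
            (void)m;
            return -1;           /* simulate a submission failure */
    }

    int send_plogi_acc(void)
    {
            struct mbox *m = malloc(sizeof(*m));

            if (!m)
                    return -1;
            if (submit_response(m)) {
                    free(m);     /* failure: ownership never transferred */
                    return -1;
            }
            return 0;            /* success: the response path frees it later */
    }

In the driver the failure-side release is mempool_free() back into phba->mbox_mem_pool.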
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index efc055b6bac4..88f3a83dbd2e 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c | |||
@@ -39,8 +39,8 @@ | |||
39 | #include "lpfc_sli4.h" | 39 | #include "lpfc_sli4.h" |
40 | #include "lpfc_nl.h" | 40 | #include "lpfc_nl.h" |
41 | #include "lpfc_disc.h" | 41 | #include "lpfc_disc.h" |
42 | #include "lpfc_scsi.h" | ||
43 | #include "lpfc.h" | 42 | #include "lpfc.h" |
43 | #include "lpfc_scsi.h" | ||
44 | #include "lpfc_logmsg.h" | 44 | #include "lpfc_logmsg.h" |
45 | #include "lpfc_crtn.h" | 45 | #include "lpfc_crtn.h" |
46 | #include "lpfc_vport.h" | 46 | #include "lpfc_vport.h" |
@@ -51,13 +51,19 @@ | |||
51 | int _dump_buf_done; | 51 | int _dump_buf_done; |
52 | 52 | ||
53 | static char *dif_op_str[] = { | 53 | static char *dif_op_str[] = { |
54 | "SCSI_PROT_NORMAL", | 54 | "PROT_NORMAL", |
55 | "SCSI_PROT_READ_INSERT", | 55 | "PROT_READ_INSERT", |
56 | "SCSI_PROT_WRITE_STRIP", | 56 | "PROT_WRITE_STRIP", |
57 | "SCSI_PROT_READ_STRIP", | 57 | "PROT_READ_STRIP", |
58 | "SCSI_PROT_WRITE_INSERT", | 58 | "PROT_WRITE_INSERT", |
59 | "SCSI_PROT_READ_PASS", | 59 | "PROT_READ_PASS", |
60 | "SCSI_PROT_WRITE_PASS", | 60 | "PROT_WRITE_PASS", |
61 | }; | ||
62 | |||
63 | static char *dif_grd_str[] = { | ||
64 | "NO_GUARD", | ||
65 | "DIF_CRC", | ||
66 | "DIX_IP", | ||
61 | }; | 67 | }; |
62 | 68 | ||
63 | struct scsi_dif_tuple { | 69 | struct scsi_dif_tuple { |
@@ -1281,10 +1287,14 @@ lpfc_cmd_blksize(struct scsi_cmnd *sc) | |||
1281 | 1287 | ||
1282 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS | 1288 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
1283 | 1289 | ||
1284 | #define BG_ERR_INIT 1 | 1290 | /* Return if error injection is detected by Initiator */ |
1285 | #define BG_ERR_TGT 2 | 1291 | #define BG_ERR_INIT 0x1 |
1286 | #define BG_ERR_SWAP 3 | 1292 | /* Return if error injection is detected by Target */ |
1287 | #define BG_ERR_CHECK 4 | 1293 | #define BG_ERR_TGT 0x2 |
1294 | /* Return if swapping CSUM<-->CRC is required for error injection */ | ||
1295 | #define BG_ERR_SWAP 0x10 | ||
1296 | /* Return if disabling Guard/Ref/App checking is required for error injection */ | ||
1297 | #define BG_ERR_CHECK 0x20 | ||
1288 | 1298 | ||
1289 | /** | 1299 | /** |
1290 | * lpfc_bg_err_inject - Determine if we should inject an error | 1300 | * lpfc_bg_err_inject - Determine if we should inject an error |
@@ -1294,10 +1304,7 @@ lpfc_cmd_blksize(struct scsi_cmnd *sc) | |||
1294 | * @apptag: (out) BlockGuard application tag for transmitted data | 1304 | * @apptag: (out) BlockGuard application tag for transmitted data |
1295 | * @new_guard (in) Value to replace CRC with if needed | 1305 | * @new_guard (in) Value to replace CRC with if needed |
1296 | * | 1306 | * |
1297 | * Returns (1) if error injection is detected by Initiator | 1307 | * Returns BG_ERR_* bit mask or 0 if request ignored |
1298 | * Returns (2) if error injection is detected by Target | ||
1299 | * Returns (3) if swapping CSUM->CRC is required for error injection | ||
1300 | * Returns (4) disabling Guard/Ref/App checking is required for error injection | ||
1301 | **/ | 1308 | **/ |
1302 | static int | 1309 | static int |
1303 | lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, | 1310 | lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, |
@@ -1305,7 +1312,10 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
1305 | { | 1312 | { |
1306 | struct scatterlist *sgpe; /* s/g prot entry */ | 1313 | struct scatterlist *sgpe; /* s/g prot entry */ |
1307 | struct scatterlist *sgde; /* s/g data entry */ | 1314 | struct scatterlist *sgde; /* s/g data entry */ |
1315 | struct lpfc_scsi_buf *lpfc_cmd = NULL; | ||
1308 | struct scsi_dif_tuple *src = NULL; | 1316 | struct scsi_dif_tuple *src = NULL; |
1317 | struct lpfc_nodelist *ndlp; | ||
1318 | struct lpfc_rport_data *rdata; | ||
1309 | uint32_t op = scsi_get_prot_op(sc); | 1319 | uint32_t op = scsi_get_prot_op(sc); |
1310 | uint32_t blksize; | 1320 | uint32_t blksize; |
1311 | uint32_t numblks; | 1321 | uint32_t numblks; |
@@ -1318,8 +1328,9 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
1318 | 1328 | ||
1319 | sgpe = scsi_prot_sglist(sc); | 1329 | sgpe = scsi_prot_sglist(sc); |
1320 | sgde = scsi_sglist(sc); | 1330 | sgde = scsi_sglist(sc); |
1321 | |||
1322 | lba = scsi_get_lba(sc); | 1331 | lba = scsi_get_lba(sc); |
1332 | |||
1333 | /* First check if we need to match the LBA */ | ||
1323 | if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) { | 1334 | if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) { |
1324 | blksize = lpfc_cmd_blksize(sc); | 1335 | blksize = lpfc_cmd_blksize(sc); |
1325 | numblks = (scsi_bufflen(sc) + blksize - 1) / blksize; | 1336 | numblks = (scsi_bufflen(sc) + blksize - 1) / blksize; |
@@ -1334,66 +1345,123 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
1334 | sizeof(struct scsi_dif_tuple); | 1345 | sizeof(struct scsi_dif_tuple); |
1335 | if (numblks < blockoff) | 1346 | if (numblks < blockoff) |
1336 | blockoff = numblks; | 1347 | blockoff = numblks; |
1337 | src = (struct scsi_dif_tuple *)sg_virt(sgpe); | ||
1338 | src += blockoff; | ||
1339 | } | 1348 | } |
1340 | } | 1349 | } |
1341 | 1350 | ||
1351 | /* Next check if we need to match the remote NPortID or WWPN */ | ||
1352 | rdata = sc->device->hostdata; | ||
1353 | if (rdata && rdata->pnode) { | ||
1354 | ndlp = rdata->pnode; | ||
1355 | |||
1356 | /* Make sure we have the right NPortID if one is specified */ | ||
1357 | if (phba->lpfc_injerr_nportid && | ||
1358 | (phba->lpfc_injerr_nportid != ndlp->nlp_DID)) | ||
1359 | return 0; | ||
1360 | |||
1361 | /* | ||
1362 | * Make sure we have the right WWPN if one is specified. | ||
1363 | * wwn[0] should be a non-zero NAA in a good WWPN. | ||
1364 | */ | ||
1365 | if (phba->lpfc_injerr_wwpn.u.wwn[0] && | ||
1366 | (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn, | ||
1367 | sizeof(struct lpfc_name)) != 0)) | ||
1368 | return 0; | ||
1369 | } | ||
1370 | |||
1371 | /* Setup a ptr to the protection data if the SCSI host provides it */ | ||
1372 | if (sgpe) { | ||
1373 | src = (struct scsi_dif_tuple *)sg_virt(sgpe); | ||
1374 | src += blockoff; | ||
1375 | lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble; | ||
1376 | } | ||
1377 | |||
1342 | /* Should we change the Reference Tag */ | 1378 | /* Should we change the Reference Tag */ |
1343 | if (reftag) { | 1379 | if (reftag) { |
1344 | if (phba->lpfc_injerr_wref_cnt) { | 1380 | if (phba->lpfc_injerr_wref_cnt) { |
1345 | switch (op) { | 1381 | switch (op) { |
1346 | case SCSI_PROT_WRITE_PASS: | 1382 | case SCSI_PROT_WRITE_PASS: |
1347 | if (blockoff && src) { | 1383 | if (src) { |
1348 | /* Insert error in middle of the IO */ | 1384 | /* |
1385 | * For WRITE_PASS, force the error | ||
1386 | * to be sent on the wire. It should | ||
1387 | * be detected by the Target. | ||
1388 | * If blockoff != 0 error will be | ||
1389 | * inserted in middle of the IO. | ||
1390 | */ | ||
1349 | 1391 | ||
1350 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, | 1392 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
1351 | "9076 BLKGRD: Injecting reftag error: " | 1393 | "9076 BLKGRD: Injecting reftag error: " |
1352 | "write lba x%lx + x%x oldrefTag x%x\n", | 1394 | "write lba x%lx + x%x oldrefTag x%x\n", |
1353 | (unsigned long)lba, blockoff, | 1395 | (unsigned long)lba, blockoff, |
1354 | src->ref_tag); | 1396 | be32_to_cpu(src->ref_tag)); |
1355 | 1397 | ||
1356 | /* | 1398 | /* |
1357 | * NOTE, this will change ref tag in | 1399 | * Save the old ref_tag so we can |
1358 | * the memory location forever! | 1400 | * restore it on completion. |
1359 | */ | 1401 | */ |
1360 | src->ref_tag = 0xDEADBEEF; | 1402 | if (lpfc_cmd) { |
1403 | lpfc_cmd->prot_data_type = | ||
1404 | LPFC_INJERR_REFTAG; | ||
1405 | lpfc_cmd->prot_data_segment = | ||
1406 | src; | ||
1407 | lpfc_cmd->prot_data = | ||
1408 | src->ref_tag; | ||
1409 | } | ||
1410 | src->ref_tag = cpu_to_be32(0xDEADBEEF); | ||
1361 | phba->lpfc_injerr_wref_cnt--; | 1411 | phba->lpfc_injerr_wref_cnt--; |
1362 | phba->lpfc_injerr_lba = | 1412 | if (phba->lpfc_injerr_wref_cnt == 0) { |
1363 | LPFC_INJERR_LBA_OFF; | 1413 | phba->lpfc_injerr_nportid = 0; |
1364 | rc = BG_ERR_CHECK; | 1414 | phba->lpfc_injerr_lba = |
1415 | LPFC_INJERR_LBA_OFF; | ||
1416 | memset(&phba->lpfc_injerr_wwpn, | ||
1417 | 0, sizeof(struct lpfc_name)); | ||
1418 | } | ||
1419 | rc = BG_ERR_TGT | BG_ERR_CHECK; | ||
1420 | |||
1365 | break; | 1421 | break; |
1366 | } | 1422 | } |
1367 | /* Drop thru */ | 1423 | /* Drop thru */ |
1368 | case SCSI_PROT_WRITE_STRIP: | 1424 | case SCSI_PROT_WRITE_INSERT: |
1369 | /* | 1425 | /* |
1370 | * For WRITE_STRIP and WRITE_PASS, | 1426 | * For WRITE_INSERT, force the error |
1371 | * force the error on data | 1427 | * to be sent on the wire. It should be |
1372 | * being copied from SLI-Host to SLI-Port. | 1428 | * detected by the Target. |
1373 | */ | 1429 | */ |
1430 | /* DEADBEEF will be the reftag on the wire */ | ||
1374 | *reftag = 0xDEADBEEF; | 1431 | *reftag = 0xDEADBEEF; |
1375 | phba->lpfc_injerr_wref_cnt--; | 1432 | phba->lpfc_injerr_wref_cnt--; |
1376 | phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; | 1433 | if (phba->lpfc_injerr_wref_cnt == 0) { |
1377 | rc = BG_ERR_INIT; | 1434 | phba->lpfc_injerr_nportid = 0; |
1435 | phba->lpfc_injerr_lba = | ||
1436 | LPFC_INJERR_LBA_OFF; | ||
1437 | memset(&phba->lpfc_injerr_wwpn, | ||
1438 | 0, sizeof(struct lpfc_name)); | ||
1439 | } | ||
1440 | rc = BG_ERR_TGT | BG_ERR_CHECK; | ||
1378 | 1441 | ||
1379 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, | 1442 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
1380 | "9077 BLKGRD: Injecting reftag error: " | 1443 | "9078 BLKGRD: Injecting reftag error: " |
1381 | "write lba x%lx\n", (unsigned long)lba); | 1444 | "write lba x%lx\n", (unsigned long)lba); |
1382 | break; | 1445 | break; |
1383 | case SCSI_PROT_WRITE_INSERT: | 1446 | case SCSI_PROT_WRITE_STRIP: |
1384 | /* | 1447 | /* |
1385 | * For WRITE_INSERT, force the | 1448 | * For WRITE_STRIP and WRITE_PASS, |
1386 | * error to be sent on the wire. It should be | 1449 | * force the error on data |
1387 | * detected by the Target. | 1450 | * being copied from SLI-Host to SLI-Port. |
1388 | */ | 1451 | */ |
1389 | /* DEADBEEF will be the reftag on the wire */ | ||
1390 | *reftag = 0xDEADBEEF; | 1452 | *reftag = 0xDEADBEEF; |
1391 | phba->lpfc_injerr_wref_cnt--; | 1453 | phba->lpfc_injerr_wref_cnt--; |
1392 | phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; | 1454 | if (phba->lpfc_injerr_wref_cnt == 0) { |
1393 | rc = BG_ERR_TGT; | 1455 | phba->lpfc_injerr_nportid = 0; |
1456 | phba->lpfc_injerr_lba = | ||
1457 | LPFC_INJERR_LBA_OFF; | ||
1458 | memset(&phba->lpfc_injerr_wwpn, | ||
1459 | 0, sizeof(struct lpfc_name)); | ||
1460 | } | ||
1461 | rc = BG_ERR_INIT; | ||
1394 | 1462 | ||
1395 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, | 1463 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
1396 | "9078 BLKGRD: Injecting reftag error: " | 1464 | "9077 BLKGRD: Injecting reftag error: " |
1397 | "write lba x%lx\n", (unsigned long)lba); | 1465 | "write lba x%lx\n", (unsigned long)lba); |
1398 | break; | 1466 | break; |
1399 | } | 1467 | } |
@@ -1401,11 +1469,6 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
1401 | if (phba->lpfc_injerr_rref_cnt) { | 1469 | if (phba->lpfc_injerr_rref_cnt) { |
1402 | switch (op) { | 1470 | switch (op) { |
1403 | case SCSI_PROT_READ_INSERT: | 1471 | case SCSI_PROT_READ_INSERT: |
1404 | /* | ||
1405 | * For READ_INSERT, it doesn't make sense | ||
1406 | * to change the reftag. | ||
1407 | */ | ||
1408 | break; | ||
1409 | case SCSI_PROT_READ_STRIP: | 1472 | case SCSI_PROT_READ_STRIP: |
1410 | case SCSI_PROT_READ_PASS: | 1473 | case SCSI_PROT_READ_PASS: |
1411 | /* | 1474 | /* |
@@ -1415,7 +1478,13 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
1415 | */ | 1478 | */ |
1416 | *reftag = 0xDEADBEEF; | 1479 | *reftag = 0xDEADBEEF; |
1417 | phba->lpfc_injerr_rref_cnt--; | 1480 | phba->lpfc_injerr_rref_cnt--; |
1418 | phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; | 1481 | if (phba->lpfc_injerr_rref_cnt == 0) { |
1482 | phba->lpfc_injerr_nportid = 0; | ||
1483 | phba->lpfc_injerr_lba = | ||
1484 | LPFC_INJERR_LBA_OFF; | ||
1485 | memset(&phba->lpfc_injerr_wwpn, | ||
1486 | 0, sizeof(struct lpfc_name)); | ||
1487 | } | ||
1419 | rc = BG_ERR_INIT; | 1488 | rc = BG_ERR_INIT; |
1420 | 1489 | ||
1421 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, | 1490 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
@@ -1431,56 +1500,87 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
1431 | if (phba->lpfc_injerr_wapp_cnt) { | 1500 | if (phba->lpfc_injerr_wapp_cnt) { |
1432 | switch (op) { | 1501 | switch (op) { |
1433 | case SCSI_PROT_WRITE_PASS: | 1502 | case SCSI_PROT_WRITE_PASS: |
1434 | if (blockoff && src) { | 1503 | if (src) { |
1435 | /* Insert error in middle of the IO */ | 1504 | /* |
1505 | * For WRITE_PASS, force the error | ||
1506 | * to be sent on the wire. It should | ||
1507 | * be detected by the Target. | ||
1508 | * If blockoff != 0 error will be | ||
1509 | * inserted in middle of the IO. | ||
1510 | */ | ||
1436 | 1511 | ||
1437 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, | 1512 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
1438 | "9080 BLKGRD: Injecting apptag error: " | 1513 | "9080 BLKGRD: Injecting apptag error: " |
1439 | "write lba x%lx + x%x oldappTag x%x\n", | 1514 | "write lba x%lx + x%x oldappTag x%x\n", |
1440 | (unsigned long)lba, blockoff, | 1515 | (unsigned long)lba, blockoff, |
1441 | src->app_tag); | 1516 | be16_to_cpu(src->app_tag)); |
1442 | 1517 | ||
1443 | /* | 1518 | /* |
1444 | * NOTE, this will change app tag in | 1519 | * Save the old app_tag so we can |
1445 | * the memory location forever! | 1520 | * restore it on completion. |
1446 | */ | 1521 | */ |
1447 | src->app_tag = 0xDEAD; | 1522 | if (lpfc_cmd) { |
1523 | lpfc_cmd->prot_data_type = | ||
1524 | LPFC_INJERR_APPTAG; | ||
1525 | lpfc_cmd->prot_data_segment = | ||
1526 | src; | ||
1527 | lpfc_cmd->prot_data = | ||
1528 | src->app_tag; | ||
1529 | } | ||
1530 | src->app_tag = cpu_to_be16(0xDEAD); | ||
1448 | phba->lpfc_injerr_wapp_cnt--; | 1531 | phba->lpfc_injerr_wapp_cnt--; |
1449 | phba->lpfc_injerr_lba = | 1532 | if (phba->lpfc_injerr_wapp_cnt == 0) { |
1450 | LPFC_INJERR_LBA_OFF; | 1533 | phba->lpfc_injerr_nportid = 0; |
1451 | rc = BG_ERR_CHECK; | 1534 | phba->lpfc_injerr_lba = |
1535 | LPFC_INJERR_LBA_OFF; | ||
1536 | memset(&phba->lpfc_injerr_wwpn, | ||
1537 | 0, sizeof(struct lpfc_name)); | ||
1538 | } | ||
1539 | rc = BG_ERR_TGT | BG_ERR_CHECK; | ||
1452 | break; | 1540 | break; |
1453 | } | 1541 | } |
1454 | /* Drop thru */ | 1542 | /* Drop thru */ |
1455 | case SCSI_PROT_WRITE_STRIP: | 1543 | case SCSI_PROT_WRITE_INSERT: |
1456 | /* | 1544 | /* |
1457 | * For WRITE_STRIP and WRITE_PASS, | 1545 | * For WRITE_INSERT, force the |
1458 | * force the error on data | 1546 | * error to be sent on the wire. It should be |
1459 | * being copied from SLI-Host to SLI-Port. | 1547 | * detected by the Target. |
1460 | */ | 1548 | */ |
1549 | /* DEAD will be the apptag on the wire */ | ||
1461 | *apptag = 0xDEAD; | 1550 | *apptag = 0xDEAD; |
1462 | phba->lpfc_injerr_wapp_cnt--; | 1551 | phba->lpfc_injerr_wapp_cnt--; |
1463 | phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; | 1552 | if (phba->lpfc_injerr_wapp_cnt == 0) { |
1464 | rc = BG_ERR_INIT; | 1553 | phba->lpfc_injerr_nportid = 0; |
1554 | phba->lpfc_injerr_lba = | ||
1555 | LPFC_INJERR_LBA_OFF; | ||
1556 | memset(&phba->lpfc_injerr_wwpn, | ||
1557 | 0, sizeof(struct lpfc_name)); | ||
1558 | } | ||
1559 | rc = BG_ERR_TGT | BG_ERR_CHECK; | ||
1465 | 1560 | ||
1466 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, | 1561 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
1467 | "0812 BLKGRD: Injecting apptag error: " | 1562 | "0813 BLKGRD: Injecting apptag error: " |
1468 | "write lba x%lx\n", (unsigned long)lba); | 1563 | "write lba x%lx\n", (unsigned long)lba); |
1469 | break; | 1564 | break; |
1470 | case SCSI_PROT_WRITE_INSERT: | 1565 | case SCSI_PROT_WRITE_STRIP: |
1471 | /* | 1566 | /* |
1472 | * For WRITE_INSERT, force the | 1567 | * For WRITE_STRIP and WRITE_PASS, |
1473 | * error to be sent on the wire. It should be | 1568 | * force the error on data |
1474 | * detected by the Target. | 1569 | * being copied from SLI-Host to SLI-Port. |
1475 | */ | 1570 | */ |
1476 | /* DEAD will be the apptag on the wire */ | ||
1477 | *apptag = 0xDEAD; | 1571 | *apptag = 0xDEAD; |
1478 | phba->lpfc_injerr_wapp_cnt--; | 1572 | phba->lpfc_injerr_wapp_cnt--; |
1479 | phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; | 1573 | if (phba->lpfc_injerr_wapp_cnt == 0) { |
1480 | rc = BG_ERR_TGT; | 1574 | phba->lpfc_injerr_nportid = 0; |
1575 | phba->lpfc_injerr_lba = | ||
1576 | LPFC_INJERR_LBA_OFF; | ||
1577 | memset(&phba->lpfc_injerr_wwpn, | ||
1578 | 0, sizeof(struct lpfc_name)); | ||
1579 | } | ||
1580 | rc = BG_ERR_INIT; | ||
1481 | 1581 | ||
1482 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, | 1582 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
1483 | "0813 BLKGRD: Injecting apptag error: " | 1583 | "0812 BLKGRD: Injecting apptag error: " |
1484 | "write lba x%lx\n", (unsigned long)lba); | 1584 | "write lba x%lx\n", (unsigned long)lba); |
1485 | break; | 1585 | break; |
1486 | } | 1586 | } |
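The app-tag path mirrors the ref-tag path, and now treats the tuple field as big-endian: the old value is logged through be16_to_cpu() and the injected value written with cpu_to_be16(). A minimal sketch of the save/poke/restore idea, assuming the same src tuple pointer as above:

    /* Sketch: corrupt the app tag in host memory but remember the original */
    __be16 saved = src->app_tag;            /* saved verbatim, still big-endian */
    src->app_tag = cpu_to_be16(0xDEAD);     /* the bad tag goes out on the wire */
    /* ... the target should fail the write with an app-tag check error ... */
    src->app_tag = saved;                   /* put the caller's data back */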
@@ -1488,11 +1588,6 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
1488 | if (phba->lpfc_injerr_rapp_cnt) { | 1588 | if (phba->lpfc_injerr_rapp_cnt) { |
1489 | switch (op) { | 1589 | switch (op) { |
1490 | case SCSI_PROT_READ_INSERT: | 1590 | case SCSI_PROT_READ_INSERT: |
1491 | /* | ||
1492 | * For READ_INSERT, it doesn't make sense | ||
1493 | * to change the apptag. | ||
1494 | */ | ||
1495 | break; | ||
1496 | case SCSI_PROT_READ_STRIP: | 1591 | case SCSI_PROT_READ_STRIP: |
1497 | case SCSI_PROT_READ_PASS: | 1592 | case SCSI_PROT_READ_PASS: |
1498 | /* | 1593 | /* |
@@ -1502,7 +1597,13 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
1502 | */ | 1597 | */ |
1503 | *apptag = 0xDEAD; | 1598 | *apptag = 0xDEAD; |
1504 | phba->lpfc_injerr_rapp_cnt--; | 1599 | phba->lpfc_injerr_rapp_cnt--; |
1505 | phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; | 1600 | if (phba->lpfc_injerr_rapp_cnt == 0) { |
1601 | phba->lpfc_injerr_nportid = 0; | ||
1602 | phba->lpfc_injerr_lba = | ||
1603 | LPFC_INJERR_LBA_OFF; | ||
1604 | memset(&phba->lpfc_injerr_wwpn, | ||
1605 | 0, sizeof(struct lpfc_name)); | ||
1606 | } | ||
1506 | rc = BG_ERR_INIT; | 1607 | rc = BG_ERR_INIT; |
1507 | 1608 | ||
1508 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, | 1609 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
@@ -1519,57 +1620,51 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
1519 | if (phba->lpfc_injerr_wgrd_cnt) { | 1620 | if (phba->lpfc_injerr_wgrd_cnt) { |
1520 | switch (op) { | 1621 | switch (op) { |
1521 | case SCSI_PROT_WRITE_PASS: | 1622 | case SCSI_PROT_WRITE_PASS: |
1522 | if (blockoff && src) { | 1623 | rc = BG_ERR_CHECK; |
1523 | /* Insert error in middle of the IO */ | ||
1524 | |||
1525 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, | ||
1526 | "0815 BLKGRD: Injecting guard error: " | ||
1527 | "write lba x%lx + x%x oldgrdTag x%x\n", | ||
1528 | (unsigned long)lba, blockoff, | ||
1529 | src->guard_tag); | ||
1530 | |||
1531 | /* | ||
1532 | * NOTE, this will change guard tag in | ||
1533 | * the memory location forever! | ||
1534 | */ | ||
1535 | src->guard_tag = 0xDEAD; | ||
1536 | phba->lpfc_injerr_wgrd_cnt--; | ||
1537 | phba->lpfc_injerr_lba = | ||
1538 | LPFC_INJERR_LBA_OFF; | ||
1539 | rc = BG_ERR_CHECK; | ||
1540 | break; | ||
1541 | } | ||
1542 | /* Drop thru */ | 1624 | /* Drop thru */ |
1543 | case SCSI_PROT_WRITE_STRIP: | 1625 | |
1626 | case SCSI_PROT_WRITE_INSERT: | ||
1544 | /* | 1627 | /* |
1545 | * For WRITE_STRIP and WRITE_PASS, | 1628 | * For WRITE_INSERT, force the |
1546 | * force the error on data | 1629 | * error to be sent on the wire. It should be |
1547 | * being copied from SLI-Host to SLI-Port. | 1630 | * detected by the Target. |
1548 | */ | 1631 | */ |
1549 | phba->lpfc_injerr_wgrd_cnt--; | 1632 | phba->lpfc_injerr_wgrd_cnt--; |
1550 | phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; | 1633 | if (phba->lpfc_injerr_wgrd_cnt == 0) { |
1634 | phba->lpfc_injerr_nportid = 0; | ||
1635 | phba->lpfc_injerr_lba = | ||
1636 | LPFC_INJERR_LBA_OFF; | ||
1637 | memset(&phba->lpfc_injerr_wwpn, | ||
1638 | 0, sizeof(struct lpfc_name)); | ||
1639 | } | ||
1551 | 1640 | ||
1552 | rc = BG_ERR_SWAP; | 1641 | rc |= BG_ERR_TGT | BG_ERR_SWAP; |
1553 | /* Signals the caller to swap CRC->CSUM */ | 1642 | /* Signals the caller to swap CRC->CSUM */ |
1554 | 1643 | ||
1555 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, | 1644 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
1556 | "0816 BLKGRD: Injecting guard error: " | 1645 | "0817 BLKGRD: Injecting guard error: " |
1557 | "write lba x%lx\n", (unsigned long)lba); | 1646 | "write lba x%lx\n", (unsigned long)lba); |
1558 | break; | 1647 | break; |
1559 | case SCSI_PROT_WRITE_INSERT: | 1648 | case SCSI_PROT_WRITE_STRIP: |
1560 | /* | 1649 | /* |
1561 | * For WRITE_INSERT, force the | 1650 | * For WRITE_STRIP and WRITE_PASS, |
1562 | * error to be sent on the wire. It should be | 1651 | * force the error on data |
1563 | * detected by the Target. | 1652 | * being copied from SLI-Host to SLI-Port. |
1564 | */ | 1653 | */ |
1565 | phba->lpfc_injerr_wgrd_cnt--; | 1654 | phba->lpfc_injerr_wgrd_cnt--; |
1566 | phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; | 1655 | if (phba->lpfc_injerr_wgrd_cnt == 0) { |
1656 | phba->lpfc_injerr_nportid = 0; | ||
1657 | phba->lpfc_injerr_lba = | ||
1658 | LPFC_INJERR_LBA_OFF; | ||
1659 | memset(&phba->lpfc_injerr_wwpn, | ||
1660 | 0, sizeof(struct lpfc_name)); | ||
1661 | } | ||
1567 | 1662 | ||
1568 | rc = BG_ERR_SWAP; | 1663 | rc = BG_ERR_INIT | BG_ERR_SWAP; |
1569 | /* Signals the caller to swap CRC->CSUM */ | 1664 | /* Signals the caller to swap CRC->CSUM */ |
1570 | 1665 | ||
1571 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, | 1666 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
1572 | "0817 BLKGRD: Injecting guard error: " | 1667 | "0816 BLKGRD: Injecting guard error: " |
1573 | "write lba x%lx\n", (unsigned long)lba); | 1668 | "write lba x%lx\n", (unsigned long)lba); |
1574 | break; | 1669 | break; |
1575 | } | 1670 | } |
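Every arm in these switches repeats the same bookkeeping: decrement the per-type injection count and, only when it reaches zero, disarm the LBA, N_Port ID and WWPN filters so that a multi-shot injection stays active across commands. The open-coded pattern, shown once as an illustrative helper (the driver does not define such a function):

    static void lpfc_injerr_consume(struct lpfc_hba *phba, uint32_t *cnt)
    {
            (*cnt)--;                       /* one injection used up */
            if (*cnt == 0) {
                    /* last shot fired: clear the targeting filters */
                    phba->lpfc_injerr_nportid = 0;
                    phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
                    memset(&phba->lpfc_injerr_wwpn, 0,
                           sizeof(struct lpfc_name));
            }
    }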
@@ -1577,11 +1672,6 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
1577 | if (phba->lpfc_injerr_rgrd_cnt) { | 1672 | if (phba->lpfc_injerr_rgrd_cnt) { |
1578 | switch (op) { | 1673 | switch (op) { |
1579 | case SCSI_PROT_READ_INSERT: | 1674 | case SCSI_PROT_READ_INSERT: |
1580 | /* | ||
1581 | * For READ_INSERT, it doesn't make sense | ||
1582 | * to change the guard tag. | ||
1583 | */ | ||
1584 | break; | ||
1585 | case SCSI_PROT_READ_STRIP: | 1675 | case SCSI_PROT_READ_STRIP: |
1586 | case SCSI_PROT_READ_PASS: | 1676 | case SCSI_PROT_READ_PASS: |
1587 | /* | 1677 | /* |
@@ -1589,11 +1679,16 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
1589 | * error on data being read off the wire. It | 1679 | * error on data being read off the wire. It |
1590 | * should force an IO error to the driver. | 1680 | * should force an IO error to the driver. |
1591 | */ | 1681 | */ |
1592 | *apptag = 0xDEAD; | ||
1593 | phba->lpfc_injerr_rgrd_cnt--; | 1682 | phba->lpfc_injerr_rgrd_cnt--; |
1594 | phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; | 1683 | if (phba->lpfc_injerr_rgrd_cnt == 0) { |
1684 | phba->lpfc_injerr_nportid = 0; | ||
1685 | phba->lpfc_injerr_lba = | ||
1686 | LPFC_INJERR_LBA_OFF; | ||
1687 | memset(&phba->lpfc_injerr_wwpn, | ||
1688 | 0, sizeof(struct lpfc_name)); | ||
1689 | } | ||
1595 | 1690 | ||
1596 | rc = BG_ERR_SWAP; | 1691 | rc = BG_ERR_INIT | BG_ERR_SWAP; |
1597 | /* Signals the caller to swap CRC->CSUM */ | 1692 | /* Signals the caller to swap CRC->CSUM */ |
1598 | 1693 | ||
1599 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, | 1694 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
@@ -1629,20 +1724,20 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
1629 | switch (scsi_get_prot_op(sc)) { | 1724 | switch (scsi_get_prot_op(sc)) { |
1630 | case SCSI_PROT_READ_INSERT: | 1725 | case SCSI_PROT_READ_INSERT: |
1631 | case SCSI_PROT_WRITE_STRIP: | 1726 | case SCSI_PROT_WRITE_STRIP: |
1632 | *txop = BG_OP_IN_CSUM_OUT_NODIF; | ||
1633 | *rxop = BG_OP_IN_NODIF_OUT_CSUM; | 1727 | *rxop = BG_OP_IN_NODIF_OUT_CSUM; |
1728 | *txop = BG_OP_IN_CSUM_OUT_NODIF; | ||
1634 | break; | 1729 | break; |
1635 | 1730 | ||
1636 | case SCSI_PROT_READ_STRIP: | 1731 | case SCSI_PROT_READ_STRIP: |
1637 | case SCSI_PROT_WRITE_INSERT: | 1732 | case SCSI_PROT_WRITE_INSERT: |
1638 | *txop = BG_OP_IN_NODIF_OUT_CRC; | ||
1639 | *rxop = BG_OP_IN_CRC_OUT_NODIF; | 1733 | *rxop = BG_OP_IN_CRC_OUT_NODIF; |
1734 | *txop = BG_OP_IN_NODIF_OUT_CRC; | ||
1640 | break; | 1735 | break; |
1641 | 1736 | ||
1642 | case SCSI_PROT_READ_PASS: | 1737 | case SCSI_PROT_READ_PASS: |
1643 | case SCSI_PROT_WRITE_PASS: | 1738 | case SCSI_PROT_WRITE_PASS: |
1644 | *txop = BG_OP_IN_CSUM_OUT_CRC; | ||
1645 | *rxop = BG_OP_IN_CRC_OUT_CSUM; | 1739 | *rxop = BG_OP_IN_CRC_OUT_CSUM; |
1740 | *txop = BG_OP_IN_CSUM_OUT_CRC; | ||
1646 | break; | 1741 | break; |
1647 | 1742 | ||
1648 | case SCSI_PROT_NORMAL: | 1743 | case SCSI_PROT_NORMAL: |
@@ -1658,20 +1753,20 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
1658 | switch (scsi_get_prot_op(sc)) { | 1753 | switch (scsi_get_prot_op(sc)) { |
1659 | case SCSI_PROT_READ_STRIP: | 1754 | case SCSI_PROT_READ_STRIP: |
1660 | case SCSI_PROT_WRITE_INSERT: | 1755 | case SCSI_PROT_WRITE_INSERT: |
1661 | *txop = BG_OP_IN_NODIF_OUT_CRC; | ||
1662 | *rxop = BG_OP_IN_CRC_OUT_NODIF; | 1756 | *rxop = BG_OP_IN_CRC_OUT_NODIF; |
1757 | *txop = BG_OP_IN_NODIF_OUT_CRC; | ||
1663 | break; | 1758 | break; |
1664 | 1759 | ||
1665 | case SCSI_PROT_READ_PASS: | 1760 | case SCSI_PROT_READ_PASS: |
1666 | case SCSI_PROT_WRITE_PASS: | 1761 | case SCSI_PROT_WRITE_PASS: |
1667 | *txop = BG_OP_IN_CRC_OUT_CRC; | ||
1668 | *rxop = BG_OP_IN_CRC_OUT_CRC; | 1762 | *rxop = BG_OP_IN_CRC_OUT_CRC; |
1763 | *txop = BG_OP_IN_CRC_OUT_CRC; | ||
1669 | break; | 1764 | break; |
1670 | 1765 | ||
1671 | case SCSI_PROT_READ_INSERT: | 1766 | case SCSI_PROT_READ_INSERT: |
1672 | case SCSI_PROT_WRITE_STRIP: | 1767 | case SCSI_PROT_WRITE_STRIP: |
1673 | *txop = BG_OP_IN_CRC_OUT_NODIF; | ||
1674 | *rxop = BG_OP_IN_NODIF_OUT_CRC; | 1768 | *rxop = BG_OP_IN_NODIF_OUT_CRC; |
1769 | *txop = BG_OP_IN_CRC_OUT_NODIF; | ||
1675 | break; | 1770 | break; |
1676 | 1771 | ||
1677 | case SCSI_PROT_NORMAL: | 1772 | case SCSI_PROT_NORMAL: |
@@ -1710,20 +1805,20 @@ lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
1710 | switch (scsi_get_prot_op(sc)) { | 1805 | switch (scsi_get_prot_op(sc)) { |
1711 | case SCSI_PROT_READ_INSERT: | 1806 | case SCSI_PROT_READ_INSERT: |
1712 | case SCSI_PROT_WRITE_STRIP: | 1807 | case SCSI_PROT_WRITE_STRIP: |
1713 | *txop = BG_OP_IN_CRC_OUT_NODIF; | ||
1714 | *rxop = BG_OP_IN_NODIF_OUT_CRC; | 1808 | *rxop = BG_OP_IN_NODIF_OUT_CRC; |
1809 | *txop = BG_OP_IN_CRC_OUT_NODIF; | ||
1715 | break; | 1810 | break; |
1716 | 1811 | ||
1717 | case SCSI_PROT_READ_STRIP: | 1812 | case SCSI_PROT_READ_STRIP: |
1718 | case SCSI_PROT_WRITE_INSERT: | 1813 | case SCSI_PROT_WRITE_INSERT: |
1719 | *txop = BG_OP_IN_NODIF_OUT_CSUM; | ||
1720 | *rxop = BG_OP_IN_CSUM_OUT_NODIF; | 1814 | *rxop = BG_OP_IN_CSUM_OUT_NODIF; |
1815 | *txop = BG_OP_IN_NODIF_OUT_CSUM; | ||
1721 | break; | 1816 | break; |
1722 | 1817 | ||
1723 | case SCSI_PROT_READ_PASS: | 1818 | case SCSI_PROT_READ_PASS: |
1724 | case SCSI_PROT_WRITE_PASS: | 1819 | case SCSI_PROT_WRITE_PASS: |
1725 | *txop = BG_OP_IN_CRC_OUT_CRC; | 1820 | *rxop = BG_OP_IN_CSUM_OUT_CRC; |
1726 | *rxop = BG_OP_IN_CRC_OUT_CRC; | 1821 | *txop = BG_OP_IN_CRC_OUT_CSUM; |
1727 | break; | 1822 | break; |
1728 | 1823 | ||
1729 | case SCSI_PROT_NORMAL: | 1824 | case SCSI_PROT_NORMAL: |
@@ -1735,20 +1830,20 @@ lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
1735 | switch (scsi_get_prot_op(sc)) { | 1830 | switch (scsi_get_prot_op(sc)) { |
1736 | case SCSI_PROT_READ_STRIP: | 1831 | case SCSI_PROT_READ_STRIP: |
1737 | case SCSI_PROT_WRITE_INSERT: | 1832 | case SCSI_PROT_WRITE_INSERT: |
1738 | *txop = BG_OP_IN_NODIF_OUT_CSUM; | ||
1739 | *rxop = BG_OP_IN_CSUM_OUT_NODIF; | 1833 | *rxop = BG_OP_IN_CSUM_OUT_NODIF; |
1834 | *txop = BG_OP_IN_NODIF_OUT_CSUM; | ||
1740 | break; | 1835 | break; |
1741 | 1836 | ||
1742 | case SCSI_PROT_READ_PASS: | 1837 | case SCSI_PROT_READ_PASS: |
1743 | case SCSI_PROT_WRITE_PASS: | 1838 | case SCSI_PROT_WRITE_PASS: |
1744 | *txop = BG_OP_IN_CSUM_OUT_CRC; | 1839 | *rxop = BG_OP_IN_CSUM_OUT_CSUM; |
1745 | *rxop = BG_OP_IN_CRC_OUT_CSUM; | 1840 | *txop = BG_OP_IN_CSUM_OUT_CSUM; |
1746 | break; | 1841 | break; |
1747 | 1842 | ||
1748 | case SCSI_PROT_READ_INSERT: | 1843 | case SCSI_PROT_READ_INSERT: |
1749 | case SCSI_PROT_WRITE_STRIP: | 1844 | case SCSI_PROT_WRITE_STRIP: |
1750 | *txop = BG_OP_IN_CSUM_OUT_NODIF; | ||
1751 | *rxop = BG_OP_IN_NODIF_OUT_CSUM; | 1845 | *rxop = BG_OP_IN_NODIF_OUT_CSUM; |
1846 | *txop = BG_OP_IN_CSUM_OUT_NODIF; | ||
1752 | break; | 1847 | break; |
1753 | 1848 | ||
1754 | case SCSI_PROT_NORMAL: | 1849 | case SCSI_PROT_NORMAL: |
@@ -1817,11 +1912,11 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
1817 | reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */ | 1912 | reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */ |
1818 | 1913 | ||
1819 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS | 1914 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
1820 | rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1); | 1915 | rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); |
1821 | if (rc) { | 1916 | if (rc) { |
1822 | if (rc == BG_ERR_SWAP) | 1917 | if (rc & BG_ERR_SWAP) |
1823 | lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); | 1918 | lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); |
1824 | if (rc == BG_ERR_CHECK) | 1919 | if (rc & BG_ERR_CHECK) |
1825 | checking = 0; | 1920 | checking = 0; |
1826 | } | 1921 | } |
1827 | #endif | 1922 | #endif |
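lpfc_bg_err_inject() now returns a bitmask rather than a single enumerated value, so its callers test individual bits with & instead of comparing with ==, and one injection can request both an opcode swap and relaxed checking. The fourth argument is the app-tag pointer, hence NULL rather than 0. The same change is repeated in the three setup hunks that follow; the caller-side pattern, using the flag names from the injection routine:

    rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
    if (rc) {
            if (rc & BG_ERR_SWAP)           /* mismatch the guard algorithm */
                    lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
            if (rc & BG_ERR_CHECK)          /* skip HBA-side verification */
                    checking = 0;
    }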
@@ -1964,11 +2059,11 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
1964 | reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */ | 2059 | reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */ |
1965 | 2060 | ||
1966 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS | 2061 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
1967 | rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1); | 2062 | rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); |
1968 | if (rc) { | 2063 | if (rc) { |
1969 | if (rc == BG_ERR_SWAP) | 2064 | if (rc & BG_ERR_SWAP) |
1970 | lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); | 2065 | lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); |
1971 | if (rc == BG_ERR_CHECK) | 2066 | if (rc & BG_ERR_CHECK) |
1972 | checking = 0; | 2067 | checking = 0; |
1973 | } | 2068 | } |
1974 | #endif | 2069 | #endif |
@@ -2172,11 +2267,11 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
2172 | reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */ | 2267 | reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */ |
2173 | 2268 | ||
2174 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS | 2269 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
2175 | rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1); | 2270 | rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); |
2176 | if (rc) { | 2271 | if (rc) { |
2177 | if (rc == BG_ERR_SWAP) | 2272 | if (rc & BG_ERR_SWAP) |
2178 | lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); | 2273 | lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); |
2179 | if (rc == BG_ERR_CHECK) | 2274 | if (rc & BG_ERR_CHECK) |
2180 | checking = 0; | 2275 | checking = 0; |
2181 | } | 2276 | } |
2182 | #endif | 2277 | #endif |
@@ -2312,11 +2407,11 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
2312 | reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */ | 2407 | reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */ |
2313 | 2408 | ||
2314 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS | 2409 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
2315 | rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1); | 2410 | rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); |
2316 | if (rc) { | 2411 | if (rc) { |
2317 | if (rc == BG_ERR_SWAP) | 2412 | if (rc & BG_ERR_SWAP) |
2318 | lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); | 2413 | lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); |
2319 | if (rc == BG_ERR_CHECK) | 2414 | if (rc & BG_ERR_CHECK) |
2320 | checking = 0; | 2415 | checking = 0; |
2321 | } | 2416 | } |
2322 | #endif | 2417 | #endif |
@@ -2788,7 +2883,7 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, | |||
2788 | /* No error was reported - problem in FW? */ | 2883 | /* No error was reported - problem in FW? */ |
2789 | cmd->result = ScsiResult(DID_ERROR, 0); | 2884 | cmd->result = ScsiResult(DID_ERROR, 0); |
2790 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, | 2885 | lpfc_printf_log(phba, KERN_ERR, LOG_BG, |
2791 | "9057 BLKGRD: no errors reported!\n"); | 2886 | "9057 BLKGRD: Unknown error reported!\n"); |
2792 | } | 2887 | } |
2793 | 2888 | ||
2794 | out: | 2889 | out: |
@@ -3460,6 +3555,37 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
3460 | /* pick up SLI4 exchange busy status from HBA */ | 3555 | /* pick up SLI4 exchange busy status from HBA */ |

3461 | lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY; | 3556 | lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY; |
3462 | 3557 | ||
3558 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS | ||
3559 | if (lpfc_cmd->prot_data_type) { | ||
3560 | struct scsi_dif_tuple *src = NULL; | ||
3561 | |||
3562 | src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment; | ||
3563 | /* | ||
3564 | * Used to restore any changes to protection | ||
3565 | * data for error injection. | ||
3566 | */ | ||
3567 | switch (lpfc_cmd->prot_data_type) { | ||
3568 | case LPFC_INJERR_REFTAG: | ||
3569 | src->ref_tag = | ||
3570 | lpfc_cmd->prot_data; | ||
3571 | break; | ||
3572 | case LPFC_INJERR_APPTAG: | ||
3573 | src->app_tag = | ||
3574 | (uint16_t)lpfc_cmd->prot_data; | ||
3575 | break; | ||
3576 | case LPFC_INJERR_GUARD: | ||
3577 | src->guard_tag = | ||
3578 | (uint16_t)lpfc_cmd->prot_data; | ||
3579 | break; | ||
3580 | default: | ||
3581 | break; | ||
3582 | } | ||
3583 | |||
3584 | lpfc_cmd->prot_data = 0; | ||
3585 | lpfc_cmd->prot_data_type = 0; | ||
3586 | lpfc_cmd->prot_data_segment = NULL; | ||
3587 | } | ||
3588 | #endif | ||
3463 | if (pnode && NLP_CHK_NODE_ACT(pnode)) | 3589 | if (pnode && NLP_CHK_NODE_ACT(pnode)) |
3464 | atomic_dec(&pnode->cmd_pending); | 3590 | atomic_dec(&pnode->cmd_pending); |
3465 | 3591 | ||
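This is the other half of the error-injection rework: the submission path recorded what it clobbered (tag type, its address and the original value) in the lpfc_scsi_buf, and the completion handler writes the original back before the command is returned to the midlayer. Roughly, the pairing looks like this (a sketch of the existing fields, not a new API):

    /* submission side (lpfc_bg_err_inject): remember what was corrupted */
    lpfc_cmd->prot_data_type    = LPFC_INJERR_REFTAG;   /* which tag       */
    lpfc_cmd->prot_data_segment = src;                  /* where it lives  */
    lpfc_cmd->prot_data         = src->ref_tag;         /* original value  */

    /* completion side (lpfc_scsi_cmd_iocb_cmpl): undo the damage */
    if (lpfc_cmd->prot_data_type == LPFC_INJERR_REFTAG)
            src->ref_tag = lpfc_cmd->prot_data;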
@@ -4061,15 +4187,6 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
4061 | cmnd->result = err; | 4187 | cmnd->result = err; |
4062 | goto out_fail_command; | 4188 | goto out_fail_command; |
4063 | } | 4189 | } |
4064 | /* | ||
4065 | * Do not let the mid-layer retry I/O too fast. If an I/O is retried | ||
4066 | * without waiting a bit then indicate that the device is busy. | ||
4067 | */ | ||
4068 | if (cmnd->retries && | ||
4069 | time_before(jiffies, (cmnd->jiffies_at_alloc + | ||
4070 | msecs_to_jiffies(LPFC_RETRY_PAUSE * | ||
4071 | cmnd->retries)))) | ||
4072 | return SCSI_MLQUEUE_DEVICE_BUSY; | ||
4073 | ndlp = rdata->pnode; | 4190 | ndlp = rdata->pnode; |
4074 | 4191 | ||
4075 | if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) && | 4192 | if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) && |
@@ -4119,63 +4236,48 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
4119 | if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { | 4236 | if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { |
4120 | if (vport->phba->cfg_enable_bg) { | 4237 | if (vport->phba->cfg_enable_bg) { |
4121 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, | 4238 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, |
4122 | "9033 BLKGRD: rcvd protected cmd:%02x op:%02x " | 4239 | "9033 BLKGRD: rcvd protected cmd:%02x op=%s " |
4123 | "str=%s\n", | 4240 | "guard=%s\n", cmnd->cmnd[0], |
4124 | cmnd->cmnd[0], scsi_get_prot_op(cmnd), | 4241 | dif_op_str[scsi_get_prot_op(cmnd)], |
4125 | dif_op_str[scsi_get_prot_op(cmnd)]); | 4242 | dif_grd_str[scsi_host_get_guard(shost)]); |
4126 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, | ||
4127 | "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x " | ||
4128 | "%02x %02x %02x %02x %02x\n", | ||
4129 | cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2], | ||
4130 | cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5], | ||
4131 | cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8], | ||
4132 | cmnd->cmnd[9]); | ||
4133 | if (cmnd->cmnd[0] == READ_10) | 4243 | if (cmnd->cmnd[0] == READ_10) |
4134 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, | 4244 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, |
4135 | "9035 BLKGRD: READ @ sector %llu, " | 4245 | "9035 BLKGRD: READ @ sector %llu, " |
4136 | "count %u\n", | 4246 | "cnt %u, rpt %d\n", |
4137 | (unsigned long long)scsi_get_lba(cmnd), | 4247 | (unsigned long long)scsi_get_lba(cmnd), |
4138 | blk_rq_sectors(cmnd->request)); | 4248 | blk_rq_sectors(cmnd->request), |
4249 | (cmnd->cmnd[1]>>5)); | ||
4139 | else if (cmnd->cmnd[0] == WRITE_10) | 4250 | else if (cmnd->cmnd[0] == WRITE_10) |
4140 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, | 4251 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, |
4141 | "9036 BLKGRD: WRITE @ sector %llu, " | 4252 | "9036 BLKGRD: WRITE @ sector %llu, " |
4142 | "count %u cmd=%p\n", | 4253 | "cnt %u, wpt %d\n", |
4143 | (unsigned long long)scsi_get_lba(cmnd), | 4254 | (unsigned long long)scsi_get_lba(cmnd), |
4144 | blk_rq_sectors(cmnd->request), | 4255 | blk_rq_sectors(cmnd->request), |
4145 | cmnd); | 4256 | (cmnd->cmnd[1]>>5)); |
4146 | } | 4257 | } |
4147 | 4258 | ||
4148 | err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); | 4259 | err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); |
4149 | } else { | 4260 | } else { |
4150 | if (vport->phba->cfg_enable_bg) { | 4261 | if (vport->phba->cfg_enable_bg) { |
4151 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, | 4262 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, |
4152 | "9038 BLKGRD: rcvd unprotected cmd:" | 4263 | "9038 BLKGRD: rcvd unprotected cmd:" |
4153 | "%02x op:%02x str=%s\n", | 4264 | "%02x op=%s guard=%s\n", cmnd->cmnd[0], |
4154 | cmnd->cmnd[0], scsi_get_prot_op(cmnd), | 4265 | dif_op_str[scsi_get_prot_op(cmnd)], |
4155 | dif_op_str[scsi_get_prot_op(cmnd)]); | 4266 | dif_grd_str[scsi_host_get_guard(shost)]); |
4156 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, | ||
4157 | "9039 BLKGRD: CDB: %02x %02x %02x " | ||
4158 | "%02x %02x %02x %02x %02x %02x %02x\n", | ||
4159 | cmnd->cmnd[0], cmnd->cmnd[1], | ||
4160 | cmnd->cmnd[2], cmnd->cmnd[3], | ||
4161 | cmnd->cmnd[4], cmnd->cmnd[5], | ||
4162 | cmnd->cmnd[6], cmnd->cmnd[7], | ||
4163 | cmnd->cmnd[8], cmnd->cmnd[9]); | ||
4164 | if (cmnd->cmnd[0] == READ_10) | 4267 | if (cmnd->cmnd[0] == READ_10) |
4165 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, | 4268 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, |
4166 | "9040 dbg: READ @ sector %llu, " | 4269 | "9040 dbg: READ @ sector %llu, " |
4167 | "count %u\n", | 4270 | "cnt %u, rpt %d\n", |
4168 | (unsigned long long)scsi_get_lba(cmnd), | 4271 | (unsigned long long)scsi_get_lba(cmnd), |
4169 | blk_rq_sectors(cmnd->request)); | 4272 | blk_rq_sectors(cmnd->request), |
4273 | (cmnd->cmnd[1]>>5)); | ||
4170 | else if (cmnd->cmnd[0] == WRITE_10) | 4274 | else if (cmnd->cmnd[0] == WRITE_10) |
4171 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, | 4275 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, |
4172 | "9041 dbg: WRITE @ sector %llu, " | 4276 | "9041 dbg: WRITE @ sector %llu, " |
4173 | "count %u cmd=%p\n", | 4277 | "cnt %u, wpt %d\n", |
4174 | (unsigned long long)scsi_get_lba(cmnd), | 4278 | (unsigned long long)scsi_get_lba(cmnd), |
4175 | blk_rq_sectors(cmnd->request), cmnd); | 4279 | blk_rq_sectors(cmnd->request), |
4176 | else | 4280 | (cmnd->cmnd[1]>>5)); |
4177 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, | ||
4178 | "9042 dbg: parser not implemented\n"); | ||
4179 | } | 4281 | } |
4180 | err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); | 4282 | err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); |
4181 | } | 4283 | } |
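The reworked BlockGuard messages drop the raw CDB dump in favour of the decoded protection op, the host's guard type and the RDPROTECT/WRPROTECT value, which is what the "rpt"/"wpt" numbers above are: the top three bits of CDB byte 1. An illustrative helper for that extraction (the driver simply open-codes the shift):

    /* RDPROTECT (reads) and WRPROTECT (writes) occupy bits 7:5 of byte 1
     * in the 10/12/16-byte CDBs; 0 means no protection checks requested.
     */
    static inline u8 cdb_protect_field(const struct scsi_cmnd *cmnd)
    {
            return (cmnd->cmnd[1] >> 5) & 0x7;
    }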
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index 9075a08cf781..21a2ffe67eac 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2006 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2012 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -150,9 +150,18 @@ struct lpfc_scsi_buf { | |||
150 | struct lpfc_iocbq cur_iocbq; | 150 | struct lpfc_iocbq cur_iocbq; |
151 | wait_queue_head_t *waitq; | 151 | wait_queue_head_t *waitq; |
152 | unsigned long start_time; | 152 | unsigned long start_time; |
153 | |||
154 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS | ||
155 | /* Used to restore any changes to protection data for error injection */ | ||
156 | void *prot_data_segment; | ||
157 | uint32_t prot_data; | ||
158 | uint32_t prot_data_type; | ||
159 | #define LPFC_INJERR_REFTAG 1 | ||
160 | #define LPFC_INJERR_APPTAG 2 | ||
161 | #define LPFC_INJERR_GUARD 3 | ||
162 | #endif | ||
153 | }; | 163 | }; |
154 | 164 | ||
155 | #define LPFC_SCSI_DMA_EXT_SIZE 264 | 165 | #define LPFC_SCSI_DMA_EXT_SIZE 264 |
156 | #define LPFC_BPL_SIZE 1024 | 166 | #define LPFC_BPL_SIZE 1024 |
157 | #define LPFC_RETRY_PAUSE 300 | ||
158 | #define MDAC_DIRECT_CMD 0x22 | 167 | #define MDAC_DIRECT_CMD 0x22 |
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index e0e4d8d18244..dbaf5b963bff 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2011 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2012 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -5578,8 +5578,6 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) | |||
5578 | for (i = 0; i < count; i++) | 5578 | for (i = 0; i < count; i++) |
5579 | phba->sli4_hba.rpi_ids[i] = base + i; | 5579 | phba->sli4_hba.rpi_ids[i] = base + i; |
5580 | 5580 | ||
5581 | lpfc_sli4_node_prep(phba); | ||
5582 | |||
5583 | /* VPIs. */ | 5581 | /* VPIs. */ |
5584 | count = phba->sli4_hba.max_cfg_param.max_vpi; | 5582 | count = phba->sli4_hba.max_cfg_param.max_vpi; |
5585 | base = phba->sli4_hba.max_cfg_param.vpi_base; | 5583 | base = phba->sli4_hba.max_cfg_param.vpi_base; |
@@ -5613,6 +5611,8 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) | |||
5613 | rc = -ENOMEM; | 5611 | rc = -ENOMEM; |
5614 | goto free_vpi_ids; | 5612 | goto free_vpi_ids; |
5615 | } | 5613 | } |
5614 | phba->sli4_hba.max_cfg_param.xri_used = 0; | ||
5615 | phba->sli4_hba.xri_count = 0; | ||
5616 | phba->sli4_hba.xri_ids = kzalloc(count * | 5616 | phba->sli4_hba.xri_ids = kzalloc(count * |
5617 | sizeof(uint16_t), | 5617 | sizeof(uint16_t), |
5618 | GFP_KERNEL); | 5618 | GFP_KERNEL); |
@@ -6147,6 +6147,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) | |||
6147 | rc = -ENODEV; | 6147 | rc = -ENODEV; |
6148 | goto out_free_mbox; | 6148 | goto out_free_mbox; |
6149 | } | 6149 | } |
6150 | lpfc_sli4_node_prep(phba); | ||
6150 | 6151 | ||
6151 | /* Create all the SLI4 queues */ | 6152 | /* Create all the SLI4 queues */ |
6152 | rc = lpfc_sli4_queue_create(phba); | 6153 | rc = lpfc_sli4_queue_create(phba); |
@@ -7251,11 +7252,13 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) | |||
7251 | 7252 | ||
7252 | out_not_finished: | 7253 | out_not_finished: |
7253 | spin_lock_irqsave(&phba->hbalock, iflags); | 7254 | spin_lock_irqsave(&phba->hbalock, iflags); |
7254 | mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; | 7255 | if (phba->sli.mbox_active) { |
7255 | __lpfc_mbox_cmpl_put(phba, mboxq); | 7256 | mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; |
7256 | /* Release the token */ | 7257 | __lpfc_mbox_cmpl_put(phba, mboxq); |
7257 | psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; | 7258 | /* Release the token */ |
7258 | phba->sli.mbox_active = NULL; | 7259 | psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; |
7260 | phba->sli.mbox_active = NULL; | ||
7261 | } | ||
7259 | spin_unlock_irqrestore(&phba->hbalock, iflags); | 7262 | spin_unlock_irqrestore(&phba->hbalock, iflags); |
7260 | 7263 | ||
7261 | return MBX_NOT_FINISHED; | 7264 | return MBX_NOT_FINISHED; |
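The out_not_finished path now re-checks phba->sli.mbox_active under the lock before completing the mailbox and dropping the token; a plausible reading is that the command may already have been completed by another context by the time this error path runs, so the cleanup must only happen while the driver still owns the active slot. In sketch form:

    spin_lock_irqsave(&phba->hbalock, iflags);
    if (phba->sli.mbox_active) {            /* still the owner: fail and release */
            mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
            __lpfc_mbox_cmpl_put(phba, mboxq);
            psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
            phba->sli.mbox_active = NULL;
    }
    spin_unlock_irqrestore(&phba->hbalock, iflags);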
@@ -7743,6 +7746,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
7743 | if (pcmd && (*pcmd == ELS_CMD_FLOGI || | 7746 | if (pcmd && (*pcmd == ELS_CMD_FLOGI || |
7744 | *pcmd == ELS_CMD_SCR || | 7747 | *pcmd == ELS_CMD_SCR || |
7745 | *pcmd == ELS_CMD_FDISC || | 7748 | *pcmd == ELS_CMD_FDISC || |
7749 | *pcmd == ELS_CMD_LOGO || | ||
7746 | *pcmd == ELS_CMD_PLOGI)) { | 7750 | *pcmd == ELS_CMD_PLOGI)) { |
7747 | bf_set(els_req64_sp, &wqe->els_req, 1); | 7751 | bf_set(els_req64_sp, &wqe->els_req, 1); |
7748 | bf_set(els_req64_sid, &wqe->els_req, | 7752 | bf_set(els_req64_sid, &wqe->els_req, |
@@ -8385,6 +8389,7 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba, | |||
8385 | struct sli4_wcqe_xri_aborted *axri) | 8389 | struct sli4_wcqe_xri_aborted *axri) |
8386 | { | 8390 | { |
8387 | struct lpfc_vport *vport; | 8391 | struct lpfc_vport *vport; |
8392 | uint32_t ext_status = 0; | ||
8388 | 8393 | ||
8389 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { | 8394 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { |
8390 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, | 8395 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
@@ -8396,12 +8401,20 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba, | |||
8396 | vport = ndlp->vport; | 8401 | vport = ndlp->vport; |
8397 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, | 8402 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
8398 | "3116 Port generated FCP XRI ABORT event on " | 8403 | "3116 Port generated FCP XRI ABORT event on " |
8399 | "vpi %d rpi %d xri x%x status 0x%x\n", | 8404 | "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n", |
8400 | ndlp->vport->vpi, ndlp->nlp_rpi, | 8405 | ndlp->vport->vpi, ndlp->nlp_rpi, |
8401 | bf_get(lpfc_wcqe_xa_xri, axri), | 8406 | bf_get(lpfc_wcqe_xa_xri, axri), |
8402 | bf_get(lpfc_wcqe_xa_status, axri)); | 8407 | bf_get(lpfc_wcqe_xa_status, axri), |
8408 | axri->parameter); | ||
8403 | 8409 | ||
8404 | if (bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) | 8410 | /* |
8411 | * Catch the ABTS protocol failure case. Older OCe FW releases returned | ||
8412 | * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and | ||
8413 | * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT. | ||
8414 | */ | ||
8415 | ext_status = axri->parameter & WCQE_PARAM_MASK; | ||
8416 | if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) && | ||
8417 | ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0))) | ||
8405 | lpfc_sli_abts_recover_port(vport, ndlp); | 8418 | lpfc_sli_abts_recover_port(vport, ndlp); |
8406 | } | 8419 | } |
8407 | 8420 | ||
@@ -9807,12 +9820,11 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba) | |||
9807 | unsigned long timeout; | 9820 | unsigned long timeout; |
9808 | 9821 | ||
9809 | timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; | 9822 | timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; |
9823 | |||
9810 | spin_lock_irq(&phba->hbalock); | 9824 | spin_lock_irq(&phba->hbalock); |
9811 | psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; | 9825 | psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; |
9812 | spin_unlock_irq(&phba->hbalock); | ||
9813 | 9826 | ||
9814 | if (psli->sli_flag & LPFC_SLI_ACTIVE) { | 9827 | if (psli->sli_flag & LPFC_SLI_ACTIVE) { |
9815 | spin_lock_irq(&phba->hbalock); | ||
9816 | /* Determine how long we might wait for the active mailbox | 9828 | /* Determine how long we might wait for the active mailbox |
9817 | * command to be gracefully completed by firmware. | 9829 | * command to be gracefully completed by firmware. |
9818 | */ | 9830 | */ |
@@ -9831,7 +9843,9 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba) | |||
9831 | */ | 9843 | */ |
9832 | break; | 9844 | break; |
9833 | } | 9845 | } |
9834 | } | 9846 | } else |
9847 | spin_unlock_irq(&phba->hbalock); | ||
9848 | |||
9835 | lpfc_sli_mbox_sys_flush(phba); | 9849 | lpfc_sli_mbox_sys_flush(phba); |
9836 | } | 9850 | } |
9837 | 9851 | ||
@@ -13272,7 +13286,7 @@ lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba) | |||
13272 | LPFC_MBOXQ_t *mbox; | 13286 | LPFC_MBOXQ_t *mbox; |
13273 | uint32_t reqlen, alloclen, index; | 13287 | uint32_t reqlen, alloclen, index; |
13274 | uint32_t mbox_tmo; | 13288 | uint32_t mbox_tmo; |
13275 | uint16_t rsrc_start, rsrc_size, els_xri_cnt; | 13289 | uint16_t rsrc_start, rsrc_size, els_xri_cnt, post_els_xri_cnt; |
13276 | uint16_t xritag_start = 0, lxri = 0; | 13290 | uint16_t xritag_start = 0, lxri = 0; |
13277 | struct lpfc_rsrc_blks *rsrc_blk; | 13291 | struct lpfc_rsrc_blks *rsrc_blk; |
13278 | int cnt, ttl_cnt, rc = 0; | 13292 | int cnt, ttl_cnt, rc = 0; |
@@ -13294,6 +13308,7 @@ lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba) | |||
13294 | 13308 | ||
13295 | cnt = 0; | 13309 | cnt = 0; |
13296 | ttl_cnt = 0; | 13310 | ttl_cnt = 0; |
13311 | post_els_xri_cnt = els_xri_cnt; | ||
13297 | list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list, | 13312 | list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list, |
13298 | list) { | 13313 | list) { |
13299 | rsrc_start = rsrc_blk->rsrc_start; | 13314 | rsrc_start = rsrc_blk->rsrc_start; |
@@ -13303,11 +13318,12 @@ lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba) | |||
13303 | "3014 Working ELS Extent start %d, cnt %d\n", | 13318 | "3014 Working ELS Extent start %d, cnt %d\n", |
13304 | rsrc_start, rsrc_size); | 13319 | rsrc_start, rsrc_size); |
13305 | 13320 | ||
13306 | loop_cnt = min(els_xri_cnt, rsrc_size); | 13321 | loop_cnt = min(post_els_xri_cnt, rsrc_size); |
13307 | if (ttl_cnt + loop_cnt >= els_xri_cnt) { | 13322 | if (loop_cnt < post_els_xri_cnt) { |
13308 | loop_cnt = els_xri_cnt - ttl_cnt; | 13323 | post_els_xri_cnt -= loop_cnt; |
13309 | ttl_cnt = els_xri_cnt; | 13324 | ttl_cnt += loop_cnt; |
13310 | } | 13325 | } else |
13326 | ttl_cnt += post_els_xri_cnt; | ||
13311 | 13327 | ||
13312 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 13328 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
13313 | if (!mbox) | 13329 | if (!mbox) |
@@ -14203,15 +14219,14 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba, | |||
14203 | * field and RX_ID from ABTS for RX_ID field. | 14219 | * field and RX_ID from ABTS for RX_ID field. |
14204 | */ | 14220 | */ |
14205 | bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP); | 14221 | bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP); |
14206 | bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid); | ||
14207 | } else { | 14222 | } else { |
14208 | /* ABTS sent by initiator to CT exchange, construction | 14223 | /* ABTS sent by initiator to CT exchange, construction |
14209 | * of BA_ACC will need to allocate a new XRI as for the | 14224 | * of BA_ACC will need to allocate a new XRI as for the |
14210 | * XRI_TAG and RX_ID fields. | 14225 | * XRI_TAG field. |
14211 | */ | 14226 | */ |
14212 | bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT); | 14227 | bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT); |
14213 | bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, NO_XRI); | ||
14214 | } | 14228 | } |
14229 | bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid); | ||
14215 | bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid); | 14230 | bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid); |
14216 | 14231 | ||
14217 | /* Xmit CT abts response on exchange <xid> */ | 14232 | /* Xmit CT abts response on exchange <xid> */ |
@@ -15042,6 +15057,7 @@ lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) | |||
15042 | LPFC_MBOXQ_t *mboxq; | 15057 | LPFC_MBOXQ_t *mboxq; |
15043 | 15058 | ||
15044 | phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; | 15059 | phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; |
15060 | phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag; | ||
15045 | mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 15061 | mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
15046 | if (!mboxq) { | 15062 | if (!mboxq) { |
15047 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 15063 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index f2a2602e5c35..25cefc254b76 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2011 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2012 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -18,7 +18,7 @@ | |||
18 | * included with this package. * | 18 | * included with this package. * |
19 | *******************************************************************/ | 19 | *******************************************************************/ |
20 | 20 | ||
21 | #define LPFC_DRIVER_VERSION "8.3.29" | 21 | #define LPFC_DRIVER_VERSION "8.3.30" |
22 | #define LPFC_DRIVER_NAME "lpfc" | 22 | #define LPFC_DRIVER_NAME "lpfc" |
23 | #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" | 23 | #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" |
24 | #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" | 24 | #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 5e69f468535f..8a59a772fdf2 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c | |||
@@ -657,7 +657,7 @@ _base_sas_log_info(struct MPT2SAS_ADAPTER *ioc , u32 log_info) | |||
657 | return; | 657 | return; |
658 | 658 | ||
659 | /* eat the loginfos associated with task aborts */ | 659 | /* eat the loginfos associated with task aborts */ |
660 | if (ioc->ignore_loginfos && (log_info == 30050000 || log_info == | 660 | if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info == |
661 | 0x31140000 || log_info == 0x31130000)) | 661 | 0x31140000 || log_info == 0x31130000)) |
662 | return; | 662 | return; |
663 | 663 | ||
@@ -2060,12 +2060,10 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc) | |||
2060 | { | 2060 | { |
2061 | int i = 0; | 2061 | int i = 0; |
2062 | char desc[16]; | 2062 | char desc[16]; |
2063 | u8 revision; | ||
2064 | u32 iounit_pg1_flags; | 2063 | u32 iounit_pg1_flags; |
2065 | u32 bios_version; | 2064 | u32 bios_version; |
2066 | 2065 | ||
2067 | bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion); | 2066 | bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion); |
2068 | pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision); | ||
2069 | strncpy(desc, ioc->manu_pg0.ChipName, 16); | 2067 | strncpy(desc, ioc->manu_pg0.ChipName, 16); |
2070 | printk(MPT2SAS_INFO_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), " | 2068 | printk(MPT2SAS_INFO_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), " |
2071 | "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n", | 2069 | "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n", |
@@ -2074,7 +2072,7 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc) | |||
2074 | (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16, | 2072 | (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16, |
2075 | (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, | 2073 | (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, |
2076 | ioc->facts.FWVersion.Word & 0x000000FF, | 2074 | ioc->facts.FWVersion.Word & 0x000000FF, |
2077 | revision, | 2075 | ioc->pdev->revision, |
2078 | (bios_version & 0xFF000000) >> 24, | 2076 | (bios_version & 0xFF000000) >> 24, |
2079 | (bios_version & 0x00FF0000) >> 16, | 2077 | (bios_version & 0x00FF0000) >> 16, |
2080 | (bios_version & 0x0000FF00) >> 8, | 2078 | (bios_version & 0x0000FF00) >> 8, |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c index 7fceb899029e..3b9a28efea82 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c | |||
@@ -1026,7 +1026,6 @@ _ctl_getiocinfo(void __user *arg) | |||
1026 | { | 1026 | { |
1027 | struct mpt2_ioctl_iocinfo karg; | 1027 | struct mpt2_ioctl_iocinfo karg; |
1028 | struct MPT2SAS_ADAPTER *ioc; | 1028 | struct MPT2SAS_ADAPTER *ioc; |
1029 | u8 revision; | ||
1030 | 1029 | ||
1031 | if (copy_from_user(&karg, arg, sizeof(karg))) { | 1030 | if (copy_from_user(&karg, arg, sizeof(karg))) { |
1032 | printk(KERN_ERR "failure at %s:%d/%s()!\n", | 1031 | printk(KERN_ERR "failure at %s:%d/%s()!\n", |
@@ -1046,8 +1045,7 @@ _ctl_getiocinfo(void __user *arg) | |||
1046 | karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2; | 1045 | karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2; |
1047 | if (ioc->pfacts) | 1046 | if (ioc->pfacts) |
1048 | karg.port_number = ioc->pfacts[0].PortNumber; | 1047 | karg.port_number = ioc->pfacts[0].PortNumber; |
1049 | pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision); | 1048 | karg.hw_rev = ioc->pdev->revision; |
1050 | karg.hw_rev = revision; | ||
1051 | karg.pci_id = ioc->pdev->device; | 1049 | karg.pci_id = ioc->pdev->device; |
1052 | karg.subsystem_device = ioc->pdev->subsystem_device; | 1050 | karg.subsystem_device = ioc->pdev->subsystem_device; |
1053 | karg.subsystem_vendor = ioc->pdev->subsystem_vendor; | 1051 | karg.subsystem_vendor = ioc->pdev->subsystem_vendor; |
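Both mpt2sas hunks (and the qla4xxx one further down) make the same substitution: the PCI core caches the revision ID in struct pci_dev at enumeration time, so drivers can read pdev->revision directly instead of issuing another config-space access through pci_read_config_byte() with PCI_CLASS_REVISION, which returns the same byte. A minimal before/after sketch:

    u8 revision;

    /* before: a redundant config-space read */
    pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);

    /* after: use the value cached by the PCI core */
    revision = pdev->revision;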
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index 3619f6eeeeda..9d82ee5c10de 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c | |||
@@ -2093,6 +2093,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) | |||
2093 | struct ata_task_resp *resp ; | 2093 | struct ata_task_resp *resp ; |
2094 | u32 *sata_resp; | 2094 | u32 *sata_resp; |
2095 | struct pm8001_device *pm8001_dev; | 2095 | struct pm8001_device *pm8001_dev; |
2096 | unsigned long flags; | ||
2096 | 2097 | ||
2097 | psataPayload = (struct sata_completion_resp *)(piomb + 4); | 2098 | psataPayload = (struct sata_completion_resp *)(piomb + 4); |
2098 | status = le32_to_cpu(psataPayload->status); | 2099 | status = le32_to_cpu(psataPayload->status); |
@@ -2382,26 +2383,26 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) | |||
2382 | ts->stat = SAS_DEV_NO_RESPONSE; | 2383 | ts->stat = SAS_DEV_NO_RESPONSE; |
2383 | break; | 2384 | break; |
2384 | } | 2385 | } |
2385 | spin_lock_irq(&t->task_state_lock); | 2386 | spin_lock_irqsave(&t->task_state_lock, flags); |
2386 | t->task_state_flags &= ~SAS_TASK_STATE_PENDING; | 2387 | t->task_state_flags &= ~SAS_TASK_STATE_PENDING; |
2387 | t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; | 2388 | t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; |
2388 | t->task_state_flags |= SAS_TASK_STATE_DONE; | 2389 | t->task_state_flags |= SAS_TASK_STATE_DONE; |
2389 | if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { | 2390 | if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { |
2390 | spin_unlock_irq(&t->task_state_lock); | 2391 | spin_unlock_irqrestore(&t->task_state_lock, flags); |
2391 | PM8001_FAIL_DBG(pm8001_ha, | 2392 | PM8001_FAIL_DBG(pm8001_ha, |
2392 | pm8001_printk("task 0x%p done with io_status 0x%x" | 2393 | pm8001_printk("task 0x%p done with io_status 0x%x" |
2393 | " resp 0x%x stat 0x%x but aborted by upper layer!\n", | 2394 | " resp 0x%x stat 0x%x but aborted by upper layer!\n", |
2394 | t, status, ts->resp, ts->stat)); | 2395 | t, status, ts->resp, ts->stat)); |
2395 | pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); | 2396 | pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); |
2396 | } else if (t->uldd_task) { | 2397 | } else if (t->uldd_task) { |
2397 | spin_unlock_irq(&t->task_state_lock); | 2398 | spin_unlock_irqrestore(&t->task_state_lock, flags); |
2398 | pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); | 2399 | pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); |
2399 | mb();/* ditto */ | 2400 | mb();/* ditto */ |
2400 | spin_unlock_irq(&pm8001_ha->lock); | 2401 | spin_unlock_irq(&pm8001_ha->lock); |
2401 | t->task_done(t); | 2402 | t->task_done(t); |
2402 | spin_lock_irq(&pm8001_ha->lock); | 2403 | spin_lock_irq(&pm8001_ha->lock); |
2403 | } else if (!t->uldd_task) { | 2404 | } else if (!t->uldd_task) { |
2404 | spin_unlock_irq(&t->task_state_lock); | 2405 | spin_unlock_irqrestore(&t->task_state_lock, flags); |
2405 | pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); | 2406 | pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); |
2406 | mb();/*ditto*/ | 2407 | mb();/*ditto*/ |
2407 | spin_unlock_irq(&pm8001_ha->lock); | 2408 | spin_unlock_irq(&pm8001_ha->lock); |
@@ -2423,6 +2424,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) | |||
2423 | u32 tag = le32_to_cpu(psataPayload->tag); | 2424 | u32 tag = le32_to_cpu(psataPayload->tag); |
2424 | u32 port_id = le32_to_cpu(psataPayload->port_id); | 2425 | u32 port_id = le32_to_cpu(psataPayload->port_id); |
2425 | u32 dev_id = le32_to_cpu(psataPayload->device_id); | 2426 | u32 dev_id = le32_to_cpu(psataPayload->device_id); |
2427 | unsigned long flags; | ||
2426 | 2428 | ||
2427 | ccb = &pm8001_ha->ccb_info[tag]; | 2429 | ccb = &pm8001_ha->ccb_info[tag]; |
2428 | t = ccb->task; | 2430 | t = ccb->task; |
@@ -2593,26 +2595,26 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) | |||
2593 | ts->stat = SAS_OPEN_TO; | 2595 | ts->stat = SAS_OPEN_TO; |
2594 | break; | 2596 | break; |
2595 | } | 2597 | } |
2596 | spin_lock_irq(&t->task_state_lock); | 2598 | spin_lock_irqsave(&t->task_state_lock, flags); |
2597 | t->task_state_flags &= ~SAS_TASK_STATE_PENDING; | 2599 | t->task_state_flags &= ~SAS_TASK_STATE_PENDING; |
2598 | t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; | 2600 | t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; |
2599 | t->task_state_flags |= SAS_TASK_STATE_DONE; | 2601 | t->task_state_flags |= SAS_TASK_STATE_DONE; |
2600 | if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { | 2602 | if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { |
2601 | spin_unlock_irq(&t->task_state_lock); | 2603 | spin_unlock_irqrestore(&t->task_state_lock, flags); |
2602 | PM8001_FAIL_DBG(pm8001_ha, | 2604 | PM8001_FAIL_DBG(pm8001_ha, |
2603 | pm8001_printk("task 0x%p done with io_status 0x%x" | 2605 | pm8001_printk("task 0x%p done with io_status 0x%x" |
2604 | " resp 0x%x stat 0x%x but aborted by upper layer!\n", | 2606 | " resp 0x%x stat 0x%x but aborted by upper layer!\n", |
2605 | t, event, ts->resp, ts->stat)); | 2607 | t, event, ts->resp, ts->stat)); |
2606 | pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); | 2608 | pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); |
2607 | } else if (t->uldd_task) { | 2609 | } else if (t->uldd_task) { |
2608 | spin_unlock_irq(&t->task_state_lock); | 2610 | spin_unlock_irqrestore(&t->task_state_lock, flags); |
2609 | pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); | 2611 | pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); |
2610 | mb();/* ditto */ | 2612 | mb();/* ditto */ |
2611 | spin_unlock_irq(&pm8001_ha->lock); | 2613 | spin_unlock_irq(&pm8001_ha->lock); |
2612 | t->task_done(t); | 2614 | t->task_done(t); |
2613 | spin_lock_irq(&pm8001_ha->lock); | 2615 | spin_lock_irq(&pm8001_ha->lock); |
2614 | } else if (!t->uldd_task) { | 2616 | } else if (!t->uldd_task) { |
2615 | spin_unlock_irq(&t->task_state_lock); | 2617 | spin_unlock_irqrestore(&t->task_state_lock, flags); |
2616 | pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); | 2618 | pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); |
2617 | mb();/*ditto*/ | 2619 | mb();/*ditto*/ |
2618 | spin_unlock_irq(&pm8001_ha->lock); | 2620 | spin_unlock_irq(&pm8001_ha->lock); |
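The pm8001 completion paths switch the task-state lock from spin_lock_irq()/spin_unlock_irq() to the irqsave/irqrestore variants. The unconditional forms re-enable interrupts on unlock, which is unsafe if the caller was already running with interrupts disabled (for example inside another irqsave critical section); saving and restoring the flags preserves the caller's interrupt state. The usual pattern:

    unsigned long flags;

    spin_lock_irqsave(&t->task_state_lock, flags);       /* safe in any context */
    t->task_state_flags |= SAS_TASK_STATE_DONE;
    spin_unlock_irqrestore(&t->task_state_lock, flags);  /* restore prior IRQ state */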
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c index 7c9f28b7da72..fc542a9bb106 100644 --- a/drivers/scsi/qla4xxx/ql4_isr.c +++ b/drivers/scsi/qla4xxx/ql4_isr.c | |||
@@ -431,9 +431,9 @@ static void qla4xxx_mbox_status_entry(struct scsi_qla_host *ha, | |||
431 | mbox_sts_entry->out_mbox[6])); | 431 | mbox_sts_entry->out_mbox[6])); |
432 | 432 | ||
433 | if (mbox_sts_entry->out_mbox[0] == MBOX_STS_COMMAND_COMPLETE) | 433 | if (mbox_sts_entry->out_mbox[0] == MBOX_STS_COMMAND_COMPLETE) |
434 | status = QLA_SUCCESS; | 434 | status = ISCSI_PING_SUCCESS; |
435 | else | 435 | else |
436 | status = QLA_ERROR; | 436 | status = mbox_sts_entry->out_mbox[6]; |
437 | 437 | ||
438 | data_size = sizeof(mbox_sts_entry->out_mbox); | 438 | data_size = sizeof(mbox_sts_entry->out_mbox); |
439 | 439 | ||
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 3d9419460e0c..ee47820c30a6 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c | |||
@@ -834,7 +834,7 @@ static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc) | |||
834 | static void qla4xxx_set_port_speed(struct Scsi_Host *shost) | 834 | static void qla4xxx_set_port_speed(struct Scsi_Host *shost) |
835 | { | 835 | { |
836 | struct scsi_qla_host *ha = to_qla_host(shost); | 836 | struct scsi_qla_host *ha = to_qla_host(shost); |
837 | struct iscsi_cls_host *ihost = shost_priv(shost); | 837 | struct iscsi_cls_host *ihost = shost->shost_data; |
838 | uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN; | 838 | uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN; |
839 | 839 | ||
840 | qla4xxx_get_firmware_state(ha); | 840 | qla4xxx_get_firmware_state(ha); |
@@ -859,7 +859,7 @@ static void qla4xxx_set_port_speed(struct Scsi_Host *shost) | |||
859 | static void qla4xxx_set_port_state(struct Scsi_Host *shost) | 859 | static void qla4xxx_set_port_state(struct Scsi_Host *shost) |
860 | { | 860 | { |
861 | struct scsi_qla_host *ha = to_qla_host(shost); | 861 | struct scsi_qla_host *ha = to_qla_host(shost); |
862 | struct iscsi_cls_host *ihost = shost_priv(shost); | 862 | struct iscsi_cls_host *ihost = shost->shost_data; |
863 | uint32_t state = ISCSI_PORT_STATE_DOWN; | 863 | uint32_t state = ISCSI_PORT_STATE_DOWN; |
864 | 864 | ||
865 | if (test_bit(AF_LINK_UP, &ha->flags)) | 865 | if (test_bit(AF_LINK_UP, &ha->flags)) |
@@ -3445,7 +3445,6 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha) | |||
3445 | int qla4_8xxx_iospace_config(struct scsi_qla_host *ha) | 3445 | int qla4_8xxx_iospace_config(struct scsi_qla_host *ha) |
3446 | { | 3446 | { |
3447 | int status = 0; | 3447 | int status = 0; |
3448 | uint8_t revision_id; | ||
3449 | unsigned long mem_base, mem_len, db_base, db_len; | 3448 | unsigned long mem_base, mem_len, db_base, db_len; |
3450 | struct pci_dev *pdev = ha->pdev; | 3449 | struct pci_dev *pdev = ha->pdev; |
3451 | 3450 | ||
@@ -3457,10 +3456,9 @@ int qla4_8xxx_iospace_config(struct scsi_qla_host *ha) | |||
3457 | goto iospace_error_exit; | 3456 | goto iospace_error_exit; |
3458 | } | 3457 | } |
3459 | 3458 | ||
3460 | pci_read_config_byte(pdev, PCI_REVISION_ID, &revision_id); | ||
3461 | DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n", | 3459 | DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n", |
3462 | __func__, revision_id)); | 3460 | __func__, pdev->revision)); |
3463 | ha->revision_id = revision_id; | 3461 | ha->revision_id = pdev->revision; |
3464 | 3462 | ||
3465 | /* remap phys address */ | 3463 | /* remap phys address */ |
3466 | mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ | 3464 | mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ |
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h index ede9af944141..97b30c108e36 100644 --- a/drivers/scsi/qla4xxx/ql4_version.h +++ b/drivers/scsi/qla4xxx/ql4_version.h | |||
@@ -5,4 +5,4 @@ | |||
5 | * See LICENSE.qla4xxx for copyright and licensing details. | 5 | * See LICENSE.qla4xxx for copyright and licensing details. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #define QLA4XXX_DRIVER_VERSION "5.02.00-k15" | 8 | #define QLA4XXX_DRIVER_VERSION "5.02.00-k16" |
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 591856131c4e..182d5a57ab74 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c | |||
@@ -101,6 +101,7 @@ static const char * scsi_debug_version_date = "20100324"; | |||
101 | #define DEF_LBPU 0 | 101 | #define DEF_LBPU 0 |
102 | #define DEF_LBPWS 0 | 102 | #define DEF_LBPWS 0 |
103 | #define DEF_LBPWS10 0 | 103 | #define DEF_LBPWS10 0 |
104 | #define DEF_LBPRZ 1 | ||
104 | #define DEF_LOWEST_ALIGNED 0 | 105 | #define DEF_LOWEST_ALIGNED 0 |
105 | #define DEF_NO_LUN_0 0 | 106 | #define DEF_NO_LUN_0 0 |
106 | #define DEF_NUM_PARTS 0 | 107 | #define DEF_NUM_PARTS 0 |
@@ -186,6 +187,7 @@ static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO; | |||
186 | static unsigned int scsi_debug_lbpu = DEF_LBPU; | 187 | static unsigned int scsi_debug_lbpu = DEF_LBPU; |
187 | static unsigned int scsi_debug_lbpws = DEF_LBPWS; | 188 | static unsigned int scsi_debug_lbpws = DEF_LBPWS; |
188 | static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10; | 189 | static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10; |
190 | static unsigned int scsi_debug_lbprz = DEF_LBPRZ; | ||
189 | static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT; | 191 | static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT; |
190 | static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY; | 192 | static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY; |
191 | static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS; | 193 | static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS; |
@@ -775,10 +777,10 @@ static int inquiry_evpd_b1(unsigned char *arr) | |||
775 | return 0x3c; | 777 | return 0x3c; |
776 | } | 778 | } |
777 | 779 | ||
778 | /* Thin provisioning VPD page (SBC-3) */ | 780 | /* Logical block provisioning VPD page (SBC-3) */ |
779 | static int inquiry_evpd_b2(unsigned char *arr) | 781 | static int inquiry_evpd_b2(unsigned char *arr) |
780 | { | 782 | { |
781 | memset(arr, 0, 0x8); | 783 | memset(arr, 0, 0x4); |
782 | arr[0] = 0; /* threshold exponent */ | 784 | arr[0] = 0; /* threshold exponent */ |
783 | 785 | ||
784 | if (scsi_debug_lbpu) | 786 | if (scsi_debug_lbpu) |
@@ -790,7 +792,10 @@ static int inquiry_evpd_b2(unsigned char *arr) | |||
790 | if (scsi_debug_lbpws10) | 792 | if (scsi_debug_lbpws10) |
791 | arr[1] |= 1 << 5; | 793 | arr[1] |= 1 << 5; |
792 | 794 | ||
793 | return 0x8; | 795 | if (scsi_debug_lbprz) |
796 | arr[1] |= 1 << 2; | ||
797 | |||
798 | return 0x4; | ||
794 | } | 799 | } |
795 | 800 | ||
796 | #define SDEBUG_LONG_INQ_SZ 96 | 801 | #define SDEBUG_LONG_INQ_SZ 96 |
@@ -1071,8 +1076,11 @@ static int resp_readcap16(struct scsi_cmnd * scp, | |||
1071 | arr[13] = scsi_debug_physblk_exp & 0xf; | 1076 | arr[13] = scsi_debug_physblk_exp & 0xf; |
1072 | arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f; | 1077 | arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f; |
1073 | 1078 | ||
1074 | if (scsi_debug_lbp()) | 1079 | if (scsi_debug_lbp()) { |
1075 | arr[14] |= 0x80; /* LBPME */ | 1080 | arr[14] |= 0x80; /* LBPME */ |
1081 | if (scsi_debug_lbprz) | ||
1082 | arr[14] |= 0x40; /* LBPRZ */ | ||
1083 | } | ||
1076 | 1084 | ||
1077 | arr[15] = scsi_debug_lowest_aligned & 0xff; | 1085 | arr[15] = scsi_debug_lowest_aligned & 0xff; |
1078 | 1086 | ||
@@ -2046,10 +2054,13 @@ static void unmap_region(sector_t lba, unsigned int len) | |||
2046 | block = lba + alignment; | 2054 | block = lba + alignment; |
2047 | rem = do_div(block, granularity); | 2055 | rem = do_div(block, granularity); |
2048 | 2056 | ||
2049 | if (rem == 0 && lba + granularity <= end && | 2057 | if (rem == 0 && lba + granularity <= end && block < map_size) { |
2050 | block < map_size) | ||
2051 | clear_bit(block, map_storep); | 2058 | clear_bit(block, map_storep); |
2052 | 2059 | if (scsi_debug_lbprz) | |
2060 | memset(fake_storep + | ||
2061 | block * scsi_debug_sector_size, 0, | ||
2062 | scsi_debug_sector_size); | ||
2063 | } | ||
2053 | lba += granularity - rem; | 2064 | lba += granularity - rem; |
2054 | } | 2065 | } |
2055 | } | 2066 | } |
@@ -2731,6 +2742,7 @@ module_param_named(guard, scsi_debug_guard, int, S_IRUGO); | |||
2731 | module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO); | 2742 | module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO); |
2732 | module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO); | 2743 | module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO); |
2733 | module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO); | 2744 | module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO); |
2745 | module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO); | ||
2734 | module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO); | 2746 | module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO); |
2735 | module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR); | 2747 | module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR); |
2736 | module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR); | 2748 | module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR); |
@@ -2772,6 +2784,7 @@ MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)"); | |||
2772 | MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)"); | 2784 | MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)"); |
2773 | MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)"); | 2785 | MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)"); |
2774 | MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)"); | 2786 | MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)"); |
2787 | MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)"); | ||
2775 | MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)"); | 2788 | MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)"); |
2776 | MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)"); | 2789 | MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)"); |
2777 | MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))"); | 2790 | MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))"); |
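With lbprz enabled, scsi_debug now advertises the LBPRZ bit alongside LBPME in READ CAPACITY(16) byte 14 and in the logical block provisioning VPD page, and unmap_region() zero-fills the backing store so reads of unmapped blocks really return zeros. A minimal sketch of how an initiator-side consumer could decode those two bits from the 32-byte READ CAPACITY(16) parameter data; the helper names are hypothetical and the buffer is assumed to have been filled by a completed command:

    #include <stdbool.h>
    #include <stdint.h>

    /* SBC-3 READ CAPACITY(16) parameter data, byte 14:
     * bit 7 = LBPME (provisioning enabled), bit 6 = LBPRZ (unmapped reads return zeros). */
    static bool lbp_enabled(const uint8_t buf[32])
    {
            return buf[14] & 0x80;
    }

    static bool lbp_unmapped_reads_zero(const uint8_t buf[32])
    {
            return buf[14] & 0x40;
    }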
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index fac31730addf..1cf640e575da 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c | |||
@@ -1486,7 +1486,7 @@ void iscsi_post_host_event(uint32_t host_no, struct iscsi_transport *transport, | |||
1486 | struct iscsi_uevent *ev; | 1486 | struct iscsi_uevent *ev; |
1487 | int len = NLMSG_SPACE(sizeof(*ev) + data_size); | 1487 | int len = NLMSG_SPACE(sizeof(*ev) + data_size); |
1488 | 1488 | ||
1489 | skb = alloc_skb(len, GFP_KERNEL); | 1489 | skb = alloc_skb(len, GFP_NOIO); |
1490 | if (!skb) { | 1490 | if (!skb) { |
1491 | printk(KERN_ERR "gracefully ignored host event (%d):%d OOM\n", | 1491 | printk(KERN_ERR "gracefully ignored host event (%d):%d OOM\n", |
1492 | host_no, code); | 1492 | host_no, code); |
@@ -1504,7 +1504,7 @@ void iscsi_post_host_event(uint32_t host_no, struct iscsi_transport *transport, | |||
1504 | if (data_size) | 1504 | if (data_size) |
1505 | memcpy((char *)ev + sizeof(*ev), data, data_size); | 1505 | memcpy((char *)ev + sizeof(*ev), data, data_size); |
1506 | 1506 | ||
1507 | iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL); | 1507 | iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_NOIO); |
1508 | } | 1508 | } |
1509 | EXPORT_SYMBOL_GPL(iscsi_post_host_event); | 1509 | EXPORT_SYMBOL_GPL(iscsi_post_host_event); |
1510 | 1510 | ||
@@ -1517,7 +1517,7 @@ void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport, | |||
1517 | struct iscsi_uevent *ev; | 1517 | struct iscsi_uevent *ev; |
1518 | int len = NLMSG_SPACE(sizeof(*ev) + data_size); | 1518 | int len = NLMSG_SPACE(sizeof(*ev) + data_size); |
1519 | 1519 | ||
1520 | skb = alloc_skb(len, GFP_KERNEL); | 1520 | skb = alloc_skb(len, GFP_NOIO); |
1521 | if (!skb) { | 1521 | if (!skb) { |
1522 | printk(KERN_ERR "gracefully ignored ping comp: OOM\n"); | 1522 | printk(KERN_ERR "gracefully ignored ping comp: OOM\n"); |
1523 | return; | 1523 | return; |
@@ -1533,7 +1533,7 @@ void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport, | |||
1533 | ev->r.ping_comp.data_size = data_size; | 1533 | ev->r.ping_comp.data_size = data_size; |
1534 | memcpy((char *)ev + sizeof(*ev), data, data_size); | 1534 | memcpy((char *)ev + sizeof(*ev), data, data_size); |
1535 | 1535 | ||
1536 | iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL); | 1536 | iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_NOIO); |
1537 | } | 1537 | } |
1538 | EXPORT_SYMBOL_GPL(iscsi_ping_comp_event); | 1538 | EXPORT_SYMBOL_GPL(iscsi_ping_comp_event); |
1539 | 1539 | ||
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 09e3df42a402..5ba5c2a9e8e9 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -664,7 +664,7 @@ static void sd_unprep_fn(struct request_queue *q, struct request *rq) | |||
664 | } | 664 | } |
665 | 665 | ||
666 | /** | 666 | /** |
667 | * sd_init_command - build a scsi (read or write) command from | 667 | * sd_prep_fn - build a scsi (read or write) command from |
668 | * information in the request structure. | 668 | * information in the request structure. |
669 | * @SCpnt: pointer to mid-level's per scsi command structure that | 669 | * @SCpnt: pointer to mid-level's per scsi command structure that |
670 | * contains request and into which the scsi command is written | 670 | * contains request and into which the scsi command is written |
@@ -711,7 +711,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) | |||
711 | ret = BLKPREP_KILL; | 711 | ret = BLKPREP_KILL; |
712 | 712 | ||
713 | SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt, | 713 | SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt, |
714 | "sd_init_command: block=%llu, " | 714 | "sd_prep_fn: block=%llu, " |
715 | "count=%d\n", | 715 | "count=%d\n", |
716 | (unsigned long long)block, | 716 | (unsigned long long)block, |
717 | this_count)); | 717 | this_count)); |
@@ -1212,9 +1212,14 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) | |||
1212 | retval = -ENODEV; | 1212 | retval = -ENODEV; |
1213 | 1213 | ||
1214 | if (scsi_block_when_processing_errors(sdp)) { | 1214 | if (scsi_block_when_processing_errors(sdp)) { |
1215 | retval = scsi_autopm_get_device(sdp); | ||
1216 | if (retval) | ||
1217 | goto out; | ||
1218 | |||
1215 | sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL); | 1219 | sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL); |
1216 | retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES, | 1220 | retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES, |
1217 | sshdr); | 1221 | sshdr); |
1222 | scsi_autopm_put_device(sdp); | ||
1218 | } | 1223 | } |
1219 | 1224 | ||
1220 | /* failed to execute TUR, assume media not present */ | 1225 | /* failed to execute TUR, assume media not present */ |
@@ -2644,8 +2649,8 @@ static void sd_probe_async(void *data, async_cookie_t cookie) | |||
2644 | * (e.g. /dev/sda). More precisely it is the block device major | 2649 | * (e.g. /dev/sda). More precisely it is the block device major |
2645 | * and minor number that is chosen here. | 2650 | * and minor number that is chosen here. |
2646 | * | 2651 | * |
2647 | * Assume sd_attach is not re-entrant (for time being) | 2652 | * Assume sd_probe is not re-entrant (for time being) |
2648 | * Also think about sd_attach() and sd_remove() running coincidentally. | 2653 | * Also think about sd_probe() and sd_remove() running coincidentally. |
2649 | **/ | 2654 | **/ |
2650 | static int sd_probe(struct device *dev) | 2655 | static int sd_probe(struct device *dev) |
2651 | { | 2656 | { |
@@ -2660,7 +2665,7 @@ static int sd_probe(struct device *dev) | |||
2660 | goto out; | 2665 | goto out; |
2661 | 2666 | ||
2662 | SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp, | 2667 | SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp, |
2663 | "sd_attach\n")); | 2668 | "sd_probe\n")); |
2664 | 2669 | ||
2665 | error = -ENOMEM; | 2670 | error = -ENOMEM; |
2666 | sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL); | 2671 | sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL); |
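The sd_check_events() hunk brackets the TEST UNIT READY with scsi_autopm_get_device()/scsi_autopm_put_device(), so a runtime-suspended disk is resumed before the command is issued and is allowed to autosuspend again afterwards. A condensed sketch of that pattern for a command issued outside the block request path; the wrapper name is hypothetical, and the include set (scsi_priv.h for the autopm helpers, sd.h for the timeout constants) is assumed from the sd.c context:

    /* Wake a possibly runtime-suspended device around an out-of-band command. */
    static int issue_tur(struct scsi_device *sdev, struct scsi_sense_hdr *sshdr)
    {
            int ret;

            ret = scsi_autopm_get_device(sdev);     /* resumes the device if needed */
            if (ret)
                    return ret;                     /* could not wake it up */

            ret = scsi_test_unit_ready(sdev, SD_TIMEOUT, SD_MAX_RETRIES, sshdr);

            scsi_autopm_put_device(sdev);           /* allow autosuspend again */
            return ret;
    }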
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index a15f691f9d34..e41998cb098e 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c | |||
@@ -1105,6 +1105,12 @@ static int check_tape(struct scsi_tape *STp, struct file *filp) | |||
1105 | STp->drv_buffer)); | 1105 | STp->drv_buffer)); |
1106 | } | 1106 | } |
1107 | STp->drv_write_prot = ((STp->buffer)->b_data[2] & 0x80) != 0; | 1107 | STp->drv_write_prot = ((STp->buffer)->b_data[2] & 0x80) != 0; |
1108 | if (!STp->drv_buffer && STp->immediate_filemark) { | ||
1109 | printk(KERN_WARNING | ||
1110 | "%s: non-buffered tape: disabling writing immediate filemarks\n", | ||
1111 | name); | ||
1112 | STp->immediate_filemark = 0; | ||
1113 | } | ||
1108 | } | 1114 | } |
1109 | st_release_request(SRpnt); | 1115 | st_release_request(SRpnt); |
1110 | SRpnt = NULL; | 1116 | SRpnt = NULL; |
@@ -1313,6 +1319,8 @@ static int st_flush(struct file *filp, fl_owner_t id) | |||
1313 | 1319 | ||
1314 | memset(cmd, 0, MAX_COMMAND_SIZE); | 1320 | memset(cmd, 0, MAX_COMMAND_SIZE); |
1315 | cmd[0] = WRITE_FILEMARKS; | 1321 | cmd[0] = WRITE_FILEMARKS; |
1322 | if (STp->immediate_filemark) | ||
1323 | cmd[1] = 1; | ||
1316 | cmd[4] = 1 + STp->two_fm; | 1324 | cmd[4] = 1 + STp->two_fm; |
1317 | 1325 | ||
1318 | SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE, | 1326 | SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE, |
@@ -2180,8 +2188,9 @@ static void st_log_options(struct scsi_tape * STp, struct st_modedef * STm, char | |||
2180 | name, STm->defaults_for_writes, STp->omit_blklims, STp->can_partitions, | 2188 | name, STm->defaults_for_writes, STp->omit_blklims, STp->can_partitions, |
2181 | STp->scsi2_logical); | 2189 | STp->scsi2_logical); |
2182 | printk(KERN_INFO | 2190 | printk(KERN_INFO |
2183 | "%s: sysv: %d nowait: %d sili: %d\n", name, STm->sysv, STp->immediate, | 2191 | "%s: sysv: %d nowait: %d sili: %d nowait_filemark: %d\n", |
2184 | STp->sili); | 2192 | name, STm->sysv, STp->immediate, STp->sili, |
2193 | STp->immediate_filemark); | ||
2185 | printk(KERN_INFO "%s: debugging: %d\n", | 2194 | printk(KERN_INFO "%s: debugging: %d\n", |
2186 | name, debugging); | 2195 | name, debugging); |
2187 | } | 2196 | } |
@@ -2223,6 +2232,7 @@ static int st_set_options(struct scsi_tape *STp, long options) | |||
2223 | STp->can_partitions = (options & MT_ST_CAN_PARTITIONS) != 0; | 2232 | STp->can_partitions = (options & MT_ST_CAN_PARTITIONS) != 0; |
2224 | STp->scsi2_logical = (options & MT_ST_SCSI2LOGICAL) != 0; | 2233 | STp->scsi2_logical = (options & MT_ST_SCSI2LOGICAL) != 0; |
2225 | STp->immediate = (options & MT_ST_NOWAIT) != 0; | 2234 | STp->immediate = (options & MT_ST_NOWAIT) != 0; |
2235 | STp->immediate_filemark = (options & MT_ST_NOWAIT_EOF) != 0; | ||
2226 | STm->sysv = (options & MT_ST_SYSV) != 0; | 2236 | STm->sysv = (options & MT_ST_SYSV) != 0; |
2227 | STp->sili = (options & MT_ST_SILI) != 0; | 2237 | STp->sili = (options & MT_ST_SILI) != 0; |
2228 | DEB( debugging = (options & MT_ST_DEBUGGING) != 0; | 2238 | DEB( debugging = (options & MT_ST_DEBUGGING) != 0; |
@@ -2254,6 +2264,8 @@ static int st_set_options(struct scsi_tape *STp, long options) | |||
2254 | STp->scsi2_logical = value; | 2264 | STp->scsi2_logical = value; |
2255 | if ((options & MT_ST_NOWAIT) != 0) | 2265 | if ((options & MT_ST_NOWAIT) != 0) |
2256 | STp->immediate = value; | 2266 | STp->immediate = value; |
2267 | if ((options & MT_ST_NOWAIT_EOF) != 0) | ||
2268 | STp->immediate_filemark = value; | ||
2257 | if ((options & MT_ST_SYSV) != 0) | 2269 | if ((options & MT_ST_SYSV) != 0) |
2258 | STm->sysv = value; | 2270 | STm->sysv = value; |
2259 | if ((options & MT_ST_SILI) != 0) | 2271 | if ((options & MT_ST_SILI) != 0) |
@@ -2713,7 +2725,8 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon | |||
2713 | cmd[0] = WRITE_FILEMARKS; | 2725 | cmd[0] = WRITE_FILEMARKS; |
2714 | if (cmd_in == MTWSM) | 2726 | if (cmd_in == MTWSM) |
2715 | cmd[1] = 2; | 2727 | cmd[1] = 2; |
2716 | if (cmd_in == MTWEOFI) | 2728 | if (cmd_in == MTWEOFI || |
2729 | (cmd_in == MTWEOF && STp->immediate_filemark)) | ||
2717 | cmd[1] |= 1; | 2730 | cmd[1] |= 1; |
2718 | cmd[2] = (arg >> 16); | 2731 | cmd[2] = (arg >> 16); |
2719 | cmd[3] = (arg >> 8); | 2732 | cmd[3] = (arg >> 8); |
@@ -4092,6 +4105,7 @@ static int st_probe(struct device *dev) | |||
4092 | tpnt->scsi2_logical = ST_SCSI2LOGICAL; | 4105 | tpnt->scsi2_logical = ST_SCSI2LOGICAL; |
4093 | tpnt->sili = ST_SILI; | 4106 | tpnt->sili = ST_SILI; |
4094 | tpnt->immediate = ST_NOWAIT; | 4107 | tpnt->immediate = ST_NOWAIT; |
4108 | tpnt->immediate_filemark = 0; | ||
4095 | tpnt->default_drvbuffer = 0xff; /* No forced buffering */ | 4109 | tpnt->default_drvbuffer = 0xff; /* No forced buffering */ |
4096 | tpnt->partition = 0; | 4110 | tpnt->partition = 0; |
4097 | tpnt->new_partition = 0; | 4111 | tpnt->new_partition = 0; |
@@ -4477,6 +4491,7 @@ st_options_show(struct device *dev, struct device_attribute *attr, char *buf) | |||
4477 | options |= STp->scsi2_logical ? MT_ST_SCSI2LOGICAL : 0; | 4491 | options |= STp->scsi2_logical ? MT_ST_SCSI2LOGICAL : 0; |
4478 | options |= STm->sysv ? MT_ST_SYSV : 0; | 4492 | options |= STm->sysv ? MT_ST_SYSV : 0; |
4479 | options |= STp->immediate ? MT_ST_NOWAIT : 0; | 4493 | options |= STp->immediate ? MT_ST_NOWAIT : 0; |
4494 | options |= STp->immediate_filemark ? MT_ST_NOWAIT_EOF : 0; | ||
4480 | options |= STp->sili ? MT_ST_SILI : 0; | 4495 | options |= STp->sili ? MT_ST_SILI : 0; |
4481 | 4496 | ||
4482 | l = snprintf(buf, PAGE_SIZE, "0x%08x\n", options); | 4497 | l = snprintf(buf, PAGE_SIZE, "0x%08x\n", options); |
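The new MT_ST_NOWAIT_EOF option can be toggled per device from user space through the usual st options ioctl. A minimal sketch, assuming MT_ST_NOWAIT_EOF is exported through the mtio header by this series and using /dev/nst0 purely as an example device:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/mtio.h>

    int main(void)
    {
            /* Set the boolean driver options listed in mt_count; here only
             * MT_ST_NOWAIT_EOF (immediate filemark writes) is enabled. */
            struct mtop op = {
                    .mt_op = MTSETDRVBUFFER,
                    .mt_count = MT_ST_SETBOOLEANS | MT_ST_NOWAIT_EOF,
            };
            int fd = open("/dev/nst0", O_RDWR);

            if (fd < 0 || ioctl(fd, MTIOCTOP, &op) < 0) {
                    perror("MT_ST_NOWAIT_EOF");
                    return 1;
            }
            return 0;
    }

With the option set, MTWEOF issues WRITE FILEMARKS with the immediate bit, as the st_int_ioctl() hunk above shows, and check_tape() clears the option again on non-buffered drives.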
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h index f91a67c6d968..ea35632b986c 100644 --- a/drivers/scsi/st.h +++ b/drivers/scsi/st.h | |||
@@ -120,6 +120,7 @@ struct scsi_tape { | |||
120 | unsigned char c_algo; /* compression algorithm */ | 120 | unsigned char c_algo; /* compression algorithm */ |
121 | unsigned char pos_unknown; /* after reset position unknown */ | 121 | unsigned char pos_unknown; /* after reset position unknown */ |
122 | unsigned char sili; /* use SILI when reading in variable b mode */ | 122 | unsigned char sili; /* use SILI when reading in variable b mode */ |
123 | unsigned char immediate_filemark; /* write filemark immediately */ | ||
123 | int tape_type; | 124 | int tape_type; |
124 | int long_timeout; /* timeout for commands known to take long time */ | 125 | int long_timeout; /* timeout for commands known to take long time */ |
125 | 126 | ||
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig new file mode 100644 index 000000000000..8f27f9d6f91d --- /dev/null +++ b/drivers/scsi/ufs/Kconfig | |||
@@ -0,0 +1,49 @@ | |||
1 | # | ||
2 | # Kernel configuration file for the UFS Host Controller | ||
3 | # | ||
4 | # This code is based on drivers/scsi/ufs/Kconfig | ||
5 | # Copyright (C) 2011 Samsung India Software Operations | ||
6 | # | ||
7 | # Santosh Yaraganavi <santosh.sy@samsung.com> | ||
8 | # Vinayak Holikatti <h.vinayak@samsung.com> | ||
9 | |||
10 | # This program is free software; you can redistribute it and/or | ||
11 | # modify it under the terms of the GNU General Public License | ||
12 | # as published by the Free Software Foundation; either version 2 | ||
13 | # of the License, or (at your option) any later version. | ||
14 | |||
15 | # This program is distributed in the hope that it will be useful, | ||
16 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 | # GNU General Public License for more details. | ||
19 | |||
20 | # NO WARRANTY | ||
21 | # THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR | ||
22 | # CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT | ||
23 | # LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, | ||
24 | # MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is | ||
25 | # solely responsible for determining the appropriateness of using and | ||
26 | # distributing the Program and assumes all risks associated with its | ||
27 | # exercise of rights under this Agreement, including but not limited to | ||
28 | # the risks and costs of program errors, damage to or loss of data, | ||
29 | # programs or equipment, and unavailability or interruption of operations. | ||
30 | |||
31 | # DISCLAIMER OF LIABILITY | ||
32 | # NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY | ||
33 | # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
34 | # DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND | ||
35 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR | ||
36 | # TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE | ||
37 | # USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED | ||
38 | # HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES | ||
39 | |||
40 | # You should have received a copy of the GNU General Public License | ||
41 | # along with this program; if not, write to the Free Software | ||
42 | # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, | ||
43 | # USA. | ||
44 | |||
45 | config SCSI_UFSHCD | ||
46 | tristate "Universal Flash Storage host controller driver" | ||
47 | depends on PCI && SCSI | ||
48 | ---help--- | ||
49 | This is a generic driver which supports PCIe UFS Host controllers. | ||
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile new file mode 100644 index 000000000000..adf7895a6a91 --- /dev/null +++ b/drivers/scsi/ufs/Makefile | |||
@@ -0,0 +1,2 @@ | |||
1 | # UFSHCD makefile | ||
2 | obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o | ||
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h new file mode 100644 index 000000000000..b207529f8d54 --- /dev/null +++ b/drivers/scsi/ufs/ufs.h | |||
@@ -0,0 +1,207 @@ | |||
1 | /* | ||
2 | * Universal Flash Storage Host controller driver | ||
3 | * | ||
4 | * This code is based on drivers/scsi/ufs/ufs.h | ||
5 | * Copyright (C) 2011-2012 Samsung India Software Operations | ||
6 | * | ||
7 | * Santosh Yaraganavi <santosh.sy@samsung.com> | ||
8 | * Vinayak Holikatti <h.vinayak@samsung.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public License | ||
12 | * as published by the Free Software Foundation; either version 2 | ||
13 | * of the License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 | * GNU General Public License for more details. | ||
19 | * | ||
20 | * NO WARRANTY | ||
21 | * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR | ||
22 | * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT | ||
23 | * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, | ||
24 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is | ||
25 | * solely responsible for determining the appropriateness of using and | ||
26 | * distributing the Program and assumes all risks associated with its | ||
27 | * exercise of rights under this Agreement, including but not limited to | ||
28 | * the risks and costs of program errors, damage to or loss of data, | ||
29 | * programs or equipment, and unavailability or interruption of operations. | ||
30 | |||
31 | * DISCLAIMER OF LIABILITY | ||
32 | * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY | ||
33 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
34 | * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND | ||
35 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR | ||
36 | * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE | ||
37 | * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED | ||
38 | * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES | ||
39 | |||
40 | * You should have received a copy of the GNU General Public License | ||
41 | * along with this program; if not, write to the Free Software | ||
42 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, | ||
43 | * USA. | ||
44 | */ | ||
45 | |||
46 | #ifndef _UFS_H | ||
47 | #define _UFS_H | ||
48 | |||
49 | #define MAX_CDB_SIZE 16 | ||
50 | |||
51 | #define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\ | ||
52 | ((byte3 << 24) | (byte2 << 16) |\ | ||
53 | (byte1 << 8) | (byte0)) | ||
54 | |||
55 | /* | ||
56 | * UFS Protocol Information Unit related definitions | ||
57 | */ | ||
58 | |||
59 | /* Task management functions */ | ||
60 | enum { | ||
61 | UFS_ABORT_TASK = 0x01, | ||
62 | UFS_ABORT_TASK_SET = 0x02, | ||
63 | UFS_CLEAR_TASK_SET = 0x04, | ||
64 | UFS_LOGICAL_RESET = 0x08, | ||
65 | UFS_QUERY_TASK = 0x80, | ||
66 | UFS_QUERY_TASK_SET = 0x81, | ||
67 | }; | ||
68 | |||
69 | /* UTP UPIU Transaction Codes Initiator to Target */ | ||
70 | enum { | ||
71 | UPIU_TRANSACTION_NOP_OUT = 0x00, | ||
72 | UPIU_TRANSACTION_COMMAND = 0x01, | ||
73 | UPIU_TRANSACTION_DATA_OUT = 0x02, | ||
74 | UPIU_TRANSACTION_TASK_REQ = 0x04, | ||
75 | UPIU_TRANSACTION_QUERY_REQ = 0x26, | ||
76 | }; | ||
77 | |||
78 | /* UTP UPIU Transaction Codes Target to Initiator */ | ||
79 | enum { | ||
80 | UPIU_TRANSACTION_NOP_IN = 0x20, | ||
81 | UPIU_TRANSACTION_RESPONSE = 0x21, | ||
82 | UPIU_TRANSACTION_DATA_IN = 0x22, | ||
83 | UPIU_TRANSACTION_TASK_RSP = 0x24, | ||
84 | UPIU_TRANSACTION_READY_XFER = 0x31, | ||
85 | UPIU_TRANSACTION_QUERY_RSP = 0x36, | ||
86 | }; | ||
87 | |||
88 | /* UPIU Read/Write flags */ | ||
89 | enum { | ||
90 | UPIU_CMD_FLAGS_NONE = 0x00, | ||
91 | UPIU_CMD_FLAGS_WRITE = 0x20, | ||
92 | UPIU_CMD_FLAGS_READ = 0x40, | ||
93 | }; | ||
94 | |||
95 | /* UPIU Task Attributes */ | ||
96 | enum { | ||
97 | UPIU_TASK_ATTR_SIMPLE = 0x00, | ||
98 | UPIU_TASK_ATTR_ORDERED = 0x01, | ||
99 | UPIU_TASK_ATTR_HEADQ = 0x02, | ||
100 | UPIU_TASK_ATTR_ACA = 0x03, | ||
101 | }; | ||
102 | |||
103 | /* UTP QUERY Transaction Specific Fields OpCode */ | ||
104 | enum { | ||
105 | UPIU_QUERY_OPCODE_NOP = 0x0, | ||
106 | UPIU_QUERY_OPCODE_READ_DESC = 0x1, | ||
107 | UPIU_QUERY_OPCODE_WRITE_DESC = 0x2, | ||
108 | UPIU_QUERY_OPCODE_READ_ATTR = 0x3, | ||
109 | UPIU_QUERY_OPCODE_WRITE_ATTR = 0x4, | ||
110 | UPIU_QUERY_OPCODE_READ_FLAG = 0x5, | ||
111 | UPIU_QUERY_OPCODE_SET_FLAG = 0x6, | ||
112 | UPIU_QUERY_OPCODE_CLEAR_FLAG = 0x7, | ||
113 | UPIU_QUERY_OPCODE_TOGGLE_FLAG = 0x8, | ||
114 | }; | ||
115 | |||
116 | /* UTP Transfer Request Command Type (CT) */ | ||
117 | enum { | ||
118 | UPIU_COMMAND_SET_TYPE_SCSI = 0x0, | ||
119 | UPIU_COMMAND_SET_TYPE_UFS = 0x1, | ||
120 | UPIU_COMMAND_SET_TYPE_QUERY = 0x2, | ||
121 | }; | ||
122 | |||
123 | enum { | ||
124 | MASK_SCSI_STATUS = 0xFF, | ||
125 | MASK_TASK_RESPONSE = 0xFF00, | ||
126 | MASK_RSP_UPIU_RESULT = 0xFFFF, | ||
127 | }; | ||
128 | |||
129 | /* Task management service response */ | ||
130 | enum { | ||
131 | UPIU_TASK_MANAGEMENT_FUNC_COMPL = 0x00, | ||
132 | UPIU_TASK_MANAGEMENT_FUNC_NOT_SUPPORTED = 0x04, | ||
133 | UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED = 0x08, | ||
134 | UPIU_TASK_MANAGEMENT_FUNC_FAILED = 0x05, | ||
135 | UPIU_INCORRECT_LOGICAL_UNIT_NO = 0x09, | ||
136 | }; | ||
137 | /** | ||
138 | * struct utp_upiu_header - UPIU header structure | ||
139 | * @dword_0: UPIU header DW-0 | ||
140 | * @dword_1: UPIU header DW-1 | ||
141 | * @dword_2: UPIU header DW-2 | ||
142 | */ | ||
143 | struct utp_upiu_header { | ||
144 | u32 dword_0; | ||
145 | u32 dword_1; | ||
146 | u32 dword_2; | ||
147 | }; | ||
148 | |||
149 | /** | ||
150 | * struct utp_upiu_cmd - Command UPIU structure | ||
151 | * @header: UPIU header structure DW-0 to DW-2 | ||
152 | * @exp_data_transfer_len: Data Transfer Length DW-3 | ||
153 | * @cdb: Command Descriptor Block CDB DW-4 to DW-7 | ||
154 | */ | ||
155 | struct utp_upiu_cmd { | ||
156 | struct utp_upiu_header header; | ||
157 | u32 exp_data_transfer_len; | ||
158 | u8 cdb[MAX_CDB_SIZE]; | ||
159 | }; | ||
160 | |||
161 | /** | ||
162 | * struct utp_upiu_rsp - Response UPIU structure | ||
163 | * @header: UPIU header DW-0 to DW-2 | ||
164 | * @residual_transfer_count: Residual transfer count DW-3 | ||
165 | * @reserved: Reserved double words DW-4 to DW-7 | ||
166 | * @sense_data_len: Sense data length DW-8 U16 | ||
167 | * @sense_data: Sense data field DW-8 to DW-12 | ||
168 | */ | ||
169 | struct utp_upiu_rsp { | ||
170 | struct utp_upiu_header header; | ||
171 | u32 residual_transfer_count; | ||
172 | u32 reserved[4]; | ||
173 | u16 sense_data_len; | ||
174 | u8 sense_data[18]; | ||
175 | }; | ||
176 | |||
177 | /** | ||
178 | * struct utp_upiu_task_req - Task request UPIU structure | ||
179 | * @header - UPIU header structure DW0 to DW-2 | ||
180 | * @input_param1: Input parameter 1 DW-3 | ||
181 | * @input_param2: Input parameter 2 DW-4 | ||
182 | * @input_param3: Input parameter 3 DW-5 | ||
183 | * @reserved: Reserved double words DW-6 to DW-7 | ||
184 | */ | ||
185 | struct utp_upiu_task_req { | ||
186 | struct utp_upiu_header header; | ||
187 | u32 input_param1; | ||
188 | u32 input_param2; | ||
189 | u32 input_param3; | ||
190 | u32 reserved[2]; | ||
191 | }; | ||
192 | |||
193 | /** | ||
194 | * struct utp_upiu_task_rsp - Task Management Response UPIU structure | ||
195 | * @header: UPIU header structure DW-0 to DW-2 | ||
196 | * @output_param1: Output parameter 1 DW-3 | ||
197 | * @output_param2: Output parameter 2 DW-4 | ||
198 | * @reserved: Reserved double words DW-5 to DW-7 | ||
199 | */ | ||
200 | struct utp_upiu_task_rsp { | ||
201 | struct utp_upiu_header header; | ||
202 | u32 output_param1; | ||
203 | u32 output_param2; | ||
204 | u32 reserved[3]; | ||
205 | }; | ||
206 | |||
207 | #endif /* End of Header */ | ||
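UPIU_HEADER_DWORD() packs four byte-wide fields into one header double word: for a command UPIU, byte 3 is the transaction code, byte 2 the flags, byte 1 the LUN and byte 0 the task tag, which is how ufshcd_compose_upiu() below uses it. A small worked example with arbitrary LUN and tag values; the function name is hypothetical:

    #include <linux/types.h>
    #include "ufs.h"        /* UPIU_HEADER_DWORD() and the UPIU_* constants above */

    static u32 example_cmd_upiu_dw0(void)
    {
            /* DW-0 of a command UPIU for a read to LUN 0 with task tag 5 */
            return UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND,      /* 0x01 */
                                     UPIU_CMD_FLAGS_READ,           /* 0x40 */
                                     0,                             /* LUN */
                                     5);                            /* task tag */
            /* = 0x01400005; the driver stores cpu_to_be32() of this value */
    }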
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c new file mode 100644 index 000000000000..52b96e8bf92e --- /dev/null +++ b/drivers/scsi/ufs/ufshcd.c | |||
@@ -0,0 +1,1978 @@ | |||
1 | /* | ||
2 | * Universal Flash Storage Host controller driver | ||
3 | * | ||
4 | * This code is based on drivers/scsi/ufs/ufshcd.c | ||
5 | * Copyright (C) 2011-2012 Samsung India Software Operations | ||
6 | * | ||
7 | * Santosh Yaraganavi <santosh.sy@samsung.com> | ||
8 | * Vinayak Holikatti <h.vinayak@samsung.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public License | ||
12 | * as published by the Free Software Foundation; either version 2 | ||
13 | * of the License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 | * GNU General Public License for more details. | ||
19 | * | ||
20 | * NO WARRANTY | ||
21 | * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR | ||
22 | * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT | ||
23 | * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, | ||
24 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is | ||
25 | * solely responsible for determining the appropriateness of using and | ||
26 | * distributing the Program and assumes all risks associated with its | ||
27 | * exercise of rights under this Agreement, including but not limited to | ||
28 | * the risks and costs of program errors, damage to or loss of data, | ||
29 | * programs or equipment, and unavailability or interruption of operations. | ||
30 | |||
31 | * DISCLAIMER OF LIABILITY | ||
32 | * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY | ||
33 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
34 | * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND | ||
35 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR | ||
36 | * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE | ||
37 | * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED | ||
38 | * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES | ||
39 | |||
40 | * You should have received a copy of the GNU General Public License | ||
41 | * along with this program; if not, write to the Free Software | ||
42 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, | ||
43 | * USA. | ||
44 | */ | ||
45 | |||
46 | #include <linux/module.h> | ||
47 | #include <linux/kernel.h> | ||
48 | #include <linux/init.h> | ||
49 | #include <linux/pci.h> | ||
50 | #include <linux/interrupt.h> | ||
51 | #include <linux/io.h> | ||
52 | #include <linux/delay.h> | ||
53 | #include <linux/slab.h> | ||
54 | #include <linux/spinlock.h> | ||
55 | #include <linux/workqueue.h> | ||
56 | #include <linux/errno.h> | ||
57 | #include <linux/types.h> | ||
58 | #include <linux/wait.h> | ||
59 | #include <linux/bitops.h> | ||
60 | |||
61 | #include <asm/irq.h> | ||
62 | #include <asm/byteorder.h> | ||
63 | #include <scsi/scsi.h> | ||
64 | #include <scsi/scsi_cmnd.h> | ||
65 | #include <scsi/scsi_host.h> | ||
66 | #include <scsi/scsi_tcq.h> | ||
67 | #include <scsi/scsi_dbg.h> | ||
68 | #include <scsi/scsi_eh.h> | ||
69 | |||
70 | #include "ufs.h" | ||
71 | #include "ufshci.h" | ||
72 | |||
73 | #define UFSHCD "ufshcd" | ||
74 | #define UFSHCD_DRIVER_VERSION "0.1" | ||
75 | |||
76 | enum { | ||
77 | UFSHCD_MAX_CHANNEL = 0, | ||
78 | UFSHCD_MAX_ID = 1, | ||
79 | UFSHCD_MAX_LUNS = 8, | ||
80 | UFSHCD_CMD_PER_LUN = 32, | ||
81 | UFSHCD_CAN_QUEUE = 32, | ||
82 | }; | ||
83 | |||
84 | /* UFSHCD states */ | ||
85 | enum { | ||
86 | UFSHCD_STATE_OPERATIONAL, | ||
87 | UFSHCD_STATE_RESET, | ||
88 | UFSHCD_STATE_ERROR, | ||
89 | }; | ||
90 | |||
91 | /* Interrupt configuration options */ | ||
92 | enum { | ||
93 | UFSHCD_INT_DISABLE, | ||
94 | UFSHCD_INT_ENABLE, | ||
95 | UFSHCD_INT_CLEAR, | ||
96 | }; | ||
97 | |||
98 | /* Interrupt aggregation options */ | ||
99 | enum { | ||
100 | INT_AGGR_RESET, | ||
101 | INT_AGGR_CONFIG, | ||
102 | }; | ||
103 | |||
104 | /** | ||
105 | * struct uic_command - UIC command structure | ||
106 | * @command: UIC command | ||
107 | * @argument1: UIC command argument 1 | ||
108 | * @argument2: UIC command argument 2 | ||
109 | * @argument3: UIC command argument 3 | ||
110 | * @cmd_active: Indicate if UIC command is outstanding | ||
111 | * @result: UIC command result | ||
112 | */ | ||
113 | struct uic_command { | ||
114 | u32 command; | ||
115 | u32 argument1; | ||
116 | u32 argument2; | ||
117 | u32 argument3; | ||
118 | int cmd_active; | ||
119 | int result; | ||
120 | }; | ||
121 | |||
122 | /** | ||
123 | * struct ufs_hba - per adapter private structure | ||
124 | * @mmio_base: UFSHCI base register address | ||
125 | * @ucdl_base_addr: UFS Command Descriptor base address | ||
126 | * @utrdl_base_addr: UTP Transfer Request Descriptor base address | ||
127 | * @utmrdl_base_addr: UTP Task Management Descriptor base address | ||
128 | * @ucdl_dma_addr: UFS Command Descriptor DMA address | ||
129 | * @utrdl_dma_addr: UTRDL DMA address | ||
130 | * @utmrdl_dma_addr: UTMRDL DMA address | ||
131 | * @host: Scsi_Host instance of the driver | ||
132 | * @pdev: PCI device handle | ||
133 | * @lrb: local reference block | ||
134 | * @outstanding_tasks: Bits representing outstanding task requests | ||
135 | * @outstanding_reqs: Bits representing outstanding transfer requests | ||
136 | * @capabilities: UFS Controller Capabilities | ||
137 | * @nutrs: Transfer Request Queue depth supported by controller | ||
138 | * @nutmrs: Task Management Queue depth supported by controller | ||
139 | * @active_uic_cmd: handle of active UIC command | ||
140 | * @ufshcd_tm_wait_queue: wait queue for task management | ||
141 | * @tm_condition: condition variable for task management | ||
142 | * @ufshcd_state: UFSHCD states | ||
143 | * @int_enable_mask: Interrupt Mask Bits | ||
144 | * @uic_workq: Work queue for UIC completion handling | ||
145 | * @feh_workq: Work queue for fatal controller error handling | ||
146 | * @errors: HBA errors | ||
147 | */ | ||
148 | struct ufs_hba { | ||
149 | void __iomem *mmio_base; | ||
150 | |||
151 | /* Virtual memory reference */ | ||
152 | struct utp_transfer_cmd_desc *ucdl_base_addr; | ||
153 | struct utp_transfer_req_desc *utrdl_base_addr; | ||
154 | struct utp_task_req_desc *utmrdl_base_addr; | ||
155 | |||
156 | /* DMA memory reference */ | ||
157 | dma_addr_t ucdl_dma_addr; | ||
158 | dma_addr_t utrdl_dma_addr; | ||
159 | dma_addr_t utmrdl_dma_addr; | ||
160 | |||
161 | struct Scsi_Host *host; | ||
162 | struct pci_dev *pdev; | ||
163 | |||
164 | struct ufshcd_lrb *lrb; | ||
165 | |||
166 | unsigned long outstanding_tasks; | ||
167 | unsigned long outstanding_reqs; | ||
168 | |||
169 | u32 capabilities; | ||
170 | int nutrs; | ||
171 | int nutmrs; | ||
172 | u32 ufs_version; | ||
173 | |||
174 | struct uic_command active_uic_cmd; | ||
175 | wait_queue_head_t ufshcd_tm_wait_queue; | ||
176 | unsigned long tm_condition; | ||
177 | |||
178 | u32 ufshcd_state; | ||
179 | u32 int_enable_mask; | ||
180 | |||
181 | /* Work Queues */ | ||
182 | struct work_struct uic_workq; | ||
183 | struct work_struct feh_workq; | ||
184 | |||
185 | /* HBA Errors */ | ||
186 | u32 errors; | ||
187 | }; | ||
188 | |||
189 | /** | ||
190 | * struct ufshcd_lrb - local reference block | ||
191 | * @utr_descriptor_ptr: UTRD address of the command | ||
192 | * @ucd_cmd_ptr: UCD address of the command | ||
193 | * @ucd_rsp_ptr: Response UPIU address for this command | ||
194 | * @ucd_prdt_ptr: PRDT address of the command | ||
195 | * @cmd: pointer to SCSI command | ||
196 | * @sense_buffer: pointer to sense buffer address of the SCSI command | ||
197 | * @sense_bufflen: Length of the sense buffer | ||
198 | * @scsi_status: SCSI status of the command | ||
199 | * @command_type: SCSI, UFS, Query. | ||
200 | * @task_tag: Task tag of the command | ||
201 | * @lun: LUN of the command | ||
202 | */ | ||
203 | struct ufshcd_lrb { | ||
204 | struct utp_transfer_req_desc *utr_descriptor_ptr; | ||
205 | struct utp_upiu_cmd *ucd_cmd_ptr; | ||
206 | struct utp_upiu_rsp *ucd_rsp_ptr; | ||
207 | struct ufshcd_sg_entry *ucd_prdt_ptr; | ||
208 | |||
209 | struct scsi_cmnd *cmd; | ||
210 | u8 *sense_buffer; | ||
211 | unsigned int sense_bufflen; | ||
212 | int scsi_status; | ||
213 | |||
214 | int command_type; | ||
215 | int task_tag; | ||
216 | unsigned int lun; | ||
217 | }; | ||
218 | |||
219 | /** | ||
220 | * ufshcd_get_ufs_version - Get the UFS version supported by the HBA | ||
221 | * @hba - Pointer to adapter instance | ||
222 | * | ||
223 | * Returns UFSHCI version supported by the controller | ||
224 | */ | ||
225 | static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba) | ||
226 | { | ||
227 | return readl(hba->mmio_base + REG_UFS_VERSION); | ||
228 | } | ||
229 | |||
230 | /** | ||
231 | * ufshcd_is_device_present - Check if any device is connected to | ||
232 | * the host controller | ||
233 | * @reg_hcs - host controller status register value | ||
234 | * | ||
235 | * Returns 0 if device present, non-zero if no device detected | ||
236 | */ | ||
237 | static inline int ufshcd_is_device_present(u32 reg_hcs) | ||
238 | { | ||
239 | return (DEVICE_PRESENT & reg_hcs) ? 0 : -1; | ||
240 | } | ||
241 | |||
242 | /** | ||
243 | * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status | ||
244 | * @lrb: pointer to local command reference block | ||
245 | * | ||
246 | * This function is used to get the OCS field from UTRD | ||
247 | * Returns the OCS field in the UTRD | ||
248 | */ | ||
249 | static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp) | ||
250 | { | ||
251 | return lrbp->utr_descriptor_ptr->header.dword_2 & MASK_OCS; | ||
252 | } | ||
253 | |||
254 | /** | ||
255 | * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status | ||
256 | * @task_req_descp: pointer to utp_task_req_desc structure | ||
257 | * | ||
258 | * This function is used to get the OCS field from UTMRD | ||
259 | * Returns the OCS field in the UTMRD | ||
260 | */ | ||
261 | static inline int | ||
262 | ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp) | ||
263 | { | ||
264 | return task_req_descp->header.dword_2 & MASK_OCS; | ||
265 | } | ||
266 | |||
267 | /** | ||
268 | * ufshcd_get_tm_free_slot - get a free slot for task management request | ||
269 | * @hba: per adapter instance | ||
270 | * | ||
271 | * Returns the free slot number, or the maximum number of task | ||
272 | * management request slots if the queue is full | ||
273 | */ | ||
274 | static inline int ufshcd_get_tm_free_slot(struct ufs_hba *hba) | ||
275 | { | ||
276 | return find_first_zero_bit(&hba->outstanding_tasks, hba->nutmrs); | ||
277 | } | ||
278 | |||
279 | /** | ||
280 | * ufshcd_utrl_clear - Clear a bit in UTRLCLR register | ||
281 | * @hba: per adapter instance | ||
282 | * @pos: position of the bit to be cleared | ||
283 | */ | ||
284 | static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos) | ||
285 | { | ||
286 | writel(~(1 << pos), | ||
287 | (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_CLEAR)); | ||
288 | } | ||
289 | |||
290 | /** | ||
291 | * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY | ||
292 | * @reg: Register value of host controller status | ||
293 | * | ||
294 | * Returns 0 on success, a positive value on failure | ||
295 | */ | ||
296 | static inline int ufshcd_get_lists_status(u32 reg) | ||
297 | { | ||
298 | /* | ||
299 | * The mask 0xFF is for the following HCS register bits | ||
300 | * Bit Description | ||
301 | * 0 Device Present | ||
302 | * 1 UTRLRDY | ||
303 | * 2 UTMRLRDY | ||
304 | * 3 UCRDY | ||
305 | * 4 HEI | ||
306 | * 5 DEI | ||
307 | * 6-7 reserved | ||
308 | */ | ||
309 | return (((reg) & (0xFF)) >> 1) ^ (0x07); | ||
310 | } | ||
311 | |||
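To illustrate the arithmetic in ufshcd_get_lists_status(): with HCS = 0x0F (Device Present, UTRLRDY, UTMRLRDY and UCRDY all set), (0x0F & 0xFF) >> 1 = 0x07 and 0x07 ^ 0x07 = 0, i.e. success; with HCS = 0x0B (UTMRLRDY still clear), the same expression yields 0x05 ^ 0x07 = 0x02, a positive value identifying the list that is not ready. The HEI/DEI error bits land above bit 2 after the shift, so they also produce a non-zero result.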
312 | /** | ||
313 | * ufshcd_get_uic_cmd_result - Get the UIC command result | ||
314 | * @hba: Pointer to adapter instance | ||
315 | * | ||
316 | * This function gets the result of UIC command completion | ||
317 | * Returns 0 on success, non zero value on error | ||
318 | */ | ||
319 | static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba) | ||
320 | { | ||
321 | return readl(hba->mmio_base + REG_UIC_COMMAND_ARG_2) & | ||
322 | MASK_UIC_COMMAND_RESULT; | ||
323 | } | ||
324 | |||
325 | /** | ||
326 | * ufshcd_free_hba_memory - Free allocated memory for LRB, request | ||
327 | * and task lists | ||
328 | * @hba: Pointer to adapter instance | ||
329 | */ | ||
330 | static inline void ufshcd_free_hba_memory(struct ufs_hba *hba) | ||
331 | { | ||
332 | size_t utmrdl_size, utrdl_size, ucdl_size; | ||
333 | |||
334 | kfree(hba->lrb); | ||
335 | |||
336 | if (hba->utmrdl_base_addr) { | ||
337 | utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs; | ||
338 | dma_free_coherent(&hba->pdev->dev, utmrdl_size, | ||
339 | hba->utmrdl_base_addr, hba->utmrdl_dma_addr); | ||
340 | } | ||
341 | |||
342 | if (hba->utrdl_base_addr) { | ||
343 | utrdl_size = | ||
344 | (sizeof(struct utp_transfer_req_desc) * hba->nutrs); | ||
345 | dma_free_coherent(&hba->pdev->dev, utrdl_size, | ||
346 | hba->utrdl_base_addr, hba->utrdl_dma_addr); | ||
347 | } | ||
348 | |||
349 | if (hba->ucdl_base_addr) { | ||
350 | ucdl_size = | ||
351 | (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs); | ||
352 | dma_free_coherent(&hba->pdev->dev, ucdl_size, | ||
353 | hba->ucdl_base_addr, hba->ucdl_dma_addr); | ||
354 | } | ||
355 | } | ||
356 | |||
357 | /** | ||
358 | * ufshcd_is_valid_req_rsp - checks if controller TR response is valid | ||
359 | * @ucd_rsp_ptr: pointer to response UPIU | ||
360 | * | ||
361 | * This function checks the response UPIU for valid transaction type in | ||
362 | * response field | ||
363 | * Returns 0 on success, non-zero on failure | ||
364 | */ | ||
365 | static inline int | ||
366 | ufshcd_is_valid_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr) | ||
367 | { | ||
368 | return ((be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24) == | ||
369 | UPIU_TRANSACTION_RESPONSE) ? 0 : DID_ERROR << 16; | ||
370 | } | ||
371 | |||
372 | /** | ||
373 | * ufshcd_get_rsp_upiu_result - Get the result from response UPIU | ||
374 | * @ucd_rsp_ptr: pointer to response UPIU | ||
375 | * | ||
376 | * This function gets the response status and scsi_status from response UPIU | ||
377 | * Returns the response result code. | ||
378 | */ | ||
379 | static inline int | ||
380 | ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr) | ||
381 | { | ||
382 | return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT; | ||
383 | } | ||
384 | |||
385 | /** | ||
386 | * ufshcd_config_int_aggr - Configure interrupt aggregation values. | ||
387 | * There is currently no use case for configuring interrupt | ||
388 | * aggregation dynamically, so the compile-time constants | ||
389 | * INT_AGGR_COUNTER_THRESHOLD_VALUE and INT_AGGR_TIMEOUT_VALUE | ||
390 | * are used instead. | ||
391 | * @hba: per adapter instance | ||
392 | * @option: Interrupt aggregation option | ||
393 | */ | ||
394 | static inline void | ||
395 | ufshcd_config_int_aggr(struct ufs_hba *hba, int option) | ||
396 | { | ||
397 | switch (option) { | ||
398 | case INT_AGGR_RESET: | ||
399 | writel((INT_AGGR_ENABLE | | ||
400 | INT_AGGR_COUNTER_AND_TIMER_RESET), | ||
401 | (hba->mmio_base + | ||
402 | REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL)); | ||
403 | break; | ||
404 | case INT_AGGR_CONFIG: | ||
405 | writel((INT_AGGR_ENABLE | | ||
406 | INT_AGGR_PARAM_WRITE | | ||
407 | INT_AGGR_COUNTER_THRESHOLD_VALUE | | ||
408 | INT_AGGR_TIMEOUT_VALUE), | ||
409 | (hba->mmio_base + | ||
410 | REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL)); | ||
411 | break; | ||
412 | } | ||
413 | } | ||
414 | |||
415 | /** | ||
416 | * ufshcd_enable_run_stop_reg - Enable run-stop registers. | ||
417 | * Setting the run-stop registers to 1 tells the host controller | ||
418 | * that it can process the pending requests | ||
419 | * @hba: per adapter instance | ||
420 | */ | ||
421 | static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba) | ||
422 | { | ||
423 | writel(UTP_TASK_REQ_LIST_RUN_STOP_BIT, | ||
424 | (hba->mmio_base + | ||
425 | REG_UTP_TASK_REQ_LIST_RUN_STOP)); | ||
426 | writel(UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT, | ||
427 | (hba->mmio_base + | ||
428 | REG_UTP_TRANSFER_REQ_LIST_RUN_STOP)); | ||
429 | } | ||
430 | |||
431 | /** | ||
432 | * ufshcd_hba_stop - Send controller to reset state | ||
433 | * @hba: per adapter instance | ||
434 | */ | ||
435 | static inline void ufshcd_hba_stop(struct ufs_hba *hba) | ||
436 | { | ||
437 | writel(CONTROLLER_DISABLE, (hba->mmio_base + REG_CONTROLLER_ENABLE)); | ||
438 | } | ||
439 | |||
440 | /** | ||
441 | * ufshcd_hba_start - Start controller initialization sequence | ||
442 | * @hba: per adapter instance | ||
443 | */ | ||
444 | static inline void ufshcd_hba_start(struct ufs_hba *hba) | ||
445 | { | ||
446 | writel(CONTROLLER_ENABLE , (hba->mmio_base + REG_CONTROLLER_ENABLE)); | ||
447 | } | ||
448 | |||
449 | /** | ||
450 | * ufshcd_is_hba_active - Get controller state | ||
451 | * @hba: per adapter instance | ||
452 | * | ||
453 | * Returns zero if controller is active, 1 otherwise | ||
454 | */ | ||
455 | static inline int ufshcd_is_hba_active(struct ufs_hba *hba) | ||
456 | { | ||
457 | return (readl(hba->mmio_base + REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1; | ||
458 | } | ||
459 | |||
460 | /** | ||
461 | * ufshcd_send_command - Send SCSI or device management commands | ||
462 | * @hba: per adapter instance | ||
463 | * @task_tag: Task tag of the command | ||
464 | */ | ||
465 | static inline | ||
466 | void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) | ||
467 | { | ||
468 | __set_bit(task_tag, &hba->outstanding_reqs); | ||
469 | writel((1 << task_tag), | ||
470 | (hba->mmio_base + REG_UTP_TRANSFER_REQ_DOOR_BELL)); | ||
471 | } | ||
472 | |||
473 | /** | ||
474 | * ufshcd_copy_sense_data - Copy sense data in case of check condition | ||
475 | * @lrbp: pointer to local reference block | ||
476 | */ | ||
477 | static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp) | ||
478 | { | ||
479 | int len; | ||
480 | if (lrbp->sense_buffer) { | ||
481 | len = be16_to_cpu(lrbp->ucd_rsp_ptr->sense_data_len); | ||
482 | memcpy(lrbp->sense_buffer, | ||
483 | lrbp->ucd_rsp_ptr->sense_data, | ||
484 | min_t(int, len, SCSI_SENSE_BUFFERSIZE)); | ||
485 | } | ||
486 | } | ||
487 | |||
488 | /** | ||
489 | * ufshcd_hba_capabilities - Read controller capabilities | ||
490 | * @hba: per adapter instance | ||
491 | */ | ||
492 | static inline void ufshcd_hba_capabilities(struct ufs_hba *hba) | ||
493 | { | ||
494 | hba->capabilities = | ||
495 | readl(hba->mmio_base + REG_CONTROLLER_CAPABILITIES); | ||
496 | |||
497 | /* nutrs and nutmrs are 0 based values */ | ||
498 | hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1; | ||
499 | hba->nutmrs = | ||
500 | ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1; | ||
501 | } | ||
502 | |||
503 | /** | ||
504 | * ufshcd_send_uic_command - Send UIC commands to unipro layers | ||
505 | * @hba: per adapter instance | ||
506 | * @uic_command: UIC command | ||
507 | */ | ||
508 | static inline void | ||
509 | ufshcd_send_uic_command(struct ufs_hba *hba, struct uic_command *uic_cmnd) | ||
510 | { | ||
511 | /* Write Args */ | ||
512 | writel(uic_cmnd->argument1, | ||
513 | (hba->mmio_base + REG_UIC_COMMAND_ARG_1)); | ||
514 | writel(uic_cmnd->argument2, | ||
515 | (hba->mmio_base + REG_UIC_COMMAND_ARG_2)); | ||
516 | writel(uic_cmnd->argument3, | ||
517 | (hba->mmio_base + REG_UIC_COMMAND_ARG_3)); | ||
518 | |||
519 | /* Write UIC Cmd */ | ||
520 | writel((uic_cmnd->command & COMMAND_OPCODE_MASK), | ||
521 | (hba->mmio_base + REG_UIC_COMMAND)); | ||
522 | } | ||
523 | |||
524 | /** | ||
525 | * ufshcd_map_sg - Map scatter-gather list to prdt | ||
526 | * @lrbp - pointer to local reference block | ||
527 | * | ||
528 | * Returns 0 in case of success, non-zero value in case of failure | ||
529 | */ | ||
530 | static int ufshcd_map_sg(struct ufshcd_lrb *lrbp) | ||
531 | { | ||
532 | struct ufshcd_sg_entry *prd_table; | ||
533 | struct scatterlist *sg; | ||
534 | struct scsi_cmnd *cmd; | ||
535 | int sg_segments; | ||
536 | int i; | ||
537 | |||
538 | cmd = lrbp->cmd; | ||
539 | sg_segments = scsi_dma_map(cmd); | ||
540 | if (sg_segments < 0) | ||
541 | return sg_segments; | ||
542 | |||
543 | if (sg_segments) { | ||
544 | lrbp->utr_descriptor_ptr->prd_table_length = | ||
545 | cpu_to_le16((u16) (sg_segments)); | ||
546 | |||
547 | prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr; | ||
548 | |||
549 | scsi_for_each_sg(cmd, sg, sg_segments, i) { | ||
550 | prd_table[i].size = | ||
551 | cpu_to_le32(((u32) sg_dma_len(sg))-1); | ||
552 | prd_table[i].base_addr = | ||
553 | cpu_to_le32(lower_32_bits(sg->dma_address)); | ||
554 | prd_table[i].upper_addr = | ||
555 | cpu_to_le32(upper_32_bits(sg->dma_address)); | ||
556 | } | ||
557 | } else { | ||
558 | lrbp->utr_descriptor_ptr->prd_table_length = 0; | ||
559 | } | ||
560 | |||
561 | return 0; | ||
562 | } | ||
563 | |||
564 | /** | ||
565 | * ufshcd_int_config - enable/disable interrupts | ||
566 | * @hba: per adapter instance | ||
567 | * @option: interrupt option | ||
568 | */ | ||
569 | static void ufshcd_int_config(struct ufs_hba *hba, u32 option) | ||
570 | { | ||
571 | switch (option) { | ||
572 | case UFSHCD_INT_ENABLE: | ||
573 | writel(hba->int_enable_mask, | ||
574 | (hba->mmio_base + REG_INTERRUPT_ENABLE)); | ||
575 | break; | ||
576 | case UFSHCD_INT_DISABLE: | ||
577 | if (hba->ufs_version == UFSHCI_VERSION_10) | ||
578 | writel(INTERRUPT_DISABLE_MASK_10, | ||
579 | (hba->mmio_base + REG_INTERRUPT_ENABLE)); | ||
580 | else | ||
581 | writel(INTERRUPT_DISABLE_MASK_11, | ||
582 | (hba->mmio_base + REG_INTERRUPT_ENABLE)); | ||
583 | break; | ||
584 | } | ||
585 | } | ||
586 | |||
587 | /** | ||
588 | * ufshcd_compose_upiu - form UFS Protocol Information Unit (UPIU) | ||
589 | * @lrbp: pointer to local reference block | ||
590 | */ | ||
591 | static void ufshcd_compose_upiu(struct ufshcd_lrb *lrbp) | ||
592 | { | ||
593 | struct utp_transfer_req_desc *req_desc; | ||
594 | struct utp_upiu_cmd *ucd_cmd_ptr; | ||
595 | u32 data_direction; | ||
596 | u32 upiu_flags; | ||
597 | |||
598 | ucd_cmd_ptr = lrbp->ucd_cmd_ptr; | ||
599 | req_desc = lrbp->utr_descriptor_ptr; | ||
600 | |||
601 | switch (lrbp->command_type) { | ||
602 | case UTP_CMD_TYPE_SCSI: | ||
603 | if (lrbp->cmd->sc_data_direction == DMA_FROM_DEVICE) { | ||
604 | data_direction = UTP_DEVICE_TO_HOST; | ||
605 | upiu_flags = UPIU_CMD_FLAGS_READ; | ||
606 | } else if (lrbp->cmd->sc_data_direction == DMA_TO_DEVICE) { | ||
607 | data_direction = UTP_HOST_TO_DEVICE; | ||
608 | upiu_flags = UPIU_CMD_FLAGS_WRITE; | ||
609 | } else { | ||
610 | data_direction = UTP_NO_DATA_TRANSFER; | ||
611 | upiu_flags = UPIU_CMD_FLAGS_NONE; | ||
612 | } | ||
613 | |||
614 | /* Transfer request descriptor header fields */ | ||
615 | req_desc->header.dword_0 = | ||
616 | cpu_to_le32(data_direction | UTP_SCSI_COMMAND); | ||
617 | |||
618 | /* | ||
619 | * assigning invalid value for command status. Controller | ||
620 | * updates OCS on command completion, with the command | ||
621 | * status | ||
622 | */ | ||
623 | req_desc->header.dword_2 = | ||
624 | cpu_to_le32(OCS_INVALID_COMMAND_STATUS); | ||
625 | |||
626 | /* command descriptor fields */ | ||
627 | ucd_cmd_ptr->header.dword_0 = | ||
628 | cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND, | ||
629 | upiu_flags, | ||
630 | lrbp->lun, | ||
631 | lrbp->task_tag)); | ||
632 | ucd_cmd_ptr->header.dword_1 = | ||
633 | cpu_to_be32( | ||
634 | UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI, | ||
635 | 0, | ||
636 | 0, | ||
637 | 0)); | ||
638 | |||
639 | /* Total EHS length and Data segment length will be zero */ | ||
640 | ucd_cmd_ptr->header.dword_2 = 0; | ||
641 | |||
642 | ucd_cmd_ptr->exp_data_transfer_len = | ||
643 | cpu_to_be32(lrbp->cmd->transfersize); | ||
644 | |||
645 | memcpy(ucd_cmd_ptr->cdb, | ||
646 | lrbp->cmd->cmnd, | ||
647 | (min_t(unsigned short, | ||
648 | lrbp->cmd->cmd_len, | ||
649 | MAX_CDB_SIZE))); | ||
650 | break; | ||
651 | case UTP_CMD_TYPE_DEV_MANAGE: | ||
652 | /* For query function implementation */ | ||
653 | break; | ||
654 | case UTP_CMD_TYPE_UFS: | ||
655 | /* For UFS native command implementation */ | ||
656 | break; | ||
657 | } /* end of switch */ | ||
658 | } | ||
659 | |||
660 | /** | ||
661 | * ufshcd_queuecommand - main entry point for SCSI requests | ||
662 | * @host: SCSI host pointer | ||
663 | * @cmd: command from SCSI Midlayer | ||
664 | * | ||
665 | * Returns 0 for success, non-zero in case of failure | ||
666 | */ | ||
667 | static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) | ||
668 | { | ||
669 | struct ufshcd_lrb *lrbp; | ||
670 | struct ufs_hba *hba; | ||
671 | unsigned long flags; | ||
672 | int tag; | ||
673 | int err = 0; | ||
674 | |||
675 | hba = shost_priv(host); | ||
676 | |||
677 | tag = cmd->request->tag; | ||
678 | |||
679 | if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) { | ||
680 | err = SCSI_MLQUEUE_HOST_BUSY; | ||
681 | goto out; | ||
682 | } | ||
683 | |||
684 | lrbp = &hba->lrb[tag]; | ||
685 | |||
686 | lrbp->cmd = cmd; | ||
687 | lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE; | ||
688 | lrbp->sense_buffer = cmd->sense_buffer; | ||
689 | lrbp->task_tag = tag; | ||
690 | lrbp->lun = cmd->device->lun; | ||
691 | |||
692 | lrbp->command_type = UTP_CMD_TYPE_SCSI; | ||
693 | |||
694 | /* form UPIU before issuing the command */ | ||
695 | ufshcd_compose_upiu(lrbp); | ||
696 | err = ufshcd_map_sg(lrbp); | ||
697 | if (err) | ||
698 | goto out; | ||
699 | |||
700 | /* issue command to the controller */ | ||
701 | spin_lock_irqsave(hba->host->host_lock, flags); | ||
702 | ufshcd_send_command(hba, tag); | ||
703 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
704 | out: | ||
705 | return err; | ||
706 | } | ||
707 | |||
708 | /** | ||
709 | * ufshcd_memory_alloc - allocate memory for host memory space data structures | ||
710 | * @hba: per adapter instance | ||
711 | * | ||
712 | * 1. Allocate DMA memory for Command Descriptor array | ||
713 | * Each command descriptor consist of Command UPIU, Response UPIU and PRDT | ||
714 | * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL). | ||
715 | * 3. Allocate DMA memory for UTP Task Management Request Descriptor List | ||
716 | * (UTMRDL) | ||
717 | * 4. Allocate memory for local reference block(lrb). | ||
718 | * | ||
719 | * Returns 0 for success, non-zero in case of failure | ||
720 | */ | ||
721 | static int ufshcd_memory_alloc(struct ufs_hba *hba) | ||
722 | { | ||
723 | size_t utmrdl_size, utrdl_size, ucdl_size; | ||
724 | |||
725 | /* Allocate memory for UTP command descriptors */ | ||
726 | ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs); | ||
727 | hba->ucdl_base_addr = dma_alloc_coherent(&hba->pdev->dev, | ||
728 | ucdl_size, | ||
729 | &hba->ucdl_dma_addr, | ||
730 | GFP_KERNEL); | ||
731 | |||
732 | /* | ||
733 | * UFSHCI requires UTP command descriptor to be 128 byte aligned. | ||
734 | * dma_alloc_coherent() returns page aligned memory, so make sure | ||
735 | * hba->ucdl_dma_addr is aligned to PAGE_SIZE; a PAGE_SIZE aligned | ||
736 | * address is aligned to 128 bytes as well | ||
737 | */ | ||
738 | if (!hba->ucdl_base_addr || | ||
739 | WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) { | ||
740 | dev_err(&hba->pdev->dev, | ||
741 | "Command Descriptor Memory allocation failed\n"); | ||
742 | goto out; | ||
743 | } | ||
744 | |||
745 | /* | ||
746 | * Allocate memory for UTP Transfer descriptors | ||
747 | * UFSHCI requires 1024 byte alignment of UTRD | ||
748 | */ | ||
749 | utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs); | ||
750 | hba->utrdl_base_addr = dma_alloc_coherent(&hba->pdev->dev, | ||
751 | utrdl_size, | ||
752 | &hba->utrdl_dma_addr, | ||
753 | GFP_KERNEL); | ||
754 | if (!hba->utrdl_base_addr || | ||
755 | WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) { | ||
756 | dev_err(&hba->pdev->dev, | ||
757 | "Transfer Descriptor Memory allocation failed\n"); | ||
758 | goto out; | ||
759 | } | ||
760 | |||
761 | /* | ||
762 | * Allocate memory for UTP Task Management descriptors | ||
763 | * UFSHCI requires 1024 byte alignment of UTMRD | ||
764 | */ | ||
765 | utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs; | ||
766 | hba->utmrdl_base_addr = dma_alloc_coherent(&hba->pdev->dev, | ||
767 | utmrdl_size, | ||
768 | &hba->utmrdl_dma_addr, | ||
769 | GFP_KERNEL); | ||
770 | if (!hba->utmrdl_base_addr || | ||
771 | WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) { | ||
772 | dev_err(&hba->pdev->dev, | ||
773 | "Task Management Descriptor Memory allocation failed\n"); | ||
774 | goto out; | ||
775 | } | ||
776 | |||
777 | /* Allocate memory for local reference block */ | ||
778 | hba->lrb = kcalloc(hba->nutrs, sizeof(struct ufshcd_lrb), GFP_KERNEL); | ||
779 | if (!hba->lrb) { | ||
780 | dev_err(&hba->pdev->dev, "LRB Memory allocation failed\n"); | ||
781 | goto out; | ||
782 | } | ||
783 | return 0; | ||
784 | out: | ||
785 | ufshcd_free_hba_memory(hba); | ||
786 | return -ENOMEM; | ||
787 | } | ||
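A small user-space sketch of the alignment check used above: dma_alloc_coherent() hands back page-aligned buffers, and any page-aligned address is also 128-byte aligned because PAGE_SIZE is a multiple of 128. A 4 KiB PAGE_SIZE and an arbitrary address are assumed for the example.

/* Illustrative sketch only; values are made up. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	uint64_t addr = 0x12345000ULL;	/* arbitrary page-aligned DMA address */

	printf("page aligned:     %s\n", (addr & (PAGE_SIZE - 1)) ? "no" : "yes");
	printf("128-byte aligned: %s\n", (addr & 127) ? "no" : "yes");
	return 0;
}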
788 | |||
789 | /** | ||
790 | * ufshcd_host_memory_configure - configure local reference block with | ||
791 | * memory offsets | ||
792 | * @hba: per adapter instance | ||
793 | * | ||
794 | * Configure Host memory space | ||
795 | * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA | ||
796 | * address. | ||
797 | * 2. Update each UTRD with Response UPIU offset, Response UPIU length | ||
798 | * and PRDT offset. | ||
799 | * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT | ||
800 | * into local reference block. | ||
801 | */ | ||
802 | static void ufshcd_host_memory_configure(struct ufs_hba *hba) | ||
803 | { | ||
804 | struct utp_transfer_cmd_desc *cmd_descp; | ||
805 | struct utp_transfer_req_desc *utrdlp; | ||
806 | dma_addr_t cmd_desc_dma_addr; | ||
807 | dma_addr_t cmd_desc_element_addr; | ||
808 | u16 response_offset; | ||
809 | u16 prdt_offset; | ||
810 | int cmd_desc_size; | ||
811 | int i; | ||
812 | |||
813 | utrdlp = hba->utrdl_base_addr; | ||
814 | cmd_descp = hba->ucdl_base_addr; | ||
815 | |||
816 | response_offset = | ||
817 | offsetof(struct utp_transfer_cmd_desc, response_upiu); | ||
818 | prdt_offset = | ||
819 | offsetof(struct utp_transfer_cmd_desc, prd_table); | ||
820 | |||
821 | cmd_desc_size = sizeof(struct utp_transfer_cmd_desc); | ||
822 | cmd_desc_dma_addr = hba->ucdl_dma_addr; | ||
823 | |||
824 | for (i = 0; i < hba->nutrs; i++) { | ||
825 | /* Configure UTRD with command descriptor base address */ | ||
826 | cmd_desc_element_addr = | ||
827 | (cmd_desc_dma_addr + (cmd_desc_size * i)); | ||
828 | utrdlp[i].command_desc_base_addr_lo = | ||
829 | cpu_to_le32(lower_32_bits(cmd_desc_element_addr)); | ||
830 | utrdlp[i].command_desc_base_addr_hi = | ||
831 | cpu_to_le32(upper_32_bits(cmd_desc_element_addr)); | ||
832 | |||
833 | /* Response upiu and prdt offset should be in double words */ | ||
834 | utrdlp[i].response_upiu_offset = | ||
835 | cpu_to_le16((response_offset >> 2)); | ||
836 | utrdlp[i].prd_table_offset = | ||
837 | cpu_to_le16((prdt_offset >> 2)); | ||
838 | utrdlp[i].response_upiu_length = | ||
839 | cpu_to_le16(ALIGNED_UPIU_SIZE); | ||
840 | |||
841 | hba->lrb[i].utr_descriptor_ptr = (utrdlp + i); | ||
842 | hba->lrb[i].ucd_cmd_ptr = | ||
843 | (struct utp_upiu_cmd *)(cmd_descp + i); | ||
844 | hba->lrb[i].ucd_rsp_ptr = | ||
845 | (struct utp_upiu_rsp *)cmd_descp[i].response_upiu; | ||
846 | hba->lrb[i].ucd_prdt_ptr = | ||
847 | (struct ufshcd_sg_entry *)cmd_descp[i].prd_table; | ||
848 | } | ||
849 | } | ||
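The offset arithmetic above can be illustrated with a standalone sketch that mirrors the utp_transfer_cmd_desc layout from ufshci.h further down in this patch: offsetof() yields the byte offsets of the response UPIU and the PRDT inside each command descriptor, and the driver stores them in the UTRD as dword counts (byte offset >> 2). The SG_ALL value of 128 is an assumption for the sake of the example.

/* Illustrative sketch only; mirrors the descriptor layout, not the driver. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ALIGNED_UPIU_SIZE 128
#define SG_ALL 128	/* assumed value for this sketch */

struct ufshcd_sg_entry { uint32_t base_addr, upper_addr, reserved, size; };

struct utp_transfer_cmd_desc {
	uint8_t command_upiu[ALIGNED_UPIU_SIZE];
	uint8_t response_upiu[ALIGNED_UPIU_SIZE];
	struct ufshcd_sg_entry prd_table[SG_ALL];
};

int main(void)
{
	size_t response_offset = offsetof(struct utp_transfer_cmd_desc, response_upiu);
	size_t prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);

	printf("response UPIU: %zu bytes (%zu dwords)\n",
	       response_offset, response_offset >> 2);
	printf("PRDT:          %zu bytes (%zu dwords)\n",
	       prdt_offset, prdt_offset >> 2);
	return 0;
}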
850 | |||
851 | /** | ||
852 | * ufshcd_dme_link_startup - Notify Unipro to perform link startup | ||
853 | * @hba: per adapter instance | ||
854 | * | ||
855 | * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer, | ||
856 | * in order to initialize the Unipro link startup procedure. | ||
857 | * Once the Unipro links are up, the device connected to the controller | ||
858 | * is detected. | ||
859 | * | ||
860 | * Returns 0 on success, non-zero value on failure | ||
861 | */ | ||
862 | static int ufshcd_dme_link_startup(struct ufs_hba *hba) | ||
863 | { | ||
864 | struct uic_command *uic_cmd; | ||
865 | unsigned long flags; | ||
866 | |||
867 | /* check if controller is ready to accept UIC commands */ | ||
868 | if (((readl(hba->mmio_base + REG_CONTROLLER_STATUS)) & | ||
869 | UIC_COMMAND_READY) == 0x0) { | ||
870 | dev_err(&hba->pdev->dev, | ||
871 | "Controller not ready" | ||
872 | " to accept UIC commands\n"); | ||
873 | return -EIO; | ||
874 | } | ||
875 | |||
876 | spin_lock_irqsave(hba->host->host_lock, flags); | ||
877 | |||
878 | /* form UIC command */ | ||
879 | uic_cmd = &hba->active_uic_cmd; | ||
880 | uic_cmd->command = UIC_CMD_DME_LINK_STARTUP; | ||
881 | uic_cmd->argument1 = 0; | ||
882 | uic_cmd->argument2 = 0; | ||
883 | uic_cmd->argument3 = 0; | ||
884 | |||
885 | /* enable UIC related interrupts */ | ||
886 | hba->int_enable_mask |= UIC_COMMAND_COMPL; | ||
887 | ufshcd_int_config(hba, UFSHCD_INT_ENABLE); | ||
888 | |||
889 | /* sending UIC commands to controller */ | ||
890 | ufshcd_send_uic_command(hba, uic_cmd); | ||
891 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
892 | return 0; | ||
893 | } | ||
894 | |||
895 | /** | ||
896 | * ufshcd_make_hba_operational - Make UFS controller operational | ||
897 | * @hba: per adapter instance | ||
898 | * | ||
899 | * To bring UFS host controller to operational state, | ||
900 | * 1. Check if device is present | ||
901 | * 2. Configure run-stop-registers | ||
902 | * 3. Enable required interrupts | ||
903 | * 4. Configure interrupt aggregation | ||
904 | * | ||
905 | * Returns 0 on success, non-zero value on failure | ||
906 | */ | ||
907 | static int ufshcd_make_hba_operational(struct ufs_hba *hba) | ||
908 | { | ||
909 | int err = 0; | ||
910 | u32 reg; | ||
911 | |||
912 | /* check if device present */ | ||
913 | reg = readl((hba->mmio_base + REG_CONTROLLER_STATUS)); | ||
914 | if (ufshcd_is_device_present(reg)) { | ||
915 | dev_err(&hba->pdev->dev, "cc: Device not present\n"); | ||
916 | err = -ENXIO; | ||
917 | goto out; | ||
918 | } | ||
919 | |||
920 | /* | ||
921 | * UCRDY, UTMRLDY and UTRLRDY bits must be 1 | ||
922 | * DEI, HEI bits must be 0 | ||
923 | */ | ||
924 | if (!(ufshcd_get_lists_status(reg))) { | ||
925 | ufshcd_enable_run_stop_reg(hba); | ||
926 | } else { | ||
927 | dev_err(&hba->pdev->dev, | ||
928 | "Host controller not ready to process requests"); | ||
929 | err = -EIO; | ||
930 | goto out; | ||
931 | } | ||
932 | |||
933 | /* Enable required interrupts */ | ||
934 | hba->int_enable_mask |= (UTP_TRANSFER_REQ_COMPL | | ||
935 | UIC_ERROR | | ||
936 | UTP_TASK_REQ_COMPL | | ||
937 | DEVICE_FATAL_ERROR | | ||
938 | CONTROLLER_FATAL_ERROR | | ||
939 | SYSTEM_BUS_FATAL_ERROR); | ||
940 | ufshcd_int_config(hba, UFSHCD_INT_ENABLE); | ||
941 | |||
942 | /* Configure interrupt aggregation */ | ||
943 | ufshcd_config_int_aggr(hba, INT_AGGR_CONFIG); | ||
944 | |||
945 | if (hba->ufshcd_state == UFSHCD_STATE_RESET) | ||
946 | scsi_unblock_requests(hba->host); | ||
947 | |||
948 | hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; | ||
949 | scsi_scan_host(hba->host); | ||
950 | out: | ||
951 | return err; | ||
952 | } | ||
953 | |||
954 | /** | ||
955 | * ufshcd_hba_enable - initialize the controller | ||
956 | * @hba: per adapter instance | ||
957 | * | ||
958 | * The controller resets itself and controller firmware initialization | ||
959 | * sequence kicks off. When controller is ready it will set | ||
960 | * the Host Controller Enable bit to 1. | ||
961 | * | ||
962 | * Returns 0 on success, non-zero value on failure | ||
963 | */ | ||
964 | static int ufshcd_hba_enable(struct ufs_hba *hba) | ||
965 | { | ||
966 | int retry; | ||
967 | |||
968 | /* | ||
969 | * The msleep(1) and msleep(5) calls used in this function may sleep for | ||
970 | * up to 20 ms, but these delays were necessary to send the UFS FPGA to | ||
971 | * reset mode during development and testing of this driver. msleep can | ||
972 | * be changed to mdelay and the retry count reduced based on the controller. | ||
973 | */ | ||
974 | if (!ufshcd_is_hba_active(hba)) { | ||
975 | |||
976 | /* change controller state to "reset state" */ | ||
977 | ufshcd_hba_stop(hba); | ||
978 | |||
979 | /* | ||
980 | * This delay is based on the testing done with UFS host | ||
981 | * controller FPGA. The delay can be changed based on the | ||
982 | * host controller used. | ||
983 | */ | ||
984 | msleep(5); | ||
985 | } | ||
986 | |||
987 | /* start controller initialization sequence */ | ||
988 | ufshcd_hba_start(hba); | ||
989 | |||
990 | /* | ||
991 | * To initialize a UFS host controller HCE bit must be set to 1. | ||
992 | * During initialization the HCE bit value changes from 1->0->1. | ||
993 | * When the host controller completes initialization sequence | ||
994 | * it sets the value of HCE bit to 1. The same HCE bit is read back | ||
995 | * to check if the controller has completed initialization sequence. | ||
996 | * So without this delay the value HCE = 1, set in the previous | ||
997 | * instruction might be read back. | ||
998 | * This delay can be changed based on the controller. | ||
999 | */ | ||
1000 | msleep(1); | ||
1001 | |||
1002 | /* wait for the host controller to complete initialization */ | ||
1003 | retry = 10; | ||
1004 | while (ufshcd_is_hba_active(hba)) { | ||
1005 | if (retry) { | ||
1006 | retry--; | ||
1007 | } else { | ||
1008 | dev_err(&hba->pdev->dev, | ||
1009 | "Controller enable failed\n"); | ||
1010 | return -EIO; | ||
1011 | } | ||
1012 | msleep(5); | ||
1013 | } | ||
1014 | return 0; | ||
1015 | } | ||
1016 | |||
1017 | /** | ||
1018 | * ufshcd_initialize_hba - start the initialization process | ||
1019 | * @hba: per adapter instance | ||
1020 | * | ||
1021 | * 1. Enable the controller via ufshcd_hba_enable. | ||
1022 | * 2. Program the Transfer Request List Address with the starting address of | ||
1023 | * UTRDL. | ||
1024 | * 3. Program the Task Management Request List Address with starting address | ||
1025 | * of UTMRDL. | ||
1026 | * | ||
1027 | * Returns 0 on success, non-zero value on failure. | ||
1028 | */ | ||
1029 | static int ufshcd_initialize_hba(struct ufs_hba *hba) | ||
1030 | { | ||
1031 | if (ufshcd_hba_enable(hba)) | ||
1032 | return -EIO; | ||
1033 | |||
1034 | /* Configure UTRL and UTMRL base address registers */ | ||
1035 | writel(lower_32_bits(hba->utrdl_dma_addr), | ||
1036 | (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_L)); | ||
1037 | writel(upper_32_bits(hba->utrdl_dma_addr), | ||
1038 | (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_H)); | ||
1039 | writel(lower_32_bits(hba->utmrdl_dma_addr), | ||
1040 | (hba->mmio_base + REG_UTP_TASK_REQ_LIST_BASE_L)); | ||
1041 | writel(upper_32_bits(hba->utmrdl_dma_addr), | ||
1042 | (hba->mmio_base + REG_UTP_TASK_REQ_LIST_BASE_H)); | ||
1043 | |||
1044 | /* Initialize unipro link startup procedure */ | ||
1045 | return ufshcd_dme_link_startup(hba); | ||
1046 | } | ||
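For illustration, a minimal sketch of how a 64-bit list base address splits across the 32-bit BASE_L/BASE_H register pair, mirroring the lower_32_bits()/upper_32_bits() helpers used above; the address value is arbitrary and the helpers are simplified stand-ins for the kernel macros.

/* Illustrative sketch only. */
#include <stdint.h>
#include <stdio.h>

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t utrdl_dma_addr = 0x0000000123456000ULL;	/* arbitrary */

	printf("UTRLBA  (BASE_L) = 0x%08x\n", lower_32_bits(utrdl_dma_addr));
	printf("UTRLBAU (BASE_H) = 0x%08x\n", upper_32_bits(utrdl_dma_addr));
	return 0;
}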
1047 | |||
1048 | /** | ||
1049 | * ufshcd_do_reset - reset the host controller | ||
1050 | * @hba: per adapter instance | ||
1051 | * | ||
1052 | * Returns SUCCESS/FAILED | ||
1053 | */ | ||
1054 | static int ufshcd_do_reset(struct ufs_hba *hba) | ||
1055 | { | ||
1056 | struct ufshcd_lrb *lrbp; | ||
1057 | unsigned long flags; | ||
1058 | int tag; | ||
1059 | |||
1060 | /* block commands from midlayer */ | ||
1061 | scsi_block_requests(hba->host); | ||
1062 | |||
1063 | spin_lock_irqsave(hba->host->host_lock, flags); | ||
1064 | hba->ufshcd_state = UFSHCD_STATE_RESET; | ||
1065 | |||
1066 | /* send controller to reset state */ | ||
1067 | ufshcd_hba_stop(hba); | ||
1068 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
1069 | |||
1070 | /* abort outstanding commands */ | ||
1071 | for (tag = 0; tag < hba->nutrs; tag++) { | ||
1072 | if (test_bit(tag, &hba->outstanding_reqs)) { | ||
1073 | lrbp = &hba->lrb[tag]; | ||
1074 | scsi_dma_unmap(lrbp->cmd); | ||
1075 | lrbp->cmd->result = DID_RESET << 16; | ||
1076 | lrbp->cmd->scsi_done(lrbp->cmd); | ||
1077 | lrbp->cmd = NULL; | ||
1078 | } | ||
1079 | } | ||
1080 | |||
1081 | /* clear outstanding request/task bit maps */ | ||
1082 | hba->outstanding_reqs = 0; | ||
1083 | hba->outstanding_tasks = 0; | ||
1084 | |||
1085 | /* start the initialization process */ | ||
1086 | if (ufshcd_initialize_hba(hba)) { | ||
1087 | dev_err(&hba->pdev->dev, | ||
1088 | "Reset: Controller initialization failed\n"); | ||
1089 | return FAILED; | ||
1090 | } | ||
1091 | return SUCCESS; | ||
1092 | } | ||
1093 | |||
1094 | /** | ||
1095 | * ufshcd_slave_alloc - handle initial SCSI device configurations | ||
1096 | * @sdev: pointer to SCSI device | ||
1097 | * | ||
1098 | * Returns success | ||
1099 | */ | ||
1100 | static int ufshcd_slave_alloc(struct scsi_device *sdev) | ||
1101 | { | ||
1102 | struct ufs_hba *hba; | ||
1103 | |||
1104 | hba = shost_priv(sdev->host); | ||
1105 | sdev->tagged_supported = 1; | ||
1106 | |||
1107 | /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */ | ||
1108 | sdev->use_10_for_ms = 1; | ||
1109 | scsi_set_tag_type(sdev, MSG_SIMPLE_TAG); | ||
1110 | |||
1111 | /* | ||
1112 | * Inform SCSI Midlayer that the LUN queue depth is same as the | ||
1113 | * controller queue depth. If a LUN queue depth is less than the | ||
1114 | * controller queue depth and if the LUN reports | ||
1115 | * SAM_STAT_TASK_SET_FULL, the LUN queue depth will be adjusted | ||
1116 | * with scsi_adjust_queue_depth. | ||
1117 | */ | ||
1118 | scsi_activate_tcq(sdev, hba->nutrs); | ||
1119 | return 0; | ||
1120 | } | ||
1121 | |||
1122 | /** | ||
1123 | * ufshcd_slave_destroy - remove SCSI device configurations | ||
1124 | * @sdev: pointer to SCSI device | ||
1125 | */ | ||
1126 | static void ufshcd_slave_destroy(struct scsi_device *sdev) | ||
1127 | { | ||
1128 | struct ufs_hba *hba; | ||
1129 | |||
1130 | hba = shost_priv(sdev->host); | ||
1131 | scsi_deactivate_tcq(sdev, hba->nutrs); | ||
1132 | } | ||
1133 | |||
1134 | /** | ||
1135 | * ufshcd_task_req_compl - handle task management request completion | ||
1136 | * @hba: per adapter instance | ||
1137 | * @index: index of the completed request | ||
1138 | * | ||
1139 | * Returns SUCCESS/FAILED | ||
1140 | */ | ||
1141 | static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index) | ||
1142 | { | ||
1143 | struct utp_task_req_desc *task_req_descp; | ||
1144 | struct utp_upiu_task_rsp *task_rsp_upiup; | ||
1145 | unsigned long flags; | ||
1146 | int ocs_value; | ||
1147 | int task_result; | ||
1148 | |||
1149 | spin_lock_irqsave(hba->host->host_lock, flags); | ||
1150 | |||
1151 | /* Clear completed tasks from outstanding_tasks */ | ||
1152 | __clear_bit(index, &hba->outstanding_tasks); | ||
1153 | |||
1154 | task_req_descp = hba->utmrdl_base_addr; | ||
1155 | ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]); | ||
1156 | |||
1157 | if (ocs_value == OCS_SUCCESS) { | ||
1158 | task_rsp_upiup = (struct utp_upiu_task_rsp *) | ||
1159 | task_req_descp[index].task_rsp_upiu; | ||
1160 | task_result = be32_to_cpu(task_rsp_upiup->header.dword_1); | ||
1161 | task_result = ((task_result & MASK_TASK_RESPONSE) >> 8); | ||
1162 | |||
1163 | if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL && | ||
1164 | task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) | ||
1165 | task_result = FAILED; | ||
1166 | } else { | ||
1167 | task_result = FAILED; | ||
1168 | dev_err(&hba->pdev->dev, | ||
1169 | "trc: Invalid ocs = %x\n", ocs_value); | ||
1170 | } | ||
1171 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
1172 | return task_result; | ||
1173 | } | ||
1174 | |||
1175 | /** | ||
1176 | * ufshcd_adjust_lun_qdepth - Update LUN queue depth if device responds with | ||
1177 | * SAM_STAT_TASK_SET_FULL SCSI command status. | ||
1178 | * @cmd: pointer to SCSI command | ||
1179 | */ | ||
1180 | static void ufshcd_adjust_lun_qdepth(struct scsi_cmnd *cmd) | ||
1181 | { | ||
1182 | struct ufs_hba *hba; | ||
1183 | int i; | ||
1184 | int lun_qdepth = 0; | ||
1185 | |||
1186 | hba = shost_priv(cmd->device->host); | ||
1187 | |||
1188 | /* | ||
1189 | * LUN queue depth can be obtained by counting outstanding commands | ||
1190 | * on the LUN. | ||
1191 | */ | ||
1192 | for (i = 0; i < hba->nutrs; i++) { | ||
1193 | if (test_bit(i, &hba->outstanding_reqs)) { | ||
1194 | |||
1195 | /* | ||
1196 | * Check if the outstanding command belongs | ||
1197 | * to the LUN which reported SAM_STAT_TASK_SET_FULL. | ||
1198 | */ | ||
1199 | if (cmd->device->lun == hba->lrb[i].lun) | ||
1200 | lun_qdepth++; | ||
1201 | } | ||
1202 | } | ||
1203 | |||
1204 | /* | ||
1205 | * LUN queue depth will be total outstanding commands, except the | ||
1206 | * command for which the LUN reported SAM_STAT_TASK_SET_FULL. | ||
1207 | */ | ||
1208 | scsi_adjust_queue_depth(cmd->device, MSG_SIMPLE_TAG, lun_qdepth - 1); | ||
1209 | } | ||
1210 | |||
1211 | /** | ||
1212 | * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status | ||
1213 | * @lrbp: pointer to local reference block of completed command | ||
1214 | * @scsi_status: SCSI command status | ||
1215 | * | ||
1216 | * Returns value based on SCSI command status | ||
1217 | */ | ||
1218 | static inline int | ||
1219 | ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status) | ||
1220 | { | ||
1221 | int result = 0; | ||
1222 | |||
1223 | switch (scsi_status) { | ||
1224 | case SAM_STAT_GOOD: | ||
1225 | result |= DID_OK << 16 | | ||
1226 | COMMAND_COMPLETE << 8 | | ||
1227 | SAM_STAT_GOOD; | ||
1228 | break; | ||
1229 | case SAM_STAT_CHECK_CONDITION: | ||
1230 | result |= DID_OK << 16 | | ||
1231 | COMMAND_COMPLETE << 8 | | ||
1232 | SAM_STAT_CHECK_CONDITION; | ||
1233 | ufshcd_copy_sense_data(lrbp); | ||
1234 | break; | ||
1235 | case SAM_STAT_BUSY: | ||
1236 | result |= SAM_STAT_BUSY; | ||
1237 | break; | ||
1238 | case SAM_STAT_TASK_SET_FULL: | ||
1239 | |||
1240 | /* | ||
1241 | * If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue | ||
1242 | * depth needs to be adjusted to the exact number of | ||
1243 | * outstanding commands the LUN can handle at any given time. | ||
1244 | */ | ||
1245 | ufshcd_adjust_lun_qdepth(lrbp->cmd); | ||
1246 | result |= SAM_STAT_TASK_SET_FULL; | ||
1247 | break; | ||
1248 | case SAM_STAT_TASK_ABORTED: | ||
1249 | result |= SAM_STAT_TASK_ABORTED; | ||
1250 | break; | ||
1251 | default: | ||
1252 | result |= DID_ERROR << 16; | ||
1253 | break; | ||
1254 | } /* end of switch */ | ||
1255 | |||
1256 | return result; | ||
1257 | } | ||
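The result composition above follows the SCSI midlayer convention of packing the host byte, message byte and SCSI status byte into one word as (host << 16) | (msg << 8) | status. A standalone sketch using the usual kernel values for DID_OK, COMMAND_COMPLETE and SAM_STAT_CHECK_CONDITION:

/* Illustrative sketch only. */
#include <stdio.h>

#define DID_OK			0x00
#define COMMAND_COMPLETE	0x00
#define SAM_STAT_CHECK_CONDITION 0x02

int main(void)
{
	int result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
		     SAM_STAT_CHECK_CONDITION;

	printf("result = 0x%06x (status byte = 0x%02x, host byte = 0x%02x)\n",
	       result, result & 0xff, (result >> 16) & 0xff);
	return 0;
}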
1258 | |||
1259 | /** | ||
1260 | * ufshcd_transfer_rsp_status - Get overall status of the response | ||
1261 | * @hba: per adapter instance | ||
1262 | * @lrbp: pointer to local reference block of completed command | ||
1263 | * | ||
1264 | * Returns result of the command to notify SCSI midlayer | ||
1265 | */ | ||
1266 | static inline int | ||
1267 | ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) | ||
1268 | { | ||
1269 | int result = 0; | ||
1270 | int scsi_status; | ||
1271 | int ocs; | ||
1272 | |||
1273 | /* overall command status of utrd */ | ||
1274 | ocs = ufshcd_get_tr_ocs(lrbp); | ||
1275 | |||
1276 | switch (ocs) { | ||
1277 | case OCS_SUCCESS: | ||
1278 | |||
1279 | /* check if the returned transfer response is valid */ | ||
1280 | result = ufshcd_is_valid_req_rsp(lrbp->ucd_rsp_ptr); | ||
1281 | if (result) { | ||
1282 | dev_err(&hba->pdev->dev, | ||
1283 | "Invalid response = %x\n", result); | ||
1284 | break; | ||
1285 | } | ||
1286 | |||
1287 | /* | ||
1288 | * get the response UPIU result to extract | ||
1289 | * the SCSI command status | ||
1290 | */ | ||
1291 | result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr); | ||
1292 | |||
1293 | /* | ||
1294 | * get the result based on SCSI status response | ||
1295 | * to notify the SCSI midlayer of the command status | ||
1296 | */ | ||
1297 | scsi_status = result & MASK_SCSI_STATUS; | ||
1298 | result = ufshcd_scsi_cmd_status(lrbp, scsi_status); | ||
1299 | break; | ||
1300 | case OCS_ABORTED: | ||
1301 | result |= DID_ABORT << 16; | ||
1302 | break; | ||
1303 | case OCS_INVALID_CMD_TABLE_ATTR: | ||
1304 | case OCS_INVALID_PRDT_ATTR: | ||
1305 | case OCS_MISMATCH_DATA_BUF_SIZE: | ||
1306 | case OCS_MISMATCH_RESP_UPIU_SIZE: | ||
1307 | case OCS_PEER_COMM_FAILURE: | ||
1308 | case OCS_FATAL_ERROR: | ||
1309 | default: | ||
1310 | result |= DID_ERROR << 16; | ||
1311 | dev_err(&hba->pdev->dev, | ||
1312 | "OCS error from controller = %x\n", ocs); | ||
1313 | break; | ||
1314 | } /* end of switch */ | ||
1315 | |||
1316 | return result; | ||
1317 | } | ||
1318 | |||
1319 | /** | ||
1320 | * ufshcd_transfer_req_compl - handle SCSI and query command completion | ||
1321 | * @hba: per adapter instance | ||
1322 | */ | ||
1323 | static void ufshcd_transfer_req_compl(struct ufs_hba *hba) | ||
1324 | { | ||
1325 | struct ufshcd_lrb *lrb; | ||
1326 | unsigned long completed_reqs; | ||
1327 | u32 tr_doorbell; | ||
1328 | int result; | ||
1329 | int index; | ||
1330 | |||
1331 | lrb = hba->lrb; | ||
1332 | tr_doorbell = | ||
1333 | readl(hba->mmio_base + REG_UTP_TRANSFER_REQ_DOOR_BELL); | ||
1334 | completed_reqs = tr_doorbell ^ hba->outstanding_reqs; | ||
1335 | |||
1336 | for (index = 0; index < hba->nutrs; index++) { | ||
1337 | if (test_bit(index, &completed_reqs)) { | ||
1338 | |||
1339 | result = ufshcd_transfer_rsp_status(hba, &lrb[index]); | ||
1340 | |||
1341 | if (lrb[index].cmd) { | ||
1342 | scsi_dma_unmap(lrb[index].cmd); | ||
1343 | lrb[index].cmd->result = result; | ||
1344 | lrb[index].cmd->scsi_done(lrb[index].cmd); | ||
1345 | |||
1346 | /* Mark completed command as NULL in LRB */ | ||
1347 | lrb[index].cmd = NULL; | ||
1348 | } | ||
1349 | } /* end of if */ | ||
1350 | } /* end of for */ | ||
1351 | |||
1352 | /* clear corresponding bits of completed commands */ | ||
1353 | hba->outstanding_reqs ^= completed_reqs; | ||
1354 | |||
1355 | /* Reset interrupt aggregation counters */ | ||
1356 | ufshcd_config_int_aggr(hba, INT_AGGR_RESET); | ||
1357 | } | ||
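The completion bookkeeping above relies on XOR-ing the doorbell register (bits still set mean the request is still outstanding in hardware) with the driver's outstanding_reqs bitmap, so the set bits of the XOR are exactly the newly completed tags. A standalone sketch with made-up tag bitmaps:

/* Illustrative sketch only; bitmap values are made up. */
#include <stdio.h>

int main(void)
{
	unsigned long outstanding_reqs = 0x0000002d; /* tags 0,2,3,5 issued */
	unsigned long tr_doorbell      = 0x00000024; /* tags 2,5 still pending */
	unsigned long completed = tr_doorbell ^ outstanding_reqs;

	printf("completed tags bitmap = 0x%08lx\n", completed); /* tags 0,3 */

	/* clear completed bits, mirroring hba->outstanding_reqs ^= completed_reqs */
	outstanding_reqs ^= completed;
	printf("still outstanding    = 0x%08lx\n", outstanding_reqs);
	return 0;
}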
1358 | |||
1359 | /** | ||
1360 | * ufshcd_uic_cc_handler - handle UIC command completion | ||
1361 | * @work: pointer to a work queue structure | ||
1362 | * | ||
1363 | * Invoked from the UIC work queue when a UIC command completes. | ||
1364 | */ | ||
1365 | static void ufshcd_uic_cc_handler(struct work_struct *work) | ||
1366 | { | ||
1367 | struct ufs_hba *hba; | ||
1368 | |||
1369 | hba = container_of(work, struct ufs_hba, uic_workq); | ||
1370 | |||
1371 | if ((hba->active_uic_cmd.command == UIC_CMD_DME_LINK_STARTUP) && | ||
1372 | !(ufshcd_get_uic_cmd_result(hba))) { | ||
1373 | |||
1374 | if (ufshcd_make_hba_operational(hba)) | ||
1375 | dev_err(&hba->pdev->dev, | ||
1376 | "cc: hba not operational state\n"); | ||
1377 | return; | ||
1378 | } | ||
1379 | } | ||
1380 | |||
1381 | /** | ||
1382 | * ufshcd_fatal_err_handler - handle fatal errors | ||
1383 | * @work: pointer to a work queue structure | ||
1384 | */ | ||
1385 | static void ufshcd_fatal_err_handler(struct work_struct *work) | ||
1386 | { | ||
1387 | struct ufs_hba *hba; | ||
1388 | hba = container_of(work, struct ufs_hba, feh_workq); | ||
1389 | |||
1390 | /* check if reset is already in progress */ | ||
1391 | if (hba->ufshcd_state != UFSHCD_STATE_RESET) | ||
1392 | ufshcd_do_reset(hba); | ||
1393 | } | ||
1394 | |||
1395 | /** | ||
1396 | * ufshcd_err_handler - Check for fatal errors | ||
1397 | * @hba: per adapter instance | ||
1398 | */ | ||
1399 | static void ufshcd_err_handler(struct ufs_hba *hba) | ||
1400 | { | ||
1401 | u32 reg; | ||
1402 | |||
1403 | if (hba->errors & INT_FATAL_ERRORS) | ||
1404 | goto fatal_eh; | ||
1405 | |||
1406 | if (hba->errors & UIC_ERROR) { | ||
1407 | |||
1408 | reg = readl(hba->mmio_base + | ||
1409 | REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER); | ||
1410 | if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) | ||
1411 | goto fatal_eh; | ||
1412 | } | ||
1413 | return; | ||
1414 | fatal_eh: | ||
1415 | hba->ufshcd_state = UFSHCD_STATE_ERROR; | ||
1416 | schedule_work(&hba->feh_workq); | ||
1417 | } | ||
1418 | |||
1419 | /** | ||
1420 | * ufshcd_tmc_handler - handle task management function completion | ||
1421 | * @hba: per adapter instance | ||
1422 | */ | ||
1423 | static void ufshcd_tmc_handler(struct ufs_hba *hba) | ||
1424 | { | ||
1425 | u32 tm_doorbell; | ||
1426 | |||
1427 | tm_doorbell = readl(hba->mmio_base + REG_UTP_TASK_REQ_DOOR_BELL); | ||
1428 | hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks; | ||
1429 | wake_up_interruptible(&hba->ufshcd_tm_wait_queue); | ||
1430 | } | ||
1431 | |||
1432 | /** | ||
1433 | * ufshcd_sl_intr - Interrupt service routine | ||
1434 | * @hba: per adapter instance | ||
1435 | * @intr_status: contains interrupts generated by the controller | ||
1436 | */ | ||
1437 | static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) | ||
1438 | { | ||
1439 | hba->errors = UFSHCD_ERROR_MASK & intr_status; | ||
1440 | if (hba->errors) | ||
1441 | ufshcd_err_handler(hba); | ||
1442 | |||
1443 | if (intr_status & UIC_COMMAND_COMPL) | ||
1444 | schedule_work(&hba->uic_workq); | ||
1445 | |||
1446 | if (intr_status & UTP_TASK_REQ_COMPL) | ||
1447 | ufshcd_tmc_handler(hba); | ||
1448 | |||
1449 | if (intr_status & UTP_TRANSFER_REQ_COMPL) | ||
1450 | ufshcd_transfer_req_compl(hba); | ||
1451 | } | ||
1452 | |||
1453 | /** | ||
1454 | * ufshcd_intr - Main interrupt service routine | ||
1455 | * @irq: irq number | ||
1456 | * @__hba: pointer to adapter instance | ||
1457 | * | ||
1458 | * Returns IRQ_HANDLED - If interrupt is valid | ||
1459 | * IRQ_NONE - If invalid interrupt | ||
1460 | */ | ||
1461 | static irqreturn_t ufshcd_intr(int irq, void *__hba) | ||
1462 | { | ||
1463 | u32 intr_status; | ||
1464 | irqreturn_t retval = IRQ_NONE; | ||
1465 | struct ufs_hba *hba = __hba; | ||
1466 | |||
1467 | spin_lock(hba->host->host_lock); | ||
1468 | intr_status = readl(hba->mmio_base + REG_INTERRUPT_STATUS); | ||
1469 | |||
1470 | if (intr_status) { | ||
1471 | ufshcd_sl_intr(hba, intr_status); | ||
1472 | |||
1473 | /* If UFSHCI 1.0 then clear interrupt status register */ | ||
1474 | if (hba->ufs_version == UFSHCI_VERSION_10) | ||
1475 | writel(intr_status, | ||
1476 | (hba->mmio_base + REG_INTERRUPT_STATUS)); | ||
1477 | retval = IRQ_HANDLED; | ||
1478 | } | ||
1479 | spin_unlock(hba->host->host_lock); | ||
1480 | return retval; | ||
1481 | } | ||
1482 | |||
1483 | /** | ||
1484 | * ufshcd_issue_tm_cmd - issues task management commands to controller | ||
1485 | * @hba: per adapter instance | ||
1486 | * @lrbp: pointer to local reference block | ||
1487 | * | ||
1488 | * Returns SUCCESS/FAILED | ||
1489 | */ | ||
1490 | static int | ||
1491 | ufshcd_issue_tm_cmd(struct ufs_hba *hba, | ||
1492 | struct ufshcd_lrb *lrbp, | ||
1493 | u8 tm_function) | ||
1494 | { | ||
1495 | struct utp_task_req_desc *task_req_descp; | ||
1496 | struct utp_upiu_task_req *task_req_upiup; | ||
1497 | struct Scsi_Host *host; | ||
1498 | unsigned long flags; | ||
1499 | int free_slot = 0; | ||
1500 | int err; | ||
1501 | |||
1502 | host = hba->host; | ||
1503 | |||
1504 | spin_lock_irqsave(host->host_lock, flags); | ||
1505 | |||
1506 | /* If task management queue is full */ | ||
1507 | free_slot = ufshcd_get_tm_free_slot(hba); | ||
1508 | if (free_slot >= hba->nutmrs) { | ||
1509 | spin_unlock_irqrestore(host->host_lock, flags); | ||
1510 | dev_err(&hba->pdev->dev, "Task management queue full\n"); | ||
1511 | err = FAILED; | ||
1512 | goto out; | ||
1513 | } | ||
1514 | |||
1515 | task_req_descp = hba->utmrdl_base_addr; | ||
1516 | task_req_descp += free_slot; | ||
1517 | |||
1518 | /* Configure task request descriptor */ | ||
1519 | task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD); | ||
1520 | task_req_descp->header.dword_2 = | ||
1521 | cpu_to_le32(OCS_INVALID_COMMAND_STATUS); | ||
1522 | |||
1523 | /* Configure task request UPIU */ | ||
1524 | task_req_upiup = | ||
1525 | (struct utp_upiu_task_req *) task_req_descp->task_req_upiu; | ||
1526 | task_req_upiup->header.dword_0 = | ||
1527 | cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0, | ||
1528 | lrbp->lun, lrbp->task_tag)); | ||
1529 | task_req_upiup->header.dword_1 = | ||
1530 | cpu_to_be32(UPIU_HEADER_DWORD(0, tm_function, 0, 0)); | ||
1531 | |||
1532 | task_req_upiup->input_param1 = lrbp->lun; | ||
1533 | task_req_upiup->input_param1 = | ||
1534 | cpu_to_be32(task_req_upiup->input_param1); | ||
1535 | task_req_upiup->input_param2 = lrbp->task_tag; | ||
1536 | task_req_upiup->input_param2 = | ||
1537 | cpu_to_be32(task_req_upiup->input_param2); | ||
1538 | |||
1539 | /* send command to the controller */ | ||
1540 | __set_bit(free_slot, &hba->outstanding_tasks); | ||
1541 | writel((1 << free_slot), | ||
1542 | (hba->mmio_base + REG_UTP_TASK_REQ_DOOR_BELL)); | ||
1543 | |||
1544 | spin_unlock_irqrestore(host->host_lock, flags); | ||
1545 | |||
1546 | /* wait until the task management command is completed */ | ||
1547 | err = | ||
1548 | wait_event_interruptible_timeout(hba->ufshcd_tm_wait_queue, | ||
1549 | (test_bit(free_slot, | ||
1550 | &hba->tm_condition) != 0), | ||
1551 | 60 * HZ); | ||
1552 | if (!err) { | ||
1553 | dev_err(&hba->pdev->dev, | ||
1554 | "Task management command timed-out\n"); | ||
1555 | err = FAILED; | ||
1556 | goto out; | ||
1557 | } | ||
1558 | clear_bit(free_slot, &hba->tm_condition); | ||
1559 | return ufshcd_task_req_compl(hba, free_slot); | ||
1560 | out: | ||
1561 | return err; | ||
1562 | } | ||
1563 | |||
1564 | /** | ||
1565 | * ufshcd_device_reset - reset device and abort all the pending commands | ||
1566 | * @cmd: SCSI command pointer | ||
1567 | * | ||
1568 | * Returns SUCCESS/FAILED | ||
1569 | */ | ||
1570 | static int ufshcd_device_reset(struct scsi_cmnd *cmd) | ||
1571 | { | ||
1572 | struct Scsi_Host *host; | ||
1573 | struct ufs_hba *hba; | ||
1574 | unsigned int tag; | ||
1575 | u32 pos; | ||
1576 | int err; | ||
1577 | |||
1578 | host = cmd->device->host; | ||
1579 | hba = shost_priv(host); | ||
1580 | tag = cmd->request->tag; | ||
1581 | |||
1582 | err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET); | ||
1583 | if (err) | ||
1584 | goto out; | ||
1585 | |||
1586 | for (pos = 0; pos < hba->nutrs; pos++) { | ||
1587 | if (test_bit(pos, &hba->outstanding_reqs) && | ||
1588 | (hba->lrb[tag].lun == hba->lrb[pos].lun)) { | ||
1589 | |||
1590 | /* clear the respective UTRLCLR register bit */ | ||
1591 | ufshcd_utrl_clear(hba, pos); | ||
1592 | |||
1593 | clear_bit(pos, &hba->outstanding_reqs); | ||
1594 | |||
1595 | if (hba->lrb[pos].cmd) { | ||
1596 | scsi_dma_unmap(hba->lrb[pos].cmd); | ||
1597 | hba->lrb[pos].cmd->result = | ||
1598 | DID_ABORT << 16; | ||
1599 | hba->lrb[pos].cmd->scsi_done(hba->lrb[pos].cmd); | ||
1600 | hba->lrb[pos].cmd = NULL; | ||
1601 | } | ||
1602 | } | ||
1603 | } /* end of for */ | ||
1604 | out: | ||
1605 | return err; | ||
1606 | } | ||
1607 | |||
1608 | /** | ||
1609 | * ufshcd_host_reset - Main reset function registered with scsi layer | ||
1610 | * @cmd: SCSI command pointer | ||
1611 | * | ||
1612 | * Returns SUCCESS/FAILED | ||
1613 | */ | ||
1614 | static int ufshcd_host_reset(struct scsi_cmnd *cmd) | ||
1615 | { | ||
1616 | struct ufs_hba *hba; | ||
1617 | |||
1618 | hba = shost_priv(cmd->device->host); | ||
1619 | |||
1620 | if (hba->ufshcd_state == UFSHCD_STATE_RESET) | ||
1621 | return SUCCESS; | ||
1622 | |||
1623 | return (ufshcd_do_reset(hba) == SUCCESS) ? SUCCESS : FAILED; | ||
1624 | } | ||
1625 | |||
1626 | /** | ||
1627 | * ufshcd_abort - abort a specific command | ||
1628 | * @cmd: SCSI command pointer | ||
1629 | * | ||
1630 | * Returns SUCCESS/FAILED | ||
1631 | */ | ||
1632 | static int ufshcd_abort(struct scsi_cmnd *cmd) | ||
1633 | { | ||
1634 | struct Scsi_Host *host; | ||
1635 | struct ufs_hba *hba; | ||
1636 | unsigned long flags; | ||
1637 | unsigned int tag; | ||
1638 | int err; | ||
1639 | |||
1640 | host = cmd->device->host; | ||
1641 | hba = shost_priv(host); | ||
1642 | tag = cmd->request->tag; | ||
1643 | |||
1644 | spin_lock_irqsave(host->host_lock, flags); | ||
1645 | |||
1646 | /* check if command is still pending */ | ||
1647 | if (!(test_bit(tag, &hba->outstanding_reqs))) { | ||
1648 | err = FAILED; | ||
1649 | spin_unlock_irqrestore(host->host_lock, flags); | ||
1650 | goto out; | ||
1651 | } | ||
1652 | spin_unlock_irqrestore(host->host_lock, flags); | ||
1653 | |||
1654 | err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK); | ||
1655 | if (err) | ||
1656 | goto out; | ||
1657 | |||
1658 | scsi_dma_unmap(cmd); | ||
1659 | |||
1660 | spin_lock_irqsave(host->host_lock, flags); | ||
1661 | |||
1662 | /* clear the respective UTRLCLR register bit */ | ||
1663 | ufshcd_utrl_clear(hba, tag); | ||
1664 | |||
1665 | __clear_bit(tag, &hba->outstanding_reqs); | ||
1666 | hba->lrb[tag].cmd = NULL; | ||
1667 | spin_unlock_irqrestore(host->host_lock, flags); | ||
1668 | out: | ||
1669 | return err; | ||
1670 | } | ||
1671 | |||
1672 | static struct scsi_host_template ufshcd_driver_template = { | ||
1673 | .module = THIS_MODULE, | ||
1674 | .name = UFSHCD, | ||
1675 | .proc_name = UFSHCD, | ||
1676 | .queuecommand = ufshcd_queuecommand, | ||
1677 | .slave_alloc = ufshcd_slave_alloc, | ||
1678 | .slave_destroy = ufshcd_slave_destroy, | ||
1679 | .eh_abort_handler = ufshcd_abort, | ||
1680 | .eh_device_reset_handler = ufshcd_device_reset, | ||
1681 | .eh_host_reset_handler = ufshcd_host_reset, | ||
1682 | .this_id = -1, | ||
1683 | .sg_tablesize = SG_ALL, | ||
1684 | .cmd_per_lun = UFSHCD_CMD_PER_LUN, | ||
1685 | .can_queue = UFSHCD_CAN_QUEUE, | ||
1686 | }; | ||
1687 | |||
1688 | /** | ||
1689 | * ufshcd_shutdown - main function to put the controller in reset state | ||
1690 | * @pdev: pointer to PCI device handle | ||
1691 | */ | ||
1692 | static void ufshcd_shutdown(struct pci_dev *pdev) | ||
1693 | { | ||
1694 | ufshcd_hba_stop((struct ufs_hba *)pci_get_drvdata(pdev)); | ||
1695 | } | ||
1696 | |||
1697 | #ifdef CONFIG_PM | ||
1698 | /** | ||
1699 | * ufshcd_suspend - suspend power management function | ||
1700 | * @pdev: pointer to PCI device handle | ||
1701 | * @state: power state | ||
1702 | * | ||
1703 | * Returns -ENOSYS | ||
1704 | */ | ||
1705 | static int ufshcd_suspend(struct pci_dev *pdev, pm_message_t state) | ||
1706 | { | ||
1707 | /* | ||
1708 | * TODO: | ||
1709 | * 1. Block SCSI requests from SCSI midlayer | ||
1710 | * 2. Change the internal driver state to non operational | ||
1711 | * 3. Set UTRLRSR and UTMRLRSR bits to zero | ||
1712 | * 4. Wait until outstanding commands are completed | ||
1713 | * 5. Set HCE to zero to send the UFS host controller to reset state | ||
1714 | */ | ||
1715 | |||
1716 | return -ENOSYS; | ||
1717 | } | ||
1718 | |||
1719 | /** | ||
1720 | * ufshcd_resume - resume power management function | ||
1721 | * @pdev: pointer to PCI device handle | ||
1722 | * | ||
1723 | * Returns -ENOSYS | ||
1724 | */ | ||
1725 | static int ufshcd_resume(struct pci_dev *pdev) | ||
1726 | { | ||
1727 | /* | ||
1728 | * TODO: | ||
1729 | * 1. Set HCE to 1, to start the UFS host controller | ||
1730 | * initialization process | ||
1731 | * 2. Set UTRLRSR and UTMRLRSR bits to 1 | ||
1732 | * 3. Change the internal driver state to operational | ||
1733 | * 4. Unblock SCSI requests from SCSI midlayer | ||
1734 | */ | ||
1735 | |||
1736 | return -ENOSYS; | ||
1737 | } | ||
1738 | #endif /* CONFIG_PM */ | ||
1739 | |||
1740 | /** | ||
1741 | * ufshcd_hba_free - free allocated memory for | ||
1742 | * host memory space data structures | ||
1743 | * @hba: per adapter instance | ||
1744 | */ | ||
1745 | static void ufshcd_hba_free(struct ufs_hba *hba) | ||
1746 | { | ||
1747 | iounmap(hba->mmio_base); | ||
1748 | ufshcd_free_hba_memory(hba); | ||
1749 | pci_release_regions(hba->pdev); | ||
1750 | } | ||
1751 | |||
1752 | /** | ||
1753 | * ufshcd_remove - de-allocate PCI/SCSI host and host memory space | ||
1754 | * data structure memory | ||
1755 | * @pdev: pointer to PCI handle | ||
1756 | */ | ||
1757 | static void ufshcd_remove(struct pci_dev *pdev) | ||
1758 | { | ||
1759 | struct ufs_hba *hba = pci_get_drvdata(pdev); | ||
1760 | |||
1761 | /* disable interrupts */ | ||
1762 | ufshcd_int_config(hba, UFSHCD_INT_DISABLE); | ||
1763 | free_irq(pdev->irq, hba); | ||
1764 | |||
1765 | ufshcd_hba_stop(hba); | ||
1766 | ufshcd_hba_free(hba); | ||
1767 | |||
1768 | scsi_remove_host(hba->host); | ||
1769 | scsi_host_put(hba->host); | ||
1770 | pci_set_drvdata(pdev, NULL); | ||
1771 | pci_clear_master(pdev); | ||
1772 | pci_disable_device(pdev); | ||
1773 | } | ||
1774 | |||
1775 | /** | ||
1776 | * ufshcd_set_dma_mask - Set dma mask based on the controller | ||
1777 | * addressing capability | ||
1778 | * @hba: per adapter instance | ||
1779 | * | ||
1780 | * Returns 0 for success, non-zero for failure | ||
1781 | */ | ||
1782 | static int ufshcd_set_dma_mask(struct ufs_hba *hba) | ||
1783 | { | ||
1784 | int err; | ||
1785 | u64 dma_mask; | ||
1786 | |||
1787 | /* | ||
1788 | * If controller supports 64 bit addressing mode, then set the DMA | ||
1789 | * mask to 64-bit, else set the DMA mask to 32-bit | ||
1790 | */ | ||
1791 | if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) | ||
1792 | dma_mask = DMA_BIT_MASK(64); | ||
1793 | else | ||
1794 | dma_mask = DMA_BIT_MASK(32); | ||
1795 | |||
1796 | err = pci_set_dma_mask(hba->pdev, dma_mask); | ||
1797 | if (err) | ||
1798 | return err; | ||
1799 | |||
1800 | err = pci_set_consistent_dma_mask(hba->pdev, dma_mask); | ||
1801 | |||
1802 | return err; | ||
1803 | } | ||
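A minimal sketch of the capability-based DMA mask selection performed above: the 64AS bit of the capabilities register decides between a 64-bit and a 32-bit mask. MASK_64_ADDRESSING_SUPPORT matches ufshci.h below, DMA_BIT_MASK follows the kernel's definition, and the capabilities register value used here is hypothetical.

/* Illustrative sketch only. */
#include <stdint.h>
#include <stdio.h>

#define MASK_64_ADDRESSING_SUPPORT 0x01000000
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	uint32_t capabilities = 0x0107001f;	/* hypothetical CAP register value */
	uint64_t dma_mask;

	if (capabilities & MASK_64_ADDRESSING_SUPPORT)
		dma_mask = DMA_BIT_MASK(64);
	else
		dma_mask = DMA_BIT_MASK(32);

	printf("dma_mask = 0x%016llx\n", (unsigned long long)dma_mask);
	return 0;
}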
1804 | |||
1805 | /** | ||
1806 | * ufshcd_probe - probe routine of the driver | ||
1807 | * @pdev: pointer to PCI device handle | ||
1808 | * @id: PCI device id | ||
1809 | * | ||
1810 | * Returns 0 on success, non-zero value on failure | ||
1811 | */ | ||
1812 | static int __devinit | ||
1813 | ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id) | ||
1814 | { | ||
1815 | struct Scsi_Host *host; | ||
1816 | struct ufs_hba *hba; | ||
1817 | int err; | ||
1818 | |||
1819 | err = pci_enable_device(pdev); | ||
1820 | if (err) { | ||
1821 | dev_err(&pdev->dev, "pci_enable_device failed\n"); | ||
1822 | goto out_error; | ||
1823 | } | ||
1824 | |||
1825 | pci_set_master(pdev); | ||
1826 | |||
1827 | host = scsi_host_alloc(&ufshcd_driver_template, | ||
1828 | sizeof(struct ufs_hba)); | ||
1829 | if (!host) { | ||
1830 | dev_err(&pdev->dev, "scsi_host_alloc failed\n"); | ||
1831 | err = -ENOMEM; | ||
1832 | goto out_disable; | ||
1833 | } | ||
1834 | hba = shost_priv(host); | ||
1835 | |||
1836 | err = pci_request_regions(pdev, UFSHCD); | ||
1837 | if (err < 0) { | ||
1838 | dev_err(&pdev->dev, "request regions failed\n"); | ||
1839 | goto out_disable; | ||
1840 | } | ||
1841 | |||
1842 | hba->mmio_base = pci_ioremap_bar(pdev, 0); | ||
1843 | if (!hba->mmio_base) { | ||
1844 | dev_err(&pdev->dev, "memory map failed\n"); | ||
1845 | err = -ENOMEM; | ||
1846 | goto out_release_regions; | ||
1847 | } | ||
1848 | |||
1849 | hba->host = host; | ||
1850 | hba->pdev = pdev; | ||
1851 | |||
1852 | /* Read capabilities registers */ | ||
1853 | ufshcd_hba_capabilities(hba); | ||
1854 | |||
1855 | /* Get UFS version supported by the controller */ | ||
1856 | hba->ufs_version = ufshcd_get_ufs_version(hba); | ||
1857 | |||
1858 | err = ufshcd_set_dma_mask(hba); | ||
1859 | if (err) { | ||
1860 | dev_err(&pdev->dev, "set dma mask failed\n"); | ||
1861 | goto out_iounmap; | ||
1862 | } | ||
1863 | |||
1864 | /* Allocate memory for host memory space */ | ||
1865 | err = ufshcd_memory_alloc(hba); | ||
1866 | if (err) { | ||
1867 | dev_err(&pdev->dev, "Memory allocation failed\n"); | ||
1868 | goto out_iounmap; | ||
1869 | } | ||
1870 | |||
1871 | /* Configure LRB */ | ||
1872 | ufshcd_host_memory_configure(hba); | ||
1873 | |||
1874 | host->can_queue = hba->nutrs; | ||
1875 | host->cmd_per_lun = hba->nutrs; | ||
1876 | host->max_id = UFSHCD_MAX_ID; | ||
1877 | host->max_lun = UFSHCD_MAX_LUNS; | ||
1878 | host->max_channel = UFSHCD_MAX_CHANNEL; | ||
1879 | host->unique_id = host->host_no; | ||
1880 | host->max_cmd_len = MAX_CDB_SIZE; | ||
1881 | |||
1882 | /* Initialize wait queue for task management */ | ||
1883 | init_waitqueue_head(&hba->ufshcd_tm_wait_queue); | ||
1884 | |||
1885 | /* Initialize work queues */ | ||
1886 | INIT_WORK(&hba->uic_workq, ufshcd_uic_cc_handler); | ||
1887 | INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler); | ||
1888 | |||
1889 | /* IRQ registration */ | ||
1890 | err = request_irq(pdev->irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); | ||
1891 | if (err) { | ||
1892 | dev_err(&pdev->dev, "request irq failed\n"); | ||
1893 | goto out_lrb_free; | ||
1894 | } | ||
1895 | |||
1896 | /* Enable SCSI tag mapping */ | ||
1897 | err = scsi_init_shared_tag_map(host, host->can_queue); | ||
1898 | if (err) { | ||
1899 | dev_err(&pdev->dev, "init shared queue failed\n"); | ||
1900 | goto out_free_irq; | ||
1901 | } | ||
1902 | |||
1903 | pci_set_drvdata(pdev, hba); | ||
1904 | |||
1905 | err = scsi_add_host(host, &pdev->dev); | ||
1906 | if (err) { | ||
1907 | dev_err(&pdev->dev, "scsi_add_host failed\n"); | ||
1908 | goto out_free_irq; | ||
1909 | } | ||
1910 | |||
1911 | /* Initialization routine */ | ||
1912 | err = ufshcd_initialize_hba(hba); | ||
1913 | if (err) { | ||
1914 | dev_err(&pdev->dev, "Initialization failed\n"); | ||
1915 | goto out_free_irq; | ||
1916 | } | ||
1917 | |||
1918 | return 0; | ||
1919 | |||
1920 | out_free_irq: | ||
1921 | free_irq(pdev->irq, hba); | ||
1922 | out_lrb_free: | ||
1923 | ufshcd_free_hba_memory(hba); | ||
1924 | out_iounmap: | ||
1925 | iounmap(hba->mmio_base); | ||
1926 | out_release_regions: | ||
1927 | pci_release_regions(pdev); | ||
1928 | out_disable: | ||
1929 | scsi_host_put(host); | ||
1930 | pci_clear_master(pdev); | ||
1931 | pci_disable_device(pdev); | ||
1932 | out_error: | ||
1933 | return err; | ||
1934 | } | ||
1935 | |||
1936 | static DEFINE_PCI_DEVICE_TABLE(ufshcd_pci_tbl) = { | ||
1937 | { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, | ||
1938 | { } /* terminate list */ | ||
1939 | }; | ||
1940 | |||
1941 | MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl); | ||
1942 | |||
1943 | static struct pci_driver ufshcd_pci_driver = { | ||
1944 | .name = UFSHCD, | ||
1945 | .id_table = ufshcd_pci_tbl, | ||
1946 | .probe = ufshcd_probe, | ||
1947 | .remove = __devexit_p(ufshcd_remove), | ||
1948 | .shutdown = ufshcd_shutdown, | ||
1949 | #ifdef CONFIG_PM | ||
1950 | .suspend = ufshcd_suspend, | ||
1951 | .resume = ufshcd_resume, | ||
1952 | #endif | ||
1953 | }; | ||
1954 | |||
1955 | /** | ||
1956 | * ufshcd_init - Driver registration routine | ||
1957 | */ | ||
1958 | static int __init ufshcd_init(void) | ||
1959 | { | ||
1960 | return pci_register_driver(&ufshcd_pci_driver); | ||
1961 | } | ||
1962 | module_init(ufshcd_init); | ||
1963 | |||
1964 | /** | ||
1965 | * ufshcd_exit - Driver exit clean-up routine | ||
1966 | */ | ||
1967 | static void __exit ufshcd_exit(void) | ||
1968 | { | ||
1969 | pci_unregister_driver(&ufshcd_pci_driver); | ||
1970 | } | ||
1971 | module_exit(ufshcd_exit); | ||
1972 | |||
1973 | |||
1974 | MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>, " | ||
1975 | "Vinayak Holikatti <h.vinayak@samsung.com>"); | ||
1976 | MODULE_DESCRIPTION("Generic UFS host controller driver"); | ||
1977 | MODULE_LICENSE("GPL"); | ||
1978 | MODULE_VERSION(UFSHCD_DRIVER_VERSION); | ||
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h new file mode 100644 index 000000000000..6e3510f71167 --- /dev/null +++ b/drivers/scsi/ufs/ufshci.h | |||
@@ -0,0 +1,376 @@ | |||
1 | /* | ||
2 | * Universal Flash Storage Host controller driver | ||
3 | * | ||
4 | * This code is based on drivers/scsi/ufs/ufshci.h | ||
5 | * Copyright (C) 2011-2012 Samsung India Software Operations | ||
6 | * | ||
7 | * Santosh Yaraganavi <santosh.sy@samsung.com> | ||
8 | * Vinayak Holikatti <h.vinayak@samsung.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public License | ||
12 | * as published by the Free Software Foundation; either version 2 | ||
13 | * of the License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 | * GNU General Public License for more details. | ||
19 | * | ||
20 | * NO WARRANTY | ||
21 | * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR | ||
22 | * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT | ||
23 | * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, | ||
24 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is | ||
25 | * solely responsible for determining the appropriateness of using and | ||
26 | * distributing the Program and assumes all risks associated with its | ||
27 | * exercise of rights under this Agreement, including but not limited to | ||
28 | * the risks and costs of program errors, damage to or loss of data, | ||
29 | * programs or equipment, and unavailability or interruption of operations. | ||
30 | |||
31 | * DISCLAIMER OF LIABILITY | ||
32 | * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY | ||
33 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
34 | * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND | ||
35 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR | ||
36 | * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE | ||
37 | * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED | ||
38 | * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES | ||
39 | |||
40 | * You should have received a copy of the GNU General Public License | ||
41 | * along with this program; if not, write to the Free Software | ||
42 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, | ||
43 | * USA. | ||
44 | */ | ||
45 | |||
46 | #ifndef _UFSHCI_H | ||
47 | #define _UFSHCI_H | ||
48 | |||
49 | enum { | ||
50 | TASK_REQ_UPIU_SIZE_DWORDS = 8, | ||
51 | TASK_RSP_UPIU_SIZE_DWORDS = 8, | ||
52 | ALIGNED_UPIU_SIZE = 128, | ||
53 | }; | ||
54 | |||
55 | /* UFSHCI Registers */ | ||
56 | enum { | ||
57 | REG_CONTROLLER_CAPABILITIES = 0x00, | ||
58 | REG_UFS_VERSION = 0x08, | ||
59 | REG_CONTROLLER_DEV_ID = 0x10, | ||
60 | REG_CONTROLLER_PROD_ID = 0x14, | ||
61 | REG_INTERRUPT_STATUS = 0x20, | ||
62 | REG_INTERRUPT_ENABLE = 0x24, | ||
63 | REG_CONTROLLER_STATUS = 0x30, | ||
64 | REG_CONTROLLER_ENABLE = 0x34, | ||
65 | REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER = 0x38, | ||
66 | REG_UIC_ERROR_CODE_DATA_LINK_LAYER = 0x3C, | ||
67 | REG_UIC_ERROR_CODE_NETWORK_LAYER = 0x40, | ||
68 | REG_UIC_ERROR_CODE_TRANSPORT_LAYER = 0x44, | ||
69 | REG_UIC_ERROR_CODE_DME = 0x48, | ||
70 | REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL = 0x4C, | ||
71 | REG_UTP_TRANSFER_REQ_LIST_BASE_L = 0x50, | ||
72 | REG_UTP_TRANSFER_REQ_LIST_BASE_H = 0x54, | ||
73 | REG_UTP_TRANSFER_REQ_DOOR_BELL = 0x58, | ||
74 | REG_UTP_TRANSFER_REQ_LIST_CLEAR = 0x5C, | ||
75 | REG_UTP_TRANSFER_REQ_LIST_RUN_STOP = 0x60, | ||
76 | REG_UTP_TASK_REQ_LIST_BASE_L = 0x70, | ||
77 | REG_UTP_TASK_REQ_LIST_BASE_H = 0x74, | ||
78 | REG_UTP_TASK_REQ_DOOR_BELL = 0x78, | ||
79 | REG_UTP_TASK_REQ_LIST_CLEAR = 0x7C, | ||
80 | REG_UTP_TASK_REQ_LIST_RUN_STOP = 0x80, | ||
81 | REG_UIC_COMMAND = 0x90, | ||
82 | REG_UIC_COMMAND_ARG_1 = 0x94, | ||
83 | REG_UIC_COMMAND_ARG_2 = 0x98, | ||
84 | REG_UIC_COMMAND_ARG_3 = 0x9C, | ||
85 | }; | ||
86 | |||
87 | /* Controller capability masks */ | ||
88 | enum { | ||
89 | MASK_TRANSFER_REQUESTS_SLOTS = 0x0000001F, | ||
90 | MASK_TASK_MANAGEMENT_REQUEST_SLOTS = 0x00070000, | ||
91 | MASK_64_ADDRESSING_SUPPORT = 0x01000000, | ||
92 | MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT = 0x02000000, | ||
93 | MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000, | ||
94 | }; | ||
95 | |||
96 | /* UFS Version 08h */ | ||
97 | #define MINOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 0) | ||
98 | #define MAJOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 16) | ||
99 | |||
100 | /* Controller UFSHCI version */ | ||
101 | enum { | ||
102 | UFSHCI_VERSION_10 = 0x00010000, | ||
103 | UFSHCI_VERSION_11 = 0x00010100, | ||
104 | }; | ||
105 | |||
106 | /* | ||
107 | * HCDDID - Host Controller Identification Descriptor | ||
108 | * - Device ID and Device Class 10h | ||
109 | */ | ||
110 | #define DEVICE_CLASS UFS_MASK(0xFFFF, 0) | ||
111 | #define DEVICE_ID UFS_MASK(0xFF, 24) | ||
112 | |||
113 | /* | ||
114 | * HCPMID - Host Controller Identification Descriptor | ||
115 | * - Product/Manufacturer ID 14h | ||
116 | */ | ||
117 | #define MANUFACTURE_ID_MASK UFS_MASK(0xFFFF, 0) | ||
118 | #define PRODUCT_ID_MASK UFS_MASK(0xFFFF, 16) | ||
119 | |||
120 | #define UFS_BIT(x) (1L << (x)) | ||
121 | |||
122 | #define UTP_TRANSFER_REQ_COMPL UFS_BIT(0) | ||
123 | #define UIC_DME_END_PT_RESET UFS_BIT(1) | ||
124 | #define UIC_ERROR UFS_BIT(2) | ||
125 | #define UIC_TEST_MODE UFS_BIT(3) | ||
126 | #define UIC_POWER_MODE UFS_BIT(4) | ||
127 | #define UIC_HIBERNATE_EXIT UFS_BIT(5) | ||
128 | #define UIC_HIBERNATE_ENTER UFS_BIT(6) | ||
129 | #define UIC_LINK_LOST UFS_BIT(7) | ||
130 | #define UIC_LINK_STARTUP UFS_BIT(8) | ||
131 | #define UTP_TASK_REQ_COMPL UFS_BIT(9) | ||
132 | #define UIC_COMMAND_COMPL UFS_BIT(10) | ||
133 | #define DEVICE_FATAL_ERROR UFS_BIT(11) | ||
134 | #define CONTROLLER_FATAL_ERROR UFS_BIT(16) | ||
135 | #define SYSTEM_BUS_FATAL_ERROR UFS_BIT(17) | ||
136 | |||
137 | #define UFSHCD_ERROR_MASK (UIC_ERROR |\ | ||
138 | DEVICE_FATAL_ERROR |\ | ||
139 | CONTROLLER_FATAL_ERROR |\ | ||
140 | SYSTEM_BUS_FATAL_ERROR) | ||
141 | |||
142 | #define INT_FATAL_ERRORS (DEVICE_FATAL_ERROR |\ | ||
143 | CONTROLLER_FATAL_ERROR |\ | ||
144 | SYSTEM_BUS_FATAL_ERROR) | ||
145 | |||
146 | /* HCS - Host Controller Status 30h */ | ||
147 | #define DEVICE_PRESENT UFS_BIT(0) | ||
148 | #define UTP_TRANSFER_REQ_LIST_READY UFS_BIT(1) | ||
149 | #define UTP_TASK_REQ_LIST_READY UFS_BIT(2) | ||
150 | #define UIC_COMMAND_READY UFS_BIT(3) | ||
151 | #define HOST_ERROR_INDICATOR UFS_BIT(4) | ||
152 | #define DEVICE_ERROR_INDICATOR UFS_BIT(5) | ||
153 | #define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8) | ||
154 | |||
155 | /* HCE - Host Controller Enable 34h */ | ||
156 | #define CONTROLLER_ENABLE UFS_BIT(0) | ||
157 | #define CONTROLLER_DISABLE 0x0 | ||
158 | |||
159 | /* UECPA - Host UIC Error Code PHY Adapter Layer 38h */ | ||
160 | #define UIC_PHY_ADAPTER_LAYER_ERROR UFS_BIT(31) | ||
161 | #define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F | ||
162 | |||
163 | /* UECDL - Host UIC Error Code Data Link Layer 3Ch */ | ||
164 | #define UIC_DATA_LINK_LAYER_ERROR UFS_BIT(31) | ||
165 | #define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF | ||
166 | #define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000 | ||
167 | |||
168 | /* UECN - Host UIC Error Code Network Layer 40h */ | ||
169 | #define UIC_NETWORK_LAYER_ERROR UFS_BIT(31) | ||
170 | #define UIC_NETWORK_LAYER_ERROR_CODE_MASK 0x7 | ||
171 | |||
172 | /* UECT - Host UIC Error Code Transport Layer 44h */ | ||
173 | #define UIC_TRANSPORT_LAYER_ERROR UFS_BIT(31) | ||
174 | #define UIC_TRANSPORT_LAYER_ERROR_CODE_MASK 0x7F | ||
175 | |||
176 | /* UECDME - Host UIC Error Code DME 48h */ | ||
177 | #define UIC_DME_ERROR UFS_BIT(31) | ||
178 | #define UIC_DME_ERROR_CODE_MASK 0x1 | ||
179 | |||
180 | #define INT_AGGR_TIMEOUT_VAL_MASK 0xFF | ||
181 | #define INT_AGGR_COUNTER_THRESHOLD_MASK UFS_MASK(0x1F, 8) | ||
182 | #define INT_AGGR_COUNTER_AND_TIMER_RESET UFS_BIT(16) | ||
183 | #define INT_AGGR_STATUS_BIT UFS_BIT(20) | ||
184 | #define INT_AGGR_PARAM_WRITE UFS_BIT(24) | ||
185 | #define INT_AGGR_ENABLE UFS_BIT(31) | ||
186 | |||
187 | /* UTRLRSR - UTP Transfer Request Run-Stop Register 60h */ | ||
188 | #define UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT UFS_BIT(0) | ||
189 | |||
190 | /* UTMRLRSR - UTP Task Management Request Run-Stop Register 80h */ | ||
191 | #define UTP_TASK_REQ_LIST_RUN_STOP_BIT UFS_BIT(0) | ||
192 | |||
193 | /* UICCMD - UIC Command */ | ||
194 | #define COMMAND_OPCODE_MASK 0xFF | ||
195 | #define GEN_SELECTOR_INDEX_MASK 0xFFFF | ||
196 | |||
197 | #define MIB_ATTRIBUTE_MASK UFS_MASK(0xFFFF, 16) | ||
198 | #define RESET_LEVEL 0xFF | ||
199 | |||
200 | #define ATTR_SET_TYPE_MASK UFS_MASK(0xFF, 16) | ||
201 | #define CONFIG_RESULT_CODE_MASK 0xFF | ||
202 | #define GENERIC_ERROR_CODE_MASK 0xFF | ||
203 | |||
204 | /* UIC Commands */ | ||
205 | enum { | ||
206 | UIC_CMD_DME_GET = 0x01, | ||
207 | UIC_CMD_DME_SET = 0x02, | ||
208 | UIC_CMD_DME_PEER_GET = 0x03, | ||
209 | UIC_CMD_DME_PEER_SET = 0x04, | ||
210 | UIC_CMD_DME_POWERON = 0x10, | ||
211 | UIC_CMD_DME_POWEROFF = 0x11, | ||
212 | UIC_CMD_DME_ENABLE = 0x12, | ||
213 | UIC_CMD_DME_RESET = 0x14, | ||
214 | UIC_CMD_DME_END_PT_RST = 0x15, | ||
215 | UIC_CMD_DME_LINK_STARTUP = 0x16, | ||
216 | UIC_CMD_DME_HIBER_ENTER = 0x17, | ||
217 | UIC_CMD_DME_HIBER_EXIT = 0x18, | ||
218 | UIC_CMD_DME_TEST_MODE = 0x1A, | ||
219 | }; | ||
220 | |||
221 | /* UIC Config result code / Generic error code */ | ||
222 | enum { | ||
223 | UIC_CMD_RESULT_SUCCESS = 0x00, | ||
224 | UIC_CMD_RESULT_INVALID_ATTR = 0x01, | ||
225 | UIC_CMD_RESULT_FAILURE = 0x01, | ||
226 | UIC_CMD_RESULT_INVALID_ATTR_VALUE = 0x02, | ||
227 | UIC_CMD_RESULT_READ_ONLY_ATTR = 0x03, | ||
228 | UIC_CMD_RESULT_WRITE_ONLY_ATTR = 0x04, | ||
229 | UIC_CMD_RESULT_BAD_INDEX = 0x05, | ||
230 | UIC_CMD_RESULT_LOCKED_ATTR = 0x06, | ||
231 | UIC_CMD_RESULT_BAD_TEST_FEATURE_INDEX = 0x07, | ||
232 | UIC_CMD_RESULT_PEER_COMM_FAILURE = 0x08, | ||
233 | UIC_CMD_RESULT_BUSY = 0x09, | ||
234 | UIC_CMD_RESULT_DME_FAILURE = 0x0A, | ||
235 | }; | ||
236 | |||
237 | #define MASK_UIC_COMMAND_RESULT 0xFF | ||
238 | |||
239 | #define INT_AGGR_COUNTER_THRESHOLD_VALUE (0x1F << 8) | ||
240 | #define INT_AGGR_TIMEOUT_VALUE (0x02) | ||
241 | |||
242 | /* Interrupt disable masks */ | ||
243 | enum { | ||
244 | /* Interrupt disable mask for UFSHCI v1.0 */ | ||
245 | INTERRUPT_DISABLE_MASK_10 = 0xFFFF, | ||
246 | |||
247 | /* Interrupt disable mask for UFSHCI v1.1 */ | ||
248 | INTERRUPT_DISABLE_MASK_11 = 0x0, | ||
249 | }; | ||
250 | |||
251 | /* | ||
252 | * Request Descriptor Definitions | ||
253 | */ | ||
254 | |||
255 | /* Transfer request command type */ | ||
256 | enum { | ||
257 | UTP_CMD_TYPE_SCSI = 0x0, | ||
258 | UTP_CMD_TYPE_UFS = 0x1, | ||
259 | UTP_CMD_TYPE_DEV_MANAGE = 0x2, | ||
260 | }; | ||
261 | |||
262 | enum { | ||
263 | UTP_SCSI_COMMAND = 0x00000000, | ||
264 | UTP_NATIVE_UFS_COMMAND = 0x10000000, | ||
265 | UTP_DEVICE_MANAGEMENT_FUNCTION = 0x20000000, | ||
266 | UTP_REQ_DESC_INT_CMD = 0x01000000, | ||
267 | }; | ||
268 | |||
269 | /* UTP Transfer Request Data Direction (DD) */ | ||
270 | enum { | ||
271 | UTP_NO_DATA_TRANSFER = 0x00000000, | ||
272 | UTP_HOST_TO_DEVICE = 0x02000000, | ||
273 | UTP_DEVICE_TO_HOST = 0x04000000, | ||
274 | }; | ||
275 | |||
276 | /* Overall command status values */ | ||
277 | enum { | ||
278 | OCS_SUCCESS = 0x0, | ||
279 | OCS_INVALID_CMD_TABLE_ATTR = 0x1, | ||
280 | OCS_INVALID_PRDT_ATTR = 0x2, | ||
281 | OCS_MISMATCH_DATA_BUF_SIZE = 0x3, | ||
282 | OCS_MISMATCH_RESP_UPIU_SIZE = 0x4, | ||
283 | OCS_PEER_COMM_FAILURE = 0x5, | ||
284 | OCS_ABORTED = 0x6, | ||
285 | OCS_FATAL_ERROR = 0x7, | ||
286 | OCS_INVALID_COMMAND_STATUS = 0x0F, | ||
287 | MASK_OCS = 0x0F, | ||
288 | }; | ||
289 | |||
290 | /** | ||
291 | * struct ufshcd_sg_entry - UFSHCI PRD Entry | ||
292 | * @base_addr: Lower 32bit physical address DW-0 | ||
293 | * @upper_addr: Upper 32bit physical address DW-1 | ||
294 | * @reserved: Reserved for future use DW-2 | ||
295 | * @size: size of physical segment DW-3 | ||
296 | */ | ||
297 | struct ufshcd_sg_entry { | ||
298 | u32 base_addr; | ||
299 | u32 upper_addr; | ||
300 | u32 reserved; | ||
301 | u32 size; | ||
302 | }; | ||
303 | |||
304 | /** | ||
305 | * struct utp_transfer_cmd_desc - UFS Command Descriptor structure | ||
306 | * @command_upiu: Command UPIU Frame address | ||
307 | * @response_upiu: Response UPIU Frame address | ||
308 | * @prd_table: Physical Region Descriptor | ||
309 | */ | ||
310 | struct utp_transfer_cmd_desc { | ||
311 | u8 command_upiu[ALIGNED_UPIU_SIZE]; | ||
312 | u8 response_upiu[ALIGNED_UPIU_SIZE]; | ||
313 | struct ufshcd_sg_entry prd_table[SG_ALL]; | ||
314 | }; | ||
315 | |||
316 | /** | ||
317 | * struct request_desc_header - Descriptor Header common to both UTRD and UTMRD | ||
318 | * @dword_0: Descriptor Header DW0 | ||
319 | * @dword_1: Descriptor Header DW1 | ||
320 | * @dword_2: Descriptor Header DW2 | ||
321 | * @dword_3: Descriptor Header DW3 | ||
322 | */ | ||
323 | struct request_desc_header { | ||
324 | u32 dword_0; | ||
325 | u32 dword_1; | ||
326 | u32 dword_2; | ||
327 | u32 dword_3; | ||
328 | }; | ||
329 | |||
330 | /** | ||
331 | * struct utp_transfer_req_desc - UTRD structure | ||
332 | * @header: UTRD header DW-0 to DW-3 | ||
333 | * @command_desc_base_addr_lo: UCD base address low DW-4 | ||
334 | * @command_desc_base_addr_hi: UCD base address high DW-5 | ||
335 | * @response_upiu_length: response UPIU length DW-6 | ||
336 | * @response_upiu_offset: response UPIU offset DW-6 | ||
337 | * @prd_table_length: Physical region descriptor length DW-7 | ||
338 | * @prd_table_offset: Physical region descriptor offset DW-7 | ||
339 | */ | ||
340 | struct utp_transfer_req_desc { | ||
341 | |||
342 | /* DW 0-3 */ | ||
343 | struct request_desc_header header; | ||
344 | |||
345 | /* DW 4-5 */ | ||
346 | u32 command_desc_base_addr_lo; | ||
347 | u32 command_desc_base_addr_hi; | ||
348 | |||
349 | /* DW 6 */ | ||
350 | u16 response_upiu_length; | ||
351 | u16 response_upiu_offset; | ||
352 | |||
353 | /* DW 7 */ | ||
354 | u16 prd_table_length; | ||
355 | u16 prd_table_offset; | ||
356 | }; | ||
357 | |||
358 | /** | ||
359 | * struct utp_task_req_desc - UTMRD structure | ||
360 | * @header: UTMRD header DW-0 to DW-3 | ||
361 | * @task_req_upiu: Task request UPIU DW-4 to DW-11 | ||
362 | * @task_rsp_upiu: Task response UPIU DW-12 to DW-19 | ||
363 | */ | ||
364 | struct utp_task_req_desc { | ||
365 | |||
366 | /* DW 0-3 */ | ||
367 | struct request_desc_header header; | ||
368 | |||
369 | /* DW 4-11 */ | ||
370 | u32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS]; | ||
371 | |||
372 | /* DW 12-19 */ | ||
373 | u32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS]; | ||
374 | }; | ||
375 | |||
376 | #endif /* End of Header */ | ||
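
A quick illustration of how the definitions above compose in the host driver: interrupt
aggregation is armed by writing the INT_AGGR_* fields into the UTRIACR register, and a
completed transfer request is judged by the Overall Command Status the controller writes
into DW-2 of the UTRD header. The sketch below is illustrative only; the function names
and the __iomem pointer handed in are assumptions, not part of this patch (writel and
le32_to_cpu come from the usual kernel I/O and byteorder headers).

/* Sketch: arm interrupt aggregation with the threshold/timeout values defined above. */
static inline void example_enable_int_aggr(void __iomem *utriacr)
{
	writel(INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
	       INT_AGGR_COUNTER_THRESHOLD_VALUE | INT_AGGR_TIMEOUT_VALUE,
	       utriacr);
}

/* Sketch: extract the Overall Command Status from a completed UTRD (DW-2, low byte). */
static inline int example_get_ocs(struct utp_transfer_req_desc *utrd)
{
	return le32_to_cpu(utrd->header.dword_2) & MASK_OCS;
}
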
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index 7264116185d5..4411d4224401 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c | |||
@@ -17,7 +17,7 @@ | |||
17 | * along with this program; if not, write to the Free Software | 17 | * along with this program; if not, write to the Free Software |
18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | 18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
19 | * | 19 | * |
20 | * Maintained by: Alok N Kataria <akataria@vmware.com> | 20 | * Maintained by: Arvind Kumar <arvindkumar@vmware.com> |
21 | * | 21 | * |
22 | */ | 22 | */ |
23 | 23 | ||
@@ -1178,11 +1178,67 @@ static int __devinit pvscsi_allocate_sg(struct pvscsi_adapter *adapter) | |||
1178 | return 0; | 1178 | return 0; |
1179 | } | 1179 | } |
1180 | 1180 | ||
1181 | /* | ||
1182 | * Query the device, fetch the config info and return the | ||
1183 | * maximum number of targets on the adapter. In case of | ||
1184 | * failure due to any reason return default i.e. 16. | ||
1185 | */ | ||
1186 | static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter) | ||
1187 | { | ||
1188 | struct PVSCSICmdDescConfigCmd cmd; | ||
1189 | struct PVSCSIConfigPageHeader *header; | ||
1190 | struct device *dev; | ||
1191 | dma_addr_t configPagePA; | ||
1192 | void *config_page; | ||
1193 | u32 numPhys = 16; | ||
1194 | |||
1195 | dev = pvscsi_dev(adapter); | ||
1196 | config_page = pci_alloc_consistent(adapter->dev, PAGE_SIZE, | ||
1197 | &configPagePA); | ||
1198 | if (!config_page) { | ||
1199 | dev_warn(dev, "vmw_pvscsi: failed to allocate memory for config page\n"); | ||
1200 | goto exit; | ||
1201 | } | ||
1202 | BUG_ON(configPagePA & ~PAGE_MASK); | ||
1203 | |||
1204 | /* Fetch config info from the device. */ | ||
1205 | cmd.configPageAddress = ((u64)PVSCSI_CONFIG_CONTROLLER_ADDRESS) << 32; | ||
1206 | cmd.configPageNum = PVSCSI_CONFIG_PAGE_CONTROLLER; | ||
1207 | cmd.cmpAddr = configPagePA; | ||
1208 | cmd._pad = 0; | ||
1209 | |||
1210 | /* | ||
1211 | * Mark the completion page header with error values. If the device | ||
1212 | * completes the command successfully, it sets the status values to | ||
1213 | * indicate success. | ||
1214 | */ | ||
1215 | header = config_page; | ||
1216 | memset(header, 0, sizeof *header); | ||
1217 | header->hostStatus = BTSTAT_INVPARAM; | ||
1218 | header->scsiStatus = SDSTAT_CHECK; | ||
1219 | |||
1220 | pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_CONFIG, &cmd, sizeof cmd); | ||
1221 | |||
1222 | if (header->hostStatus == BTSTAT_SUCCESS && | ||
1223 | header->scsiStatus == SDSTAT_GOOD) { | ||
1224 | struct PVSCSIConfigPageController *config; | ||
1225 | |||
1226 | config = config_page; | ||
1227 | numPhys = config->numPhys; | ||
1228 | } else | ||
1229 | dev_warn(dev, "vmw_pvscsi: PVSCSI_CMD_CONFIG failed. hostStatus = 0x%x, scsiStatus = 0x%x\n", | ||
1230 | header->hostStatus, header->scsiStatus); | ||
1231 | pci_free_consistent(adapter->dev, PAGE_SIZE, config_page, configPagePA); | ||
1232 | exit: | ||
1233 | return numPhys; | ||
1234 | } | ||
1235 | |||
1181 | static int __devinit pvscsi_probe(struct pci_dev *pdev, | 1236 | static int __devinit pvscsi_probe(struct pci_dev *pdev, |
1182 | const struct pci_device_id *id) | 1237 | const struct pci_device_id *id) |
1183 | { | 1238 | { |
1184 | struct pvscsi_adapter *adapter; | 1239 | struct pvscsi_adapter *adapter; |
1185 | struct Scsi_Host *host; | 1240 | struct Scsi_Host *host; |
1241 | struct device *dev; | ||
1186 | unsigned int i; | 1242 | unsigned int i; |
1187 | unsigned long flags = 0; | 1243 | unsigned long flags = 0; |
1188 | int error; | 1244 | int error; |
@@ -1272,6 +1328,13 @@ static int __devinit pvscsi_probe(struct pci_dev *pdev, | |||
1272 | } | 1328 | } |
1273 | 1329 | ||
1274 | /* | 1330 | /* |
1331 | * Ask the device for max number of targets. | ||
1332 | */ | ||
1333 | host->max_id = pvscsi_get_max_targets(adapter); | ||
1334 | dev = pvscsi_dev(adapter); | ||
1335 | dev_info(dev, "vmw_pvscsi: host->max_id: %u\n", host->max_id); | ||
1336 | |||
1337 | /* | ||
1275 | * From this point on we should reset the adapter if anything goes | 1338 | * From this point on we should reset the adapter if anything goes |
1276 | * wrong. | 1339 | * wrong. |
1277 | */ | 1340 | */ |
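
The config query added above follows a simple completion convention: the page header is
pre-loaded with failure codes (BTSTAT_INVPARAM / SDSTAT_CHECK), the PVSCSI_CMD_CONFIG
descriptor is issued, and the page contents are trusted only if the device overwrote both
status words with success values. A minimal sketch of that check, factored as a helper
that the patch itself does not add:

/* Sketch only: accept the config page only when the device reported success. */
static bool example_config_page_ok(const struct PVSCSIConfigPageHeader *h)
{
	return h->hostStatus == BTSTAT_SUCCESS && h->scsiStatus == SDSTAT_GOOD;
}
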
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h index 62e36e75715e..3546e8662e30 100644 --- a/drivers/scsi/vmw_pvscsi.h +++ b/drivers/scsi/vmw_pvscsi.h | |||
@@ -17,7 +17,7 @@ | |||
17 | * along with this program; if not, write to the Free Software | 17 | * along with this program; if not, write to the Free Software |
18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | 18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
19 | * | 19 | * |
20 | * Maintained by: Alok N Kataria <akataria@vmware.com> | 20 | * Maintained by: Arvind Kumar <arvindkumar@vmware.com> |
21 | * | 21 | * |
22 | */ | 22 | */ |
23 | 23 | ||
@@ -26,7 +26,7 @@ | |||
26 | 26 | ||
27 | #include <linux/types.h> | 27 | #include <linux/types.h> |
28 | 28 | ||
29 | #define PVSCSI_DRIVER_VERSION_STRING "1.0.1.0-k" | 29 | #define PVSCSI_DRIVER_VERSION_STRING "1.0.2.0-k" |
30 | 30 | ||
31 | #define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128 | 31 | #define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128 |
32 | 32 | ||
@@ -39,28 +39,45 @@ | |||
39 | * host adapter status/error codes | 39 | * host adapter status/error codes |
40 | */ | 40 | */ |
41 | enum HostBusAdapterStatus { | 41 | enum HostBusAdapterStatus { |
42 | BTSTAT_SUCCESS = 0x00, /* CCB complete normally with no errors */ | 42 | BTSTAT_SUCCESS = 0x00, /* CCB complete normally with no errors */ |
43 | BTSTAT_LINKED_COMMAND_COMPLETED = 0x0a, | 43 | BTSTAT_LINKED_COMMAND_COMPLETED = 0x0a, |
44 | BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG = 0x0b, | 44 | BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG = 0x0b, |
45 | BTSTAT_DATA_UNDERRUN = 0x0c, | 45 | BTSTAT_DATA_UNDERRUN = 0x0c, |
46 | BTSTAT_SELTIMEO = 0x11, /* SCSI selection timeout */ | 46 | BTSTAT_SELTIMEO = 0x11, /* SCSI selection timeout */ |
47 | BTSTAT_DATARUN = 0x12, /* data overrun/underrun */ | 47 | BTSTAT_DATARUN = 0x12, /* data overrun/underrun */ |
48 | BTSTAT_BUSFREE = 0x13, /* unexpected bus free */ | 48 | BTSTAT_BUSFREE = 0x13, /* unexpected bus free */ |
49 | BTSTAT_INVPHASE = 0x14, /* invalid bus phase or sequence requested by target */ | 49 | BTSTAT_INVPHASE = 0x14, /* invalid bus phase or sequence |
50 | BTSTAT_LUNMISMATCH = 0x17, /* linked CCB has different LUN from first CCB */ | 50 | * requested by target */ |
51 | BTSTAT_SENSFAILED = 0x1b, /* auto request sense failed */ | 51 | BTSTAT_LUNMISMATCH = 0x17, /* linked CCB has different LUN from |
52 | BTSTAT_TAGREJECT = 0x1c, /* SCSI II tagged queueing message rejected by target */ | 52 | * first CCB */ |
53 | BTSTAT_BADMSG = 0x1d, /* unsupported message received by the host adapter */ | 53 | BTSTAT_INVPARAM = 0x1a, /* invalid parameter in CCB or segment |
54 | BTSTAT_HAHARDWARE = 0x20, /* host adapter hardware failed */ | 54 | * list */ |
55 | BTSTAT_NORESPONSE = 0x21, /* target did not respond to SCSI ATN, sent a SCSI RST */ | 55 | BTSTAT_SENSFAILED = 0x1b, /* auto request sense failed */ |
56 | BTSTAT_SENTRST = 0x22, /* host adapter asserted a SCSI RST */ | 56 | BTSTAT_TAGREJECT = 0x1c, /* SCSI II tagged queueing message |
57 | BTSTAT_RECVRST = 0x23, /* other SCSI devices asserted a SCSI RST */ | 57 | * rejected by target */ |
58 | BTSTAT_DISCONNECT = 0x24, /* target device reconnected improperly (w/o tag) */ | 58 | BTSTAT_BADMSG = 0x1d, /* unsupported message received by the |
59 | BTSTAT_BUSRESET = 0x25, /* host adapter issued BUS device reset */ | 59 | * host adapter */ |
60 | BTSTAT_ABORTQUEUE = 0x26, /* abort queue generated */ | 60 | BTSTAT_HAHARDWARE = 0x20, /* host adapter hardware failed */ |
61 | BTSTAT_HASOFTWARE = 0x27, /* host adapter software error */ | 61 | BTSTAT_NORESPONSE = 0x21, /* target did not respond to SCSI ATN, |
62 | BTSTAT_HATIMEOUT = 0x30, /* host adapter hardware timeout error */ | 62 | * sent a SCSI RST */ |
63 | BTSTAT_SCSIPARITY = 0x34, /* SCSI parity error detected */ | 63 | BTSTAT_SENTRST = 0x22, /* host adapter asserted a SCSI RST */ |
64 | BTSTAT_RECVRST = 0x23, /* other SCSI devices asserted a SCSI | ||
65 | * RST */ | ||
66 | BTSTAT_DISCONNECT = 0x24, /* target device reconnected improperly | ||
67 | * (w/o tag) */ | ||
68 | BTSTAT_BUSRESET = 0x25, /* host adapter issued BUS device reset */ | ||
69 | BTSTAT_ABORTQUEUE = 0x26, /* abort queue generated */ | ||
70 | BTSTAT_HASOFTWARE = 0x27, /* host adapter software error */ | ||
71 | BTSTAT_HATIMEOUT = 0x30, /* host adapter hardware timeout error */ | ||
72 | BTSTAT_SCSIPARITY = 0x34, /* SCSI parity error detected */ | ||
73 | }; | ||
74 | |||
75 | /* | ||
76 | * SCSI device status values. | ||
77 | */ | ||
78 | enum ScsiDeviceStatus { | ||
79 | SDSTAT_GOOD = 0x00, /* No errors. */ | ||
80 | SDSTAT_CHECK = 0x02, /* Check condition. */ | ||
64 | }; | 81 | }; |
65 | 82 | ||
66 | /* | 83 | /* |
@@ -114,6 +131,29 @@ struct PVSCSICmdDescResetDevice { | |||
114 | } __packed; | 131 | } __packed; |
115 | 132 | ||
116 | /* | 133 | /* |
134 | * Command descriptor for PVSCSI_CMD_CONFIG -- | ||
135 | */ | ||
136 | |||
137 | struct PVSCSICmdDescConfigCmd { | ||
138 | u64 cmpAddr; | ||
139 | u64 configPageAddress; | ||
140 | u32 configPageNum; | ||
141 | u32 _pad; | ||
142 | } __packed; | ||
143 | |||
144 | enum PVSCSIConfigPageType { | ||
145 | PVSCSI_CONFIG_PAGE_CONTROLLER = 0x1958, | ||
146 | PVSCSI_CONFIG_PAGE_PHY = 0x1959, | ||
147 | PVSCSI_CONFIG_PAGE_DEVICE = 0x195a, | ||
148 | }; | ||
149 | |||
150 | enum PVSCSIConfigPageAddressType { | ||
151 | PVSCSI_CONFIG_CONTROLLER_ADDRESS = 0x2120, | ||
152 | PVSCSI_CONFIG_BUSTARGET_ADDRESS = 0x2121, | ||
153 | PVSCSI_CONFIG_PHY_ADDRESS = 0x2122, | ||
154 | }; | ||
155 | |||
156 | /* | ||
117 | * Command descriptor for PVSCSI_CMD_ABORT_CMD -- | 157 | * Command descriptor for PVSCSI_CMD_ABORT_CMD -- |
118 | * | 158 | * |
119 | * - currently does not support specifying the LUN. | 159 | * - currently does not support specifying the LUN. |
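
Note the addressing scheme of the new descriptor: configPageAddress is a 64-bit field
whose upper dword carries the PVSCSIConfigPageAddressType selector, which is why
pvscsi_get_max_targets() shifts PVSCSI_CONFIG_CONTROLLER_ADDRESS left by 32 bits before
issuing the command. A hedged restatement of that encoding for the controller page; the
helper name is hypothetical and not part of the patch:

static void example_fill_controller_config_cmd(struct PVSCSICmdDescConfigCmd *cmd,
					       dma_addr_t completion_pa)
{
	cmd->cmpAddr = completion_pa;	/* DMA address the device writes the page to */
	cmd->configPageAddress = (u64)PVSCSI_CONFIG_CONTROLLER_ADDRESS << 32;
	cmd->configPageNum = PVSCSI_CONFIG_PAGE_CONTROLLER;
	cmd->_pad = 0;
}
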
@@ -332,6 +372,27 @@ struct PVSCSIRingCmpDesc { | |||
332 | u32 _pad[2]; | 372 | u32 _pad[2]; |
333 | } __packed; | 373 | } __packed; |
334 | 374 | ||
375 | struct PVSCSIConfigPageHeader { | ||
376 | u32 pageNum; | ||
377 | u16 numDwords; | ||
378 | u16 hostStatus; | ||
379 | u16 scsiStatus; | ||
380 | u16 reserved[3]; | ||
381 | } __packed; | ||
382 | |||
383 | struct PVSCSIConfigPageController { | ||
384 | struct PVSCSIConfigPageHeader header; | ||
385 | u64 nodeWWN; /* Device name as defined in the SAS spec. */ | ||
386 | u16 manufacturer[64]; | ||
387 | u16 serialNumber[64]; | ||
388 | u16 opromVersion[32]; | ||
389 | u16 hwVersion[32]; | ||
390 | u16 firmwareVersion[32]; | ||
391 | u32 numPhys; | ||
392 | u8 useConsecutivePhyWWNs; | ||
393 | u8 reserved[3]; | ||
394 | } __packed; | ||
395 | |||
335 | /* | 396 | /* |
336 | * Interrupt status / IRQ bits. | 397 | * Interrupt status / IRQ bits. |
337 | */ | 398 | */ |