author     Linus Torvalds <torvalds@linux-foundation.org>   2011-03-26 00:06:13 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-03-26 00:06:13 -0400
commit     9f34217c846a96dea03f4418e2f27423658d3542 (patch)
tree       5b137af50db5758261700015911afb197ac8fc9f
parent     95e14ed7fc4b2db62eb597a70850a0fede48b78a (diff)
parent     3703b2c5d041a68095cdd22380c23ce27d449ad7 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (55 commits)
[SCSI] tcm_loop: Add multi-fabric Linux/SCSI LLD fabric module
[SCSI] qla4xxx: Use polling mode for disable interrupt mailbox completion
[SCSI] Revert "[SCSI] Retrieve the Caching mode page"
[SCSI] bnx2fc: IO completion not processed due to missed wakeup
[SCSI] qla4xxx: Update driver version to 5.02.00-k6
[SCSI] qla4xxx: masking required bits of add_fw_options during initialization
[SCSI] qla4xxx: added new function qla4xxx_relogin_all_devices
[SCSI] qla4xxx: add support for ql4xsess_recovery_tmo cmd line param
[SCSI] qla4xxx: Add support for ql4xmaxqdepth command line parameter
[SCSI] qla4xxx: cleanup function qla4xxx_process_ddb_changed
[SCSI] qla4xxx: Prevent other port reinitialization during remove_adapter
[SCSI] qla4xxx: remove unused ddb flag DF_NO_RELOGIN
[SCSI] qla4xxx: cleanup DDB relogin logic during initialization
[SCSI] qla4xxx: Do not retry ISP82XX initialization if H/W state is failed
[SCSI] qla4xxx: Do not send mbox command if FW is in failed state
[SCSI] qla4xxx: cleanup qla4xxx_initialize_ddb_list()
[SCSI] ses: add subenclosure support
[SCSI] bnx2fc: Bump version to 1.0.1
[SCSI] bnx2fc: Remove unnecessary module state checks
[SCSI] bnx2fc: Fix MTU issue by using static MTU
...
74 files changed, 5830 insertions(+), 962 deletions(-)
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py
index dbeb8a0d717..7ef9b843d52 100755
--- a/Documentation/target/tcm_mod_builder.py
+++ b/Documentation/target/tcm_mod_builder.py
@@ -239,8 +239,8 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 	buf += "#include <target/target_core_configfs.h>\n"
 	buf += "#include <target/target_core_base.h>\n"
 	buf += "#include <target/configfs_macros.h>\n\n"
-	buf += "#include <" + fabric_mod_name + "_base.h>\n"
-	buf += "#include <" + fabric_mod_name + "_fabric.h>\n\n"
+	buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
+	buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
 
 	buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
 	buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
@@ -289,6 +289,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 	buf += "{\n"
 	buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
 	buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
+	buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
 	buf += " kfree(nacl);\n"
 	buf += "}\n\n"
 
@@ -583,9 +584,9 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 	buf += "#include <target/target_core_fabric_lib.h>\n"
 	buf += "#include <target/target_core_device.h>\n"
 	buf += "#include <target/target_core_tpg.h>\n"
-	buf += "#include <target/target_core_configfs.h>\n"
-	buf += "#include <" + fabric_mod_name + "_base.h>\n"
-	buf += "#include <" + fabric_mod_name + "_fabric.h>\n\n"
+	buf += "#include <target/target_core_configfs.h>\n\n"
+	buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
+	buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
 
 	buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
 	buf += "{\n"
@@ -973,14 +974,13 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
 
 	buf = ""
-	f = fabric_mod_dir_var + "/Kbuild"
+	f = fabric_mod_dir_var + "/Makefile"
 	print "Writing file: " + f
 
 	p = open(f, 'w')
 	if not p:
 		tcm_mod_err("Unable to open file: " + f)
 
-	buf = "EXTRA_CFLAGS += -I$(srctree)/drivers/target/ -I$(srctree)/include/ -I$(srctree)/drivers/scsi/ -I$(srctree)/include/scsi/ -I$(srctree)/drivers/target/" + fabric_mod_name + "\n\n"
 	buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
 	buf += " " + fabric_mod_name + "_configfs.o\n"
 	buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
@@ -1018,7 +1018,7 @@ def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
 
 def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
 	buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
-	kbuild = tcm_dir + "/drivers/target/Kbuild"
+	kbuild = tcm_dir + "/drivers/target/Makefile"
 
 	f = open(kbuild, 'a')
 	f.write(buf)
@@ -1064,7 +1064,7 @@ def main(modname, proto_ident):
 	tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
 	tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
 
-	input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kbuild..? [yes,no]: ")
+	input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
 	if input == "yes" or input == "y":
 		tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
 
diff --git a/drivers/scsi/aacraid/Makefile b/drivers/scsi/aacraid/Makefile
index 92df4d6b614..1bd9fd18f7f 100644
--- a/drivers/scsi/aacraid/Makefile
+++ b/drivers/scsi/aacraid/Makefile
@@ -3,6 +3,6 @@
 obj-$(CONFIG_SCSI_AACRAID) := aacraid.o
 
 aacraid-objs := linit.o aachba.o commctrl.o comminit.o commsup.o \
-	dpcsup.o rx.o sa.o rkt.o nark.o
+	dpcsup.o rx.o sa.o rkt.o nark.o src.o
 
 ccflags-y := -Idrivers/scsi
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 7df2dd1d2c6..118ce83a737 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -5,7 +5,8 @@
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
  *
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -1486,7 +1487,9 @@ int aac_get_adapter_info(struct aac_dev* dev)
 		dev->a_ops.adapter_write = aac_write_block;
 	}
 	dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
-	if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
+	if (dev->adapter_info.options & AAC_OPT_NEW_COMM_TYPE1)
+		dev->adapter_info.options |= AAC_OPT_NEW_COMM;
+	if (!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
 		/*
 		 * Worst case size that could cause sg overflow when
 		 * we break up SG elements that are larger than 64KB.
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 4dbcc055ac7..29ab00016b7 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,7 +12,7 @@
  *----------------------------------------------------------------------------*/
 
 #ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 26400
+# define AAC_DRIVER_BUILD 28000
 # define AAC_DRIVER_BRANCH "-ms"
 #endif
 #define MAXIMUM_NUM_CONTAINERS 32
@@ -277,6 +277,16 @@ enum aac_queue_types {
 
 #define FsaNormal 1
 
+/* transport FIB header (PMC) */
+struct aac_fib_xporthdr {
+	u64	HostAddress;	/* FIB host address w/o xport header */
+	u32	Size;		/* FIB size excluding xport header */
+	u32	Handle;		/* driver handle to reference the FIB */
+	u64	Reserved[2];
+};
+
+#define ALIGN32 32
+
 /*
  * Define the FIB. The FIB is the where all the requested data and
  * command information are put to the application on the FSA adapter.
@@ -394,7 +404,9 @@ enum fib_xfer_state {
 	AdapterMicroFib		= (1<<17),
 	BIOSFibPath		= (1<<18),
 	FastResponseCapable	= (1<<19),
-	ApiFib			= (1<<20)	// Its an API Fib.
+	ApiFib			= (1<<20),	/* Its an API Fib */
+	/* PMC NEW COMM: There is no more AIF data pending */
+	NoMoreAifDataAvailable	= (1<<21)
 };
 
 /*
@@ -404,6 +416,7 @@ enum fib_xfer_state {
 
 #define ADAPTER_INIT_STRUCT_REVISION	3
 #define ADAPTER_INIT_STRUCT_REVISION_4	4 // rocket science
+#define ADAPTER_INIT_STRUCT_REVISION_6	6 /* PMC src */
 
 struct aac_init
 {
@@ -428,9 +441,15 @@ struct aac_init
 #define INITFLAGS_NEW_COMM_SUPPORTED	0x00000001
 #define INITFLAGS_DRIVER_USES_UTC_TIME	0x00000010
 #define INITFLAGS_DRIVER_SUPPORTS_PM	0x00000020
+#define INITFLAGS_NEW_COMM_TYPE1_SUPPORTED	0x00000041
 	__le32	MaxIoCommands;	/* max outstanding commands */
 	__le32	MaxIoSize;	/* largest I/O command */
 	__le32	MaxFibSize;	/* largest FIB to adapter */
+	/* ADAPTER_INIT_STRUCT_REVISION_5 begins here */
+	__le32	MaxNumAif;	/* max number of aif */
+	/* ADAPTER_INIT_STRUCT_REVISION_6 begins here */
+	__le32	HostRRQ_AddrLow;
+	__le32	HostRRQ_AddrHigh;	/* Host RRQ (response queue) for SRC */
 };
 
 enum aac_log_level {
@@ -685,7 +704,7 @@ struct rx_inbound {
 #define OutboundDoorbellReg	MUnit.ODR
 
 struct rx_registers {
-	struct rx_mu_registers	MUnit;		/* 1300h - 1344h */
+	struct rx_mu_registers	MUnit;		/* 1300h - 1347h */
 	__le32			reserved1[2];	/* 1348h - 134ch */
 	struct rx_inbound	IndexRegs;
 };
@@ -703,7 +722,7 @@ struct rx_registers {
 #define rkt_inbound rx_inbound
 
 struct rkt_registers {
-	struct rkt_mu_registers	MUnit;		 /* 1300h - 1344h */
+	struct rkt_mu_registers	MUnit;		 /* 1300h - 1347h */
 	__le32			reserved1[1006]; /* 1348h - 22fch */
 	struct rkt_inbound	IndexRegs;	 /* 2300h - */
 };
@@ -713,6 +732,44 @@ struct rkt_registers {
 #define rkt_writeb(AEP, CSR, value)	writeb(value, &((AEP)->regs.rkt->CSR))
 #define rkt_writel(AEP, CSR, value)	writel(value, &((AEP)->regs.rkt->CSR))
 
+/*
+ * PMC SRC message unit registers
+ */
+
+#define src_inbound rx_inbound
+
+struct src_mu_registers {
+			/*  PCI*| Name */
+	__le32	reserved0[8];	/*  00h | Reserved */
+	__le32	IDR;		/*  20h | Inbound Doorbell Register */
+	__le32	IISR;		/*  24h | Inbound Int. Status Register */
+	__le32	reserved1[3];	/*  28h | Reserved */
+	__le32	OIMR;		/*  34h | Outbound Int. Mask Register */
+	__le32	reserved2[25];	/*  38h | Reserved */
+	__le32	ODR_R;		/*  9ch | Outbound Doorbell Read */
+	__le32	ODR_C;		/*  a0h | Outbound Doorbell Clear */
+	__le32	reserved3[6];	/*  a4h | Reserved */
+	__le32	OMR;		/*  bch | Outbound Message Register */
+	__le32	IQ_L;		/*  c0h | Inbound Queue (Low address) */
+	__le32	IQ_H;		/*  c4h | Inbound Queue (High address) */
+};
+
+struct src_registers {
+	struct src_mu_registers MUnit;	/* 00h - c7h */
+	__le32 reserved1[130790];	/* c8h - 7fc5fh */
+	struct src_inbound IndexRegs;	/* 7fc60h */
+};
+
+#define src_readb(AEP, CSR)		readb(&((AEP)->regs.src.bar0->CSR))
+#define src_readl(AEP, CSR)		readl(&((AEP)->regs.src.bar0->CSR))
+#define src_writeb(AEP, CSR, value)	writeb(value, \
+				&((AEP)->regs.src.bar0->CSR))
+#define src_writel(AEP, CSR, value)	writel(value, \
+				&((AEP)->regs.src.bar0->CSR))
+
+#define SRC_ODR_SHIFT 12
+#define SRC_IDR_SHIFT 9
+
 typedef void (*fib_callback)(void *ctxt, struct fib *fibctx);
 
 struct aac_fib_context {
@@ -879,6 +936,7 @@ struct aac_supplement_adapter_info
 #define AAC_OPTION_MU_RESET		cpu_to_le32(0x00000001)
 #define AAC_OPTION_IGNORE_RESET		cpu_to_le32(0x00000002)
 #define AAC_OPTION_POWER_MANAGEMENT	cpu_to_le32(0x00000004)
+#define AAC_OPTION_DOORBELL_RESET	cpu_to_le32(0x00004000)
 #define AAC_SIS_VERSION_V3	3
 #define AAC_SIS_SLOT_UNKNOWN	0xFF
 
@@ -940,6 +998,7 @@ struct aac_bus_info_response {
 #define AAC_OPT_SUPPLEMENT_ADAPTER_INFO	cpu_to_le32(1<<16)
 #define AAC_OPT_NEW_COMM		cpu_to_le32(1<<17)
 #define AAC_OPT_NEW_COMM_64		cpu_to_le32(1<<18)
+#define AAC_OPT_NEW_COMM_TYPE1		cpu_to_le32(1<<28)
 
 struct aac_dev
 {
@@ -952,6 +1011,7 @@ struct aac_dev
 	 */
 	unsigned		max_fib_size;
 	unsigned		sg_tablesize;
+	unsigned		max_num_aif;
 
 	/*
	 * Map for 128 fib objects (64k)
@@ -980,10 +1040,21 @@ struct aac_dev
 	struct adapter_ops	a_ops;
 	unsigned long		fsrev;		/* Main driver's revision number */
 
-	unsigned		base_size;	/* Size of mapped in region */
+	unsigned long		dbg_base;	/* address of UART
+						 * debug buffer */
+
+	unsigned		base_size, dbg_size;	/* Size of
+							 * mapped in region */
+
 	struct aac_init		*init;		/* Holds initialization info to communicate with adapter */
 	dma_addr_t		init_pa;	/* Holds physical address of the init struct */
 
+	u32			*host_rrq;	/* response queue
+						 * if AAC_COMM_MESSAGE_TYPE1 */
+
+	dma_addr_t		host_rrq_pa;	/* phys. address */
+	u32			host_rrq_idx;	/* index into rrq buffer */
+
 	struct pci_dev		*pdev;		/* Our PCI interface */
 	void *			printfbuf;	/* pointer to buffer used for printf's from the adapter */
 	void *			comm_addr;	/* Base address of Comm area */
@@ -1003,14 +1074,20 @@ struct aac_dev
 	 */
 #ifndef AAC_MIN_FOOTPRINT_SIZE
 # define AAC_MIN_FOOTPRINT_SIZE 8192
+# define AAC_MIN_SRC_BAR0_SIZE 0x400000
+# define AAC_MIN_SRC_BAR1_SIZE 0x800
 #endif
 	union
 	{
 		struct sa_registers __iomem *sa;
 		struct rx_registers __iomem *rx;
 		struct rkt_registers __iomem *rkt;
+		struct {
+			struct src_registers __iomem *bar0;
+			char __iomem *bar1;
+		} src;
 	} regs;
-	volatile void __iomem *base;
+	volatile void __iomem *base, *dbg_base_mapped;
 	volatile struct rx_inbound __iomem *IndexRegs;
 	u32			OIMR; /* Mask Register Cache */
 	/*
@@ -1031,9 +1108,8 @@ struct aac_dev
 	u8			comm_interface;
 #	define AAC_COMM_PRODUCER 0
 #	define AAC_COMM_MESSAGE  1
-	/* macro side-effects BEWARE */
-#	define raw_io_interface \
-	init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4)
+#	define AAC_COMM_MESSAGE_TYPE1	3
 	u8			raw_io_interface;
 	u8			raw_io_64;
 	u8			printf_enabled;
 	u8			in_reset;
@@ -1789,6 +1865,10 @@ extern struct aac_common aac_config;
 #define DoorBellAdapterNormCmdNotFull	(1<<3)	/* Adapter -> Host */
 #define DoorBellAdapterNormRespNotFull	(1<<4)	/* Adapter -> Host */
 #define DoorBellPrintfReady		(1<<5)	/* Adapter -> Host */
+#define DoorBellAifPending		(1<<6)	/* Adapter -> Host */
+
+/* PMC specific outbound doorbell bits */
+#define PmDoorBellResponseSent		(1<<1)	/* Adapter -> Host */
 
 /*
  * For FIB communication, we need all of the following things
@@ -1831,6 +1911,9 @@ extern struct aac_common aac_config;
 #define AifReqAPIJobUpdate	109	/* Update a job report from the API */
 #define AifReqAPIJobFinish	110	/* Finish a job from the API */
 
+/* PMC NEW COMM: Request the event data */
+#define AifReqEvent		200
+
 /*
  * Adapter Initiated FIB command structures. Start with the adapter
  * initiated FIBs that really come from the adapter, and get responded
@@ -1886,10 +1969,13 @@ int aac_rx_init(struct aac_dev *dev);
 int aac_rkt_init(struct aac_dev *dev);
 int aac_nark_init(struct aac_dev *dev);
 int aac_sa_init(struct aac_dev *dev);
+int aac_src_init(struct aac_dev *dev);
 int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify);
 unsigned int aac_response_normal(struct aac_queue * q);
 unsigned int aac_command_normal(struct aac_queue * q);
-unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index);
+unsigned int aac_intr_normal(struct aac_dev *dev, u32 Index,
+			int isAif, int isFastResponse,
+			struct hw_fib *aif_fib);
 int aac_reset_adapter(struct aac_dev * dev, int forced);
 int aac_check_health(struct aac_dev * dev);
 int aac_command_thread(void *data);
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 645ddd9d9b9..8a0b3303317 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -5,7 +5,8 @@
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
  *
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index a7261486ccd..7ac8fdb5577 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -5,7 +5,8 @@
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
  *
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -52,12 +53,16 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
 	unsigned long size, align;
 	const unsigned long fibsize = 4096;
 	const unsigned long printfbufsiz = 256;
+	unsigned long host_rrq_size = 0;
 	struct aac_init *init;
 	dma_addr_t phys;
 	unsigned long aac_max_hostphysmempages;
 
-	size = fibsize + sizeof(struct aac_init) + commsize + commalign + printfbufsiz;
-
+	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1)
+		host_rrq_size = (dev->scsi_host_ptr->can_queue
+			+ AAC_NUM_MGT_FIB) * sizeof(u32);
+	size = fibsize + sizeof(struct aac_init) + commsize +
+		commalign + printfbufsiz + host_rrq_size;
 
 	base = pci_alloc_consistent(dev->pdev, size, &phys);
 
@@ -70,8 +75,14 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
 	dev->comm_phys = phys;
 	dev->comm_size = size;
 
-	dev->init = (struct aac_init *)(base + fibsize);
-	dev->init_pa = phys + fibsize;
+	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) {
+		dev->host_rrq = (u32 *)(base + fibsize);
+		dev->host_rrq_pa = phys + fibsize;
+		memset(dev->host_rrq, 0, host_rrq_size);
+	}
+
+	dev->init = (struct aac_init *)(base + fibsize + host_rrq_size);
+	dev->init_pa = phys + fibsize + host_rrq_size;
 
 	init = dev->init;
 
@@ -106,8 +117,13 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
 
 	init->InitFlags = 0;
 	if (dev->comm_interface == AAC_COMM_MESSAGE) {
-		init->InitFlags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
+		init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
 		dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n"));
+	} else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) {
+		init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_6);
+		init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_TYPE1_SUPPORTED);
+		dprintk((KERN_WARNING
+			"aacraid: New Comm Interface type1 enabled\n"));
 	}
 	init->InitFlags |= cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME |
 		INITFLAGS_DRIVER_SUPPORTS_PM);
@@ -115,11 +131,18 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
 	init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
 	init->MaxFibSize = cpu_to_le32(dev->max_fib_size);
 
+	init->MaxNumAif = cpu_to_le32(dev->max_num_aif);
+	init->HostRRQ_AddrHigh = (u32)((u64)dev->host_rrq_pa >> 32);
+	init->HostRRQ_AddrLow = (u32)(dev->host_rrq_pa & 0xffffffff);
+
+
 	/*
 	 * Increment the base address by the amount already used
 	 */
-	base = base + fibsize + sizeof(struct aac_init);
-	phys = (dma_addr_t)((ulong)phys + fibsize + sizeof(struct aac_init));
+	base = base + fibsize + host_rrq_size + sizeof(struct aac_init);
+	phys = (dma_addr_t)((ulong)phys + fibsize + host_rrq_size +
+		sizeof(struct aac_init));
+
 	/*
 	 * Align the beginning of Headers to commalign
 	 */
@@ -314,15 +337,22 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
 		- sizeof(struct aac_write) + sizeof(struct sgentry))
 			/ sizeof(struct sgentry);
 	dev->comm_interface = AAC_COMM_PRODUCER;
-	dev->raw_io_64 = 0;
+	dev->raw_io_interface = dev->raw_io_64 = 0;
+
 	if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
 		0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) &&
 		(status[0] == 0x00000001)) {
 		if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64))
 			dev->raw_io_64 = 1;
-		if (dev->a_ops.adapter_comm &&
-		    (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM)))
-			dev->comm_interface = AAC_COMM_MESSAGE;
+		if (dev->a_ops.adapter_comm) {
+			if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE1)) {
+				dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
+				dev->raw_io_interface = 1;
+			} else if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM)) {
+				dev->comm_interface = AAC_COMM_MESSAGE;
+				dev->raw_io_interface = 1;
+			}
+		}
 		if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
 		    (status[2] > dev->base_size)) {
 			aac_adapter_ioremap(dev, 0);
@@ -350,10 +380,12 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
 	 * status[3] & 0xFFFF maximum number FIBs outstanding
 	 */
 	host->max_sectors = (status[1] >> 16) << 1;
-	dev->max_fib_size = status[1] & 0xFFFF;
+	/* Multiple of 32 for PMC */
+	dev->max_fib_size = status[1] & 0xFFE0;
 	host->sg_tablesize = status[2] >> 16;
 	dev->sg_tablesize = status[2] & 0xFFFF;
 	host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB;
+	dev->max_num_aif = status[4] & 0xFFFF;
 	/*
 	 * NOTE:
 	 * All these overrides are based on a fixed internal
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 060ac4bd5a1..dd7ad3ba2da 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -5,7 +5,8 @@
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
  *
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -63,9 +64,11 @@ static int fib_map_alloc(struct aac_dev *dev)
 	  "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
 	  dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
 	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
-	if((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, dev->max_fib_size
-	  * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
-	  &dev->hw_fib_pa))==NULL)
+	dev->hw_fib_va = pci_alloc_consistent(dev->pdev,
+		(dev->max_fib_size + sizeof(struct aac_fib_xporthdr))
+		* (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1),
+		&dev->hw_fib_pa);
+	if (dev->hw_fib_va == NULL)
 		return -ENOMEM;
 	return 0;
 }
@@ -110,9 +113,22 @@ int aac_fib_setup(struct aac_dev * dev)
 	if (i<0)
 		return -ENOMEM;
 
+	/* 32 byte alignment for PMC */
+	hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1);
+	dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
+		(hw_fib_pa - dev->hw_fib_pa));
+	dev->hw_fib_pa = hw_fib_pa;
+	memset(dev->hw_fib_va, 0,
+		(dev->max_fib_size + sizeof(struct aac_fib_xporthdr)) *
+		(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
+
+	/* add Xport header */
+	dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
+		sizeof(struct aac_fib_xporthdr));
+	dev->hw_fib_pa += sizeof(struct aac_fib_xporthdr);
+
 	hw_fib = dev->hw_fib_va;
 	hw_fib_pa = dev->hw_fib_pa;
-	memset(hw_fib, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
 	/*
 	 * Initialise the fibs
 	 */
@@ -129,8 +145,10 @@ int aac_fib_setup(struct aac_dev * dev)
 		hw_fib->header.XferState = cpu_to_le32(0xffffffff);
 		hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size);
 		fibptr->hw_fib_pa = hw_fib_pa;
-		hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + dev->max_fib_size);
-		hw_fib_pa = hw_fib_pa + dev->max_fib_size;
+		hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
+			dev->max_fib_size + sizeof(struct aac_fib_xporthdr));
+		hw_fib_pa = hw_fib_pa +
+			dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
 	}
 	/*
 	 * Add the fib chain to the free list
@@ -664,9 +682,14 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
 	unsigned long nointr = 0;
 	unsigned long qflags;
 
+	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) {
+		kfree(hw_fib);
+		return 0;
+	}
+
 	if (hw_fib->header.XferState == 0) {
 		if (dev->comm_interface == AAC_COMM_MESSAGE)
-			kfree (hw_fib);
+			kfree(hw_fib);
 		return 0;
 	}
 	/*
@@ -674,7 +697,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
 	 */
 	if (hw_fib->header.StructType != FIB_MAGIC) {
 		if (dev->comm_interface == AAC_COMM_MESSAGE)
-			kfree (hw_fib);
+			kfree(hw_fib);
 		return -EINVAL;
 	}
 	/*
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index 9c7408fe8c7..f0c66a80ad1 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -5,7 +5,8 @@
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
  *
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -228,6 +229,48 @@ unsigned int aac_command_normal(struct aac_queue *q)
 	return 0;
 }
 
+/*
+ *
+ *	aac_aif_callback
+ *	@context: the context set in the fib - here it is scsi cmd
+ *	@fibptr: pointer to the fib
+ *
+ *	Handles the AIFs - new method (SRC)
+ *
+ */
+
+static void aac_aif_callback(void *context, struct fib * fibptr)
+{
+	struct fib *fibctx;
+	struct aac_dev *dev;
+	struct aac_aifcmd *cmd;
+	int status;
+
+	fibctx = (struct fib *)context;
+	BUG_ON(fibptr == NULL);
+	dev = fibptr->dev;
+
+	if (fibptr->hw_fib_va->header.XferState &
+	    cpu_to_le32(NoMoreAifDataAvailable)) {
+		aac_fib_complete(fibptr);
+		aac_fib_free(fibptr);
+		return;
+	}
+
+	aac_intr_normal(dev, 0, 1, 0, fibptr->hw_fib_va);
+
+	aac_fib_init(fibctx);
+	cmd = (struct aac_aifcmd *) fib_data(fibctx);
+	cmd->command = cpu_to_le32(AifReqEvent);
+
+	status = aac_fib_send(AifRequest,
+		fibctx,
+		sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
+		FsaNormal,
+		0, 1,
+		(fib_callback)aac_aif_callback, fibctx);
+}
+
 
 /**
  *	aac_intr_normal	-	Handle command replies
@@ -238,19 +281,17 @@ unsigned int aac_command_normal(struct aac_queue *q)
  *	know there is a response on our normal priority queue. We will pull off
  *	all QE there are and wake up all the waiters before exiting.
  */
-
-unsigned int aac_intr_normal(struct aac_dev * dev, u32 index)
+unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
+			int isAif, int isFastResponse, struct hw_fib *aif_fib)
 {
 	unsigned long mflags;
 	dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
-	if ((index & 0x00000002L)) {
+	if (isAif == 1) {	/* AIF - common */
 		struct hw_fib * hw_fib;
 		struct fib * fib;
 		struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
 		unsigned long flags;
 
-		if (index == 0xFFFFFFFEL) /* Special Case */
-			return 0; /* Do nothing */
 		/*
 		 * Allocate a FIB. For non queued stuff we can just use
 		 * the stack so we are happy. We need a fib object in order to
@@ -263,8 +304,13 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 index)
 			kfree (fib);
 			return 1;
 		}
-		memcpy(hw_fib, (struct hw_fib *)(((uintptr_t)(dev->regs.sa)) +
-			(index & ~0x00000002L)), sizeof(struct hw_fib));
+		if (aif_fib != NULL) {
+			memcpy(hw_fib, aif_fib, sizeof(struct hw_fib));
+		} else {
+			memcpy(hw_fib,
+				(struct hw_fib *)(((uintptr_t)(dev->regs.sa)) +
+				index), sizeof(struct hw_fib));
+		}
 		INIT_LIST_HEAD(&fib->fiblink);
 		fib->type = FSAFS_NTC_FIB_CONTEXT;
 		fib->size = sizeof(struct fib);
@@ -277,9 +323,26 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 index)
 		wake_up_interruptible(&q->cmdready);
 		spin_unlock_irqrestore(q->lock, flags);
 		return 1;
+	} else if (isAif == 2) {	/* AIF - new (SRC) */
+		struct fib *fibctx;
+		struct aac_aifcmd *cmd;
+
+		fibctx = aac_fib_alloc(dev);
+		if (!fibctx)
+			return 1;
+		aac_fib_init(fibctx);
+
+		cmd = (struct aac_aifcmd *) fib_data(fibctx);
+		cmd->command = cpu_to_le32(AifReqEvent);
+
+		return aac_fib_send(AifRequest,
+			fibctx,
+			sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
+			FsaNormal,
+			0, 1,
+			(fib_callback)aac_aif_callback, fibctx);
 	} else {
-		int fast = index & 0x01;
-		struct fib * fib = &dev->fibs[index >> 2];
+		struct fib *fib = &dev->fibs[index];
 		struct hw_fib * hwfib = fib->hw_fib_va;
 
 		/*
@@ -298,7 +361,7 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 index)
 			return 0;
 		}
 
-		if (fast) {
+		if (isFastResponse) {
 			/*
 			 * Doctor the fib
 			 */
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 2c93d9496d6..4ff26521d75 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -5,7 +5,8 @@
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
  *
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -54,7 +55,7 @@
 
 #include "aacraid.h"
 
-#define AAC_DRIVER_VERSION		"1.1-5"
+#define AAC_DRIVER_VERSION		"1.1-7"
 #ifndef AAC_DRIVER_BRANCH
 #define AAC_DRIVER_BRANCH		""
 #endif
@@ -161,6 +162,7 @@ static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
 	{ 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */
 	{ 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */
 	{ 0x9005, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 61 }, /* Adaptec NEMER/ARK Catch All */
+	{ 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Catch All */
 	{ 0,}
 };
 MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
@@ -235,7 +237,8 @@ static struct aac_driver_ident aac_drivers[] = {
 	{ aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend Catchall */
 	{ aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */
 	{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */
-	{ aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec NEMER/ARK Catch All */
+	{ aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */
+	{ aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec PMC Catch All */
 };
 
 /**
@@ -653,8 +656,10 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
 	 * This adapter needs a blind reset, only do so for Adapters that
 	 * support a register, instead of a commanded, reset.
 	 */
-	if ((aac->supplement_adapter_info.SupportedOptions2 &
-	  AAC_OPTION_MU_RESET) &&
+	if (((aac->supplement_adapter_info.SupportedOptions2 &
+	  AAC_OPTION_MU_RESET) ||
+	  (aac->supplement_adapter_info.SupportedOptions2 &
+	  AAC_OPTION_DOORBELL_RESET)) &&
 	  aac_check_reset &&
 	  ((aac_check_reset != 1) ||
 	   !(aac->supplement_adapter_info.SupportedOptions2 &
diff --git a/drivers/scsi/aacraid/nark.c b/drivers/scsi/aacraid/nark.c
index c55f7c862f0..f397d21a0c0 100644
--- a/drivers/scsi/aacraid/nark.c
+++ b/drivers/scsi/aacraid/nark.c
@@ -4,7 +4,8 @@
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
  *
- * Copyright (c) 2006-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
index 16d8db55002..be44de92429 100644
--- a/drivers/scsi/aacraid/rkt.c
+++ b/drivers/scsi/aacraid/rkt.c
@@ -5,7 +5,8 @@
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
  *
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 84d77fd86e5..ce530f113fd 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -5,7 +5,8 @@
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
  *
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -84,15 +85,35 @@ static irqreturn_t aac_rx_intr_producer(int irq, void *dev_id)
 
 static irqreturn_t aac_rx_intr_message(int irq, void *dev_id)
 {
+	int isAif, isFastResponse, isSpecial;
 	struct aac_dev *dev = dev_id;
 	u32 Index = rx_readl(dev, MUnit.OutboundQueue);
 	if (unlikely(Index == 0xFFFFFFFFL))
 		Index = rx_readl(dev, MUnit.OutboundQueue);
 	if (likely(Index != 0xFFFFFFFFL)) {
 		do {
-			if (unlikely(aac_intr_normal(dev, Index))) {
-				rx_writel(dev, MUnit.OutboundQueue, Index);
-				rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady);
+			isAif = isFastResponse = isSpecial = 0;
+			if (Index & 0x00000002L) {
+				isAif = 1;
+				if (Index == 0xFFFFFFFEL)
+					isSpecial = 1;
+				Index &= ~0x00000002L;
+			} else {
+				if (Index & 0x00000001L)
+					isFastResponse = 1;
+				Index >>= 2;
+			}
+			if (!isSpecial) {
+				if (unlikely(aac_intr_normal(dev,
+						Index, isAif,
+						isFastResponse, NULL))) {
+					rx_writel(dev,
+						MUnit.OutboundQueue,
+						Index);
+					rx_writel(dev,
+						MUnit.ODR,
+						DoorBellAdapterNormRespReady);
+				}
 			}
 			Index = rx_readl(dev, MUnit.OutboundQueue);
 		} while (Index != 0xFFFFFFFFL);
@@ -631,6 +652,10 @@ int _aac_rx_init(struct aac_dev *dev)
 			name, instance);
 		goto error_iounmap;
 	}
+	dev->dbg_base = dev->scsi_host_ptr->base;
+	dev->dbg_base_mapped = dev->base;
+	dev->dbg_size = dev->base_size;
+
 	aac_adapter_enable_int(dev);
 	/*
 	 * Tell the adapter that all is configured, and it can
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 622c21c68e6..e5d4457121e 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -5,7 +5,8 @@
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
  *
- * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -391,6 +392,10 @@ int aac_sa_init(struct aac_dev *dev)
 			name, instance);
 		goto error_iounmap;
 	}
+	dev->dbg_base = dev->scsi_host_ptr->base;
+	dev->dbg_base_mapped = dev->base;
+	dev->dbg_size = dev->base_size;
+
 	aac_adapter_enable_int(dev);
 
 	/*
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c new file mode 100644 index 00000000000..c2049466060 --- /dev/null +++ b/drivers/scsi/aacraid/src.c | |||
@@ -0,0 +1,594 @@ | |||
1 | /* | ||
2 | * Adaptec AAC series RAID controller driver | ||
3 | * (c) Copyright 2001 Red Hat Inc. | ||
4 | * | ||
5 | * based on the old aacraid driver that is.. | ||
6 | * Adaptec aacraid device driver for Linux. | ||
7 | * | ||
8 | * Copyright (c) 2000-2010 Adaptec, Inc. | ||
9 | * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License as published by | ||
13 | * the Free Software Foundation; either version 2, or (at your option) | ||
14 | * any later version. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | * GNU General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; see the file COPYING. If not, write to | ||
23 | * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. | ||
24 | * | ||
25 | * Module Name: | ||
26 | * src.c | ||
27 | * | ||
28 | * Abstract: Hardware Device Interface for PMC SRC based controllers | ||
29 | * | ||
30 | */ | ||
31 | |||
32 | #include <linux/kernel.h> | ||
33 | #include <linux/init.h> | ||
34 | #include <linux/types.h> | ||
35 | #include <linux/pci.h> | ||
36 | #include <linux/spinlock.h> | ||
37 | #include <linux/slab.h> | ||
38 | #include <linux/blkdev.h> | ||
39 | #include <linux/delay.h> | ||
40 | #include <linux/version.h> | ||
41 | #include <linux/completion.h> | ||
42 | #include <linux/time.h> | ||
43 | #include <linux/interrupt.h> | ||
44 | #include <scsi/scsi_host.h> | ||
45 | |||
46 | #include "aacraid.h" | ||
47 | |||
48 | static irqreturn_t aac_src_intr_message(int irq, void *dev_id) | ||
49 | { | ||
50 | struct aac_dev *dev = dev_id; | ||
51 | unsigned long bellbits, bellbits_shifted; | ||
52 | int our_interrupt = 0; | ||
53 | int isFastResponse; | ||
54 | u32 index, handle; | ||
55 | |||
56 | bellbits = src_readl(dev, MUnit.ODR_R); | ||
57 | if (bellbits & PmDoorBellResponseSent) { | ||
58 | bellbits = PmDoorBellResponseSent; | ||
59 | /* handle async. status */ | ||
60 | our_interrupt = 1; | ||
61 | index = dev->host_rrq_idx; | ||
62 | if (dev->host_rrq[index] == 0) { | ||
63 | u32 old_index = index; | ||
64 | /* adjust index */ | ||
65 | do { | ||
66 | index++; | ||
67 | if (index == dev->scsi_host_ptr->can_queue + | ||
68 | AAC_NUM_MGT_FIB) | ||
69 | index = 0; | ||
70 | if (dev->host_rrq[index] != 0) | ||
71 | break; | ||
72 | } while (index != old_index); | ||
73 | dev->host_rrq_idx = index; | ||
74 | } | ||
75 | for (;;) { | ||
76 | isFastResponse = 0; | ||
77 | /* remove toggle bit (31) */ | ||
78 | handle = (dev->host_rrq[index] & 0x7fffffff); | ||
79 | /* check fast response bit (30) */ | ||
80 | if (handle & 0x40000000) | ||
81 | isFastResponse = 1; | ||
82 | handle &= 0x0000ffff; | ||
83 | if (handle == 0) | ||
84 | break; | ||
85 | |||
86 | aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL); | ||
87 | |||
88 | dev->host_rrq[index++] = 0; | ||
89 | if (index == dev->scsi_host_ptr->can_queue + | ||
90 | AAC_NUM_MGT_FIB) | ||
91 | index = 0; | ||
92 | dev->host_rrq_idx = index; | ||
93 | } | ||
94 | } else { | ||
95 | bellbits_shifted = (bellbits >> SRC_ODR_SHIFT); | ||
96 | if (bellbits_shifted & DoorBellAifPending) { | ||
97 | our_interrupt = 1; | ||
98 | /* handle AIF */ | ||
99 | aac_intr_normal(dev, 0, 2, 0, NULL); | ||
100 | } | ||
101 | } | ||
102 | |||
103 | if (our_interrupt) { | ||
104 | src_writel(dev, MUnit.ODR_C, bellbits); | ||
105 | return IRQ_HANDLED; | ||
106 | } | ||
107 | return IRQ_NONE; | ||
108 | } | ||
109 | |||
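Each host RRQ entry consumed by aac_src_intr_message() above packs three things into one 32-bit word: bit 31 is a toggle bit that gets masked off, bit 30 flags a fast response, and the low 16 bits carry a 1-based handle (the driver passes handle - 1 to aac_intr_normal(), and a handle of zero means the slot is empty). A small illustrative decode, not driver API:

#include <stdint.h>
#include <stdio.h>

/* Returns 1 and fills *is_fast / *fib_index for a populated entry,
 * 0 for an empty slot (which ends the loop in the handler above). */
static int decode_rrq_entry(uint32_t entry, int *is_fast, uint32_t *fib_index)
{
	uint32_t handle = entry & 0x7fffffff;		/* drop toggle bit 31 */

	*is_fast = (handle & 0x40000000) ? 1 : 0;	/* fast-response bit 30 */
	handle &= 0x0000ffff;
	if (handle == 0)
		return 0;
	*fib_index = handle - 1;			/* what aac_intr_normal() sees */
	return 1;
}

int main(void)
{
	int fast;
	uint32_t fib;

	if (decode_rrq_entry(0xc0000007, &fast, &fib))
		printf("fast=%d fib=%u\n", fast, fib);
	return 0;
}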
110 | /** | ||
111 | * aac_src_disable_interrupt - Disable interrupts | ||
112 | * @dev: Adapter | ||
113 | */ | ||
114 | |||
115 | static void aac_src_disable_interrupt(struct aac_dev *dev) | ||
116 | { | ||
117 | src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff); | ||
118 | } | ||
119 | |||
120 | /** | ||
121 | * aac_src_enable_interrupt_message - Enable interrupts | ||
122 | * @dev: Adapter | ||
123 | */ | ||
124 | |||
125 | static void aac_src_enable_interrupt_message(struct aac_dev *dev) | ||
126 | { | ||
127 | src_writel(dev, MUnit.OIMR, dev->OIMR = 0xfffffff8); | ||
128 | } | ||
129 | |||
130 | /** | ||
131 | * src_sync_cmd - send a command and wait | ||
132 | * @dev: Adapter | ||
133 | * @command: Command to execute | ||
134 | * @p1: first parameter | ||
135 | * @ret: adapter status | ||
136 | * | ||
137 | * This routine will send a synchronous command to the adapter and wait | ||
138 | * for its completion. | ||
139 | */ | ||
140 | |||
141 | static int src_sync_cmd(struct aac_dev *dev, u32 command, | ||
142 | u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, | ||
143 | u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4) | ||
144 | { | ||
145 | unsigned long start; | ||
146 | int ok; | ||
147 | |||
148 | /* | ||
149 | * Write the command into Mailbox 0 | ||
150 | */ | ||
151 | writel(command, &dev->IndexRegs->Mailbox[0]); | ||
152 | /* | ||
153 | * Write the parameters into Mailboxes 1 - 6 | ||
154 | */ | ||
155 | writel(p1, &dev->IndexRegs->Mailbox[1]); | ||
156 | writel(p2, &dev->IndexRegs->Mailbox[2]); | ||
157 | writel(p3, &dev->IndexRegs->Mailbox[3]); | ||
158 | writel(p4, &dev->IndexRegs->Mailbox[4]); | ||
159 | |||
160 | /* | ||
161 | * Clear the synch command doorbell to start on a clean slate. | ||
162 | */ | ||
163 | src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); | ||
164 | |||
165 | /* | ||
166 | * Disable doorbell interrupts | ||
167 | */ | ||
168 | src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff); | ||
169 | |||
170 | /* | ||
171 | * Force the completion of the mask register write before issuing | ||
172 | * the interrupt. | ||
173 | */ | ||
174 | src_readl(dev, MUnit.OIMR); | ||
175 | |||
176 | /* | ||
177 | * Signal that there is a new synch command | ||
178 | */ | ||
179 | src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT); | ||
180 | |||
181 | ok = 0; | ||
182 | start = jiffies; | ||
183 | |||
184 | /* | ||
185 | * Wait up to 30 seconds | ||
186 | */ | ||
187 | while (time_before(jiffies, start+30*HZ)) { | ||
188 | /* Delay 5 microseconds to let Mon960 get info. */ | ||
189 | udelay(5); | ||
190 | |||
191 | /* Mon960 will set doorbell0 bit | ||
192 | * when it has completed the command | ||
193 | */ | ||
194 | if ((src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT) & OUTBOUNDDOORBELL_0) { | ||
195 | /* Clear the doorbell */ | ||
196 | src_writel(dev, | ||
197 | MUnit.ODR_C, | ||
198 | OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); | ||
199 | ok = 1; | ||
200 | break; | ||
201 | } | ||
202 | |||
203 | /* Yield the processor in case we are slow */ | ||
204 | msleep(1); | ||
205 | } | ||
206 | if (unlikely(ok != 1)) { | ||
207 | /* Restore interrupt mask even though we timed out */ | ||
208 | aac_adapter_enable_int(dev); | ||
209 | return -ETIMEDOUT; | ||
210 | } | ||
211 | |||
212 | /* Pull the synch status from Mailbox 0 */ | ||
213 | if (status) | ||
214 | *status = readl(&dev->IndexRegs->Mailbox[0]); | ||
215 | if (r1) | ||
216 | *r1 = readl(&dev->IndexRegs->Mailbox[1]); | ||
217 | if (r2) | ||
218 | *r2 = readl(&dev->IndexRegs->Mailbox[2]); | ||
219 | if (r3) | ||
220 | *r3 = readl(&dev->IndexRegs->Mailbox[3]); | ||
221 | if (r4) | ||
222 | *r4 = readl(&dev->IndexRegs->Mailbox[4]); | ||
223 | |||
224 | /* Clear the synch command doorbell */ | ||
225 | src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); | ||
226 | |||
227 | /* Restore interrupt mask */ | ||
228 | aac_adapter_enable_int(dev); | ||
229 | return 0; | ||
230 | |||
231 | } | ||
232 | |||
233 | /** | ||
234 | * aac_src_interrupt_adapter - interrupt adapter | ||
235 | * @dev: Adapter | ||
236 | * | ||
237 | * Send an interrupt to the i960 and breakpoint it. | ||
238 | */ | ||
239 | |||
240 | static void aac_src_interrupt_adapter(struct aac_dev *dev) | ||
241 | { | ||
242 | src_sync_cmd(dev, BREAKPOINT_REQUEST, | ||
243 | 0, 0, 0, 0, 0, 0, | ||
244 | NULL, NULL, NULL, NULL, NULL); | ||
245 | } | ||
246 | |||
247 | /** | ||
248 | * aac_src_notify_adapter - send an event to the adapter | ||
249 | * @dev: Adapter | ||
250 | * @event: Event to send | ||
251 | * | ||
252 | * Notify the i960 that something it probably cares about has | ||
253 | * happened. | ||
254 | */ | ||
255 | |||
256 | static void aac_src_notify_adapter(struct aac_dev *dev, u32 event) | ||
257 | { | ||
258 | switch (event) { | ||
259 | |||
260 | case AdapNormCmdQue: | ||
261 | src_writel(dev, MUnit.ODR_C, | ||
262 | INBOUNDDOORBELL_1 << SRC_ODR_SHIFT); | ||
263 | break; | ||
264 | case HostNormRespNotFull: | ||
265 | src_writel(dev, MUnit.ODR_C, | ||
266 | INBOUNDDOORBELL_4 << SRC_ODR_SHIFT); | ||
267 | break; | ||
268 | case AdapNormRespQue: | ||
269 | src_writel(dev, MUnit.ODR_C, | ||
270 | INBOUNDDOORBELL_2 << SRC_ODR_SHIFT); | ||
271 | break; | ||
272 | case HostNormCmdNotFull: | ||
273 | src_writel(dev, MUnit.ODR_C, | ||
274 | INBOUNDDOORBELL_3 << SRC_ODR_SHIFT); | ||
275 | break; | ||
276 | case FastIo: | ||
277 | src_writel(dev, MUnit.ODR_C, | ||
278 | INBOUNDDOORBELL_6 << SRC_ODR_SHIFT); | ||
279 | break; | ||
280 | case AdapPrintfDone: | ||
281 | src_writel(dev, MUnit.ODR_C, | ||
282 | INBOUNDDOORBELL_5 << SRC_ODR_SHIFT); | ||
283 | break; | ||
284 | default: | ||
285 | BUG(); | ||
286 | break; | ||
287 | } | ||
288 | } | ||
289 | |||
290 | /** | ||
291 | * aac_src_start_adapter - activate adapter | ||
292 | * @dev: Adapter | ||
293 | * | ||
294 | * Start up processing on an i960 based AAC adapter | ||
295 | */ | ||
296 | |||
297 | static void aac_src_start_adapter(struct aac_dev *dev) | ||
298 | { | ||
299 | struct aac_init *init; | ||
300 | |||
301 | init = dev->init; | ||
302 | init->HostElapsedSeconds = cpu_to_le32(get_seconds()); | ||
303 | |||
304 | /* We can only use a 32 bit address here */ | ||
305 | src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa, | ||
306 | 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); | ||
307 | } | ||
308 | |||
309 | /** | ||
310 | * aac_src_check_health | ||
311 | * @dev: device to check if healthy | ||
312 | * | ||
313 | * Will attempt to determine if the specified adapter is alive and | ||
314 | * capable of handling requests, returning 0 if alive. | ||
315 | */ | ||
316 | static int aac_src_check_health(struct aac_dev *dev) | ||
317 | { | ||
318 | u32 status = src_readl(dev, MUnit.OMR); | ||
319 | |||
320 | /* | ||
321 | * Check to see if the board failed any self tests. | ||
322 | */ | ||
323 | if (unlikely(status & SELF_TEST_FAILED)) | ||
324 | return -1; | ||
325 | |||
326 | /* | ||
327 | * Check to see if the board panic'd. | ||
328 | */ | ||
329 | if (unlikely(status & KERNEL_PANIC)) | ||
330 | return (status >> 16) & 0xFF; | ||
331 | /* | ||
332 | * Wait for the adapter to be up and running. | ||
333 | */ | ||
334 | if (unlikely(!(status & KERNEL_UP_AND_RUNNING))) | ||
335 | return -3; | ||
336 | /* | ||
337 | * Everything is OK | ||
338 | */ | ||
339 | return 0; | ||
340 | } | ||
341 | |||
342 | /** | ||
343 | * aac_src_deliver_message | ||
344 | * @fib: fib to issue | ||
345 | * | ||
346 | * Will send a fib, returning 0 if successful. | ||
347 | */ | ||
348 | static int aac_src_deliver_message(struct fib *fib) | ||
349 | { | ||
350 | struct aac_dev *dev = fib->dev; | ||
351 | struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; | ||
352 | unsigned long qflags; | ||
353 | u32 fibsize; | ||
354 | u64 address; | ||
355 | struct aac_fib_xporthdr *pFibX; | ||
356 | |||
357 | spin_lock_irqsave(q->lock, qflags); | ||
358 | q->numpending++; | ||
359 | spin_unlock_irqrestore(q->lock, qflags); | ||
360 | |||
361 | /* Calculate the fibsize bits */ | ||
362 | fibsize = (sizeof(struct aac_fib_xporthdr) + | ||
363 | fib->hw_fib_va->header.Size + 127) / 128 - 1; | ||
364 | if (fibsize > (ALIGN32 - 1)) | ||
365 | fibsize = ALIGN32 - 1; | ||
366 | |||
367 | /* Fill XPORT header */ | ||
368 | pFibX = (struct aac_fib_xporthdr *) | ||
369 | ((unsigned char *)fib->hw_fib_va - | ||
370 | sizeof(struct aac_fib_xporthdr)); | ||
371 | pFibX->Handle = fib->hw_fib_va->header.SenderData + 1; | ||
372 | pFibX->HostAddress = fib->hw_fib_pa; | ||
373 | pFibX->Size = fib->hw_fib_va->header.Size; | ||
374 | address = fib->hw_fib_pa - (u64)sizeof(struct aac_fib_xporthdr); | ||
375 | |||
376 | src_writel(dev, MUnit.IQ_H, (u32)(address >> 32)); | ||
377 | src_writel(dev, MUnit.IQ_L, (u32)(address & 0xffffffff) + fibsize); | ||
378 | return 0; | ||
379 | } | ||
380 | |||
381 | /** | ||
382 | * aac_src_ioremap | ||
383 | * @size: mapping resize request | ||
384 | * | ||
385 | */ | ||
386 | static int aac_src_ioremap(struct aac_dev *dev, u32 size) | ||
387 | { | ||
388 | if (!size) { | ||
389 | iounmap(dev->regs.src.bar0); | ||
390 | dev->regs.src.bar0 = NULL; | ||
391 | iounmap(dev->base); | ||
392 | dev->base = NULL; | ||
393 | return 0; | ||
394 | } | ||
395 | dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2), | ||
396 | AAC_MIN_SRC_BAR1_SIZE); | ||
397 | dev->base = NULL; | ||
398 | if (dev->regs.src.bar1 == NULL) | ||
399 | return -1; | ||
400 | dev->base = dev->regs.src.bar0 = ioremap(dev->scsi_host_ptr->base, | ||
401 | size); | ||
402 | if (dev->base == NULL) { | ||
403 | iounmap(dev->regs.src.bar1); | ||
404 | dev->regs.src.bar1 = NULL; | ||
405 | return -1; | ||
406 | } | ||
407 | dev->IndexRegs = &((struct src_registers __iomem *) | ||
408 | dev->base)->IndexRegs; | ||
409 | return 0; | ||
410 | } | ||
411 | |||
412 | static int aac_src_restart_adapter(struct aac_dev *dev, int bled) | ||
413 | { | ||
414 | u32 var, reset_mask; | ||
415 | |||
416 | if (bled >= 0) { | ||
417 | if (bled) | ||
418 | printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n", | ||
419 | dev->name, dev->id, bled); | ||
420 | bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, | ||
421 | 0, 0, 0, 0, 0, 0, &var, &reset_mask, NULL, NULL, NULL); | ||
422 | if (bled || (var != 0x00000001)) | ||
423 | bled = -EINVAL; | ||
424 | if (dev->supplement_adapter_info.SupportedOptions2 & | ||
425 | AAC_OPTION_DOORBELL_RESET) { | ||
426 | src_writel(dev, MUnit.IDR, reset_mask); | ||
427 | msleep(5000); /* Delay 5 seconds */ | ||
428 | } | ||
429 | } | ||
430 | |||
431 | if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC) | ||
432 | return -ENODEV; | ||
433 | |||
434 | if (startup_timeout < 300) | ||
435 | startup_timeout = 300; | ||
436 | |||
437 | return 0; | ||
438 | } | ||
439 | |||
440 | /** | ||
441 | * aac_src_select_comm - Select communications method | ||
442 | * @dev: Adapter | ||
443 | * @comm: communications method | ||
444 | */ | ||
445 | int aac_src_select_comm(struct aac_dev *dev, int comm) | ||
446 | { | ||
447 | switch (comm) { | ||
448 | case AAC_COMM_MESSAGE: | ||
449 | dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message; | ||
450 | dev->a_ops.adapter_intr = aac_src_intr_message; | ||
451 | dev->a_ops.adapter_deliver = aac_src_deliver_message; | ||
452 | break; | ||
453 | default: | ||
454 | return 1; | ||
455 | } | ||
456 | return 0; | ||
457 | } | ||
458 | |||
459 | /** | ||
460 | * aac_src_init - initialize a Cardinal Frey Bar card | ||
461 | * @dev: device to configure | ||
462 | * | ||
463 | */ | ||
464 | |||
465 | int aac_src_init(struct aac_dev *dev) | ||
466 | { | ||
467 | unsigned long start; | ||
468 | unsigned long status; | ||
469 | int restart = 0; | ||
470 | int instance = dev->id; | ||
471 | const char *name = dev->name; | ||
472 | |||
473 | dev->a_ops.adapter_ioremap = aac_src_ioremap; | ||
474 | dev->a_ops.adapter_comm = aac_src_select_comm; | ||
475 | |||
476 | dev->base_size = AAC_MIN_SRC_BAR0_SIZE; | ||
477 | if (aac_adapter_ioremap(dev, dev->base_size)) { | ||
478 | printk(KERN_WARNING "%s: unable to map adapter.\n", name); | ||
479 | goto error_iounmap; | ||
480 | } | ||
481 | |||
482 | /* Failure to reset here is an option ... */ | ||
483 | dev->a_ops.adapter_sync_cmd = src_sync_cmd; | ||
484 | dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; | ||
485 | if ((aac_reset_devices || reset_devices) && | ||
486 | !aac_src_restart_adapter(dev, 0)) | ||
487 | ++restart; | ||
488 | /* | ||
489 | * Check to see if the board panic'd while booting. | ||
490 | */ | ||
491 | status = src_readl(dev, MUnit.OMR); | ||
492 | if (status & KERNEL_PANIC) { | ||
493 | if (aac_src_restart_adapter(dev, aac_src_check_health(dev))) | ||
494 | goto error_iounmap; | ||
495 | ++restart; | ||
496 | } | ||
497 | /* | ||
498 | * Check to see if the board failed any self tests. | ||
499 | */ | ||
500 | status = src_readl(dev, MUnit.OMR); | ||
501 | if (status & SELF_TEST_FAILED) { | ||
502 | printk(KERN_ERR "%s%d: adapter self-test failed.\n", | ||
503 | dev->name, instance); | ||
504 | goto error_iounmap; | ||
505 | } | ||
506 | /* | ||
507 | * Check to see if the monitor panic'd while booting. | ||
508 | */ | ||
509 | if (status & MONITOR_PANIC) { | ||
510 | printk(KERN_ERR "%s%d: adapter monitor panic.\n", | ||
511 | dev->name, instance); | ||
512 | goto error_iounmap; | ||
513 | } | ||
514 | start = jiffies; | ||
515 | /* | ||
516 | * Wait for the adapter to be up and running. Wait up to 3 minutes | ||
517 | */ | ||
518 | while (!((status = src_readl(dev, MUnit.OMR)) & | ||
519 | KERNEL_UP_AND_RUNNING)) { | ||
520 | if ((restart && | ||
521 | (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) || | ||
522 | time_after(jiffies, start+HZ*startup_timeout)) { | ||
523 | printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n", | ||
524 | dev->name, instance, status); | ||
525 | goto error_iounmap; | ||
526 | } | ||
527 | if (!restart && | ||
528 | ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) || | ||
529 | time_after(jiffies, start + HZ * | ||
530 | ((startup_timeout > 60) | ||
531 | ? (startup_timeout - 60) | ||
532 | : (startup_timeout / 2))))) { | ||
533 | if (likely(!aac_src_restart_adapter(dev, | ||
534 | aac_src_check_health(dev)))) | ||
535 | start = jiffies; | ||
536 | ++restart; | ||
537 | } | ||
538 | msleep(1); | ||
539 | } | ||
540 | if (restart && aac_commit) | ||
541 | aac_commit = 1; | ||
542 | /* | ||
543 | * Fill in the common function dispatch table. | ||
544 | */ | ||
545 | dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter; | ||
546 | dev->a_ops.adapter_disable_int = aac_src_disable_interrupt; | ||
547 | dev->a_ops.adapter_notify = aac_src_notify_adapter; | ||
548 | dev->a_ops.adapter_sync_cmd = src_sync_cmd; | ||
549 | dev->a_ops.adapter_check_health = aac_src_check_health; | ||
550 | dev->a_ops.adapter_restart = aac_src_restart_adapter; | ||
551 | |||
552 | /* | ||
553 | * First clear out all interrupts. Then enable the one's that we | ||
554 | * can handle. | ||
555 | */ | ||
556 | aac_adapter_comm(dev, AAC_COMM_MESSAGE); | ||
557 | aac_adapter_disable_int(dev); | ||
558 | src_writel(dev, MUnit.ODR_C, 0xffffffff); | ||
559 | aac_adapter_enable_int(dev); | ||
560 | |||
561 | if (aac_init_adapter(dev) == NULL) | ||
562 | goto error_iounmap; | ||
563 | if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1) | ||
564 | goto error_iounmap; | ||
565 | |||
566 | dev->msi = aac_msi && !pci_enable_msi(dev->pdev); | ||
567 | |||
568 | if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, | ||
569 | IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) { | ||
570 | |||
571 | if (dev->msi) | ||
572 | pci_disable_msi(dev->pdev); | ||
573 | |||
574 | printk(KERN_ERR "%s%d: Interrupt unavailable.\n", | ||
575 | name, instance); | ||
576 | goto error_iounmap; | ||
577 | } | ||
578 | dev->dbg_base = pci_resource_start(dev->pdev, 2); | ||
579 | dev->dbg_base_mapped = dev->regs.src.bar1; | ||
580 | dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE; | ||
581 | |||
582 | aac_adapter_enable_int(dev); | ||
583 | /* | ||
584 | * Tell the adapter that all is configured, and it can | ||
585 | * start accepting requests | ||
586 | */ | ||
587 | aac_src_start_adapter(dev); | ||
588 | |||
589 | return 0; | ||
590 | |||
591 | error_iounmap: | ||
592 | |||
593 | return -1; | ||
594 | } | ||
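The least obvious part of the new src.c above is aac_src_deliver_message(): the transport header sits immediately in front of the hardware FIB, and the doorbell write folds the total size, in 128-byte units minus one, into the low bits of the low address word (IQ_L) while the upper 32 bits go to IQ_H. A stand-alone sketch of that packing follows; the cap of 31 assumes the driver's ALIGN32 is 32, and the sizes, address and names in the example are assumptions of mine.

#include <stdint.h>
#include <stdio.h>

static void pack_inbound_queue_words(uint64_t xport_hdr_addr,
				     uint32_t hdr_plus_fib_bytes,
				     uint32_t *iq_h, uint32_t *iq_l)
{
	uint32_t fibsize = (hdr_plus_fib_bytes + 127) / 128 - 1;

	if (fibsize > 31)	/* assumed value of ALIGN32 - 1 */
		fibsize = 31;

	*iq_h = (uint32_t)(xport_hdr_addr >> 32);
	*iq_l = (uint32_t)(xport_hdr_addr & 0xffffffff) + fibsize;
}

int main(void)
{
	uint32_t hi, lo;

	/* assumed: a 128-byte-aligned address and 512 bytes of header + FIB */
	pack_inbound_queue_words(0x1f0001000ULL, 512, &hi, &lo);
	printf("IQ_H=0x%08x IQ_L=0x%08x\n", hi, lo);
	return 0;
}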
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h index df2fc09ba47..b6d350ac428 100644 --- a/drivers/scsi/bnx2fc/bnx2fc.h +++ b/drivers/scsi/bnx2fc/bnx2fc.h | |||
@@ -62,7 +62,7 @@ | |||
62 | #include "bnx2fc_constants.h" | 62 | #include "bnx2fc_constants.h" |
63 | 63 | ||
64 | #define BNX2FC_NAME "bnx2fc" | 64 | #define BNX2FC_NAME "bnx2fc" |
65 | #define BNX2FC_VERSION "1.0.0" | 65 | #define BNX2FC_VERSION "1.0.1" |
66 | 66 | ||
67 | #define PFX "bnx2fc: " | 67 | #define PFX "bnx2fc: " |
68 | 68 | ||
@@ -84,9 +84,15 @@ | |||
84 | #define BNX2FC_NUM_MAX_SESS 128 | 84 | #define BNX2FC_NUM_MAX_SESS 128 |
85 | #define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS)) | 85 | #define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS)) |
86 | 86 | ||
87 | #define BNX2FC_MAX_OUTSTANDING_CMNDS 4096 | 87 | #define BNX2FC_MAX_OUTSTANDING_CMNDS 2048 |
88 | #define BNX2FC_CAN_QUEUE BNX2FC_MAX_OUTSTANDING_CMNDS | ||
89 | #define BNX2FC_ELSTM_XIDS BNX2FC_CAN_QUEUE | ||
88 | #define BNX2FC_MIN_PAYLOAD 256 | 90 | #define BNX2FC_MIN_PAYLOAD 256 |
89 | #define BNX2FC_MAX_PAYLOAD 2048 | 91 | #define BNX2FC_MAX_PAYLOAD 2048 |
92 | #define BNX2FC_MFS \ | ||
93 | (BNX2FC_MAX_PAYLOAD + sizeof(struct fc_frame_header)) | ||
94 | #define BNX2FC_MINI_JUMBO_MTU 2500 | ||
95 | |||
90 | 96 | ||
91 | #define BNX2FC_RQ_BUF_SZ 256 | 97 | #define BNX2FC_RQ_BUF_SZ 256 |
92 | #define BNX2FC_RQ_BUF_LOG_SZ (ilog2(BNX2FC_RQ_BUF_SZ)) | 98 | #define BNX2FC_RQ_BUF_LOG_SZ (ilog2(BNX2FC_RQ_BUF_SZ)) |
@@ -98,7 +104,8 @@ | |||
98 | #define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe)) | 104 | #define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe)) |
99 | #define BNX2FC_5771X_DB_PAGE_SIZE 128 | 105 | #define BNX2FC_5771X_DB_PAGE_SIZE 128 |
100 | 106 | ||
101 | #define BNX2FC_MAX_TASKS BNX2FC_MAX_OUTSTANDING_CMNDS | 107 | #define BNX2FC_MAX_TASKS \ |
108 | (BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS) | ||
102 | #define BNX2FC_TASK_SIZE 128 | 109 | #define BNX2FC_TASK_SIZE 128 |
103 | #define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE) | 110 | #define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE) |
104 | #define BNX2FC_TASK_CTX_ARR_SZ (BNX2FC_MAX_TASKS/BNX2FC_TASKS_PER_PAGE) | 111 | #define BNX2FC_TASK_CTX_ARR_SZ (BNX2FC_MAX_TASKS/BNX2FC_TASKS_PER_PAGE) |
@@ -112,10 +119,10 @@ | |||
112 | #define BNX2FC_WRITE (1 << 0) | 119 | #define BNX2FC_WRITE (1 << 0) |
113 | 120 | ||
114 | #define BNX2FC_MIN_XID 0 | 121 | #define BNX2FC_MIN_XID 0 |
115 | #define BNX2FC_MAX_XID (BNX2FC_MAX_OUTSTANDING_CMNDS - 1) | 122 | #define BNX2FC_MAX_XID \ |
116 | #define FCOE_MIN_XID (BNX2FC_MAX_OUTSTANDING_CMNDS) | 123 | (BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS - 1) |
117 | #define FCOE_MAX_XID \ | 124 | #define FCOE_MIN_XID (BNX2FC_MAX_XID + 1) |
118 | (BNX2FC_MAX_OUTSTANDING_CMNDS + (nr_cpu_ids * 256)) | 125 | #define FCOE_MAX_XID (FCOE_MIN_XID + 4095) |
119 | #define BNX2FC_MAX_LUN 0xFFFF | 126 | #define BNX2FC_MAX_LUN 0xFFFF |
120 | #define BNX2FC_MAX_FCP_TGT 256 | 127 | #define BNX2FC_MAX_FCP_TGT 256 |
121 | #define BNX2FC_MAX_CMD_LEN 16 | 128 | #define BNX2FC_MAX_CMD_LEN 16 |
@@ -125,7 +132,6 @@ | |||
125 | 132 | ||
126 | #define BNX2FC_WAIT_CNT 120 | 133 | #define BNX2FC_WAIT_CNT 120 |
127 | #define BNX2FC_FW_TIMEOUT (3 * HZ) | 134 | #define BNX2FC_FW_TIMEOUT (3 * HZ) |
128 | |||
129 | #define PORT_MAX 2 | 135 | #define PORT_MAX 2 |
130 | 136 | ||
131 | #define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status) | 137 | #define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status) |
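The net effect of the bnx2fc.h changes above is a re-partitioning of the exchange ID space: 2048 XIDs for SCSI commands, another 2048 (BNX2FC_ELSTM_XIDS, via BNX2FC_CAN_QUEUE) reserved for ELS/TM requests, and the libfc-managed FCoE range starting right after with a fixed 4096 entries instead of one scaled by nr_cpu_ids. Re-deriving the boundaries from the definitions in the hunk:

#include <stdio.h>

#define BNX2FC_MAX_OUTSTANDING_CMNDS	2048
#define BNX2FC_CAN_QUEUE		BNX2FC_MAX_OUTSTANDING_CMNDS
#define BNX2FC_ELSTM_XIDS		BNX2FC_CAN_QUEUE
#define BNX2FC_MIN_XID			0
#define BNX2FC_MAX_XID \
	(BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS - 1)
#define FCOE_MIN_XID			(BNX2FC_MAX_XID + 1)
#define FCOE_MAX_XID			(FCOE_MIN_XID + 4095)

int main(void)
{
	/* prints 0..4095 for bnx2fc and 4096..8191 for the libfc/fcoe range */
	printf("bnx2fc XIDs %d..%d, fcoe XIDs %d..%d\n",
	       BNX2FC_MIN_XID, BNX2FC_MAX_XID, FCOE_MIN_XID, FCOE_MAX_XID);
	return 0;
}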
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index e476e875307..e2e647509a7 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c | |||
@@ -21,7 +21,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); | |||
21 | 21 | ||
22 | #define DRV_MODULE_NAME "bnx2fc" | 22 | #define DRV_MODULE_NAME "bnx2fc" |
23 | #define DRV_MODULE_VERSION BNX2FC_VERSION | 23 | #define DRV_MODULE_VERSION BNX2FC_VERSION |
24 | #define DRV_MODULE_RELDATE "Jan 25, 2011" | 24 | #define DRV_MODULE_RELDATE "Mar 17, 2011" |
25 | 25 | ||
26 | 26 | ||
27 | static char version[] __devinitdata = | 27 | static char version[] __devinitdata = |
@@ -437,17 +437,16 @@ static int bnx2fc_l2_rcv_thread(void *arg) | |||
437 | set_current_state(TASK_INTERRUPTIBLE); | 437 | set_current_state(TASK_INTERRUPTIBLE); |
438 | while (!kthread_should_stop()) { | 438 | while (!kthread_should_stop()) { |
439 | schedule(); | 439 | schedule(); |
440 | set_current_state(TASK_RUNNING); | ||
441 | spin_lock_bh(&bg->fcoe_rx_list.lock); | 440 | spin_lock_bh(&bg->fcoe_rx_list.lock); |
442 | while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) { | 441 | while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) { |
443 | spin_unlock_bh(&bg->fcoe_rx_list.lock); | 442 | spin_unlock_bh(&bg->fcoe_rx_list.lock); |
444 | bnx2fc_recv_frame(skb); | 443 | bnx2fc_recv_frame(skb); |
445 | spin_lock_bh(&bg->fcoe_rx_list.lock); | 444 | spin_lock_bh(&bg->fcoe_rx_list.lock); |
446 | } | 445 | } |
446 | __set_current_state(TASK_INTERRUPTIBLE); | ||
447 | spin_unlock_bh(&bg->fcoe_rx_list.lock); | 447 | spin_unlock_bh(&bg->fcoe_rx_list.lock); |
448 | set_current_state(TASK_INTERRUPTIBLE); | ||
449 | } | 448 | } |
450 | set_current_state(TASK_RUNNING); | 449 | __set_current_state(TASK_RUNNING); |
451 | return 0; | 450 | return 0; |
452 | } | 451 | } |
453 | 452 | ||
@@ -569,7 +568,6 @@ int bnx2fc_percpu_io_thread(void *arg) | |||
569 | set_current_state(TASK_INTERRUPTIBLE); | 568 | set_current_state(TASK_INTERRUPTIBLE); |
570 | while (!kthread_should_stop()) { | 569 | while (!kthread_should_stop()) { |
571 | schedule(); | 570 | schedule(); |
572 | set_current_state(TASK_RUNNING); | ||
573 | spin_lock_bh(&p->fp_work_lock); | 571 | spin_lock_bh(&p->fp_work_lock); |
574 | while (!list_empty(&p->work_list)) { | 572 | while (!list_empty(&p->work_list)) { |
575 | list_splice_init(&p->work_list, &work_list); | 573 | list_splice_init(&p->work_list, &work_list); |
@@ -583,10 +581,10 @@ int bnx2fc_percpu_io_thread(void *arg) | |||
583 | 581 | ||
584 | spin_lock_bh(&p->fp_work_lock); | 582 | spin_lock_bh(&p->fp_work_lock); |
585 | } | 583 | } |
584 | __set_current_state(TASK_INTERRUPTIBLE); | ||
586 | spin_unlock_bh(&p->fp_work_lock); | 585 | spin_unlock_bh(&p->fp_work_lock); |
587 | set_current_state(TASK_INTERRUPTIBLE); | ||
588 | } | 586 | } |
589 | set_current_state(TASK_RUNNING); | 587 | __set_current_state(TASK_RUNNING); |
590 | 588 | ||
591 | return 0; | 589 | return 0; |
592 | } | 590 | } |
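Both kthread hunks above fix the same race (the "IO completion not processed due to missed wakeup" change in the merge summary): the old code only went back to TASK_INTERRUPTIBLE after dropping the queue lock, so a wakeup issued after the final queue check but before the state change could be lost and the work would sit until the next event. The fix re-arms the sleep state while the lock the producer takes is still held. A condensed sketch of the pattern, with an illustrative skb queue standing in for the driver's own lists:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

static int rx_thread(void *arg)
{
	struct sk_buff_head *q = arg;
	struct sk_buff *skb;

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		spin_lock_bh(&q->lock);
		while ((skb = __skb_dequeue(q)) != NULL) {
			spin_unlock_bh(&q->lock);
			kfree_skb(skb);		/* stand-in for real processing */
			spin_lock_bh(&q->lock);
		}
		/* re-arm the sleep state while the producer is still excluded */
		__set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_bh(&q->lock);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}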
@@ -661,31 +659,6 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev) | |||
661 | return 0; | 659 | return 0; |
662 | } | 660 | } |
663 | 661 | ||
664 | static int bnx2fc_mfs_update(struct fc_lport *lport) | ||
665 | { | ||
666 | struct fcoe_port *port = lport_priv(lport); | ||
667 | struct bnx2fc_hba *hba = port->priv; | ||
668 | struct net_device *netdev = hba->netdev; | ||
669 | u32 mfs; | ||
670 | u32 max_mfs; | ||
671 | |||
672 | mfs = netdev->mtu - (sizeof(struct fcoe_hdr) + | ||
673 | sizeof(struct fcoe_crc_eof)); | ||
674 | max_mfs = BNX2FC_MAX_PAYLOAD + sizeof(struct fc_frame_header); | ||
675 | BNX2FC_HBA_DBG(lport, "mfs = %d, max_mfs = %d\n", mfs, max_mfs); | ||
676 | if (mfs > max_mfs) | ||
677 | mfs = max_mfs; | ||
678 | |||
679 | /* Adjust mfs to be a multiple of 256 bytes */ | ||
680 | mfs = (((mfs - sizeof(struct fc_frame_header)) / BNX2FC_MIN_PAYLOAD) * | ||
681 | BNX2FC_MIN_PAYLOAD); | ||
682 | mfs = mfs + sizeof(struct fc_frame_header); | ||
683 | |||
684 | BNX2FC_HBA_DBG(lport, "Set MFS = %d\n", mfs); | ||
685 | if (fc_set_mfs(lport, mfs)) | ||
686 | return -EINVAL; | ||
687 | return 0; | ||
688 | } | ||
689 | static void bnx2fc_link_speed_update(struct fc_lport *lport) | 662 | static void bnx2fc_link_speed_update(struct fc_lport *lport) |
690 | { | 663 | { |
691 | struct fcoe_port *port = lport_priv(lport); | 664 | struct fcoe_port *port = lport_priv(lport); |
@@ -754,7 +727,7 @@ static int bnx2fc_net_config(struct fc_lport *lport) | |||
754 | !hba->phys_dev->ethtool_ops->get_pauseparam) | 727 | !hba->phys_dev->ethtool_ops->get_pauseparam) |
755 | return -EOPNOTSUPP; | 728 | return -EOPNOTSUPP; |
756 | 729 | ||
757 | if (bnx2fc_mfs_update(lport)) | 730 | if (fc_set_mfs(lport, BNX2FC_MFS)) |
758 | return -EINVAL; | 731 | return -EINVAL; |
759 | 732 | ||
760 | skb_queue_head_init(&port->fcoe_pending_queue); | 733 | skb_queue_head_init(&port->fcoe_pending_queue); |
@@ -825,14 +798,6 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event) | |||
825 | if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) | 798 | if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) |
826 | printk(KERN_ERR "indicate_netevent: "\ | 799 | printk(KERN_ERR "indicate_netevent: "\ |
827 | "adapter is not UP!!\n"); | 800 | "adapter is not UP!!\n"); |
828 | /* fall thru to update mfs if MTU has changed */ | ||
829 | case NETDEV_CHANGEMTU: | ||
830 | BNX2FC_HBA_DBG(lport, "NETDEV_CHANGEMTU event\n"); | ||
831 | bnx2fc_mfs_update(lport); | ||
832 | mutex_lock(&lport->lp_mutex); | ||
833 | list_for_each_entry(vport, &lport->vports, list) | ||
834 | bnx2fc_mfs_update(vport); | ||
835 | mutex_unlock(&lport->lp_mutex); | ||
836 | break; | 801 | break; |
837 | 802 | ||
838 | case NETDEV_DOWN: | 803 | case NETDEV_DOWN: |
@@ -1095,13 +1060,6 @@ static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba) | |||
1095 | struct netdev_hw_addr *ha; | 1060 | struct netdev_hw_addr *ha; |
1096 | int sel_san_mac = 0; | 1061 | int sel_san_mac = 0; |
1097 | 1062 | ||
1098 | /* Do not support for bonding device */ | ||
1099 | if ((netdev->priv_flags & IFF_MASTER_ALB) || | ||
1100 | (netdev->priv_flags & IFF_SLAVE_INACTIVE) || | ||
1101 | (netdev->priv_flags & IFF_MASTER_8023AD)) { | ||
1102 | return -EOPNOTSUPP; | ||
1103 | } | ||
1104 | |||
1105 | /* setup Source MAC Address */ | 1063 | /* setup Source MAC Address */ |
1106 | rcu_read_lock(); | 1064 | rcu_read_lock(); |
1107 | for_each_dev_addr(physdev, ha) { | 1065 | for_each_dev_addr(physdev, ha) { |
@@ -1432,16 +1390,9 @@ static int bnx2fc_destroy(struct net_device *netdev) | |||
1432 | struct net_device *phys_dev; | 1390 | struct net_device *phys_dev; |
1433 | int rc = 0; | 1391 | int rc = 0; |
1434 | 1392 | ||
1435 | if (!rtnl_trylock()) | 1393 | rtnl_lock(); |
1436 | return restart_syscall(); | ||
1437 | 1394 | ||
1438 | mutex_lock(&bnx2fc_dev_lock); | 1395 | mutex_lock(&bnx2fc_dev_lock); |
1439 | #ifdef CONFIG_SCSI_BNX2X_FCOE_MODULE | ||
1440 | if (THIS_MODULE->state != MODULE_STATE_LIVE) { | ||
1441 | rc = -ENODEV; | ||
1442 | goto netdev_err; | ||
1443 | } | ||
1444 | #endif | ||
1445 | /* obtain physical netdev */ | 1396 | /* obtain physical netdev */ |
1446 | if (netdev->priv_flags & IFF_802_1Q_VLAN) | 1397 | if (netdev->priv_flags & IFF_802_1Q_VLAN) |
1447 | phys_dev = vlan_dev_real_dev(netdev); | 1398 | phys_dev = vlan_dev_real_dev(netdev); |
@@ -1805,18 +1756,10 @@ static int bnx2fc_disable(struct net_device *netdev) | |||
1805 | struct ethtool_drvinfo drvinfo; | 1756 | struct ethtool_drvinfo drvinfo; |
1806 | int rc = 0; | 1757 | int rc = 0; |
1807 | 1758 | ||
1808 | if (!rtnl_trylock()) { | 1759 | rtnl_lock(); |
1809 | printk(KERN_ERR PFX "retrying for rtnl_lock\n"); | ||
1810 | return -EIO; | ||
1811 | } | ||
1812 | 1760 | ||
1813 | mutex_lock(&bnx2fc_dev_lock); | 1761 | mutex_lock(&bnx2fc_dev_lock); |
1814 | 1762 | ||
1815 | if (THIS_MODULE->state != MODULE_STATE_LIVE) { | ||
1816 | rc = -ENODEV; | ||
1817 | goto nodev; | ||
1818 | } | ||
1819 | |||
1820 | /* obtain physical netdev */ | 1763 | /* obtain physical netdev */ |
1821 | if (netdev->priv_flags & IFF_802_1Q_VLAN) | 1764 | if (netdev->priv_flags & IFF_802_1Q_VLAN) |
1822 | phys_dev = vlan_dev_real_dev(netdev); | 1765 | phys_dev = vlan_dev_real_dev(netdev); |
@@ -1867,19 +1810,11 @@ static int bnx2fc_enable(struct net_device *netdev) | |||
1867 | struct ethtool_drvinfo drvinfo; | 1810 | struct ethtool_drvinfo drvinfo; |
1868 | int rc = 0; | 1811 | int rc = 0; |
1869 | 1812 | ||
1870 | if (!rtnl_trylock()) { | 1813 | rtnl_lock(); |
1871 | printk(KERN_ERR PFX "retrying for rtnl_lock\n"); | ||
1872 | return -EIO; | ||
1873 | } | ||
1874 | 1814 | ||
1875 | BNX2FC_MISC_DBG("Entered %s\n", __func__); | 1815 | BNX2FC_MISC_DBG("Entered %s\n", __func__); |
1876 | mutex_lock(&bnx2fc_dev_lock); | 1816 | mutex_lock(&bnx2fc_dev_lock); |
1877 | 1817 | ||
1878 | if (THIS_MODULE->state != MODULE_STATE_LIVE) { | ||
1879 | rc = -ENODEV; | ||
1880 | goto nodev; | ||
1881 | } | ||
1882 | |||
1883 | /* obtain physical netdev */ | 1818 | /* obtain physical netdev */ |
1884 | if (netdev->priv_flags & IFF_802_1Q_VLAN) | 1819 | if (netdev->priv_flags & IFF_802_1Q_VLAN) |
1885 | phys_dev = vlan_dev_real_dev(netdev); | 1820 | phys_dev = vlan_dev_real_dev(netdev); |
@@ -1942,18 +1877,9 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode) | |||
1942 | return -EIO; | 1877 | return -EIO; |
1943 | } | 1878 | } |
1944 | 1879 | ||
1945 | if (!rtnl_trylock()) { | 1880 | rtnl_lock(); |
1946 | printk(KERN_ERR "trying for rtnl_lock\n"); | ||
1947 | return -EIO; | ||
1948 | } | ||
1949 | mutex_lock(&bnx2fc_dev_lock); | ||
1950 | 1881 | ||
1951 | #ifdef CONFIG_SCSI_BNX2X_FCOE_MODULE | 1882 | mutex_lock(&bnx2fc_dev_lock); |
1952 | if (THIS_MODULE->state != MODULE_STATE_LIVE) { | ||
1953 | rc = -ENODEV; | ||
1954 | goto mod_err; | ||
1955 | } | ||
1956 | #endif | ||
1957 | 1883 | ||
1958 | if (!try_module_get(THIS_MODULE)) { | 1884 | if (!try_module_get(THIS_MODULE)) { |
1959 | rc = -EINVAL; | 1885 | rc = -EINVAL; |
@@ -2506,7 +2432,7 @@ static struct scsi_host_template bnx2fc_shost_template = { | |||
2506 | .change_queue_type = fc_change_queue_type, | 2432 | .change_queue_type = fc_change_queue_type, |
2507 | .this_id = -1, | 2433 | .this_id = -1, |
2508 | .cmd_per_lun = 3, | 2434 | .cmd_per_lun = 3, |
2509 | .can_queue = (BNX2FC_MAX_OUTSTANDING_CMNDS/2), | 2435 | .can_queue = BNX2FC_CAN_QUEUE, |
2510 | .use_clustering = ENABLE_CLUSTERING, | 2436 | .use_clustering = ENABLE_CLUSTERING, |
2511 | .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD, | 2437 | .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD, |
2512 | .max_sectors = 512, | 2438 | .max_sectors = 512, |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index 4f409683674..1b680e288c5 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c | |||
@@ -87,7 +87,7 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba) | |||
87 | fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma; | 87 | fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma; |
88 | fcoe_init1.task_list_pbl_addr_hi = | 88 | fcoe_init1.task_list_pbl_addr_hi = |
89 | (u32) ((u64) hba->task_ctx_bd_dma >> 32); | 89 | (u32) ((u64) hba->task_ctx_bd_dma >> 32); |
90 | fcoe_init1.mtu = hba->netdev->mtu; | 90 | fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU; |
91 | 91 | ||
92 | fcoe_init1.flags = (PAGE_SHIFT << | 92 | fcoe_init1.flags = (PAGE_SHIFT << |
93 | FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT); | 93 | FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT); |
@@ -590,7 +590,10 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) | |||
590 | 590 | ||
591 | num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ; | 591 | num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ; |
592 | 592 | ||
593 | spin_lock_bh(&tgt->tgt_lock); | ||
593 | rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq); | 594 | rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq); |
595 | spin_unlock_bh(&tgt->tgt_lock); | ||
596 | |||
594 | if (rq_data) { | 597 | if (rq_data) { |
595 | buf = rq_data; | 598 | buf = rq_data; |
596 | } else { | 599 | } else { |
@@ -603,8 +606,10 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) | |||
603 | } | 606 | } |
604 | 607 | ||
605 | for (i = 0; i < num_rq; i++) { | 608 | for (i = 0; i < num_rq; i++) { |
609 | spin_lock_bh(&tgt->tgt_lock); | ||
606 | rq_data = (unsigned char *) | 610 | rq_data = (unsigned char *) |
607 | bnx2fc_get_next_rqe(tgt, 1); | 611 | bnx2fc_get_next_rqe(tgt, 1); |
612 | spin_unlock_bh(&tgt->tgt_lock); | ||
608 | len = BNX2FC_RQ_BUF_SZ; | 613 | len = BNX2FC_RQ_BUF_SZ; |
609 | memcpy(buf1, rq_data, len); | 614 | memcpy(buf1, rq_data, len); |
610 | buf1 += len; | 615 | buf1 += len; |
@@ -615,13 +620,15 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) | |||
615 | 620 | ||
616 | if (buf != rq_data) | 621 | if (buf != rq_data) |
617 | kfree(buf); | 622 | kfree(buf); |
623 | spin_lock_bh(&tgt->tgt_lock); | ||
618 | bnx2fc_return_rqe(tgt, num_rq); | 624 | bnx2fc_return_rqe(tgt, num_rq); |
625 | spin_unlock_bh(&tgt->tgt_lock); | ||
619 | break; | 626 | break; |
620 | 627 | ||
621 | case FCOE_ERROR_DETECTION_CQE_TYPE: | 628 | case FCOE_ERROR_DETECTION_CQE_TYPE: |
622 | /* | 629 | /* |
623 | *In case of error reporting CQE a single RQ entry | 630 | * In case of error reporting CQE a single RQ entry |
624 | * is consumes. | 631 | * is consumed. |
625 | */ | 632 | */ |
626 | spin_lock_bh(&tgt->tgt_lock); | 633 | spin_lock_bh(&tgt->tgt_lock); |
627 | num_rq = 1; | 634 | num_rq = 1; |
@@ -705,6 +712,7 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) | |||
705 | *In case of warning reporting CQE a single RQ entry | 712 | *In case of warning reporting CQE a single RQ entry |
706 | * is consumes. | 713 | * is consumes. |
707 | */ | 714 | */ |
715 | spin_lock_bh(&tgt->tgt_lock); | ||
708 | num_rq = 1; | 716 | num_rq = 1; |
709 | err_entry = (struct fcoe_err_report_entry *) | 717 | err_entry = (struct fcoe_err_report_entry *) |
710 | bnx2fc_get_next_rqe(tgt, 1); | 718 | bnx2fc_get_next_rqe(tgt, 1); |
@@ -717,6 +725,7 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) | |||
717 | err_entry->tx_buf_off, err_entry->rx_buf_off); | 725 | err_entry->tx_buf_off, err_entry->rx_buf_off); |
718 | 726 | ||
719 | bnx2fc_return_rqe(tgt, 1); | 727 | bnx2fc_return_rqe(tgt, 1); |
728 | spin_unlock_bh(&tgt->tgt_lock); | ||
720 | break; | 729 | break; |
721 | 730 | ||
722 | default: | 731 | default: |
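The bnx2fc_hwi.c hunk above brackets every bnx2fc_get_next_rqe()/bnx2fc_return_rqe() pair in the unsolicited-frame path with tgt->tgt_lock, presumably so that the receive-queue consumer index cannot be advanced by two contexts at once. A very reduced picture of what such a locked consumer looks like; the structure and arithmetic here are illustrative, not the driver's.

#include <linux/spinlock.h>
#include <linux/types.h>

struct rq_ring {
	spinlock_t lock;	/* plays the role of tgt->tgt_lock */
	void *base;		/* RQ buffer area */
	u32 buf_sz;		/* bytes per RQ entry */
	u32 size;		/* number of entries */
	u32 cons_idx;		/* consumer index */
};

/* Hand out num_rqe entries; the caller copies the data before returning them. */
static void *rq_get_next(struct rq_ring *rq, u32 num_rqe)
{
	void *buf;

	spin_lock_bh(&rq->lock);
	buf = (char *)rq->base + rq->cons_idx * rq->buf_sz;
	rq->cons_idx = (rq->cons_idx + num_rqe) % rq->size;
	spin_unlock_bh(&rq->lock);
	return buf;
}

static void rq_return(struct rq_ring *rq, u32 num_rqe)
{
	spin_lock_bh(&rq->lock);
	/* the real driver advances the producer index and rings a doorbell here */
	spin_unlock_bh(&rq->lock);
}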
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 0f1dd23730d..d3fc302c241 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c | |||
@@ -11,6 +11,9 @@ | |||
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include "bnx2fc.h" | 13 | #include "bnx2fc.h" |
14 | |||
15 | #define RESERVE_FREE_LIST_INDEX num_possible_cpus() | ||
16 | |||
14 | static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len, | 17 | static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len, |
15 | int bd_index); | 18 | int bd_index); |
16 | static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req); | 19 | static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req); |
@@ -242,8 +245,9 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, | |||
242 | u32 mem_size; | 245 | u32 mem_size; |
243 | u16 xid; | 246 | u16 xid; |
244 | int i; | 247 | int i; |
245 | int num_ios; | 248 | int num_ios, num_pri_ios; |
246 | size_t bd_tbl_sz; | 249 | size_t bd_tbl_sz; |
250 | int arr_sz = num_possible_cpus() + 1; | ||
247 | 251 | ||
248 | if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) { | 252 | if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) { |
249 | printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \ | 253 | printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \ |
@@ -263,14 +267,14 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, | |||
263 | } | 267 | } |
264 | 268 | ||
265 | cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) * | 269 | cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) * |
266 | num_possible_cpus(), GFP_KERNEL); | 270 | arr_sz, GFP_KERNEL); |
267 | if (!cmgr->free_list) { | 271 | if (!cmgr->free_list) { |
268 | printk(KERN_ERR PFX "failed to alloc free_list\n"); | 272 | printk(KERN_ERR PFX "failed to alloc free_list\n"); |
269 | goto mem_err; | 273 | goto mem_err; |
270 | } | 274 | } |
271 | 275 | ||
272 | cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) * | 276 | cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) * |
273 | num_possible_cpus(), GFP_KERNEL); | 277 | arr_sz, GFP_KERNEL); |
274 | if (!cmgr->free_list_lock) { | 278 | if (!cmgr->free_list_lock) { |
275 | printk(KERN_ERR PFX "failed to alloc free_list_lock\n"); | 279 | printk(KERN_ERR PFX "failed to alloc free_list_lock\n"); |
276 | goto mem_err; | 280 | goto mem_err; |
@@ -279,13 +283,18 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, | |||
279 | cmgr->hba = hba; | 283 | cmgr->hba = hba; |
280 | cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1); | 284 | cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1); |
281 | 285 | ||
282 | for (i = 0; i < num_possible_cpus(); i++) { | 286 | for (i = 0; i < arr_sz; i++) { |
283 | INIT_LIST_HEAD(&cmgr->free_list[i]); | 287 | INIT_LIST_HEAD(&cmgr->free_list[i]); |
284 | spin_lock_init(&cmgr->free_list_lock[i]); | 288 | spin_lock_init(&cmgr->free_list_lock[i]); |
285 | } | 289 | } |
286 | 290 | ||
287 | /* Pre-allocated pool of bnx2fc_cmds */ | 291 | /* |
292 | * Pre-allocated pool of bnx2fc_cmds. | ||
293 | * Last entry in the free list array is the free list | ||
294 | * of slow path requests. | ||
295 | */ | ||
288 | xid = BNX2FC_MIN_XID; | 296 | xid = BNX2FC_MIN_XID; |
297 | num_pri_ios = num_ios - BNX2FC_ELSTM_XIDS; | ||
289 | for (i = 0; i < num_ios; i++) { | 298 | for (i = 0; i < num_ios; i++) { |
290 | io_req = kzalloc(sizeof(*io_req), GFP_KERNEL); | 299 | io_req = kzalloc(sizeof(*io_req), GFP_KERNEL); |
291 | 300 | ||
@@ -298,11 +307,13 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, | |||
298 | INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout); | 307 | INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout); |
299 | 308 | ||
300 | io_req->xid = xid++; | 309 | io_req->xid = xid++; |
301 | if (io_req->xid >= BNX2FC_MAX_OUTSTANDING_CMNDS) | 310 | if (i < num_pri_ios) |
302 | printk(KERN_ERR PFX "ERROR allocating xids - 0x%x\n", | 311 | list_add_tail(&io_req->link, |
303 | io_req->xid); | 312 | &cmgr->free_list[io_req->xid % |
304 | list_add_tail(&io_req->link, | 313 | num_possible_cpus()]); |
305 | &cmgr->free_list[io_req->xid % num_possible_cpus()]); | 314 | else |
315 | list_add_tail(&io_req->link, | ||
316 | &cmgr->free_list[num_possible_cpus()]); | ||
306 | io_req++; | 317 | io_req++; |
307 | } | 318 | } |
308 | 319 | ||
@@ -389,7 +400,7 @@ free_cmd_pool: | |||
389 | if (!cmgr->free_list) | 400 | if (!cmgr->free_list) |
390 | goto free_cmgr; | 401 | goto free_cmgr; |
391 | 402 | ||
392 | for (i = 0; i < num_possible_cpus(); i++) { | 403 | for (i = 0; i < num_possible_cpus() + 1; i++) { |
393 | struct list_head *list; | 404 | struct list_head *list; |
394 | struct list_head *tmp; | 405 | struct list_head *tmp; |
395 | 406 | ||
@@ -413,6 +424,7 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type) | |||
413 | struct bnx2fc_cmd *io_req; | 424 | struct bnx2fc_cmd *io_req; |
414 | struct list_head *listp; | 425 | struct list_head *listp; |
415 | struct io_bdt *bd_tbl; | 426 | struct io_bdt *bd_tbl; |
427 | int index = RESERVE_FREE_LIST_INDEX; | ||
416 | u32 max_sqes; | 428 | u32 max_sqes; |
417 | u16 xid; | 429 | u16 xid; |
418 | 430 | ||
@@ -432,26 +444,26 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type) | |||
432 | * NOTE: Free list insertions and deletions are protected with | 444 | * NOTE: Free list insertions and deletions are protected with |
433 | * cmgr lock | 445 | * cmgr lock |
434 | */ | 446 | */ |
435 | spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); | 447 | spin_lock_bh(&cmd_mgr->free_list_lock[index]); |
436 | if ((list_empty(&(cmd_mgr->free_list[smp_processor_id()]))) || | 448 | if ((list_empty(&(cmd_mgr->free_list[index]))) || |
437 | (tgt->num_active_ios.counter >= max_sqes)) { | 449 | (tgt->num_active_ios.counter >= max_sqes)) { |
438 | BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available " | 450 | BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available " |
439 | "ios(%d):sqes(%d)\n", | 451 | "ios(%d):sqes(%d)\n", |
440 | tgt->num_active_ios.counter, tgt->max_sqes); | 452 | tgt->num_active_ios.counter, tgt->max_sqes); |
441 | if (list_empty(&(cmd_mgr->free_list[smp_processor_id()]))) | 453 | if (list_empty(&(cmd_mgr->free_list[index]))) |
442 | printk(KERN_ERR PFX "elstm_alloc: list_empty\n"); | 454 | printk(KERN_ERR PFX "elstm_alloc: list_empty\n"); |
443 | spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); | 455 | spin_unlock_bh(&cmd_mgr->free_list_lock[index]); |
444 | return NULL; | 456 | return NULL; |
445 | } | 457 | } |
446 | 458 | ||
447 | listp = (struct list_head *) | 459 | listp = (struct list_head *) |
448 | cmd_mgr->free_list[smp_processor_id()].next; | 460 | cmd_mgr->free_list[index].next; |
449 | list_del_init(listp); | 461 | list_del_init(listp); |
450 | io_req = (struct bnx2fc_cmd *) listp; | 462 | io_req = (struct bnx2fc_cmd *) listp; |
451 | xid = io_req->xid; | 463 | xid = io_req->xid; |
452 | cmd_mgr->cmds[xid] = io_req; | 464 | cmd_mgr->cmds[xid] = io_req; |
453 | atomic_inc(&tgt->num_active_ios); | 465 | atomic_inc(&tgt->num_active_ios); |
454 | spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); | 466 | spin_unlock_bh(&cmd_mgr->free_list_lock[index]); |
455 | 467 | ||
456 | INIT_LIST_HEAD(&io_req->link); | 468 | INIT_LIST_HEAD(&io_req->link); |
457 | 469 | ||
@@ -479,27 +491,30 @@ static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt) | |||
479 | struct io_bdt *bd_tbl; | 491 | struct io_bdt *bd_tbl; |
480 | u32 max_sqes; | 492 | u32 max_sqes; |
481 | u16 xid; | 493 | u16 xid; |
494 | int index = get_cpu(); | ||
482 | 495 | ||
483 | max_sqes = BNX2FC_SCSI_MAX_SQES; | 496 | max_sqes = BNX2FC_SCSI_MAX_SQES; |
484 | /* | 497 | /* |
485 | * NOTE: Free list insertions and deletions are protected with | 498 | * NOTE: Free list insertions and deletions are protected with |
486 | * cmgr lock | 499 | * cmgr lock |
487 | */ | 500 | */ |
488 | spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); | 501 | spin_lock_bh(&cmd_mgr->free_list_lock[index]); |
489 | if ((list_empty(&cmd_mgr->free_list[smp_processor_id()])) || | 502 | if ((list_empty(&cmd_mgr->free_list[index])) || |
490 | (tgt->num_active_ios.counter >= max_sqes)) { | 503 | (tgt->num_active_ios.counter >= max_sqes)) { |
491 | spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); | 504 | spin_unlock_bh(&cmd_mgr->free_list_lock[index]); |
505 | put_cpu(); | ||
492 | return NULL; | 506 | return NULL; |
493 | } | 507 | } |
494 | 508 | ||
495 | listp = (struct list_head *) | 509 | listp = (struct list_head *) |
496 | cmd_mgr->free_list[smp_processor_id()].next; | 510 | cmd_mgr->free_list[index].next; |
497 | list_del_init(listp); | 511 | list_del_init(listp); |
498 | io_req = (struct bnx2fc_cmd *) listp; | 512 | io_req = (struct bnx2fc_cmd *) listp; |
499 | xid = io_req->xid; | 513 | xid = io_req->xid; |
500 | cmd_mgr->cmds[xid] = io_req; | 514 | cmd_mgr->cmds[xid] = io_req; |
501 | atomic_inc(&tgt->num_active_ios); | 515 | atomic_inc(&tgt->num_active_ios); |
502 | spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); | 516 | spin_unlock_bh(&cmd_mgr->free_list_lock[index]); |
517 | put_cpu(); | ||
503 | 518 | ||
504 | INIT_LIST_HEAD(&io_req->link); | 519 | INIT_LIST_HEAD(&io_req->link); |
505 | 520 | ||
@@ -522,8 +537,15 @@ void bnx2fc_cmd_release(struct kref *ref) | |||
522 | struct bnx2fc_cmd *io_req = container_of(ref, | 537 | struct bnx2fc_cmd *io_req = container_of(ref, |
523 | struct bnx2fc_cmd, refcount); | 538 | struct bnx2fc_cmd, refcount); |
524 | struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr; | 539 | struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr; |
540 | int index; | ||
541 | |||
542 | if (io_req->cmd_type == BNX2FC_SCSI_CMD) | ||
543 | index = io_req->xid % num_possible_cpus(); | ||
544 | else | ||
545 | index = RESERVE_FREE_LIST_INDEX; | ||
525 | 546 | ||
526 | spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); | 547 | |
548 | spin_lock_bh(&cmd_mgr->free_list_lock[index]); | ||
527 | if (io_req->cmd_type != BNX2FC_SCSI_CMD) | 549 | if (io_req->cmd_type != BNX2FC_SCSI_CMD) |
528 | bnx2fc_free_mp_resc(io_req); | 550 | bnx2fc_free_mp_resc(io_req); |
529 | cmd_mgr->cmds[io_req->xid] = NULL; | 551 | cmd_mgr->cmds[io_req->xid] = NULL; |
@@ -531,9 +553,10 @@ void bnx2fc_cmd_release(struct kref *ref) | |||
531 | list_del_init(&io_req->link); | 553 | list_del_init(&io_req->link); |
532 | /* Add it to the free list */ | 554 | /* Add it to the free list */ |
533 | list_add(&io_req->link, | 555 | list_add(&io_req->link, |
534 | &cmd_mgr->free_list[smp_processor_id()]); | 556 | &cmd_mgr->free_list[index]); |
535 | atomic_dec(&io_req->tgt->num_active_ios); | 557 | atomic_dec(&io_req->tgt->num_active_ios); |
536 | spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); | 558 | spin_unlock_bh(&cmd_mgr->free_list_lock[index]); |
559 | |||
537 | } | 560 | } |
538 | 561 | ||
539 | static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req) | 562 | static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req) |
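The bnx2fc_io.c changes above grow the command-manager free-list array from num_possible_cpus() to num_possible_cpus() + 1: the per-CPU lists keep serving SCSI commands (now indexed via get_cpu()/put_cpu() so the CPU cannot change under the lookup), while the extra tail entry, RESERVE_FREE_LIST_INDEX, holds the ELS/TM slow-path XIDs, and bnx2fc_cmd_release() picks the list from the command type. A reduced sketch of that layout with simplified structures of my own:

#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/spinlock.h>

#define RESERVE_FREE_LIST_INDEX	num_possible_cpus()

struct cmd_pool {
	struct list_head *free_list;	/* num_possible_cpus() + 1 lists */
	spinlock_t *free_list_lock;
};

static int cmd_pool_init(struct cmd_pool *p)
{
	int i, arr_sz = num_possible_cpus() + 1;

	p->free_list = kcalloc(arr_sz, sizeof(*p->free_list), GFP_KERNEL);
	p->free_list_lock = kcalloc(arr_sz, sizeof(*p->free_list_lock),
				    GFP_KERNEL);
	if (!p->free_list || !p->free_list_lock)
		return -ENOMEM;
	for (i = 0; i < arr_sz; i++) {
		INIT_LIST_HEAD(&p->free_list[i]);
		spin_lock_init(&p->free_list_lock[i]);
	}
	return 0;
}

/* Fast path (SCSI): use this CPU's list, pinned for the duration. */
static struct list_head *cmd_pool_get_fast(struct cmd_pool *p)
{
	int index = get_cpu();
	struct list_head *ent = NULL;

	spin_lock_bh(&p->free_list_lock[index]);
	if (!list_empty(&p->free_list[index])) {
		ent = p->free_list[index].next;
		list_del_init(ent);
	}
	spin_unlock_bh(&p->free_list_lock[index]);
	put_cpu();
	return ent;
}

/* Slow path (ELS/TM): always the reserved tail list. */
static struct list_head *cmd_pool_get_reserved(struct cmd_pool *p)
{
	int index = RESERVE_FREE_LIST_INDEX;
	struct list_head *ent = NULL;

	spin_lock_bh(&p->free_list_lock[index]);
	if (!list_empty(&p->free_list[index])) {
		ent = p->free_list[index].next;
		list_del_init(ent);
	}
	spin_unlock_bh(&p->free_list_lock[index]);
	return ent;
}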
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c index 7ea93af6026..7cc05e4e82d 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c +++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c | |||
@@ -304,10 +304,8 @@ static void bnx2fc_upload_session(struct fcoe_port *port, | |||
304 | " not sent to FW\n"); | 304 | " not sent to FW\n"); |
305 | 305 | ||
306 | /* Free session resources */ | 306 | /* Free session resources */ |
307 | spin_lock_bh(&tgt->cq_lock); | ||
308 | bnx2fc_free_session_resc(hba, tgt); | 307 | bnx2fc_free_session_resc(hba, tgt); |
309 | bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id); | 308 | bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id); |
310 | spin_unlock_bh(&tgt->cq_lock); | ||
311 | } | 309 | } |
312 | 310 | ||
313 | static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt, | 311 | static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt, |
@@ -830,11 +828,13 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba, | |||
830 | tgt->rq = NULL; | 828 | tgt->rq = NULL; |
831 | } | 829 | } |
832 | /* Free CQ */ | 830 | /* Free CQ */ |
831 | spin_lock_bh(&tgt->cq_lock); | ||
833 | if (tgt->cq) { | 832 | if (tgt->cq) { |
834 | dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size, | 833 | dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size, |
835 | tgt->cq, tgt->cq_dma); | 834 | tgt->cq, tgt->cq_dma); |
836 | tgt->cq = NULL; | 835 | tgt->cq = NULL; |
837 | } | 836 | } |
837 | spin_unlock_bh(&tgt->cq_lock); | ||
838 | /* Free SQ */ | 838 | /* Free SQ */ |
839 | if (tgt->sq) { | 839 | if (tgt->sq) { |
840 | dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size, | 840 | dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size, |
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index 8eeb39ffa37..e98ae33f129 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c | |||
@@ -132,14 +132,25 @@ static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv) | |||
132 | if (page_count(sg_page(sg)) >= 1 && !recv) | 132 | if (page_count(sg_page(sg)) >= 1 && !recv) |
133 | return; | 133 | return; |
134 | 134 | ||
135 | segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0); | 135 | if (recv) { |
136 | segment->atomic_mapped = true; | ||
137 | segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0); | ||
138 | } else { | ||
139 | segment->atomic_mapped = false; | ||
140 | /* the xmit path can sleep with the page mapped so use kmap */ | ||
141 | segment->sg_mapped = kmap(sg_page(sg)); | ||
142 | } | ||
143 | |||
136 | segment->data = segment->sg_mapped + sg->offset + segment->sg_offset; | 144 | segment->data = segment->sg_mapped + sg->offset + segment->sg_offset; |
137 | } | 145 | } |
138 | 146 | ||
139 | void iscsi_tcp_segment_unmap(struct iscsi_segment *segment) | 147 | void iscsi_tcp_segment_unmap(struct iscsi_segment *segment) |
140 | { | 148 | { |
141 | if (segment->sg_mapped) { | 149 | if (segment->sg_mapped) { |
142 | kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0); | 150 | if (segment->atomic_mapped) |
151 | kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0); | ||
152 | else | ||
153 | kunmap(sg_page(segment->sg)); | ||
143 | segment->sg_mapped = NULL; | 154 | segment->sg_mapped = NULL; |
144 | segment->data = NULL; | 155 | segment->data = NULL; |
145 | } | 156 | } |
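The libiscsi_tcp change above records how a scatterlist page was mapped: the receive side still uses kmap_atomic() since it runs in softirq context, but the transmit side can sleep while the page is mapped, so it switches to kmap(), and the unmap path keys off the new atomic_mapped flag. A reduced sketch of the pairing, with a simplified segment structure standing in for struct iscsi_segment:

#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

struct seg {
	struct scatterlist *sg;
	void *sg_mapped;
	bool atomic_mapped;
};

static void seg_map(struct seg *s, bool recv)
{
	if (recv) {
		s->atomic_mapped = true;
		s->sg_mapped = kmap_atomic(sg_page(s->sg), KM_SOFTIRQ0);
	} else {
		/* xmit may sleep while the mapping is held, so use kmap() */
		s->atomic_mapped = false;
		s->sg_mapped = kmap(sg_page(s->sg));
	}
	/* the real code then points segment->data at sg_mapped + offsets */
}

static void seg_unmap(struct seg *s)
{
	if (!s->sg_mapped)
		return;
	if (s->atomic_mapped)
		kunmap_atomic(s->sg_mapped, KM_SOFTIRQ0);
	else
		kunmap(sg_page(s->sg));
	s->sg_mapped = NULL;
}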
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile index 14de249917f..88928f00aa2 100644 --- a/drivers/scsi/lpfc/Makefile +++ b/drivers/scsi/lpfc/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | #/******************************************************************* | 1 | #/******************************************************************* |
2 | # * This file is part of the Emulex Linux Device Driver for * | 2 | # * This file is part of the Emulex Linux Device Driver for * |
3 | # * Fibre Channel Host Bus Adapters. * | 3 | # * Fibre Channel Host Bus Adapters. * |
4 | # * Copyright (C) 2004-2006 Emulex. All rights reserved. * | 4 | # * Copyright (C) 2004-2011 Emulex. All rights reserved. * |
5 | # * EMULEX and SLI are trademarks of Emulex. * | 5 | # * EMULEX and SLI are trademarks of Emulex. * |
6 | # * www.emulex.com * | 6 | # * www.emulex.com * |
7 | # * * | 7 | # * * |
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index b64c6da870d..60e98a62f30 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h | |||
@@ -539,6 +539,8 @@ struct lpfc_hba { | |||
539 | (struct lpfc_hba *, uint32_t); | 539 | (struct lpfc_hba *, uint32_t); |
540 | int (*lpfc_hba_down_link) | 540 | int (*lpfc_hba_down_link) |
541 | (struct lpfc_hba *, uint32_t); | 541 | (struct lpfc_hba *, uint32_t); |
542 | int (*lpfc_selective_reset) | ||
543 | (struct lpfc_hba *); | ||
542 | 544 | ||
543 | /* SLI4 specific HBA data structure */ | 545 | /* SLI4 specific HBA data structure */ |
544 | struct lpfc_sli4_hba sli4_hba; | 546 | struct lpfc_sli4_hba sli4_hba; |
@@ -895,7 +897,18 @@ lpfc_worker_wake_up(struct lpfc_hba *phba) | |||
895 | return; | 897 | return; |
896 | } | 898 | } |
897 | 899 | ||
898 | static inline void | 900 | static inline int |
901 | lpfc_readl(void __iomem *addr, uint32_t *data) | ||
902 | { | ||
903 | uint32_t temp; | ||
904 | temp = readl(addr); | ||
905 | if (temp == 0xffffffff) | ||
906 | return -EIO; | ||
907 | *data = temp; | ||
908 | return 0; | ||
909 | } | ||
910 | |||
911 | static inline int | ||
899 | lpfc_sli_read_hs(struct lpfc_hba *phba) | 912 | lpfc_sli_read_hs(struct lpfc_hba *phba) |
900 | { | 913 | { |
901 | /* | 914 | /* |
@@ -904,15 +917,17 @@ lpfc_sli_read_hs(struct lpfc_hba *phba) | |||
904 | */ | 917 | */ |
905 | phba->sli.slistat.err_attn_event++; | 918 | phba->sli.slistat.err_attn_event++; |
906 | 919 | ||
907 | /* Save status info */ | 920 | /* Save status info and check for unplug error */ |
908 | phba->work_hs = readl(phba->HSregaddr); | 921 | if (lpfc_readl(phba->HSregaddr, &phba->work_hs) || |
909 | phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); | 922 | lpfc_readl(phba->MBslimaddr + 0xa8, &phba->work_status[0]) || |
910 | phba->work_status[1] = readl(phba->MBslimaddr + 0xac); | 923 | lpfc_readl(phba->MBslimaddr + 0xac, &phba->work_status[1])) { |
924 | return -EIO; | ||
925 | } | ||
911 | 926 | ||
912 | /* Clear chip Host Attention error bit */ | 927 | /* Clear chip Host Attention error bit */ |
913 | writel(HA_ERATT, phba->HAregaddr); | 928 | writel(HA_ERATT, phba->HAregaddr); |
914 | readl(phba->HAregaddr); /* flush */ | 929 | readl(phba->HAregaddr); /* flush */ |
915 | phba->pport->stopped = 1; | 930 | phba->pport->stopped = 1; |
916 | 931 | ||
917 | return; | 932 | return 0; |
918 | } | 933 | } |
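The new lpfc_readl() above treats an all-ones readback as a dead or surprise-removed adapter and turns it into -EIO instead of letting 0xffffffff flow on as register contents; the lpfc_attr.c and lpfc_bsg.c hunks that follow convert readl() callers to check it and unwind. A self-contained sketch of the same pattern, with names of my own rather than the driver's:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

/* Generic form of the check: an all-ones readback from a memory-mapped
 * register usually means the PCI device is gone (master-abort pattern). */
static inline int checked_readl(void __iomem *addr, u32 *data)
{
	u32 tmp = readl(addr);

	if (tmp == 0xffffffff)
		return -EIO;
	*data = tmp;
	return 0;
}

/* Caller pattern the later lpfc hunks adopt: bail out rather than operate
 * on garbage register contents. */
static int set_bit_checked(void __iomem *reg, u32 mask)
{
	u32 val;

	if (checked_readl(reg, &val))
		return -EIO;		/* adapter likely unplugged */
	writel(val | mask, reg);
	readl(reg);			/* flush posted write */
	return 0;
}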
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index e7c020df12f..4e0faa00b96 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c | |||
@@ -685,7 +685,7 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) | |||
685 | * -EIO reset not configured or error posting the event | 685 | * -EIO reset not configured or error posting the event |
686 | * zero for success | 686 | * zero for success |
687 | **/ | 687 | **/ |
688 | static int | 688 | int |
689 | lpfc_selective_reset(struct lpfc_hba *phba) | 689 | lpfc_selective_reset(struct lpfc_hba *phba) |
690 | { | 690 | { |
691 | struct completion online_compl; | 691 | struct completion online_compl; |
@@ -746,7 +746,7 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr, | |||
746 | int status = -EINVAL; | 746 | int status = -EINVAL; |
747 | 747 | ||
748 | if (strncmp(buf, "selective", sizeof("selective") - 1) == 0) | 748 | if (strncmp(buf, "selective", sizeof("selective") - 1) == 0) |
749 | status = lpfc_selective_reset(phba); | 749 | status = phba->lpfc_selective_reset(phba); |
750 | 750 | ||
751 | if (status == 0) | 751 | if (status == 0) |
752 | return strlen(buf); | 752 | return strlen(buf); |
@@ -1224,7 +1224,10 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr, | |||
1224 | if (val & ENABLE_FCP_RING_POLLING) { | 1224 | if (val & ENABLE_FCP_RING_POLLING) { |
1225 | if ((val & DISABLE_FCP_RING_INT) && | 1225 | if ((val & DISABLE_FCP_RING_INT) && |
1226 | !(old_val & DISABLE_FCP_RING_INT)) { | 1226 | !(old_val & DISABLE_FCP_RING_INT)) { |
1227 | creg_val = readl(phba->HCregaddr); | 1227 | if (lpfc_readl(phba->HCregaddr, &creg_val)) { |
1228 | spin_unlock_irq(&phba->hbalock); | ||
1229 | return -EINVAL; | ||
1230 | } | ||
1228 | creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); | 1231 | creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); |
1229 | writel(creg_val, phba->HCregaddr); | 1232 | writel(creg_val, phba->HCregaddr); |
1230 | readl(phba->HCregaddr); /* flush */ | 1233 | readl(phba->HCregaddr); /* flush */ |
@@ -1242,7 +1245,10 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr, | |||
1242 | spin_unlock_irq(&phba->hbalock); | 1245 | spin_unlock_irq(&phba->hbalock); |
1243 | del_timer(&phba->fcp_poll_timer); | 1246 | del_timer(&phba->fcp_poll_timer); |
1244 | spin_lock_irq(&phba->hbalock); | 1247 | spin_lock_irq(&phba->hbalock); |
1245 | creg_val = readl(phba->HCregaddr); | 1248 | if (lpfc_readl(phba->HCregaddr, &creg_val)) { |
1249 | spin_unlock_irq(&phba->hbalock); | ||
1250 | return -EINVAL; | ||
1251 | } | ||
1246 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); | 1252 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); |
1247 | writel(creg_val, phba->HCregaddr); | 1253 | writel(creg_val, phba->HCregaddr); |
1248 | readl(phba->HCregaddr); /* flush */ | 1254 | readl(phba->HCregaddr); /* flush */ |
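
The lpfc_attr.c hunk stops calling lpfc_selective_reset() directly and instead goes through a per-adapter function pointer (phba->lpfc_selective_reset), which lpfc_init_api_table_setup() fills in later in this series. A small sketch of that indirection pattern; the struct and function names here are invented and only mirror the shape of the driver's API table, not its contents:

    #include <stdio.h>

    struct fake_hba {
            int (*selective_reset)(struct fake_hba *);
            const char *name;
    };

    static int generic_selective_reset(struct fake_hba *hba)
    {
            printf("%s: performing selective reset\n", hba->name);
            return 0;
    }

    static void init_api_table(struct fake_hba *hba)
    {
            /* analogous to lpfc_init_api_table_setup(): install the default op */
            hba->selective_reset = generic_selective_reset;
    }

    int main(void)
    {
            struct fake_hba hba = { .name = "hba0" };

            init_api_table(&hba);
            /* the sysfs store path now calls through the pointer */
            return hba.selective_reset(&hba);
    }

Routing the call through the table lets a device generation swap in its own reset routine without touching the sysfs code.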
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 0dd43bb9161..793b9f1131f 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2009-2010 Emulex. All rights reserved. * | 4 | * Copyright (C) 2009-2011 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -348,7 +348,10 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) | |||
348 | dd_data->context_un.iocb.bmp = bmp; | 348 | dd_data->context_un.iocb.bmp = bmp; |
349 | 349 | ||
350 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { | 350 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { |
351 | creg_val = readl(phba->HCregaddr); | 351 | if (lpfc_readl(phba->HCregaddr, &creg_val)) { |
352 | rc = -EIO ; | ||
353 | goto free_cmdiocbq; | ||
354 | } | ||
352 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); | 355 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); |
353 | writel(creg_val, phba->HCregaddr); | 356 | writel(creg_val, phba->HCregaddr); |
354 | readl(phba->HCregaddr); /* flush */ | 357 | readl(phba->HCregaddr); /* flush */ |
@@ -599,7 +602,10 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) | |||
599 | dd_data->context_un.iocb.ndlp = ndlp; | 602 | dd_data->context_un.iocb.ndlp = ndlp; |
600 | 603 | ||
601 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { | 604 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { |
602 | creg_val = readl(phba->HCregaddr); | 605 | if (lpfc_readl(phba->HCregaddr, &creg_val)) { |
606 | rc = -EIO; | ||
607 | goto linkdown_err; | ||
608 | } | ||
603 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); | 609 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); |
604 | writel(creg_val, phba->HCregaddr); | 610 | writel(creg_val, phba->HCregaddr); |
605 | readl(phba->HCregaddr); /* flush */ | 611 | readl(phba->HCregaddr); /* flush */ |
@@ -613,6 +619,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) | |||
613 | else | 619 | else |
614 | rc = -EIO; | 620 | rc = -EIO; |
615 | 621 | ||
622 | linkdown_err: | ||
616 | pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, | 623 | pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, |
617 | job->request_payload.sg_cnt, DMA_TO_DEVICE); | 624 | job->request_payload.sg_cnt, DMA_TO_DEVICE); |
618 | pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, | 625 | pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, |
@@ -1357,7 +1364,10 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, | |||
1357 | dd_data->context_un.iocb.ndlp = ndlp; | 1364 | dd_data->context_un.iocb.ndlp = ndlp; |
1358 | 1365 | ||
1359 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { | 1366 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { |
1360 | creg_val = readl(phba->HCregaddr); | 1367 | if (lpfc_readl(phba->HCregaddr, &creg_val)) { |
1368 | rc = -IOCB_ERROR; | ||
1369 | goto issue_ct_rsp_exit; | ||
1370 | } | ||
1361 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); | 1371 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); |
1362 | writel(creg_val, phba->HCregaddr); | 1372 | writel(creg_val, phba->HCregaddr); |
1363 | readl(phba->HCregaddr); /* flush */ | 1373 | readl(phba->HCregaddr); /* flush */ |
@@ -2479,16 +2489,18 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) | |||
2479 | 2489 | ||
2480 | from = (uint8_t *)dd_data->context_un.mbox.mb; | 2490 | from = (uint8_t *)dd_data->context_un.mbox.mb; |
2481 | job = dd_data->context_un.mbox.set_job; | 2491 | job = dd_data->context_un.mbox.set_job; |
2482 | size = job->reply_payload.payload_len; | 2492 | if (job) { |
2483 | job->reply->reply_payload_rcv_len = | 2493 | size = job->reply_payload.payload_len; |
2484 | sg_copy_from_buffer(job->reply_payload.sg_list, | 2494 | job->reply->reply_payload_rcv_len = |
2485 | job->reply_payload.sg_cnt, | 2495 | sg_copy_from_buffer(job->reply_payload.sg_list, |
2486 | from, size); | 2496 | job->reply_payload.sg_cnt, |
2487 | job->reply->result = 0; | 2497 | from, size); |
2498 | job->reply->result = 0; | ||
2488 | 2499 | ||
2500 | job->dd_data = NULL; | ||
2501 | job->job_done(job); | ||
2502 | } | ||
2489 | dd_data->context_un.mbox.set_job = NULL; | 2503 | dd_data->context_un.mbox.set_job = NULL; |
2490 | job->dd_data = NULL; | ||
2491 | job->job_done(job); | ||
2492 | /* need to hold the lock until we call job done to hold off | 2504 | /* need to hold the lock until we call job done to hold off |
2493 | * the timeout handler returning to the midlayer while | 2505 | * the timeout handler returning to the midlayer while |
2494 | * we are still processing the job | 2506 | * we are still processing the job |
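
The lpfc_bsg.c hunks above bail out through goto labels (free_cmdiocbq, linkdown_err, issue_ct_rsp_exit) when lpfc_readl() reports an unplugged device, so resources already taken on the path are still released. A generic sketch of that goto-unwind style with invented names; it is not lpfc code, only the same error-handling shape:

    #include <stdio.h>
    #include <stdlib.h>

    static int device_gone;                 /* stands in for a failed register read */

    static int do_request(void)
    {
            char *cmd = NULL, *rsp = NULL;
            int rc = 0;

            cmd = malloc(64);
            if (!cmd)
                    return -1;

            rsp = malloc(64);
            if (!rsp) {
                    rc = -1;
                    goto free_cmd;
            }

            if (device_gone) {              /* mirrors the lpfc_readl() check */
                    rc = -1;
                    goto free_rsp;          /* unwind everything taken so far */
            }

            printf("request issued\n");

    free_rsp:
            free(rsp);
    free_cmd:
            free(cmd);
            return rc;
    }

    int main(void)
    {
            device_gone = 1;
            return do_request() ? 1 : 0;
    }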
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 3d40023f480..f0b332f4eed 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2010 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2011 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -254,8 +254,8 @@ uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *); | |||
254 | void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t, | 254 | void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t, |
255 | uint32_t); | 255 | uint32_t); |
256 | void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *); | 256 | void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *); |
257 | 257 | int lpfc_selective_reset(struct lpfc_hba *); | |
258 | void lpfc_reset_barrier(struct lpfc_hba * phba); | 258 | void lpfc_reset_barrier(struct lpfc_hba *); |
259 | int lpfc_sli_brdready(struct lpfc_hba *, uint32_t); | 259 | int lpfc_sli_brdready(struct lpfc_hba *, uint32_t); |
260 | int lpfc_sli_brdkill(struct lpfc_hba *); | 260 | int lpfc_sli_brdkill(struct lpfc_hba *); |
261 | int lpfc_sli_brdreset(struct lpfc_hba *); | 261 | int lpfc_sli_brdreset(struct lpfc_hba *); |
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 8e28edf9801..735028fedda 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c | |||
@@ -89,7 +89,8 @@ lpfc_els_chk_latt(struct lpfc_vport *vport) | |||
89 | return 0; | 89 | return 0; |
90 | 90 | ||
91 | /* Read the HBA Host Attention Register */ | 91 | /* Read the HBA Host Attention Register */ |
92 | ha_copy = readl(phba->HAregaddr); | 92 | if (lpfc_readl(phba->HAregaddr, &ha_copy)) |
93 | return 1; | ||
93 | 94 | ||
94 | if (!(ha_copy & HA_LATT)) | 95 | if (!(ha_copy & HA_LATT)) |
95 | return 0; | 96 | return 0; |
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 94ae37c5111..95f11ed7946 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h | |||
@@ -1344,7 +1344,7 @@ typedef struct { /* FireFly BIU registers */ | |||
1344 | #define HS_FFER1 0x80000000 /* Bit 31 */ | 1344 | #define HS_FFER1 0x80000000 /* Bit 31 */ |
1345 | #define HS_CRIT_TEMP 0x00000100 /* Bit 8 */ | 1345 | #define HS_CRIT_TEMP 0x00000100 /* Bit 8 */ |
1346 | #define HS_FFERM 0xFF000100 /* Mask for error bits 31:24 and 8 */ | 1346 | #define HS_FFERM 0xFF000100 /* Mask for error bits 31:24 and 8 */ |
1347 | 1347 | #define UNPLUG_ERR 0x00000001 /* Indicate pci hot unplug */ | |
1348 | /* Host Control Register */ | 1348 | /* Host Control Register */ |
1349 | 1349 | ||
1350 | #define HC_REG_OFFSET 12 /* Byte offset from register base address */ | 1350 | #define HC_REG_OFFSET 12 /* Byte offset from register base address */ |
@@ -1713,6 +1713,17 @@ struct lpfc_pde6 { | |||
1713 | #define pde6_apptagval_WORD word2 | 1713 | #define pde6_apptagval_WORD word2 |
1714 | }; | 1714 | }; |
1715 | 1715 | ||
1716 | struct lpfc_pde7 { | ||
1717 | uint32_t word0; | ||
1718 | #define pde7_type_SHIFT 24 | ||
1719 | #define pde7_type_MASK 0x000000ff | ||
1720 | #define pde7_type_WORD word0 | ||
1721 | #define pde7_rsvd0_SHIFT 0 | ||
1722 | #define pde7_rsvd0_MASK 0x00ffffff | ||
1723 | #define pde7_rsvd0_WORD word0 | ||
1724 | uint32_t addrHigh; | ||
1725 | uint32_t addrLow; | ||
1726 | }; | ||
1716 | 1727 | ||
1717 | /* Structure for MB Command LOAD_SM and DOWN_LOAD */ | 1728 | /* Structure for MB Command LOAD_SM and DOWN_LOAD */ |
1718 | 1729 | ||
@@ -3621,7 +3632,7 @@ typedef struct _IOCB { /* IOCB structure */ | |||
3621 | ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */ | 3632 | ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */ |
3622 | QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */ | 3633 | QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */ |
3623 | struct rcv_seq64 rcvseq64; /* RCV_SEQ64 and RCV_CONT64 */ | 3634 | struct rcv_seq64 rcvseq64; /* RCV_SEQ64 and RCV_CONT64 */ |
3624 | struct sli4_bls_acc bls_acc; /* UNSOL ABTS BLS_ACC params */ | 3635 | struct sli4_bls_rsp bls_rsp; /* UNSOL ABTS BLS_RSP params */ |
3625 | uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */ | 3636 | uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */ |
3626 | } un; | 3637 | } un; |
3627 | union { | 3638 | union { |
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index c7178d60c7b..8433ac0d9fb 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h | |||
@@ -215,7 +215,7 @@ struct lpfc_sli4_flags { | |||
215 | #define lpfc_fip_flag_WORD word0 | 215 | #define lpfc_fip_flag_WORD word0 |
216 | }; | 216 | }; |
217 | 217 | ||
218 | struct sli4_bls_acc { | 218 | struct sli4_bls_rsp { |
219 | uint32_t word0_rsvd; /* Word0 must be reserved */ | 219 | uint32_t word0_rsvd; /* Word0 must be reserved */ |
220 | uint32_t word1; | 220 | uint32_t word1; |
221 | #define lpfc_abts_orig_SHIFT 0 | 221 | #define lpfc_abts_orig_SHIFT 0 |
@@ -231,6 +231,16 @@ struct sli4_bls_acc { | |||
231 | #define lpfc_abts_oxid_MASK 0x0000FFFF | 231 | #define lpfc_abts_oxid_MASK 0x0000FFFF |
232 | #define lpfc_abts_oxid_WORD word2 | 232 | #define lpfc_abts_oxid_WORD word2 |
233 | uint32_t word3; | 233 | uint32_t word3; |
234 | #define lpfc_vndr_code_SHIFT 0 | ||
235 | #define lpfc_vndr_code_MASK 0x000000FF | ||
236 | #define lpfc_vndr_code_WORD word3 | ||
237 | #define lpfc_rsn_expln_SHIFT 8 | ||
238 | #define lpfc_rsn_expln_MASK 0x000000FF | ||
239 | #define lpfc_rsn_expln_WORD word3 | ||
240 | #define lpfc_rsn_code_SHIFT 16 | ||
241 | #define lpfc_rsn_code_MASK 0x000000FF | ||
242 | #define lpfc_rsn_code_WORD word3 | ||
243 | |||
234 | uint32_t word4; | 244 | uint32_t word4; |
235 | uint32_t word5_rsvd; /* Word5 must be reserved */ | 245 | uint32_t word5_rsvd; /* Word5 must be reserved */ |
236 | }; | 246 | }; |
@@ -711,21 +721,27 @@ struct lpfc_sli4_cfg_mhdr { | |||
711 | union lpfc_sli4_cfg_shdr { | 721 | union lpfc_sli4_cfg_shdr { |
712 | struct { | 722 | struct { |
713 | uint32_t word6; | 723 | uint32_t word6; |
714 | #define lpfc_mbox_hdr_opcode_SHIFT 0 | 724 | #define lpfc_mbox_hdr_opcode_SHIFT 0 |
715 | #define lpfc_mbox_hdr_opcode_MASK 0x000000FF | 725 | #define lpfc_mbox_hdr_opcode_MASK 0x000000FF |
716 | #define lpfc_mbox_hdr_opcode_WORD word6 | 726 | #define lpfc_mbox_hdr_opcode_WORD word6 |
717 | #define lpfc_mbox_hdr_subsystem_SHIFT 8 | 727 | #define lpfc_mbox_hdr_subsystem_SHIFT 8 |
718 | #define lpfc_mbox_hdr_subsystem_MASK 0x000000FF | 728 | #define lpfc_mbox_hdr_subsystem_MASK 0x000000FF |
719 | #define lpfc_mbox_hdr_subsystem_WORD word6 | 729 | #define lpfc_mbox_hdr_subsystem_WORD word6 |
720 | #define lpfc_mbox_hdr_port_number_SHIFT 16 | 730 | #define lpfc_mbox_hdr_port_number_SHIFT 16 |
721 | #define lpfc_mbox_hdr_port_number_MASK 0x000000FF | 731 | #define lpfc_mbox_hdr_port_number_MASK 0x000000FF |
722 | #define lpfc_mbox_hdr_port_number_WORD word6 | 732 | #define lpfc_mbox_hdr_port_number_WORD word6 |
723 | #define lpfc_mbox_hdr_domain_SHIFT 24 | 733 | #define lpfc_mbox_hdr_domain_SHIFT 24 |
724 | #define lpfc_mbox_hdr_domain_MASK 0x000000FF | 734 | #define lpfc_mbox_hdr_domain_MASK 0x000000FF |
725 | #define lpfc_mbox_hdr_domain_WORD word6 | 735 | #define lpfc_mbox_hdr_domain_WORD word6 |
726 | uint32_t timeout; | 736 | uint32_t timeout; |
727 | uint32_t request_length; | 737 | uint32_t request_length; |
728 | uint32_t reserved9; | 738 | uint32_t word9; |
739 | #define lpfc_mbox_hdr_version_SHIFT 0 | ||
740 | #define lpfc_mbox_hdr_version_MASK 0x000000FF | ||
741 | #define lpfc_mbox_hdr_version_WORD word9 | ||
742 | #define LPFC_Q_CREATE_VERSION_2 2 | ||
743 | #define LPFC_Q_CREATE_VERSION_1 1 | ||
744 | #define LPFC_Q_CREATE_VERSION_0 0 | ||
729 | } request; | 745 | } request; |
730 | struct { | 746 | struct { |
731 | uint32_t word6; | 747 | uint32_t word6; |
@@ -917,9 +933,12 @@ struct cq_context { | |||
917 | #define LPFC_CQ_CNT_512 0x1 | 933 | #define LPFC_CQ_CNT_512 0x1 |
918 | #define LPFC_CQ_CNT_1024 0x2 | 934 | #define LPFC_CQ_CNT_1024 0x2 |
919 | uint32_t word1; | 935 | uint32_t word1; |
920 | #define lpfc_cq_eq_id_SHIFT 22 | 936 | #define lpfc_cq_eq_id_SHIFT 22 /* Version 0 Only */ |
921 | #define lpfc_cq_eq_id_MASK 0x000000FF | 937 | #define lpfc_cq_eq_id_MASK 0x000000FF |
922 | #define lpfc_cq_eq_id_WORD word1 | 938 | #define lpfc_cq_eq_id_WORD word1 |
939 | #define lpfc_cq_eq_id_2_SHIFT 0 /* Version 2 Only */ | ||
940 | #define lpfc_cq_eq_id_2_MASK 0x0000FFFF | ||
941 | #define lpfc_cq_eq_id_2_WORD word1 | ||
923 | uint32_t reserved0; | 942 | uint32_t reserved0; |
924 | uint32_t reserved1; | 943 | uint32_t reserved1; |
925 | }; | 944 | }; |
@@ -929,6 +948,9 @@ struct lpfc_mbx_cq_create { | |||
929 | union { | 948 | union { |
930 | struct { | 949 | struct { |
931 | uint32_t word0; | 950 | uint32_t word0; |
951 | #define lpfc_mbx_cq_create_page_size_SHIFT 16 /* Version 2 Only */ | ||
952 | #define lpfc_mbx_cq_create_page_size_MASK 0x000000FF | ||
953 | #define lpfc_mbx_cq_create_page_size_WORD word0 | ||
932 | #define lpfc_mbx_cq_create_num_pages_SHIFT 0 | 954 | #define lpfc_mbx_cq_create_num_pages_SHIFT 0 |
933 | #define lpfc_mbx_cq_create_num_pages_MASK 0x0000FFFF | 955 | #define lpfc_mbx_cq_create_num_pages_MASK 0x0000FFFF |
934 | #define lpfc_mbx_cq_create_num_pages_WORD word0 | 956 | #define lpfc_mbx_cq_create_num_pages_WORD word0 |
@@ -969,7 +991,7 @@ struct wq_context { | |||
969 | struct lpfc_mbx_wq_create { | 991 | struct lpfc_mbx_wq_create { |
970 | struct mbox_header header; | 992 | struct mbox_header header; |
971 | union { | 993 | union { |
972 | struct { | 994 | struct { /* Version 0 Request */ |
973 | uint32_t word0; | 995 | uint32_t word0; |
974 | #define lpfc_mbx_wq_create_num_pages_SHIFT 0 | 996 | #define lpfc_mbx_wq_create_num_pages_SHIFT 0 |
975 | #define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF | 997 | #define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF |
@@ -979,6 +1001,23 @@ struct lpfc_mbx_wq_create { | |||
979 | #define lpfc_mbx_wq_create_cq_id_WORD word0 | 1001 | #define lpfc_mbx_wq_create_cq_id_WORD word0 |
980 | struct dma_address page[LPFC_MAX_WQ_PAGE]; | 1002 | struct dma_address page[LPFC_MAX_WQ_PAGE]; |
981 | } request; | 1003 | } request; |
1004 | struct { /* Version 1 Request */ | ||
1005 | uint32_t word0; /* Word 0 is the same as in v0 */ | ||
1006 | uint32_t word1; | ||
1007 | #define lpfc_mbx_wq_create_page_size_SHIFT 0 | ||
1008 | #define lpfc_mbx_wq_create_page_size_MASK 0x000000FF | ||
1009 | #define lpfc_mbx_wq_create_page_size_WORD word1 | ||
1010 | #define lpfc_mbx_wq_create_wqe_size_SHIFT 8 | ||
1011 | #define lpfc_mbx_wq_create_wqe_size_MASK 0x0000000F | ||
1012 | #define lpfc_mbx_wq_create_wqe_size_WORD word1 | ||
1013 | #define LPFC_WQ_WQE_SIZE_64 0x5 | ||
1014 | #define LPFC_WQ_WQE_SIZE_128 0x6 | ||
1015 | #define lpfc_mbx_wq_create_wqe_count_SHIFT 16 | ||
1016 | #define lpfc_mbx_wq_create_wqe_count_MASK 0x0000FFFF | ||
1017 | #define lpfc_mbx_wq_create_wqe_count_WORD word1 | ||
1018 | uint32_t word2; | ||
1019 | struct dma_address page[LPFC_MAX_WQ_PAGE-1]; | ||
1020 | } request_1; | ||
982 | struct { | 1021 | struct { |
983 | uint32_t word0; | 1022 | uint32_t word0; |
984 | #define lpfc_mbx_wq_create_q_id_SHIFT 0 | 1023 | #define lpfc_mbx_wq_create_q_id_SHIFT 0 |
@@ -1007,13 +1046,22 @@ struct lpfc_mbx_wq_destroy { | |||
1007 | #define LPFC_DATA_BUF_SIZE 2048 | 1046 | #define LPFC_DATA_BUF_SIZE 2048 |
1008 | struct rq_context { | 1047 | struct rq_context { |
1009 | uint32_t word0; | 1048 | uint32_t word0; |
1010 | #define lpfc_rq_context_rq_size_SHIFT 16 | 1049 | #define lpfc_rq_context_rqe_count_SHIFT 16 /* Version 0 Only */ |
1011 | #define lpfc_rq_context_rq_size_MASK 0x0000000F | 1050 | #define lpfc_rq_context_rqe_count_MASK 0x0000000F |
1012 | #define lpfc_rq_context_rq_size_WORD word0 | 1051 | #define lpfc_rq_context_rqe_count_WORD word0 |
1013 | #define LPFC_RQ_RING_SIZE_512 9 /* 512 entries */ | 1052 | #define LPFC_RQ_RING_SIZE_512 9 /* 512 entries */ |
1014 | #define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */ | 1053 | #define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */ |
1015 | #define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */ | 1054 | #define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */ |
1016 | #define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */ | 1055 | #define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */ |
1056 | #define lpfc_rq_context_rqe_count_1_SHIFT 16 /* Version 1 Only */ | ||
1057 | #define lpfc_rq_context_rqe_count_1_MASK 0x0000FFFF | ||
1058 | #define lpfc_rq_context_rqe_count_1_WORD word0 | ||
1059 | #define lpfc_rq_context_rqe_size_SHIFT 8 /* Version 1 Only */ | ||
1060 | #define lpfc_rq_context_rqe_size_MASK 0x0000000F | ||
1061 | #define lpfc_rq_context_rqe_size_WORD word0 | ||
1062 | #define lpfc_rq_context_page_size_SHIFT 0 /* Version 1 Only */ | ||
1063 | #define lpfc_rq_context_page_size_MASK 0x000000FF | ||
1064 | #define lpfc_rq_context_page_size_WORD word0 | ||
1017 | uint32_t reserved1; | 1065 | uint32_t reserved1; |
1018 | uint32_t word2; | 1066 | uint32_t word2; |
1019 | #define lpfc_rq_context_cq_id_SHIFT 16 | 1067 | #define lpfc_rq_context_cq_id_SHIFT 16 |
@@ -1022,7 +1070,7 @@ struct rq_context { | |||
1022 | #define lpfc_rq_context_buf_size_SHIFT 0 | 1070 | #define lpfc_rq_context_buf_size_SHIFT 0 |
1023 | #define lpfc_rq_context_buf_size_MASK 0x0000FFFF | 1071 | #define lpfc_rq_context_buf_size_MASK 0x0000FFFF |
1024 | #define lpfc_rq_context_buf_size_WORD word2 | 1072 | #define lpfc_rq_context_buf_size_WORD word2 |
1025 | uint32_t reserved3; | 1073 | uint32_t buffer_size; /* Version 1 Only */ |
1026 | }; | 1074 | }; |
1027 | 1075 | ||
1028 | struct lpfc_mbx_rq_create { | 1076 | struct lpfc_mbx_rq_create { |
@@ -1062,16 +1110,16 @@ struct lpfc_mbx_rq_destroy { | |||
1062 | 1110 | ||
1063 | struct mq_context { | 1111 | struct mq_context { |
1064 | uint32_t word0; | 1112 | uint32_t word0; |
1065 | #define lpfc_mq_context_cq_id_SHIFT 22 | 1113 | #define lpfc_mq_context_cq_id_SHIFT 22 /* Version 0 Only */ |
1066 | #define lpfc_mq_context_cq_id_MASK 0x000003FF | 1114 | #define lpfc_mq_context_cq_id_MASK 0x000003FF |
1067 | #define lpfc_mq_context_cq_id_WORD word0 | 1115 | #define lpfc_mq_context_cq_id_WORD word0 |
1068 | #define lpfc_mq_context_count_SHIFT 16 | 1116 | #define lpfc_mq_context_ring_size_SHIFT 16 |
1069 | #define lpfc_mq_context_count_MASK 0x0000000F | 1117 | #define lpfc_mq_context_ring_size_MASK 0x0000000F |
1070 | #define lpfc_mq_context_count_WORD word0 | 1118 | #define lpfc_mq_context_ring_size_WORD word0 |
1071 | #define LPFC_MQ_CNT_16 0x5 | 1119 | #define LPFC_MQ_RING_SIZE_16 0x5 |
1072 | #define LPFC_MQ_CNT_32 0x6 | 1120 | #define LPFC_MQ_RING_SIZE_32 0x6 |
1073 | #define LPFC_MQ_CNT_64 0x7 | 1121 | #define LPFC_MQ_RING_SIZE_64 0x7 |
1074 | #define LPFC_MQ_CNT_128 0x8 | 1122 | #define LPFC_MQ_RING_SIZE_128 0x8 |
1075 | uint32_t word1; | 1123 | uint32_t word1; |
1076 | #define lpfc_mq_context_valid_SHIFT 31 | 1124 | #define lpfc_mq_context_valid_SHIFT 31 |
1077 | #define lpfc_mq_context_valid_MASK 0x00000001 | 1125 | #define lpfc_mq_context_valid_MASK 0x00000001 |
@@ -1105,9 +1153,12 @@ struct lpfc_mbx_mq_create_ext { | |||
1105 | union { | 1153 | union { |
1106 | struct { | 1154 | struct { |
1107 | uint32_t word0; | 1155 | uint32_t word0; |
1108 | #define lpfc_mbx_mq_create_ext_num_pages_SHIFT 0 | 1156 | #define lpfc_mbx_mq_create_ext_num_pages_SHIFT 0 |
1109 | #define lpfc_mbx_mq_create_ext_num_pages_MASK 0x0000FFFF | 1157 | #define lpfc_mbx_mq_create_ext_num_pages_MASK 0x0000FFFF |
1110 | #define lpfc_mbx_mq_create_ext_num_pages_WORD word0 | 1158 | #define lpfc_mbx_mq_create_ext_num_pages_WORD word0 |
1159 | #define lpfc_mbx_mq_create_ext_cq_id_SHIFT 16 /* Version 1 Only */ | ||
1160 | #define lpfc_mbx_mq_create_ext_cq_id_MASK 0x0000FFFF | ||
1161 | #define lpfc_mbx_mq_create_ext_cq_id_WORD word0 | ||
1111 | uint32_t async_evt_bmap; | 1162 | uint32_t async_evt_bmap; |
1112 | #define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK | 1163 | #define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK |
1113 | #define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001 | 1164 | #define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001 |
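
The new lpfc_hw4.h fields above all follow the driver's SHIFT/MASK/WORD triplet convention, which the bf_set()/bf_get() helpers use to pack and extract a bitfield from a named 32-bit word of a structure. A standalone sketch of the same convention operating on a plain uint32_t rather than a struct member; the field names and values (demo_version, demo_cq_id) are made up for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define demo_version_SHIFT   0
    #define demo_version_MASK    0x000000FF
    #define demo_cq_id_SHIFT     16
    #define demo_cq_id_MASK      0x0000FFFF

    #define BF_SET(name, word, val) \
            ((word) = ((word) & ~(name##_MASK << name##_SHIFT)) | \
                      (((val) & name##_MASK) << name##_SHIFT))
    #define BF_GET(name, word) \
            (((word) >> name##_SHIFT) & name##_MASK)

    int main(void)
    {
            uint32_t word9 = 0;

            BF_SET(demo_version, word9, 2);     /* e.g. LPFC_Q_CREATE_VERSION_2 */
            BF_SET(demo_cq_id, word9, 0x01ab);  /* 16-bit id in the upper half  */

            printf("word9 = 0x%08x version=%u cq_id=0x%04x\n",
                   word9,
                   (unsigned)BF_GET(demo_version, word9),
                   (unsigned)BF_GET(demo_cq_id, word9));
            return 0;
    }

Defining versioned fields this way lets version 1/2 queue-create requests reuse the same words without new structure layouts.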
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 35665cfb568..e6ebe516cfb 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2010 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2011 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -507,7 +507,10 @@ lpfc_config_port_post(struct lpfc_hba *phba) | |||
507 | phba->hba_flag &= ~HBA_ERATT_HANDLED; | 507 | phba->hba_flag &= ~HBA_ERATT_HANDLED; |
508 | 508 | ||
509 | /* Enable appropriate host interrupts */ | 509 | /* Enable appropriate host interrupts */ |
510 | status = readl(phba->HCregaddr); | 510 | if (lpfc_readl(phba->HCregaddr, &status)) { |
511 | spin_unlock_irq(&phba->hbalock); | ||
512 | return -EIO; | ||
513 | } | ||
511 | status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; | 514 | status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; |
512 | if (psli->num_rings > 0) | 515 | if (psli->num_rings > 0) |
513 | status |= HC_R0INT_ENA; | 516 | status |= HC_R0INT_ENA; |
@@ -1222,7 +1225,10 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba) | |||
1222 | /* Wait for the ER1 bit to clear.*/ | 1225 | /* Wait for the ER1 bit to clear.*/ |
1223 | while (phba->work_hs & HS_FFER1) { | 1226 | while (phba->work_hs & HS_FFER1) { |
1224 | msleep(100); | 1227 | msleep(100); |
1225 | phba->work_hs = readl(phba->HSregaddr); | 1228 | if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) { |
1229 | phba->work_hs = UNPLUG_ERR ; | ||
1230 | break; | ||
1231 | } | ||
1226 | /* If driver is unloading let the worker thread continue */ | 1232 | /* If driver is unloading let the worker thread continue */ |
1227 | if (phba->pport->load_flag & FC_UNLOADING) { | 1233 | if (phba->pport->load_flag & FC_UNLOADING) { |
1228 | phba->work_hs = 0; | 1234 | phba->work_hs = 0; |
@@ -4474,6 +4480,7 @@ lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) | |||
4474 | { | 4480 | { |
4475 | phba->lpfc_hba_init_link = lpfc_hba_init_link; | 4481 | phba->lpfc_hba_init_link = lpfc_hba_init_link; |
4476 | phba->lpfc_hba_down_link = lpfc_hba_down_link; | 4482 | phba->lpfc_hba_down_link = lpfc_hba_down_link; |
4483 | phba->lpfc_selective_reset = lpfc_selective_reset; | ||
4477 | switch (dev_grp) { | 4484 | switch (dev_grp) { |
4478 | case LPFC_PCI_DEV_LP: | 4485 | case LPFC_PCI_DEV_LP: |
4479 | phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; | 4486 | phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; |
@@ -5385,13 +5392,16 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba) | |||
5385 | int i, port_error = 0; | 5392 | int i, port_error = 0; |
5386 | uint32_t if_type; | 5393 | uint32_t if_type; |
5387 | 5394 | ||
5395 | memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); | ||
5396 | memset(®_data, 0, sizeof(reg_data)); | ||
5388 | if (!phba->sli4_hba.PSMPHRregaddr) | 5397 | if (!phba->sli4_hba.PSMPHRregaddr) |
5389 | return -ENODEV; | 5398 | return -ENODEV; |
5390 | 5399 | ||
5391 | /* Wait up to 30 seconds for the SLI Port POST done and ready */ | 5400 | /* Wait up to 30 seconds for the SLI Port POST done and ready */ |
5392 | for (i = 0; i < 3000; i++) { | 5401 | for (i = 0; i < 3000; i++) { |
5393 | portsmphr_reg.word0 = readl(phba->sli4_hba.PSMPHRregaddr); | 5402 | if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, |
5394 | if (bf_get(lpfc_port_smphr_perr, &portsmphr_reg)) { | 5403 | &portsmphr_reg.word0) || |
5404 | (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) { | ||
5395 | /* Port has a fatal POST error, break out */ | 5405 | /* Port has a fatal POST error, break out */ |
5396 | port_error = -ENODEV; | 5406 | port_error = -ENODEV; |
5397 | break; | 5407 | break; |
@@ -5472,9 +5482,9 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba) | |||
5472 | break; | 5482 | break; |
5473 | case LPFC_SLI_INTF_IF_TYPE_2: | 5483 | case LPFC_SLI_INTF_IF_TYPE_2: |
5474 | /* Final checks. The port status should be clean. */ | 5484 | /* Final checks. The port status should be clean. */ |
5475 | reg_data.word0 = | 5485 | if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, |
5476 | readl(phba->sli4_hba.u.if_type2.STATUSregaddr); | 5486 | ®_data.word0) || |
5477 | if (bf_get(lpfc_sliport_status_err, ®_data)) { | 5487 | bf_get(lpfc_sliport_status_err, ®_data)) { |
5478 | phba->work_status[0] = | 5488 | phba->work_status[0] = |
5479 | readl(phba->sli4_hba.u.if_type2. | 5489 | readl(phba->sli4_hba.u.if_type2. |
5480 | ERR1regaddr); | 5490 | ERR1regaddr); |
@@ -6760,9 +6770,11 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) | |||
6760 | * the loop again. | 6770 | * the loop again. |
6761 | */ | 6771 | */ |
6762 | for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) { | 6772 | for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) { |
6763 | reg_data.word0 = | 6773 | if (lpfc_readl(phba->sli4_hba.u.if_type2. |
6764 | readl(phba->sli4_hba.u.if_type2. | 6774 | STATUSregaddr, ®_data.word0)) { |
6765 | STATUSregaddr); | 6775 | rc = -ENODEV; |
6776 | break; | ||
6777 | } | ||
6766 | if (bf_get(lpfc_sliport_status_rdy, ®_data)) | 6778 | if (bf_get(lpfc_sliport_status_rdy, ®_data)) |
6767 | break; | 6779 | break; |
6768 | if (bf_get(lpfc_sliport_status_rn, ®_data)) { | 6780 | if (bf_get(lpfc_sliport_status_rn, ®_data)) { |
@@ -6783,8 +6795,11 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) | |||
6783 | } | 6795 | } |
6784 | 6796 | ||
6785 | /* Detect any port errors. */ | 6797 | /* Detect any port errors. */ |
6786 | reg_data.word0 = readl(phba->sli4_hba.u.if_type2. | 6798 | if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, |
6787 | STATUSregaddr); | 6799 | ®_data.word0)) { |
6800 | rc = -ENODEV; | ||
6801 | break; | ||
6802 | } | ||
6788 | if ((bf_get(lpfc_sliport_status_err, ®_data)) || | 6803 | if ((bf_get(lpfc_sliport_status_err, ®_data)) || |
6789 | (rdy_chk >= 1000)) { | 6804 | (rdy_chk >= 1000)) { |
6790 | phba->work_status[0] = readl( | 6805 | phba->work_status[0] = readl( |
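
The lpfc_init.c hunks keep the existing bounded polling loops (up to 3000 and 1000 iterations) but abort them with -ENODEV as soon as lpfc_readl() says the device is gone, instead of spinning on all-ones data. A compact sketch of that poll-with-early-abort structure; the register access is simulated and the names (read_status, wait_port_ready, RDY_BIT) are illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    #define RDY_BIT   0x1
    #define MAX_POLLS 1000

    static int polls_until_ready = 3;           /* pretend hardware */

    static int read_status(uint32_t *val)       /* 0 on success, -1 if unplugged */
    {
            if (polls_until_ready-- > 0)
                    *val = 0;                   /* not ready yet */
            else
                    *val = RDY_BIT;
            return 0;
    }

    static int wait_port_ready(void)
    {
            uint32_t status;
            int i;

            for (i = 0; i < MAX_POLLS; i++) {
                    if (read_status(&status))
                            return -1;          /* device unplugged: stop polling */
                    if (status & RDY_BIT)
                            return 0;           /* ready */
                    /* the driver sleeps between polls here */
            }
            return -1;                          /* timed out */
    }

    int main(void)
    {
            printf("port %s\n", wait_port_ready() == 0 ? "ready" : "failed");
            return 0;
    }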
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index bf34178b80b..2b962b020cf 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2009 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2011 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -1514,10 +1514,11 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
1514 | struct scatterlist *sgpe = NULL; /* s/g prot entry */ | 1514 | struct scatterlist *sgpe = NULL; /* s/g prot entry */ |
1515 | struct lpfc_pde5 *pde5 = NULL; | 1515 | struct lpfc_pde5 *pde5 = NULL; |
1516 | struct lpfc_pde6 *pde6 = NULL; | 1516 | struct lpfc_pde6 *pde6 = NULL; |
1517 | struct ulp_bde64 *prot_bde = NULL; | 1517 | struct lpfc_pde7 *pde7 = NULL; |
1518 | dma_addr_t dataphysaddr, protphysaddr; | 1518 | dma_addr_t dataphysaddr, protphysaddr; |
1519 | unsigned short curr_data = 0, curr_prot = 0; | 1519 | unsigned short curr_data = 0, curr_prot = 0; |
1520 | unsigned int split_offset, protgroup_len; | 1520 | unsigned int split_offset; |
1521 | unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder; | ||
1521 | unsigned int protgrp_blks, protgrp_bytes; | 1522 | unsigned int protgrp_blks, protgrp_bytes; |
1522 | unsigned int remainder, subtotal; | 1523 | unsigned int remainder, subtotal; |
1523 | int status; | 1524 | int status; |
@@ -1585,23 +1586,33 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
1585 | bpl++; | 1586 | bpl++; |
1586 | 1587 | ||
1587 | /* setup the first BDE that points to protection buffer */ | 1588 | /* setup the first BDE that points to protection buffer */ |
1588 | prot_bde = (struct ulp_bde64 *) bpl; | 1589 | protphysaddr = sg_dma_address(sgpe) + protgroup_offset; |
1589 | protphysaddr = sg_dma_address(sgpe); | 1590 | protgroup_len = sg_dma_len(sgpe) - protgroup_offset; |
1590 | prot_bde->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr)); | ||
1591 | prot_bde->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr)); | ||
1592 | protgroup_len = sg_dma_len(sgpe); | ||
1593 | 1591 | ||
1594 | /* must be integer multiple of the DIF block length */ | 1592 | /* must be integer multiple of the DIF block length */ |
1595 | BUG_ON(protgroup_len % 8); | 1593 | BUG_ON(protgroup_len % 8); |
1596 | 1594 | ||
1595 | pde7 = (struct lpfc_pde7 *) bpl; | ||
1596 | memset(pde7, 0, sizeof(struct lpfc_pde7)); | ||
1597 | bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR); | ||
1598 | |||
1599 | pde7->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr)); | ||
1600 | pde7->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr)); | ||
1601 | |||
1597 | protgrp_blks = protgroup_len / 8; | 1602 | protgrp_blks = protgroup_len / 8; |
1598 | protgrp_bytes = protgrp_blks * blksize; | 1603 | protgrp_bytes = protgrp_blks * blksize; |
1599 | 1604 | ||
1600 | prot_bde->tus.f.bdeSize = protgroup_len; | 1605 | /* check if this pde is crossing the 4K boundary; if so split */ |
1601 | prot_bde->tus.f.bdeFlags = LPFC_PDE7_DESCRIPTOR; | 1606 | if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) { |
1602 | prot_bde->tus.w = le32_to_cpu(bpl->tus.w); | 1607 | protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff); |
1608 | protgroup_offset += protgroup_remainder; | ||
1609 | protgrp_blks = protgroup_remainder / 8; | ||
1610 | protgrp_bytes = protgroup_remainder * blksize; | ||
1611 | } else { | ||
1612 | protgroup_offset = 0; | ||
1613 | curr_prot++; | ||
1614 | } | ||
1603 | 1615 | ||
1604 | curr_prot++; | ||
1605 | num_bde++; | 1616 | num_bde++; |
1606 | 1617 | ||
1607 | /* setup BDE's for data blocks associated with DIF data */ | 1618 | /* setup BDE's for data blocks associated with DIF data */ |
@@ -1653,6 +1664,13 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
1653 | 1664 | ||
1654 | } | 1665 | } |
1655 | 1666 | ||
1667 | if (protgroup_offset) { | ||
1668 | /* update the reference tag */ | ||
1669 | reftag += protgrp_blks; | ||
1670 | bpl++; | ||
1671 | continue; | ||
1672 | } | ||
1673 | |||
1656 | /* are we done ? */ | 1674 | /* are we done ? */ |
1657 | if (curr_prot == protcnt) { | 1675 | if (curr_prot == protcnt) { |
1658 | alldone = 1; | 1676 | alldone = 1; |
@@ -1675,6 +1693,7 @@ out: | |||
1675 | 1693 | ||
1676 | return num_bde; | 1694 | return num_bde; |
1677 | } | 1695 | } |
1696 | |||
1678 | /* | 1697 | /* |
1679 | * Given a SCSI command that supports DIF, determine composition of protection | 1698 | * Given a SCSI command that supports DIF, determine composition of protection |
1680 | * groups involved in setting up buffer lists | 1699 | * groups involved in setting up buffer lists |
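
The lpfc_scsi.c hunk above replaces the single protection BDE with a PDE7 descriptor and, as its new comment says, splits a protection group whose buffer would cross a 4K boundary, carrying the remainder forward via protgroup_offset. A small sketch of the boundary arithmetic for a single pass; it uses invented names and plain integers rather than the driver's scatterlist and PDE structures:

    #include <stdint.h>
    #include <stdio.h>

    struct split {
            uint32_t first_len;     /* bytes usable in this descriptor   */
            uint32_t carry_offset;  /* offset to resume from next time   */
    };

    static struct split split_at_4k(uint64_t addr, uint32_t len)
    {
            struct split s = { .first_len = len, .carry_offset = 0 };
            uint32_t room = 0x1000 - (uint32_t)(addr & 0xfff);

            if (len > room) {               /* crosses the boundary: split */
                    s.first_len = room;
                    s.carry_offset = room;  /* next descriptor starts here */
            }
            return s;
    }

    int main(void)
    {
            struct split s = split_at_4k(0x12340f80, 0x200);

            /* prints: first=128 bytes, resume at offset 128 */
            printf("first=%u bytes, resume at offset %u\n",
                   s.first_len, s.carry_offset);
            return 0;
    }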
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 2ee0374a990..4746dcd756d 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2009 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2011 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -3477,7 +3477,8 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) | |||
3477 | int retval = 0; | 3477 | int retval = 0; |
3478 | 3478 | ||
3479 | /* Read the HBA Host Status Register */ | 3479 | /* Read the HBA Host Status Register */ |
3480 | status = readl(phba->HSregaddr); | 3480 | if (lpfc_readl(phba->HSregaddr, &status)) |
3481 | return 1; | ||
3481 | 3482 | ||
3482 | /* | 3483 | /* |
3483 | * Check status register every 100ms for 5 retries, then every | 3484 | * Check status register every 100ms for 5 retries, then every |
@@ -3502,7 +3503,10 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) | |||
3502 | lpfc_sli_brdrestart(phba); | 3503 | lpfc_sli_brdrestart(phba); |
3503 | } | 3504 | } |
3504 | /* Read the HBA Host Status Register */ | 3505 | /* Read the HBA Host Status Register */ |
3505 | status = readl(phba->HSregaddr); | 3506 | if (lpfc_readl(phba->HSregaddr, &status)) { |
3507 | retval = 1; | ||
3508 | break; | ||
3509 | } | ||
3506 | } | 3510 | } |
3507 | 3511 | ||
3508 | /* Check to see if any errors occurred during init */ | 3512 | /* Check to see if any errors occurred during init */ |
@@ -3584,7 +3588,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba) | |||
3584 | uint32_t __iomem *resp_buf; | 3588 | uint32_t __iomem *resp_buf; |
3585 | uint32_t __iomem *mbox_buf; | 3589 | uint32_t __iomem *mbox_buf; |
3586 | volatile uint32_t mbox; | 3590 | volatile uint32_t mbox; |
3587 | uint32_t hc_copy; | 3591 | uint32_t hc_copy, ha_copy, resp_data; |
3588 | int i; | 3592 | int i; |
3589 | uint8_t hdrtype; | 3593 | uint8_t hdrtype; |
3590 | 3594 | ||
@@ -3601,12 +3605,15 @@ void lpfc_reset_barrier(struct lpfc_hba *phba) | |||
3601 | resp_buf = phba->MBslimaddr; | 3605 | resp_buf = phba->MBslimaddr; |
3602 | 3606 | ||
3603 | /* Disable the error attention */ | 3607 | /* Disable the error attention */ |
3604 | hc_copy = readl(phba->HCregaddr); | 3608 | if (lpfc_readl(phba->HCregaddr, &hc_copy)) |
3609 | return; | ||
3605 | writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); | 3610 | writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); |
3606 | readl(phba->HCregaddr); /* flush */ | 3611 | readl(phba->HCregaddr); /* flush */ |
3607 | phba->link_flag |= LS_IGNORE_ERATT; | 3612 | phba->link_flag |= LS_IGNORE_ERATT; |
3608 | 3613 | ||
3609 | if (readl(phba->HAregaddr) & HA_ERATT) { | 3614 | if (lpfc_readl(phba->HAregaddr, &ha_copy)) |
3615 | return; | ||
3616 | if (ha_copy & HA_ERATT) { | ||
3610 | /* Clear Chip error bit */ | 3617 | /* Clear Chip error bit */ |
3611 | writel(HA_ERATT, phba->HAregaddr); | 3618 | writel(HA_ERATT, phba->HAregaddr); |
3612 | phba->pport->stopped = 1; | 3619 | phba->pport->stopped = 1; |
@@ -3620,11 +3627,18 @@ void lpfc_reset_barrier(struct lpfc_hba *phba) | |||
3620 | mbox_buf = phba->MBslimaddr; | 3627 | mbox_buf = phba->MBslimaddr; |
3621 | writel(mbox, mbox_buf); | 3628 | writel(mbox, mbox_buf); |
3622 | 3629 | ||
3623 | for (i = 0; | 3630 | for (i = 0; i < 50; i++) { |
3624 | readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++) | 3631 | if (lpfc_readl((resp_buf + 1), &resp_data)) |
3625 | mdelay(1); | 3632 | return; |
3626 | 3633 | if (resp_data != ~(BARRIER_TEST_PATTERN)) | |
3627 | if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) { | 3634 | mdelay(1); |
3635 | else | ||
3636 | break; | ||
3637 | } | ||
3638 | resp_data = 0; | ||
3639 | if (lpfc_readl((resp_buf + 1), &resp_data)) | ||
3640 | return; | ||
3641 | if (resp_data != ~(BARRIER_TEST_PATTERN)) { | ||
3628 | if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || | 3642 | if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || |
3629 | phba->pport->stopped) | 3643 | phba->pport->stopped) |
3630 | goto restore_hc; | 3644 | goto restore_hc; |
@@ -3633,13 +3647,26 @@ void lpfc_reset_barrier(struct lpfc_hba *phba) | |||
3633 | } | 3647 | } |
3634 | 3648 | ||
3635 | ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; | 3649 | ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; |
3636 | for (i = 0; readl(resp_buf) != mbox && i < 500; i++) | 3650 | resp_data = 0; |
3637 | mdelay(1); | 3651 | for (i = 0; i < 500; i++) { |
3652 | if (lpfc_readl(resp_buf, &resp_data)) | ||
3653 | return; | ||
3654 | if (resp_data != mbox) | ||
3655 | mdelay(1); | ||
3656 | else | ||
3657 | break; | ||
3658 | } | ||
3638 | 3659 | ||
3639 | clear_errat: | 3660 | clear_errat: |
3640 | 3661 | ||
3641 | while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500) | 3662 | while (++i < 500) { |
3642 | mdelay(1); | 3663 | if (lpfc_readl(phba->HAregaddr, &ha_copy)) |
3664 | return; | ||
3665 | if (!(ha_copy & HA_ERATT)) | ||
3666 | mdelay(1); | ||
3667 | else | ||
3668 | break; | ||
3669 | } | ||
3643 | 3670 | ||
3644 | if (readl(phba->HAregaddr) & HA_ERATT) { | 3671 | if (readl(phba->HAregaddr) & HA_ERATT) { |
3645 | writel(HA_ERATT, phba->HAregaddr); | 3672 | writel(HA_ERATT, phba->HAregaddr); |
@@ -3686,7 +3713,11 @@ lpfc_sli_brdkill(struct lpfc_hba *phba) | |||
3686 | 3713 | ||
3687 | /* Disable the error attention */ | 3714 | /* Disable the error attention */ |
3688 | spin_lock_irq(&phba->hbalock); | 3715 | spin_lock_irq(&phba->hbalock); |
3689 | status = readl(phba->HCregaddr); | 3716 | if (lpfc_readl(phba->HCregaddr, &status)) { |
3717 | spin_unlock_irq(&phba->hbalock); | ||
3718 | mempool_free(pmb, phba->mbox_mem_pool); | ||
3719 | return 1; | ||
3720 | } | ||
3690 | status &= ~HC_ERINT_ENA; | 3721 | status &= ~HC_ERINT_ENA; |
3691 | writel(status, phba->HCregaddr); | 3722 | writel(status, phba->HCregaddr); |
3692 | readl(phba->HCregaddr); /* flush */ | 3723 | readl(phba->HCregaddr); /* flush */ |
@@ -3720,11 +3751,12 @@ lpfc_sli_brdkill(struct lpfc_hba *phba) | |||
3720 | * 3 seconds we still set HBA_ERROR state because the status of the | 3751 | * 3 seconds we still set HBA_ERROR state because the status of the |
3721 | * board is now undefined. | 3752 | * board is now undefined. |
3722 | */ | 3753 | */ |
3723 | ha_copy = readl(phba->HAregaddr); | 3754 | if (lpfc_readl(phba->HAregaddr, &ha_copy)) |
3724 | 3755 | return 1; | |
3725 | while ((i++ < 30) && !(ha_copy & HA_ERATT)) { | 3756 | while ((i++ < 30) && !(ha_copy & HA_ERATT)) { |
3726 | mdelay(100); | 3757 | mdelay(100); |
3727 | ha_copy = readl(phba->HAregaddr); | 3758 | if (lpfc_readl(phba->HAregaddr, &ha_copy)) |
3759 | return 1; | ||
3728 | } | 3760 | } |
3729 | 3761 | ||
3730 | del_timer_sync(&psli->mbox_tmo); | 3762 | del_timer_sync(&psli->mbox_tmo); |
@@ -4018,7 +4050,8 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba) | |||
4018 | uint32_t status, i = 0; | 4050 | uint32_t status, i = 0; |
4019 | 4051 | ||
4020 | /* Read the HBA Host Status Register */ | 4052 | /* Read the HBA Host Status Register */ |
4021 | status = readl(phba->HSregaddr); | 4053 | if (lpfc_readl(phba->HSregaddr, &status)) |
4054 | return -EIO; | ||
4022 | 4055 | ||
4023 | /* Check status register to see what current state is */ | 4056 | /* Check status register to see what current state is */ |
4024 | i = 0; | 4057 | i = 0; |
@@ -4073,7 +4106,8 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba) | |||
4073 | lpfc_sli_brdrestart(phba); | 4106 | lpfc_sli_brdrestart(phba); |
4074 | } | 4107 | } |
4075 | /* Read the HBA Host Status Register */ | 4108 | /* Read the HBA Host Status Register */ |
4076 | status = readl(phba->HSregaddr); | 4109 | if (lpfc_readl(phba->HSregaddr, &status)) |
4110 | return -EIO; | ||
4077 | } | 4111 | } |
4078 | 4112 | ||
4079 | /* Check to see if any errors occurred during init */ | 4113 | /* Check to see if any errors occurred during init */ |
@@ -5136,7 +5170,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, | |||
5136 | MAILBOX_t *mb; | 5170 | MAILBOX_t *mb; |
5137 | struct lpfc_sli *psli = &phba->sli; | 5171 | struct lpfc_sli *psli = &phba->sli; |
5138 | uint32_t status, evtctr; | 5172 | uint32_t status, evtctr; |
5139 | uint32_t ha_copy; | 5173 | uint32_t ha_copy, hc_copy; |
5140 | int i; | 5174 | int i; |
5141 | unsigned long timeout; | 5175 | unsigned long timeout; |
5142 | unsigned long drvr_flag = 0; | 5176 | unsigned long drvr_flag = 0; |
@@ -5202,15 +5236,17 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, | |||
5202 | goto out_not_finished; | 5236 | goto out_not_finished; |
5203 | } | 5237 | } |
5204 | 5238 | ||
5205 | if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && | 5239 | if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { |
5206 | !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { | 5240 | if (lpfc_readl(phba->HCregaddr, &hc_copy) || |
5207 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); | 5241 | !(hc_copy & HC_MBINT_ENA)) { |
5208 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | 5242 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); |
5243 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | ||
5209 | "(%d):2528 Mailbox command x%x cannot " | 5244 | "(%d):2528 Mailbox command x%x cannot " |
5210 | "issue Data: x%x x%x\n", | 5245 | "issue Data: x%x x%x\n", |
5211 | pmbox->vport ? pmbox->vport->vpi : 0, | 5246 | pmbox->vport ? pmbox->vport->vpi : 0, |
5212 | pmbox->u.mb.mbxCommand, psli->sli_flag, flag); | 5247 | pmbox->u.mb.mbxCommand, psli->sli_flag, flag); |
5213 | goto out_not_finished; | 5248 | goto out_not_finished; |
5249 | } | ||
5214 | } | 5250 | } |
5215 | 5251 | ||
5216 | if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { | 5252 | if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { |
@@ -5408,11 +5444,19 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, | |||
5408 | word0 = le32_to_cpu(word0); | 5444 | word0 = le32_to_cpu(word0); |
5409 | } else { | 5445 | } else { |
5410 | /* First read mbox status word */ | 5446 | /* First read mbox status word */ |
5411 | word0 = readl(phba->MBslimaddr); | 5447 | if (lpfc_readl(phba->MBslimaddr, &word0)) { |
5448 | spin_unlock_irqrestore(&phba->hbalock, | ||
5449 | drvr_flag); | ||
5450 | goto out_not_finished; | ||
5451 | } | ||
5412 | } | 5452 | } |
5413 | 5453 | ||
5414 | /* Read the HBA Host Attention Register */ | 5454 | /* Read the HBA Host Attention Register */ |
5415 | ha_copy = readl(phba->HAregaddr); | 5455 | if (lpfc_readl(phba->HAregaddr, &ha_copy)) { |
5456 | spin_unlock_irqrestore(&phba->hbalock, | ||
5457 | drvr_flag); | ||
5458 | goto out_not_finished; | ||
5459 | } | ||
5416 | timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, | 5460 | timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, |
5417 | mb->mbxCommand) * | 5461 | mb->mbxCommand) * |
5418 | 1000) + jiffies; | 5462 | 1000) + jiffies; |
@@ -5463,7 +5507,11 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, | |||
5463 | word0 = readl(phba->MBslimaddr); | 5507 | word0 = readl(phba->MBslimaddr); |
5464 | } | 5508 | } |
5465 | /* Read the HBA Host Attention Register */ | 5509 | /* Read the HBA Host Attention Register */ |
5466 | ha_copy = readl(phba->HAregaddr); | 5510 | if (lpfc_readl(phba->HAregaddr, &ha_copy)) { |
5511 | spin_unlock_irqrestore(&phba->hbalock, | ||
5512 | drvr_flag); | ||
5513 | goto out_not_finished; | ||
5514 | } | ||
5467 | } | 5515 | } |
5468 | 5516 | ||
5469 | if (psli->sli_flag & LPFC_SLI_ACTIVE) { | 5517 | if (psli->sli_flag & LPFC_SLI_ACTIVE) { |
@@ -6263,7 +6311,6 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, | |||
6263 | bf_set(lpfc_sli4_sge_last, sgl, 1); | 6311 | bf_set(lpfc_sli4_sge_last, sgl, 1); |
6264 | else | 6312 | else |
6265 | bf_set(lpfc_sli4_sge_last, sgl, 0); | 6313 | bf_set(lpfc_sli4_sge_last, sgl, 0); |
6266 | sgl->word2 = cpu_to_le32(sgl->word2); | ||
6267 | /* swap the size field back to the cpu so we | 6314 | /* swap the size field back to the cpu so we |
6268 | * can assign it to the sgl. | 6315 | * can assign it to the sgl. |
6269 | */ | 6316 | */ |
@@ -6283,6 +6330,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, | |||
6283 | bf_set(lpfc_sli4_sge_offset, sgl, offset); | 6330 | bf_set(lpfc_sli4_sge_offset, sgl, offset); |
6284 | offset += bde.tus.f.bdeSize; | 6331 | offset += bde.tus.f.bdeSize; |
6285 | } | 6332 | } |
6333 | sgl->word2 = cpu_to_le32(sgl->word2); | ||
6286 | bpl++; | 6334 | bpl++; |
6287 | sgl++; | 6335 | sgl++; |
6288 | } | 6336 | } |
@@ -6528,9 +6576,9 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
6528 | numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / | 6576 | numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / |
6529 | sizeof(struct ulp_bde64); | 6577 | sizeof(struct ulp_bde64); |
6530 | for (i = 0; i < numBdes; i++) { | 6578 | for (i = 0; i < numBdes; i++) { |
6531 | if (bpl[i].tus.f.bdeFlags != BUFF_TYPE_BDE_64) | ||
6532 | break; | ||
6533 | bde.tus.w = le32_to_cpu(bpl[i].tus.w); | 6579 | bde.tus.w = le32_to_cpu(bpl[i].tus.w); |
6580 | if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) | ||
6581 | break; | ||
6534 | xmit_len += bde.tus.f.bdeSize; | 6582 | xmit_len += bde.tus.f.bdeSize; |
6535 | } | 6583 | } |
6536 | /* word3 iocb=IO_TAG wqe=request_payload_len */ | 6584 | /* word3 iocb=IO_TAG wqe=request_payload_len */ |
@@ -6620,15 +6668,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
6620 | xritag = 0; | 6668 | xritag = 0; |
6621 | break; | 6669 | break; |
6622 | case CMD_XMIT_BLS_RSP64_CX: | 6670 | case CMD_XMIT_BLS_RSP64_CX: |
6623 | /* As BLS ABTS-ACC WQE is very different from other WQEs, | 6671 | /* As BLS ABTS RSP WQE is very different from other WQEs, |
6624 | * we re-construct this WQE here based on information in | 6672 | * we re-construct this WQE here based on information in |
6625 | * iocbq from scratch. | 6673 | * iocbq from scratch. |
6626 | */ | 6674 | */ |
6627 | memset(wqe, 0, sizeof(union lpfc_wqe)); | 6675 | memset(wqe, 0, sizeof(union lpfc_wqe)); |
6628 | /* OX_ID is invariable to who sent ABTS to CT exchange */ | 6676 | /* OX_ID is invariable to who sent ABTS to CT exchange */ |
6629 | bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, | 6677 | bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, |
6630 | bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_acc)); | 6678 | bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp)); |
6631 | if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_acc) == | 6679 | if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) == |
6632 | LPFC_ABTS_UNSOL_INT) { | 6680 | LPFC_ABTS_UNSOL_INT) { |
6633 | /* ABTS sent by initiator to CT exchange, the | 6681 | /* ABTS sent by initiator to CT exchange, the |
6634 | * RX_ID field will be filled with the newly | 6682 | * RX_ID field will be filled with the newly |
@@ -6642,7 +6690,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
6642 | * RX_ID from ABTS. | 6690 | * RX_ID from ABTS. |
6643 | */ | 6691 | */ |
6644 | bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, | 6692 | bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, |
6645 | bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_acc)); | 6693 | bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp)); |
6646 | } | 6694 | } |
6647 | bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); | 6695 | bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); |
6648 | bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); | 6696 | bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); |
@@ -6653,6 +6701,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
6653 | LPFC_WQE_LENLOC_NONE); | 6701 | LPFC_WQE_LENLOC_NONE); |
6654 | /* Overwrite the pre-set command type with OTHER_COMMAND */ | 6702 | /* Overwrite the pre-set command type with OTHER_COMMAND */ |
6655 | command_type = OTHER_COMMAND; | 6703 | command_type = OTHER_COMMAND; |
6704 | if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) { | ||
6705 | bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp, | ||
6706 | bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp)); | ||
6707 | bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp, | ||
6708 | bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp)); | ||
6709 | bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp, | ||
6710 | bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp)); | ||
6711 | } | ||
6712 | |||
6656 | break; | 6713 | break; |
6657 | case CMD_XRI_ABORTED_CX: | 6714 | case CMD_XRI_ABORTED_CX: |
6658 | case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ | 6715 | case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ |
@@ -6701,7 +6758,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, | |||
6701 | 6758 | ||
6702 | if (piocb->sli4_xritag == NO_XRI) { | 6759 | if (piocb->sli4_xritag == NO_XRI) { |
6703 | if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || | 6760 | if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || |
6704 | piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) | 6761 | piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN || |
6762 | piocb->iocb.ulpCommand == CMD_XMIT_BLS_RSP64_CX) | ||
6705 | sglq = NULL; | 6763 | sglq = NULL; |
6706 | else { | 6764 | else { |
6707 | if (pring->txq_cnt) { | 6765 | if (pring->txq_cnt) { |
@@ -8194,7 +8252,8 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, | |||
8194 | piocb->iocb_flag &= ~LPFC_IO_WAKE; | 8252 | piocb->iocb_flag &= ~LPFC_IO_WAKE; |
8195 | 8253 | ||
8196 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { | 8254 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { |
8197 | creg_val = readl(phba->HCregaddr); | 8255 | if (lpfc_readl(phba->HCregaddr, &creg_val)) |
8256 | return IOCB_ERROR; | ||
8198 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); | 8257 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); |
8199 | writel(creg_val, phba->HCregaddr); | 8258 | writel(creg_val, phba->HCregaddr); |
8200 | readl(phba->HCregaddr); /* flush */ | 8259 | readl(phba->HCregaddr); /* flush */ |
@@ -8236,7 +8295,8 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, | |||
8236 | } | 8295 | } |
8237 | 8296 | ||
8238 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { | 8297 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { |
8239 | creg_val = readl(phba->HCregaddr); | 8298 | if (lpfc_readl(phba->HCregaddr, &creg_val)) |
8299 | return IOCB_ERROR; | ||
8240 | creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); | 8300 | creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); |
8241 | writel(creg_val, phba->HCregaddr); | 8301 | writel(creg_val, phba->HCregaddr); |
8242 | readl(phba->HCregaddr); /* flush */ | 8302 | readl(phba->HCregaddr); /* flush */ |
@@ -8387,10 +8447,13 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba) | |||
8387 | uint32_t ha_copy; | 8447 | uint32_t ha_copy; |
8388 | 8448 | ||
8389 | /* Read chip Host Attention (HA) register */ | 8449 | /* Read chip Host Attention (HA) register */ |
8390 | ha_copy = readl(phba->HAregaddr); | 8450 | if (lpfc_readl(phba->HAregaddr, &ha_copy)) |
8451 | goto unplug_err; | ||
8452 | |||
8391 | if (ha_copy & HA_ERATT) { | 8453 | if (ha_copy & HA_ERATT) { |
8392 | /* Read host status register to retrieve error event */ | 8454 | /* Read host status register to retrieve error event */ |
8393 | lpfc_sli_read_hs(phba); | 8455 | if (lpfc_sli_read_hs(phba)) |
8456 | goto unplug_err; | ||
8394 | 8457 | ||
8395 | /* Check if there is a deferred error condition is active */ | 8458 | /* Check if there is a deferred error condition is active */ |
8396 | if ((HS_FFER1 & phba->work_hs) && | 8459 | if ((HS_FFER1 & phba->work_hs) && |
@@ -8409,6 +8472,15 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba) | |||
8409 | return 1; | 8472 | return 1; |
8410 | } | 8473 | } |
8411 | return 0; | 8474 | return 0; |
8475 | |||
8476 | unplug_err: | ||
8477 | /* Set the driver HS work bitmap */ | ||
8478 | phba->work_hs |= UNPLUG_ERR; | ||
8479 | /* Set the driver HA work bitmap */ | ||
8480 | phba->work_ha |= HA_ERATT; | ||
8481 | /* Indicate polling handles this ERATT */ | ||
8482 | phba->hba_flag |= HBA_ERATT_HANDLED; | ||
8483 | return 1; | ||
8412 | } | 8484 | } |
8413 | 8485 | ||
8414 | /** | 8486 | /** |
@@ -8436,8 +8508,15 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba) | |||
8436 | if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); | 8508 | if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); |
8437 | switch (if_type) { | 8509 | switch (if_type) { |
8438 | case LPFC_SLI_INTF_IF_TYPE_0: | 8510 | case LPFC_SLI_INTF_IF_TYPE_0: |
8439 | uerr_sta_lo = readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); | 8511 | if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr, |
8440 | uerr_sta_hi = readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); | 8512 | &uerr_sta_lo) || |
8513 | lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr, | ||
8514 | &uerr_sta_hi)) { | ||
8515 | phba->work_hs |= UNPLUG_ERR; | ||
8516 | phba->work_ha |= HA_ERATT; | ||
8517 | phba->hba_flag |= HBA_ERATT_HANDLED; | ||
8518 | return 1; | ||
8519 | } | ||
8441 | if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || | 8520 | if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || |
8442 | (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { | 8521 | (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { |
8443 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 8522 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
@@ -8456,9 +8535,15 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba) | |||
8456 | } | 8535 | } |
8457 | break; | 8536 | break; |
8458 | case LPFC_SLI_INTF_IF_TYPE_2: | 8537 | case LPFC_SLI_INTF_IF_TYPE_2: |
8459 | portstat_reg.word0 = | 8538 | if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, |
8460 | readl(phba->sli4_hba.u.if_type2.STATUSregaddr); | 8539 | &portstat_reg.word0) || |
8461 | portsmphr = readl(phba->sli4_hba.PSMPHRregaddr); | 8540 | lpfc_readl(phba->sli4_hba.PSMPHRregaddr, |
8541 | &portsmphr)){ | ||
8542 | phba->work_hs |= UNPLUG_ERR; | ||
8543 | phba->work_ha |= HA_ERATT; | ||
8544 | phba->hba_flag |= HBA_ERATT_HANDLED; | ||
8545 | return 1; | ||
8546 | } | ||
8462 | if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { | 8547 | if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { |
8463 | phba->work_status[0] = | 8548 | phba->work_status[0] = |
8464 | readl(phba->sli4_hba.u.if_type2.ERR1regaddr); | 8549 | readl(phba->sli4_hba.u.if_type2.ERR1regaddr); |
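Both the IF_TYPE_0 and IF_TYPE_2 branches above react to a failed register read with the same three assignments: mark UNPLUG_ERR in the host-status work bitmap, raise HA_ERATT in the host-attention work bitmap, and claim the error attention with HBA_ERATT_HANDLED before returning 1. A hypothetical helper capturing that repeated sequence is sketched below; it is not part of the driver, and it assumes the lpfc headers (struct lpfc_hba and the flag macros used in the hunks) are in scope.

/* Hypothetical consolidation of the repeated error path above. */
static inline int lpfc_flag_unplug_eratt(struct lpfc_hba *phba)
{
        phba->work_hs |= UNPLUG_ERR;            /* record the unplug condition */
        phba->work_ha |= HA_ERATT;              /* signal an error attention */
        phba->hba_flag |= HBA_ERATT_HANDLED;    /* polling path owns this ERATT */
        return 1;                               /* callers return 1 on ERATT */
}
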
@@ -8639,7 +8724,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) | |||
8639 | return IRQ_NONE; | 8724 | return IRQ_NONE; |
8640 | /* Need to read HA REG for slow-path events */ | 8725 | /* Need to read HA REG for slow-path events */ |
8641 | spin_lock_irqsave(&phba->hbalock, iflag); | 8726 | spin_lock_irqsave(&phba->hbalock, iflag); |
8642 | ha_copy = readl(phba->HAregaddr); | 8727 | if (lpfc_readl(phba->HAregaddr, &ha_copy)) |
8728 | goto unplug_error; | ||
8643 | /* If somebody is waiting to handle an eratt don't process it | 8729 | /* If somebody is waiting to handle an eratt don't process it |
8644 | * here. The brdkill function will do this. | 8730 | * here. The brdkill function will do this. |
8645 | */ | 8731 | */ |
@@ -8665,7 +8751,9 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) | |||
8665 | } | 8751 | } |
8666 | 8752 | ||
8667 | /* Clear up only attention source related to slow-path */ | 8753 | /* Clear up only attention source related to slow-path */ |
8668 | hc_copy = readl(phba->HCregaddr); | 8754 | if (lpfc_readl(phba->HCregaddr, &hc_copy)) |
8755 | goto unplug_error; | ||
8756 | |||
8669 | writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | | 8757 | writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | |
8670 | HC_LAINT_ENA | HC_ERINT_ENA), | 8758 | HC_LAINT_ENA | HC_ERINT_ENA), |
8671 | phba->HCregaddr); | 8759 | phba->HCregaddr); |
@@ -8688,7 +8776,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) | |||
8688 | */ | 8776 | */ |
8689 | spin_lock_irqsave(&phba->hbalock, iflag); | 8777 | spin_lock_irqsave(&phba->hbalock, iflag); |
8690 | phba->sli.sli_flag &= ~LPFC_PROCESS_LA; | 8778 | phba->sli.sli_flag &= ~LPFC_PROCESS_LA; |
8691 | control = readl(phba->HCregaddr); | 8779 | if (lpfc_readl(phba->HCregaddr, &control)) |
8780 | goto unplug_error; | ||
8692 | control &= ~HC_LAINT_ENA; | 8781 | control &= ~HC_LAINT_ENA; |
8693 | writel(control, phba->HCregaddr); | 8782 | writel(control, phba->HCregaddr); |
8694 | readl(phba->HCregaddr); /* flush */ | 8783 | readl(phba->HCregaddr); /* flush */ |
@@ -8708,7 +8797,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) | |||
8708 | status >>= (4*LPFC_ELS_RING); | 8797 | status >>= (4*LPFC_ELS_RING); |
8709 | if (status & HA_RXMASK) { | 8798 | if (status & HA_RXMASK) { |
8710 | spin_lock_irqsave(&phba->hbalock, iflag); | 8799 | spin_lock_irqsave(&phba->hbalock, iflag); |
8711 | control = readl(phba->HCregaddr); | 8800 | if (lpfc_readl(phba->HCregaddr, &control)) |
8801 | goto unplug_error; | ||
8712 | 8802 | ||
8713 | lpfc_debugfs_slow_ring_trc(phba, | 8803 | lpfc_debugfs_slow_ring_trc(phba, |
8714 | "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", | 8804 | "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", |
@@ -8741,7 +8831,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) | |||
8741 | } | 8831 | } |
8742 | spin_lock_irqsave(&phba->hbalock, iflag); | 8832 | spin_lock_irqsave(&phba->hbalock, iflag); |
8743 | if (work_ha_copy & HA_ERATT) { | 8833 | if (work_ha_copy & HA_ERATT) { |
8744 | lpfc_sli_read_hs(phba); | 8834 | if (lpfc_sli_read_hs(phba)) |
8835 | goto unplug_error; | ||
8745 | /* | 8836 | /* |
8746 | * Check if a deferred error condition | 8837 | * Check if a deferred error condition |
8747 | * is active | 8838 | * is active |
@@ -8872,6 +8963,9 @@ send_current_mbox: | |||
8872 | lpfc_worker_wake_up(phba); | 8963 | lpfc_worker_wake_up(phba); |
8873 | } | 8964 | } |
8874 | return IRQ_HANDLED; | 8965 | return IRQ_HANDLED; |
8966 | unplug_error: | ||
8967 | spin_unlock_irqrestore(&phba->hbalock, iflag); | ||
8968 | return IRQ_HANDLED; | ||
8875 | 8969 | ||
8876 | } /* lpfc_sli_sp_intr_handler */ | 8970 | } /* lpfc_sli_sp_intr_handler */ |
8877 | 8971 | ||
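In the slow-path handler above, every failed lpfc_readl() jumps to the new unplug_error label, which drops hbalock and still returns IRQ_HANDLED so the interrupt from a vanished adapter is not reported as spurious. The general shape of that error exit is sketched here with invented names (example_dev, example_isr are not lpfc symbols); it is an illustration of the idiom, not driver code.

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_dev {
        spinlock_t lock;
        void __iomem *status_reg;
};

static irqreturn_t example_isr(int irq, void *dev_id)
{
        struct example_dev *dev = dev_id;
        unsigned long flags;
        u32 status;

        spin_lock_irqsave(&dev->lock, flags);
        status = readl(dev->status_reg);
        if (status == 0xffffffff)               /* all ones: adapter unplugged */
                goto unplug_error;
        /* ... normal attention processing would go here ... */
        spin_unlock_irqrestore(&dev->lock, flags);
        return IRQ_HANDLED;

unplug_error:
        spin_unlock_irqrestore(&dev->lock, flags);
        return IRQ_HANDLED;                     /* claim the IRQ; nothing else to do */
}
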
@@ -8919,7 +9013,8 @@ lpfc_sli_fp_intr_handler(int irq, void *dev_id) | |||
8919 | if (lpfc_intr_state_check(phba)) | 9013 | if (lpfc_intr_state_check(phba)) |
8920 | return IRQ_NONE; | 9014 | return IRQ_NONE; |
8921 | /* Need to read HA REG for FCP ring and other ring events */ | 9015 | /* Need to read HA REG for FCP ring and other ring events */ |
8922 | ha_copy = readl(phba->HAregaddr); | 9016 | if (lpfc_readl(phba->HAregaddr, &ha_copy)) |
9017 | return IRQ_HANDLED; | ||
8923 | /* Clear up only attention source related to fast-path */ | 9018 | /* Clear up only attention source related to fast-path */ |
8924 | spin_lock_irqsave(&phba->hbalock, iflag); | 9019 | spin_lock_irqsave(&phba->hbalock, iflag); |
8925 | /* | 9020 | /* |
@@ -9004,7 +9099,11 @@ lpfc_sli_intr_handler(int irq, void *dev_id) | |||
9004 | return IRQ_NONE; | 9099 | return IRQ_NONE; |
9005 | 9100 | ||
9006 | spin_lock(&phba->hbalock); | 9101 | spin_lock(&phba->hbalock); |
9007 | phba->ha_copy = readl(phba->HAregaddr); | 9102 | if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) { |
9103 | spin_unlock(&phba->hbalock); | ||
9104 | return IRQ_HANDLED; | ||
9105 | } | ||
9106 | |||
9008 | if (unlikely(!phba->ha_copy)) { | 9107 | if (unlikely(!phba->ha_copy)) { |
9009 | spin_unlock(&phba->hbalock); | 9108 | spin_unlock(&phba->hbalock); |
9010 | return IRQ_NONE; | 9109 | return IRQ_NONE; |
@@ -9026,7 +9125,10 @@ lpfc_sli_intr_handler(int irq, void *dev_id) | |||
9026 | } | 9125 | } |
9027 | 9126 | ||
9028 | /* Clear attention sources except link and error attentions */ | 9127 | /* Clear attention sources except link and error attentions */ |
9029 | hc_copy = readl(phba->HCregaddr); | 9128 | if (lpfc_readl(phba->HCregaddr, &hc_copy)) { |
9129 | spin_unlock(&phba->hbalock); | ||
9130 | return IRQ_HANDLED; | ||
9131 | } | ||
9030 | writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA | 9132 | writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA |
9031 | | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), | 9133 | | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), |
9032 | phba->HCregaddr); | 9134 | phba->HCregaddr); |
@@ -10403,7 +10505,6 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, | |||
10403 | if (!phba->sli4_hba.pc_sli4_params.supported) | 10505 | if (!phba->sli4_hba.pc_sli4_params.supported) |
10404 | hw_page_size = SLI4_PAGE_SIZE; | 10506 | hw_page_size = SLI4_PAGE_SIZE; |
10405 | 10507 | ||
10406 | |||
10407 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 10508 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
10408 | if (!mbox) | 10509 | if (!mbox) |
10409 | return -ENOMEM; | 10510 | return -ENOMEM; |
@@ -10413,11 +10514,22 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, | |||
10413 | LPFC_MBOX_OPCODE_CQ_CREATE, | 10514 | LPFC_MBOX_OPCODE_CQ_CREATE, |
10414 | length, LPFC_SLI4_MBX_EMBED); | 10515 | length, LPFC_SLI4_MBX_EMBED); |
10415 | cq_create = &mbox->u.mqe.un.cq_create; | 10516 | cq_create = &mbox->u.mqe.un.cq_create; |
10517 | shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; | ||
10416 | bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, | 10518 | bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, |
10417 | cq->page_count); | 10519 | cq->page_count); |
10418 | bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); | 10520 | bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); |
10419 | bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); | 10521 | bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); |
10420 | bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id); | 10522 | bf_set(lpfc_mbox_hdr_version, &shdr->request, |
10523 | phba->sli4_hba.pc_sli4_params.cqv); | ||
10524 | if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { | ||
10525 | bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, | ||
10526 | (PAGE_SIZE/SLI4_PAGE_SIZE)); | ||
10527 | bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, | ||
10528 | eq->queue_id); | ||
10529 | } else { | ||
10530 | bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, | ||
10531 | eq->queue_id); | ||
10532 | } | ||
10421 | switch (cq->entry_count) { | 10533 | switch (cq->entry_count) { |
10422 | default: | 10534 | default: |
10423 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 10535 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
@@ -10449,7 +10561,6 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, | |||
10449 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); | 10561 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); |
10450 | 10562 | ||
10451 | /* The IOCTL status is embedded in the mailbox subheader. */ | 10563 | /* The IOCTL status is embedded in the mailbox subheader. */ |
10452 | shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; | ||
10453 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); | 10564 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
10454 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); | 10565 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
10455 | if (shdr_status || shdr_add_status || rc) { | 10566 | if (shdr_status || shdr_add_status || rc) { |
@@ -10515,20 +10626,20 @@ lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq, | |||
10515 | bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); | 10626 | bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); |
10516 | switch (mq->entry_count) { | 10627 | switch (mq->entry_count) { |
10517 | case 16: | 10628 | case 16: |
10518 | bf_set(lpfc_mq_context_count, &mq_create->u.request.context, | 10629 | bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, |
10519 | LPFC_MQ_CNT_16); | 10630 | LPFC_MQ_RING_SIZE_16); |
10520 | break; | 10631 | break; |
10521 | case 32: | 10632 | case 32: |
10522 | bf_set(lpfc_mq_context_count, &mq_create->u.request.context, | 10633 | bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, |
10523 | LPFC_MQ_CNT_32); | 10634 | LPFC_MQ_RING_SIZE_32); |
10524 | break; | 10635 | break; |
10525 | case 64: | 10636 | case 64: |
10526 | bf_set(lpfc_mq_context_count, &mq_create->u.request.context, | 10637 | bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, |
10527 | LPFC_MQ_CNT_64); | 10638 | LPFC_MQ_RING_SIZE_64); |
10528 | break; | 10639 | break; |
10529 | case 128: | 10640 | case 128: |
10530 | bf_set(lpfc_mq_context_count, &mq_create->u.request.context, | 10641 | bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, |
10531 | LPFC_MQ_CNT_128); | 10642 | LPFC_MQ_RING_SIZE_128); |
10532 | break; | 10643 | break; |
10533 | } | 10644 | } |
10534 | list_for_each_entry(dmabuf, &mq->page_list, list) { | 10645 | list_for_each_entry(dmabuf, &mq->page_list, list) { |
@@ -10586,6 +10697,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, | |||
10586 | length, LPFC_SLI4_MBX_EMBED); | 10697 | length, LPFC_SLI4_MBX_EMBED); |
10587 | 10698 | ||
10588 | mq_create_ext = &mbox->u.mqe.un.mq_create_ext; | 10699 | mq_create_ext = &mbox->u.mqe.un.mq_create_ext; |
10700 | shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; | ||
10589 | bf_set(lpfc_mbx_mq_create_ext_num_pages, | 10701 | bf_set(lpfc_mbx_mq_create_ext_num_pages, |
10590 | &mq_create_ext->u.request, mq->page_count); | 10702 | &mq_create_ext->u.request, mq->page_count); |
10591 | bf_set(lpfc_mbx_mq_create_ext_async_evt_link, | 10703 | bf_set(lpfc_mbx_mq_create_ext_async_evt_link, |
@@ -10598,9 +10710,15 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, | |||
10598 | &mq_create_ext->u.request, 1); | 10710 | &mq_create_ext->u.request, 1); |
10599 | bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, | 10711 | bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, |
10600 | &mq_create_ext->u.request, 1); | 10712 | &mq_create_ext->u.request, 1); |
10601 | bf_set(lpfc_mq_context_cq_id, | ||
10602 | &mq_create_ext->u.request.context, cq->queue_id); | ||
10603 | bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); | 10713 | bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); |
10714 | bf_set(lpfc_mbox_hdr_version, &shdr->request, | ||
10715 | phba->sli4_hba.pc_sli4_params.mqv); | ||
10716 | if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1) | ||
10717 | bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request, | ||
10718 | cq->queue_id); | ||
10719 | else | ||
10720 | bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, | ||
10721 | cq->queue_id); | ||
10604 | switch (mq->entry_count) { | 10722 | switch (mq->entry_count) { |
10605 | default: | 10723 | default: |
10606 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 10724 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
@@ -10610,20 +10728,24 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, | |||
10610 | return -EINVAL; | 10728 | return -EINVAL; |
10611 | /* otherwise default to smallest count (drop through) */ | 10729 | /* otherwise default to smallest count (drop through) */ |
10612 | case 16: | 10730 | case 16: |
10613 | bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, | 10731 | bf_set(lpfc_mq_context_ring_size, |
10614 | LPFC_MQ_CNT_16); | 10732 | &mq_create_ext->u.request.context, |
10733 | LPFC_MQ_RING_SIZE_16); | ||
10615 | break; | 10734 | break; |
10616 | case 32: | 10735 | case 32: |
10617 | bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, | 10736 | bf_set(lpfc_mq_context_ring_size, |
10618 | LPFC_MQ_CNT_32); | 10737 | &mq_create_ext->u.request.context, |
10738 | LPFC_MQ_RING_SIZE_32); | ||
10619 | break; | 10739 | break; |
10620 | case 64: | 10740 | case 64: |
10621 | bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, | 10741 | bf_set(lpfc_mq_context_ring_size, |
10622 | LPFC_MQ_CNT_64); | 10742 | &mq_create_ext->u.request.context, |
10743 | LPFC_MQ_RING_SIZE_64); | ||
10623 | break; | 10744 | break; |
10624 | case 128: | 10745 | case 128: |
10625 | bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, | 10746 | bf_set(lpfc_mq_context_ring_size, |
10626 | LPFC_MQ_CNT_128); | 10747 | &mq_create_ext->u.request.context, |
10748 | LPFC_MQ_RING_SIZE_128); | ||
10627 | break; | 10749 | break; |
10628 | } | 10750 | } |
10629 | list_for_each_entry(dmabuf, &mq->page_list, list) { | 10751 | list_for_each_entry(dmabuf, &mq->page_list, list) { |
@@ -10634,7 +10756,6 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, | |||
10634 | putPaddrHigh(dmabuf->phys); | 10756 | putPaddrHigh(dmabuf->phys); |
10635 | } | 10757 | } |
10636 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); | 10758 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); |
10637 | shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; | ||
10638 | mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, | 10759 | mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, |
10639 | &mq_create_ext->u.response); | 10760 | &mq_create_ext->u.response); |
10640 | if (rc != MBX_SUCCESS) { | 10761 | if (rc != MBX_SUCCESS) { |
@@ -10711,6 +10832,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, | |||
10711 | uint32_t shdr_status, shdr_add_status; | 10832 | uint32_t shdr_status, shdr_add_status; |
10712 | union lpfc_sli4_cfg_shdr *shdr; | 10833 | union lpfc_sli4_cfg_shdr *shdr; |
10713 | uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; | 10834 | uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; |
10835 | struct dma_address *page; | ||
10714 | 10836 | ||
10715 | if (!phba->sli4_hba.pc_sli4_params.supported) | 10837 | if (!phba->sli4_hba.pc_sli4_params.supported) |
10716 | hw_page_size = SLI4_PAGE_SIZE; | 10838 | hw_page_size = SLI4_PAGE_SIZE; |
@@ -10724,20 +10846,42 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, | |||
10724 | LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, | 10846 | LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, |
10725 | length, LPFC_SLI4_MBX_EMBED); | 10847 | length, LPFC_SLI4_MBX_EMBED); |
10726 | wq_create = &mbox->u.mqe.un.wq_create; | 10848 | wq_create = &mbox->u.mqe.un.wq_create; |
10849 | shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; | ||
10727 | bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, | 10850 | bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, |
10728 | wq->page_count); | 10851 | wq->page_count); |
10729 | bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, | 10852 | bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, |
10730 | cq->queue_id); | 10853 | cq->queue_id); |
10854 | bf_set(lpfc_mbox_hdr_version, &shdr->request, | ||
10855 | phba->sli4_hba.pc_sli4_params.wqv); | ||
10856 | if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) { | ||
10857 | bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, | ||
10858 | wq->entry_count); | ||
10859 | switch (wq->entry_size) { | ||
10860 | default: | ||
10861 | case 64: | ||
10862 | bf_set(lpfc_mbx_wq_create_wqe_size, | ||
10863 | &wq_create->u.request_1, | ||
10864 | LPFC_WQ_WQE_SIZE_64); | ||
10865 | break; | ||
10866 | case 128: | ||
10867 | bf_set(lpfc_mbx_wq_create_wqe_size, | ||
10868 | &wq_create->u.request_1, | ||
10869 | LPFC_WQ_WQE_SIZE_128); | ||
10870 | break; | ||
10871 | } | ||
10872 | bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1, | ||
10873 | (PAGE_SIZE/SLI4_PAGE_SIZE)); | ||
10874 | page = wq_create->u.request_1.page; | ||
10875 | } else { | ||
10876 | page = wq_create->u.request.page; | ||
10877 | } | ||
10731 | list_for_each_entry(dmabuf, &wq->page_list, list) { | 10878 | list_for_each_entry(dmabuf, &wq->page_list, list) { |
10732 | memset(dmabuf->virt, 0, hw_page_size); | 10879 | memset(dmabuf->virt, 0, hw_page_size); |
10733 | wq_create->u.request.page[dmabuf->buffer_tag].addr_lo = | 10880 | page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); |
10734 | putPaddrLow(dmabuf->phys); | 10881 | page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); |
10735 | wq_create->u.request.page[dmabuf->buffer_tag].addr_hi = | ||
10736 | putPaddrHigh(dmabuf->phys); | ||
10737 | } | 10882 | } |
10738 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); | 10883 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); |
10739 | /* The IOCTL status is embedded in the mailbox subheader. */ | 10884 | /* The IOCTL status is embedded in the mailbox subheader. */ |
10740 | shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; | ||
10741 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); | 10885 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
10742 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); | 10886 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
10743 | if (shdr_status || shdr_add_status || rc) { | 10887 | if (shdr_status || shdr_add_status || rc) { |
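In the WQ_CREATE hunk above, the page array pointer is selected once (request_1.page for queue-create version 1, request.page otherwise) so a single loop can program the page addresses for either layout, and putPaddrLow()/putPaddrHigh() split each page's 64-bit DMA address into the two 32-bit words the mailbox expects. A generic sketch of that split follows; the helpers below are illustrations, not the driver's macros.

#include <linux/kernel.h>
#include <linux/types.h>

/* Generic equivalents of the low/high split done by putPaddrLow()/putPaddrHigh(). */
static inline u32 example_paddr_lo(dma_addr_t addr)
{
        return lower_32_bits(addr);
}

static inline u32 example_paddr_hi(dma_addr_t addr)
{
        return upper_32_bits(addr);
}
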
@@ -10815,37 +10959,51 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, | |||
10815 | LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, | 10959 | LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, |
10816 | length, LPFC_SLI4_MBX_EMBED); | 10960 | length, LPFC_SLI4_MBX_EMBED); |
10817 | rq_create = &mbox->u.mqe.un.rq_create; | 10961 | rq_create = &mbox->u.mqe.un.rq_create; |
10818 | switch (hrq->entry_count) { | 10962 | shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; |
10819 | default: | 10963 | bf_set(lpfc_mbox_hdr_version, &shdr->request, |
10820 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 10964 | phba->sli4_hba.pc_sli4_params.rqv); |
10821 | "2535 Unsupported RQ count. (%d)\n", | 10965 | if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { |
10822 | hrq->entry_count); | 10966 | bf_set(lpfc_rq_context_rqe_count_1, |
10823 | if (hrq->entry_count < 512) | 10967 | &rq_create->u.request.context, |
10824 | return -EINVAL; | 10968 | hrq->entry_count); |
10825 | /* otherwise default to smallest count (drop through) */ | 10969 | rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; |
10826 | case 512: | 10970 | } else { |
10827 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | 10971 | switch (hrq->entry_count) { |
10828 | LPFC_RQ_RING_SIZE_512); | 10972 | default: |
10829 | break; | 10973 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
10830 | case 1024: | 10974 | "2535 Unsupported RQ count. (%d)\n", |
10831 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | 10975 | hrq->entry_count); |
10832 | LPFC_RQ_RING_SIZE_1024); | 10976 | if (hrq->entry_count < 512) |
10833 | break; | 10977 | return -EINVAL; |
10834 | case 2048: | 10978 | /* otherwise default to smallest count (drop through) */ |
10835 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | 10979 | case 512: |
10836 | LPFC_RQ_RING_SIZE_2048); | 10980 | bf_set(lpfc_rq_context_rqe_count, |
10837 | break; | 10981 | &rq_create->u.request.context, |
10838 | case 4096: | 10982 | LPFC_RQ_RING_SIZE_512); |
10839 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | 10983 | break; |
10840 | LPFC_RQ_RING_SIZE_4096); | 10984 | case 1024: |
10841 | break; | 10985 | bf_set(lpfc_rq_context_rqe_count, |
10986 | &rq_create->u.request.context, | ||
10987 | LPFC_RQ_RING_SIZE_1024); | ||
10988 | break; | ||
10989 | case 2048: | ||
10990 | bf_set(lpfc_rq_context_rqe_count, | ||
10991 | &rq_create->u.request.context, | ||
10992 | LPFC_RQ_RING_SIZE_2048); | ||
10993 | break; | ||
10994 | case 4096: | ||
10995 | bf_set(lpfc_rq_context_rqe_count, | ||
10996 | &rq_create->u.request.context, | ||
10997 | LPFC_RQ_RING_SIZE_4096); | ||
10998 | break; | ||
10999 | } | ||
11000 | bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, | ||
11001 | LPFC_HDR_BUF_SIZE); | ||
10842 | } | 11002 | } |
10843 | bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, | 11003 | bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, |
10844 | cq->queue_id); | 11004 | cq->queue_id); |
10845 | bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, | 11005 | bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, |
10846 | hrq->page_count); | 11006 | hrq->page_count); |
10847 | bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, | ||
10848 | LPFC_HDR_BUF_SIZE); | ||
10849 | list_for_each_entry(dmabuf, &hrq->page_list, list) { | 11007 | list_for_each_entry(dmabuf, &hrq->page_list, list) { |
10850 | memset(dmabuf->virt, 0, hw_page_size); | 11008 | memset(dmabuf->virt, 0, hw_page_size); |
10851 | rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = | 11009 | rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = |
@@ -10855,7 +11013,6 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, | |||
10855 | } | 11013 | } |
10856 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); | 11014 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); |
10857 | /* The IOCTL status is embedded in the mailbox subheader. */ | 11015 | /* The IOCTL status is embedded in the mailbox subheader. */ |
10858 | shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; | ||
10859 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); | 11016 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
10860 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); | 11017 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
10861 | if (shdr_status || shdr_add_status || rc) { | 11018 | if (shdr_status || shdr_add_status || rc) { |
@@ -10881,37 +11038,50 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, | |||
10881 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, | 11038 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, |
10882 | LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, | 11039 | LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, |
10883 | length, LPFC_SLI4_MBX_EMBED); | 11040 | length, LPFC_SLI4_MBX_EMBED); |
10884 | switch (drq->entry_count) { | 11041 | bf_set(lpfc_mbox_hdr_version, &shdr->request, |
10885 | default: | 11042 | phba->sli4_hba.pc_sli4_params.rqv); |
10886 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 11043 | if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { |
10887 | "2536 Unsupported RQ count. (%d)\n", | 11044 | bf_set(lpfc_rq_context_rqe_count_1, |
10888 | drq->entry_count); | 11045 | &rq_create->u.request.context, |
10889 | if (drq->entry_count < 512) | 11046 | hrq->entry_count); |
10890 | return -EINVAL; | 11047 | rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE; |
10891 | /* otherwise default to smallest count (drop through) */ | 11048 | } else { |
10892 | case 512: | 11049 | switch (drq->entry_count) { |
10893 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | 11050 | default: |
10894 | LPFC_RQ_RING_SIZE_512); | 11051 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
10895 | break; | 11052 | "2536 Unsupported RQ count. (%d)\n", |
10896 | case 1024: | 11053 | drq->entry_count); |
10897 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | 11054 | if (drq->entry_count < 512) |
10898 | LPFC_RQ_RING_SIZE_1024); | 11055 | return -EINVAL; |
10899 | break; | 11056 | /* otherwise default to smallest count (drop through) */ |
10900 | case 2048: | 11057 | case 512: |
10901 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | 11058 | bf_set(lpfc_rq_context_rqe_count, |
10902 | LPFC_RQ_RING_SIZE_2048); | 11059 | &rq_create->u.request.context, |
10903 | break; | 11060 | LPFC_RQ_RING_SIZE_512); |
10904 | case 4096: | 11061 | break; |
10905 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | 11062 | case 1024: |
10906 | LPFC_RQ_RING_SIZE_4096); | 11063 | bf_set(lpfc_rq_context_rqe_count, |
10907 | break; | 11064 | &rq_create->u.request.context, |
11065 | LPFC_RQ_RING_SIZE_1024); | ||
11066 | break; | ||
11067 | case 2048: | ||
11068 | bf_set(lpfc_rq_context_rqe_count, | ||
11069 | &rq_create->u.request.context, | ||
11070 | LPFC_RQ_RING_SIZE_2048); | ||
11071 | break; | ||
11072 | case 4096: | ||
11073 | bf_set(lpfc_rq_context_rqe_count, | ||
11074 | &rq_create->u.request.context, | ||
11075 | LPFC_RQ_RING_SIZE_4096); | ||
11076 | break; | ||
11077 | } | ||
11078 | bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, | ||
11079 | LPFC_DATA_BUF_SIZE); | ||
10908 | } | 11080 | } |
10909 | bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, | 11081 | bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, |
10910 | cq->queue_id); | 11082 | cq->queue_id); |
10911 | bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, | 11083 | bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, |
10912 | drq->page_count); | 11084 | drq->page_count); |
10913 | bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, | ||
10914 | LPFC_DATA_BUF_SIZE); | ||
10915 | list_for_each_entry(dmabuf, &drq->page_list, list) { | 11085 | list_for_each_entry(dmabuf, &drq->page_list, list) { |
10916 | rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = | 11086 | rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = |
10917 | putPaddrLow(dmabuf->phys); | 11087 | putPaddrLow(dmabuf->phys); |
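One detail worth noting in the data-RQ portion above: the queue-create version 1 branch programs lpfc_rq_context_rqe_count_1 from hrq->entry_count even though this mailbox describes drq. Header and data RQs are normally created with the same entry count, so the behavior may be unchanged in practice, but assuming the data queue is meant to be sized from its own descriptor, the intended lines would presumably read:

                bf_set(lpfc_rq_context_rqe_count_1,
                       &rq_create->u.request.context,
                       drq->entry_count);
                rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
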
@@ -11580,6 +11750,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) | |||
11580 | static char *rctl_names[] = FC_RCTL_NAMES_INIT; | 11750 | static char *rctl_names[] = FC_RCTL_NAMES_INIT; |
11581 | char *type_names[] = FC_TYPE_NAMES_INIT; | 11751 | char *type_names[] = FC_TYPE_NAMES_INIT; |
11582 | struct fc_vft_header *fc_vft_hdr; | 11752 | struct fc_vft_header *fc_vft_hdr; |
11753 | uint32_t *header = (uint32_t *) fc_hdr; | ||
11583 | 11754 | ||
11584 | switch (fc_hdr->fh_r_ctl) { | 11755 | switch (fc_hdr->fh_r_ctl) { |
11585 | case FC_RCTL_DD_UNCAT: /* uncategorized information */ | 11756 | case FC_RCTL_DD_UNCAT: /* uncategorized information */ |
@@ -11628,10 +11799,15 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) | |||
11628 | default: | 11799 | default: |
11629 | goto drop; | 11800 | goto drop; |
11630 | } | 11801 | } |
11802 | |||
11631 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, | 11803 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, |
11632 | "2538 Received frame rctl:%s type:%s\n", | 11804 | "2538 Received frame rctl:%s type:%s " |
11805 | "Frame Data:%08x %08x %08x %08x %08x %08x\n", | ||
11633 | rctl_names[fc_hdr->fh_r_ctl], | 11806 | rctl_names[fc_hdr->fh_r_ctl], |
11634 | type_names[fc_hdr->fh_type]); | 11807 | type_names[fc_hdr->fh_type], |
11808 | be32_to_cpu(header[0]), be32_to_cpu(header[1]), | ||
11809 | be32_to_cpu(header[2]), be32_to_cpu(header[3]), | ||
11810 | be32_to_cpu(header[4]), be32_to_cpu(header[5])); | ||
11635 | return 0; | 11811 | return 0; |
11636 | drop: | 11812 | drop: |
11637 | lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, | 11813 | lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, |
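The 2538 log message now appends the first six 32-bit words of the received frame header, converted from wire (big-endian) order -- that is, the entire 24-byte FC header. A standalone sketch of the same dump is shown below; pr_info and the __be32 view are choices made for this sketch, not code taken from the driver.

#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include <scsi/fc/fc_fs.h>

static void example_dump_fc_header(const struct fc_frame_header *fc_hdr)
{
        const __be32 *w = (const __be32 *)fc_hdr;       /* 24-byte header = 6 words */

        pr_info("FC hdr: %08x %08x %08x %08x %08x %08x\n",
                be32_to_cpu(w[0]), be32_to_cpu(w[1]), be32_to_cpu(w[2]),
                be32_to_cpu(w[3]), be32_to_cpu(w[4]), be32_to_cpu(w[5]));
}
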
@@ -11928,17 +12104,17 @@ lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport, | |||
11928 | } | 12104 | } |
11929 | 12105 | ||
11930 | /** | 12106 | /** |
11931 | * lpfc_sli4_seq_abort_acc_cmpl - Accept seq abort iocb complete handler | 12107 | * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler |
11932 | * @phba: Pointer to HBA context object. | 12108 | * @phba: Pointer to HBA context object. |
11933 | * @cmd_iocbq: pointer to the command iocbq structure. | 12109 | * @cmd_iocbq: pointer to the command iocbq structure. |
11934 | * @rsp_iocbq: pointer to the response iocbq structure. | 12110 | * @rsp_iocbq: pointer to the response iocbq structure. |
11935 | * | 12111 | * |
11936 | * This function handles the sequence abort accept iocb command complete | 12112 | * This function handles the sequence abort response iocb command complete |
11937 | * event. It properly releases the memory allocated to the sequence abort | 12113 | * event. It properly releases the memory allocated to the sequence abort |
11938 | * accept iocb. | 12114 | * accept iocb. |
11939 | **/ | 12115 | **/ |
11940 | static void | 12116 | static void |
11941 | lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba, | 12117 | lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, |
11942 | struct lpfc_iocbq *cmd_iocbq, | 12118 | struct lpfc_iocbq *cmd_iocbq, |
11943 | struct lpfc_iocbq *rsp_iocbq) | 12119 | struct lpfc_iocbq *rsp_iocbq) |
11944 | { | 12120 | { |
@@ -11947,15 +12123,15 @@ lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba, | |||
11947 | } | 12123 | } |
11948 | 12124 | ||
11949 | /** | 12125 | /** |
11950 | * lpfc_sli4_seq_abort_acc - Accept sequence abort | 12126 | * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort |
11951 | * @phba: Pointer to HBA context object. | 12127 | * @phba: Pointer to HBA context object. |
11952 | * @fc_hdr: pointer to a FC frame header. | 12128 | * @fc_hdr: pointer to a FC frame header. |
11953 | * | 12129 | * |
11954 | * This function sends a basic accept to a previous unsol sequence abort | 12130 | * This function sends a basic response to a previous unsol sequence abort |
11955 | * event after aborting the sequence handling. | 12131 | * event after aborting the sequence handling. |
11956 | **/ | 12132 | **/ |
11957 | static void | 12133 | static void |
11958 | lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, | 12134 | lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba, |
11959 | struct fc_frame_header *fc_hdr) | 12135 | struct fc_frame_header *fc_hdr) |
11960 | { | 12136 | { |
11961 | struct lpfc_iocbq *ctiocb = NULL; | 12137 | struct lpfc_iocbq *ctiocb = NULL; |
@@ -11963,6 +12139,7 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, | |||
11963 | uint16_t oxid, rxid; | 12139 | uint16_t oxid, rxid; |
11964 | uint32_t sid, fctl; | 12140 | uint32_t sid, fctl; |
11965 | IOCB_t *icmd; | 12141 | IOCB_t *icmd; |
12142 | int rc; | ||
11966 | 12143 | ||
11967 | if (!lpfc_is_link_up(phba)) | 12144 | if (!lpfc_is_link_up(phba)) |
11968 | return; | 12145 | return; |
@@ -11983,7 +12160,7 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, | |||
11983 | + phba->sli4_hba.max_cfg_param.xri_base)) | 12160 | + phba->sli4_hba.max_cfg_param.xri_base)) |
11984 | lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0); | 12161 | lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0); |
11985 | 12162 | ||
11986 | /* Allocate buffer for acc iocb */ | 12163 | /* Allocate buffer for rsp iocb */ |
11987 | ctiocb = lpfc_sli_get_iocbq(phba); | 12164 | ctiocb = lpfc_sli_get_iocbq(phba); |
11988 | if (!ctiocb) | 12165 | if (!ctiocb) |
11989 | return; | 12166 | return; |
@@ -12008,32 +12185,54 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, | |||
12008 | 12185 | ||
12009 | ctiocb->iocb_cmpl = NULL; | 12186 | ctiocb->iocb_cmpl = NULL; |
12010 | ctiocb->vport = phba->pport; | 12187 | ctiocb->vport = phba->pport; |
12011 | ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_acc_cmpl; | 12188 | ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; |
12189 | ctiocb->sli4_xritag = NO_XRI; | ||
12190 | |||
12191 | /* If the oxid maps to the FCP XRI range or if it is out of range, | ||
12192 | * send a BLS_RJT. The driver no longer has that exchange. | ||
12193 | * Override the IOCB for a BA_RJT. | ||
12194 | */ | ||
12195 | if (oxid > (phba->sli4_hba.max_cfg_param.max_xri + | ||
12196 | phba->sli4_hba.max_cfg_param.xri_base) || | ||
12197 | oxid > (lpfc_sli4_get_els_iocb_cnt(phba) + | ||
12198 | phba->sli4_hba.max_cfg_param.xri_base)) { | ||
12199 | icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; | ||
12200 | bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); | ||
12201 | bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); | ||
12202 | bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); | ||
12203 | } | ||
12012 | 12204 | ||
12013 | if (fctl & FC_FC_EX_CTX) { | 12205 | if (fctl & FC_FC_EX_CTX) { |
12014 | /* ABTS sent by responder to CT exchange, construction | 12206 | /* ABTS sent by responder to CT exchange, construction |
12015 | * of BA_ACC will use OX_ID from ABTS for the XRI_TAG | 12207 | * of BA_ACC will use OX_ID from ABTS for the XRI_TAG |
12016 | * field and RX_ID from ABTS for RX_ID field. | 12208 | * field and RX_ID from ABTS for RX_ID field. |
12017 | */ | 12209 | */ |
12018 | bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_RSP); | 12210 | bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP); |
12019 | bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, rxid); | 12211 | bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid); |
12020 | ctiocb->sli4_xritag = oxid; | ||
12021 | } else { | 12212 | } else { |
12022 | /* ABTS sent by initiator to CT exchange, construction | 12213 | /* ABTS sent by initiator to CT exchange, construction |
12023 | * of BA_ACC will need to allocate a new XRI as for the | 12214 | * of BA_ACC will need to allocate a new XRI as for the |
12024 | * XRI_TAG and RX_ID fields. | 12215 | * XRI_TAG and RX_ID fields. |
12025 | */ | 12216 | */ |
12026 | bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_INT); | 12217 | bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT); |
12027 | bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, NO_XRI); | 12218 | bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, NO_XRI); |
12028 | ctiocb->sli4_xritag = NO_XRI; | ||
12029 | } | 12219 | } |
12030 | bf_set(lpfc_abts_oxid, &icmd->un.bls_acc, oxid); | 12220 | bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid); |
12031 | 12221 | ||
12032 | /* Xmit CT abts accept on exchange <xid> */ | 12222 | /* Xmit CT abts response on exchange <xid> */ |
12033 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, | 12223 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, |
12034 | "1200 Xmit CT ABTS ACC on exchange x%x Data: x%x\n", | 12224 | "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n", |
12035 | CMD_XMIT_BLS_RSP64_CX, phba->link_state); | 12225 | icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state); |
12036 | lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); | 12226 | |
12227 | rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); | ||
12228 | if (rc == IOCB_ERROR) { | ||
12229 | lpfc_printf_log(phba, KERN_ERR, LOG_ELS, | ||
12230 | "2925 Failed to issue CT ABTS RSP x%x on " | ||
12231 | "xri x%x, Data x%x\n", | ||
12232 | icmd->un.xseq64.w5.hcsw.Rctl, oxid, | ||
12233 | phba->link_state); | ||
12234 | lpfc_sli_release_iocbq(phba, ctiocb); | ||
12235 | } | ||
12037 | } | 12236 | } |
12038 | 12237 | ||
12039 | /** | 12238 | /** |
@@ -12081,7 +12280,7 @@ lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport, | |||
12081 | lpfc_in_buf_free(phba, &dmabuf->dbuf); | 12280 | lpfc_in_buf_free(phba, &dmabuf->dbuf); |
12082 | } | 12281 | } |
12083 | /* Send basic accept (BA_ACC) to the abort requester */ | 12282 | /* Send basic accept (BA_ACC) to the abort requester */ |
12084 | lpfc_sli4_seq_abort_acc(phba, &fc_hdr); | 12283 | lpfc_sli4_seq_abort_rsp(phba, &fc_hdr); |
12085 | } | 12284 | } |
12086 | 12285 | ||
12087 | /** | 12286 | /** |
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 595056b8960..1a3cbf88f2c 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2009 Emulex. All rights reserved. * | 4 | * Copyright (C) 2009-2011 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 0a4d376dbca..2404d1d6556 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h | |||
@@ -18,7 +18,7 @@ | |||
18 | * included with this package. * | 18 | * included with this package. * |
19 | *******************************************************************/ | 19 | *******************************************************************/ |
20 | 20 | ||
21 | #define LPFC_DRIVER_VERSION "8.3.21" | 21 | #define LPFC_DRIVER_VERSION "8.3.22" |
22 | #define LPFC_DRIVER_NAME "lpfc" | 22 | #define LPFC_DRIVER_NAME "lpfc" |
23 | #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" | 23 | #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" |
24 | #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" | 24 | #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index e8a6f1cf1e4..5e001ffd4c1 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c | |||
@@ -1748,6 +1748,54 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc) | |||
1748 | } | 1748 | } |
1749 | 1749 | ||
1750 | /** | 1750 | /** |
1751 | * _base_display_hp_branding - Display branding string | ||
1752 | * @ioc: per adapter object | ||
1753 | * | ||
1754 | * Return nothing. | ||
1755 | */ | ||
1756 | static void | ||
1757 | _base_display_hp_branding(struct MPT2SAS_ADAPTER *ioc) | ||
1758 | { | ||
1759 | if (ioc->pdev->subsystem_vendor != MPT2SAS_HP_3PAR_SSVID) | ||
1760 | return; | ||
1761 | |||
1762 | switch (ioc->pdev->device) { | ||
1763 | case MPI2_MFGPAGE_DEVID_SAS2004: | ||
1764 | switch (ioc->pdev->subsystem_device) { | ||
1765 | case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID: | ||
1766 | printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, | ||
1767 | MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING); | ||
1768 | break; | ||
1769 | default: | ||
1770 | break; | ||
1771 | } | ||
1772 | case MPI2_MFGPAGE_DEVID_SAS2308_2: | ||
1773 | switch (ioc->pdev->subsystem_device) { | ||
1774 | case MPT2SAS_HP_2_4_INTERNAL_SSDID: | ||
1775 | printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, | ||
1776 | MPT2SAS_HP_2_4_INTERNAL_BRANDING); | ||
1777 | break; | ||
1778 | case MPT2SAS_HP_2_4_EXTERNAL_SSDID: | ||
1779 | printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, | ||
1780 | MPT2SAS_HP_2_4_EXTERNAL_BRANDING); | ||
1781 | break; | ||
1782 | case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID: | ||
1783 | printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, | ||
1784 | MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING); | ||
1785 | break; | ||
1786 | case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID: | ||
1787 | printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, | ||
1788 | MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING); | ||
1789 | break; | ||
1790 | default: | ||
1791 | break; | ||
1792 | } | ||
1793 | default: | ||
1794 | break; | ||
1795 | } | ||
1796 | } | ||
1797 | |||
1798 | /** | ||
1751 | * _base_display_ioc_capabilities - Display IOC's capabilities. | 1799 | * _base_display_ioc_capabilities - Display IOC's capabilities. |
1752 | * @ioc: per adapter object | 1800 | * @ioc: per adapter object |
1753 | * | 1801 | * |
@@ -1778,6 +1826,7 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc) | |||
1778 | 1826 | ||
1779 | _base_display_dell_branding(ioc); | 1827 | _base_display_dell_branding(ioc); |
1780 | _base_display_intel_branding(ioc); | 1828 | _base_display_intel_branding(ioc); |
1829 | _base_display_hp_branding(ioc); | ||
1781 | 1830 | ||
1782 | printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name); | 1831 | printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name); |
1783 | 1832 | ||
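In the _base_display_hp_branding() function added above, the inner switch for MPI2_MFGPAGE_DEVID_SAS2004 is not followed by a break, so a SAS2004 adapter is also matched against the MPI2_MFGPAGE_DEVID_SAS2308_2 SSDID table and can print that family's branding string as well. Assuming the fall-through is unintended, the outer switch would normally carry explicit breaks, for example:

        switch (ioc->pdev->device) {
        case MPI2_MFGPAGE_DEVID_SAS2004:
                switch (ioc->pdev->subsystem_device) {
                case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
                        printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
                            MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
                        break;
                default:
                        break;
                }
                break;                  /* prevent fall-through into SAS2308_2 */
        case MPI2_MFGPAGE_DEVID_SAS2308_2:
                /* ... SSDID cases as in the hunk above ... */
                break;
        default:
                break;
        }
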
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index a3f8aa9baea..500328245f6 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h | |||
@@ -168,6 +168,26 @@ | |||
168 | #define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E | 168 | #define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E |
169 | #define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F | 169 | #define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F |
170 | 170 | ||
171 | |||
172 | /* | ||
173 | * HP HBA branding | ||
174 | */ | ||
175 | #define MPT2SAS_HP_3PAR_SSVID 0x1590 | ||
176 | #define MPT2SAS_HP_2_4_INTERNAL_BRANDING "HP H220 Host Bus Adapter" | ||
177 | #define MPT2SAS_HP_2_4_EXTERNAL_BRANDING "HP H221 Host Bus Adapter" | ||
178 | #define MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING "HP H222 Host Bus Adapter" | ||
179 | #define MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING "HP H220i Host Bus Adapter" | ||
180 | #define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING "HP H210i Host Bus Adapter" | ||
181 | |||
182 | /* | ||
183 | * HP HBA SSDIDs |||
184 | */ | ||
185 | #define MPT2SAS_HP_2_4_INTERNAL_SSDID 0x0041 | ||
186 | #define MPT2SAS_HP_2_4_EXTERNAL_SSDID 0x0042 | ||
187 | #define MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID 0x0043 | ||
188 | #define MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID 0x0044 | ||
189 | #define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID 0x0046 | ||
190 | |||
171 | /* | 191 | /* |
172 | * per target private data | 192 | * per target private data |
173 | */ | 193 | */ |
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 19ad34f381a..938d045e418 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c | |||
@@ -663,6 +663,13 @@ static struct pci_device_id __devinitdata mvs_pci_table[] = { | |||
663 | { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 }, | 663 | { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 }, |
664 | { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 }, | 664 | { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 }, |
665 | { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 }, | 665 | { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 }, |
666 | { PCI_VDEVICE(TTI, 0x2710), chip_9480 }, | ||
667 | { PCI_VDEVICE(TTI, 0x2720), chip_9480 }, | ||
668 | { PCI_VDEVICE(TTI, 0x2721), chip_9480 }, | ||
669 | { PCI_VDEVICE(TTI, 0x2722), chip_9480 }, | ||
670 | { PCI_VDEVICE(TTI, 0x2740), chip_9480 }, | ||
671 | { PCI_VDEVICE(TTI, 0x2744), chip_9480 }, | ||
672 | { PCI_VDEVICE(TTI, 0x2760), chip_9480 }, | ||
666 | 673 | ||
667 | { } /* terminate list */ | 674 | { } /* terminate list */ |
668 | }; | 675 | }; |
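The new mvs_pci_table entries rely on PCI_VDEVICE(), which fills in the vendor and device IDs and wildcards the subsystem IDs. Roughly, the first new entry above is shorthand for a full pci_device_id initializer of the following form (sketch; chip_9480 is the same driver_data enum used throughout the table):

        { PCI_VENDOR_ID_TTI, 0x2710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, chip_9480 },
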
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h index 2fc0045b1a5..c1f8d1b150f 100644 --- a/drivers/scsi/qla4xxx/ql4_def.h +++ b/drivers/scsi/qla4xxx/ql4_def.h | |||
@@ -53,6 +53,9 @@ | |||
53 | #define PCI_DEVICE_ID_QLOGIC_ISP8022 0x8022 | 53 | #define PCI_DEVICE_ID_QLOGIC_ISP8022 0x8022 |
54 | #endif | 54 | #endif |
55 | 55 | ||
56 | #define ISP4XXX_PCI_FN_1 0x1 | ||
57 | #define ISP4XXX_PCI_FN_2 0x3 | ||
58 | |||
56 | #define QLA_SUCCESS 0 | 59 | #define QLA_SUCCESS 0 |
57 | #define QLA_ERROR 1 | 60 | #define QLA_ERROR 1 |
58 | 61 | ||
@@ -233,9 +236,6 @@ struct ddb_entry { | |||
233 | 236 | ||
234 | unsigned long flags; /* DDB Flags */ | 237 | unsigned long flags; /* DDB Flags */ |
235 | 238 | ||
236 | unsigned long dev_scan_wait_to_start_relogin; | ||
237 | unsigned long dev_scan_wait_to_complete_relogin; | ||
238 | |||
239 | uint16_t fw_ddb_index; /* DDB firmware index */ | 239 | uint16_t fw_ddb_index; /* DDB firmware index */ |
240 | uint16_t options; | 240 | uint16_t options; |
241 | uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */ | 241 | uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */ |
@@ -289,8 +289,6 @@ struct ddb_entry { | |||
289 | * DDB flags. | 289 | * DDB flags. |
290 | */ | 290 | */ |
291 | #define DF_RELOGIN 0 /* Relogin to device */ | 291 | #define DF_RELOGIN 0 /* Relogin to device */ |
292 | #define DF_NO_RELOGIN 1 /* Do not relogin if IOCTL | ||
293 | * logged it out */ | ||
294 | #define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */ | 292 | #define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */ |
295 | #define DF_FO_MASKED 3 | 293 | #define DF_FO_MASKED 3 |
296 | 294 | ||
@@ -376,7 +374,7 @@ struct scsi_qla_host { | |||
376 | #define AF_LINK_UP 8 /* 0x00000100 */ | 374 | #define AF_LINK_UP 8 /* 0x00000100 */ |
377 | #define AF_IRQ_ATTACHED 10 /* 0x00000400 */ | 375 | #define AF_IRQ_ATTACHED 10 /* 0x00000400 */ |
378 | #define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */ | 376 | #define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */ |
379 | #define AF_HBA_GOING_AWAY 12 /* 0x00001000 */ | 377 | #define AF_HA_REMOVAL 12 /* 0x00001000 */ |
380 | #define AF_INTx_ENABLED 15 /* 0x00008000 */ | 378 | #define AF_INTx_ENABLED 15 /* 0x00008000 */ |
381 | #define AF_MSI_ENABLED 16 /* 0x00010000 */ | 379 | #define AF_MSI_ENABLED 16 /* 0x00010000 */ |
382 | #define AF_MSIX_ENABLED 17 /* 0x00020000 */ | 380 | #define AF_MSIX_ENABLED 17 /* 0x00020000 */ |
@@ -479,7 +477,6 @@ struct scsi_qla_host { | |||
479 | uint32_t timer_active; | 477 | uint32_t timer_active; |
480 | 478 | ||
481 | /* Recovery Timers */ | 479 | /* Recovery Timers */ |
482 | uint32_t discovery_wait; | ||
483 | atomic_t check_relogin_timeouts; | 480 | atomic_t check_relogin_timeouts; |
484 | uint32_t retry_reset_ha_cnt; | 481 | uint32_t retry_reset_ha_cnt; |
485 | uint32_t isp_reset_timer; /* reset test timer */ | 482 | uint32_t isp_reset_timer; /* reset test timer */ |
@@ -765,6 +762,5 @@ static inline void ql4xxx_unlock_drvr(struct scsi_qla_host *a) | |||
765 | /* Defines for process_aen() */ | 762 | /* Defines for process_aen() */ |
766 | #define PROCESS_ALL_AENS 0 | 763 | #define PROCESS_ALL_AENS 0 |
767 | #define FLUSH_DDB_CHANGED_AENS 1 | 764 | #define FLUSH_DDB_CHANGED_AENS 1 |
768 | #define RELOGIN_DDB_CHANGED_AENS 2 | ||
769 | 765 | ||
770 | #endif /*_QLA4XXX_H */ | 766 | #endif /*_QLA4XXX_H */ |
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h index c1985792f03..31e2bf97198 100644 --- a/drivers/scsi/qla4xxx/ql4_fw.h +++ b/drivers/scsi/qla4xxx/ql4_fw.h | |||
@@ -455,6 +455,7 @@ struct addr_ctrl_blk { | |||
455 | uint8_t res0; /* 07 */ | 455 | uint8_t res0; /* 07 */ |
456 | uint16_t eth_mtu_size; /* 08-09 */ | 456 | uint16_t eth_mtu_size; /* 08-09 */ |
457 | uint16_t add_fw_options; /* 0A-0B */ | 457 | uint16_t add_fw_options; /* 0A-0B */ |
458 | #define SERIALIZE_TASK_MGMT 0x0400 | ||
458 | 459 | ||
459 | uint8_t hb_interval; /* 0C */ | 460 | uint8_t hb_interval; /* 0C */ |
460 | uint8_t inst_num; /* 0D */ | 461 | uint8_t inst_num; /* 0D */ |
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h index 8fad99b7eef..cc53e3fbd78 100644 --- a/drivers/scsi/qla4xxx/ql4_glbl.h +++ b/drivers/scsi/qla4xxx/ql4_glbl.h | |||
@@ -136,7 +136,6 @@ void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha); | |||
136 | void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha); | 136 | void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha); |
137 | 137 | ||
138 | extern int ql4xextended_error_logging; | 138 | extern int ql4xextended_error_logging; |
139 | extern int ql4xdiscoverywait; | ||
140 | extern int ql4xdontresethba; | 139 | extern int ql4xdontresethba; |
141 | extern int ql4xenablemsix; | 140 | extern int ql4xenablemsix; |
142 | 141 | ||
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c index 1629c48c35e..bbb2e903d38 100644 --- a/drivers/scsi/qla4xxx/ql4_init.c +++ b/drivers/scsi/qla4xxx/ql4_init.c | |||
@@ -723,13 +723,38 @@ int qla4_is_relogin_allowed(struct scsi_qla_host *ha, uint32_t conn_err) | |||
723 | return relogin; | 723 | return relogin; |
724 | } | 724 | } |
725 | 725 | ||
726 | static void qla4xxx_flush_AENS(struct scsi_qla_host *ha) | ||
727 | { | ||
728 | unsigned long wtime; | ||
729 | |||
730 | /* Flush the 0x8014 AEN from the firmware as a result of | ||
731 | * Auto connect. We are basically doing get_firmware_ddb() | ||
732 | * to determine whether we need to log back in or not. | ||
733 | * Trying to do a set ddb before we have processed 0x8014 | ||
734 | * will result in another set_ddb() for the same ddb. In other | ||
735 | * words there will be stale entries in the aen_q. | ||
736 | */ | ||
737 | wtime = jiffies + (2 * HZ); | ||
738 | do { | ||
739 | if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) | ||
740 | if (ha->firmware_state & (BIT_2 | BIT_0)) | ||
741 | return; | ||
742 | |||
743 | if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) | ||
744 | qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); | ||
745 | |||
746 | msleep(1000); | ||
747 | } while (!time_after_eq(jiffies, wtime)); | ||
748 | } | ||
749 | |||
726 | /** | 750 | /** |
727 | * qla4xxx_configure_ddbs - builds driver ddb list | 751 | * qla4xxx_build_ddb_list - builds driver ddb list |
728 | * @ha: Pointer to host adapter structure. | 752 | * @ha: Pointer to host adapter structure. |
729 | * | 753 | * |
730 | * This routine searches for all valid firmware ddb entries and builds | 754 | * This routine searches for all valid firmware ddb entries and builds |
731 | * an internal ddb list. Ddbs that are considered valid are those with | 755 | * an internal ddb list. Ddbs that are considered valid are those with |
732 | * a device state of SESSION_ACTIVE. | 756 | * a device state of SESSION_ACTIVE. |
757 | * A relogin (set_ddb) is issued for DDBs that are not online. | ||
733 | **/ | 758 | **/ |
734 | static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha) | 759 | static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha) |
735 | { | 760 | { |
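qla4xxx_flush_AENS(), moved above, is an instance of the common bounded-poll idiom: compute a jiffies deadline, retry the check about once a second, and stop when either the condition holds or time_after_eq() says the budget is spent. A generic sketch of that shape follows; the predicate callback is an abstraction invented for this sketch, not a qla4xxx interface.

#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/types.h>

/* Poll a caller-supplied condition until it holds or the timeout expires. */
static bool example_poll_until(bool (*done)(void *arg), void *arg,
                               unsigned long timeout_jiffies)
{
        unsigned long deadline = jiffies + timeout_jiffies;

        do {
                if (done(arg))
                        return true;
                msleep(1000);                   /* poll roughly once per second */
        } while (!time_after_eq(jiffies, deadline));

        return false;
}
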
@@ -744,6 +769,8 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha) | |||
744 | uint32_t ipv6_device; | 769 | uint32_t ipv6_device; |
745 | uint32_t new_tgt; | 770 | uint32_t new_tgt; |
746 | 771 | ||
772 | qla4xxx_flush_AENS(ha); | ||
773 | |||
747 | fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), | 774 | fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), |
748 | &fw_ddb_entry_dma, GFP_KERNEL); | 775 | &fw_ddb_entry_dma, GFP_KERNEL); |
749 | if (fw_ddb_entry == NULL) { | 776 | if (fw_ddb_entry == NULL) { |
@@ -847,144 +874,6 @@ exit_build_ddb_list_no_free: | |||
847 | return status; | 874 | return status; |
848 | } | 875 | } |
849 | 876 | ||
850 | struct qla4_relog_scan { | ||
851 | int halt_wait; | ||
852 | uint32_t conn_err; | ||
853 | uint32_t fw_ddb_index; | ||
854 | uint32_t next_fw_ddb_index; | ||
855 | uint32_t fw_ddb_device_state; | ||
856 | }; | ||
857 | |||
858 | static int qla4_test_rdy(struct scsi_qla_host *ha, struct qla4_relog_scan *rs) | ||
859 | { | ||
860 | struct ddb_entry *ddb_entry; | ||
861 | |||
862 | if (qla4_is_relogin_allowed(ha, rs->conn_err)) { | ||
863 | /* We either have a device that is in | ||
864 | * the process of relogging in or a | ||
865 | * device that is waiting to be | ||
866 | * relogged in */ | ||
867 | rs->halt_wait = 0; | ||
868 | |||
869 | ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, | ||
870 | rs->fw_ddb_index); | ||
871 | if (ddb_entry == NULL) | ||
872 | return QLA_ERROR; | ||
873 | |||
874 | if (ddb_entry->dev_scan_wait_to_start_relogin != 0 | ||
875 | && time_after_eq(jiffies, | ||
876 | ddb_entry-> | ||
877 | dev_scan_wait_to_start_relogin)) | ||
878 | { | ||
879 | ddb_entry->dev_scan_wait_to_start_relogin = 0; | ||
880 | qla4xxx_set_ddb_entry(ha, rs->fw_ddb_index, 0); | ||
881 | } | ||
882 | } | ||
883 | return QLA_SUCCESS; | ||
884 | } | ||
885 | |||
886 | static int qla4_scan_for_relogin(struct scsi_qla_host *ha, | ||
887 | struct qla4_relog_scan *rs) | ||
888 | { | ||
889 | int error; | ||
890 | |||
891 | /* scan for relogins | ||
892 | * ----------------- */ | ||
893 | for (rs->fw_ddb_index = 0; rs->fw_ddb_index < MAX_DDB_ENTRIES; | ||
894 | rs->fw_ddb_index = rs->next_fw_ddb_index) { | ||
895 | if (qla4xxx_get_fwddb_entry(ha, rs->fw_ddb_index, NULL, 0, | ||
896 | NULL, &rs->next_fw_ddb_index, | ||
897 | &rs->fw_ddb_device_state, | ||
898 | &rs->conn_err, NULL, NULL) | ||
899 | == QLA_ERROR) | ||
900 | return QLA_ERROR; | ||
901 | |||
902 | if (rs->fw_ddb_device_state == DDB_DS_LOGIN_IN_PROCESS) | ||
903 | rs->halt_wait = 0; | ||
904 | |||
905 | if (rs->fw_ddb_device_state == DDB_DS_SESSION_FAILED || | ||
906 | rs->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE) { | ||
907 | error = qla4_test_rdy(ha, rs); | ||
908 | if (error) | ||
909 | return error; | ||
910 | } | ||
911 | |||
912 | /* We know we've reached the last device when | ||
913 | * next_fw_ddb_index is 0 */ | ||
914 | if (rs->next_fw_ddb_index == 0) | ||
915 | break; | ||
916 | } | ||
917 | return QLA_SUCCESS; | ||
918 | } | ||
919 | |||
920 | /** | ||
921 | * qla4xxx_devices_ready - wait for target devices to be logged in | ||
922 | * @ha: pointer to adapter structure | ||
923 | * | ||
924 | * This routine waits up to ql4xdiscoverywait seconds for target |||
925 | * devices to be logged into the F/W database during driver load time. |||
926 | **/ | ||
927 | static int qla4xxx_devices_ready(struct scsi_qla_host *ha) | ||
928 | { | ||
929 | int error; | ||
930 | unsigned long discovery_wtime; | ||
931 | struct qla4_relog_scan rs; | ||
932 | |||
933 | discovery_wtime = jiffies + (ql4xdiscoverywait * HZ); | ||
934 | |||
935 | DEBUG(printk("Waiting (%d) for devices ...\n", ql4xdiscoverywait)); | ||
936 | do { | ||
937 | /* poll for AEN. */ | ||
938 | qla4xxx_get_firmware_state(ha); | ||
939 | if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) { | ||
940 | /* Set time-between-relogin timer */ | ||
941 | qla4xxx_process_aen(ha, RELOGIN_DDB_CHANGED_AENS); | ||
942 | } | ||
943 | |||
944 | /* if no relogins are active or needed, halt discovery wait */ |||
945 | rs.halt_wait = 1; | ||
946 | |||
947 | error = qla4_scan_for_relogin(ha, &rs); | ||
948 | |||
949 | if (rs.halt_wait) { | ||
950 | DEBUG2(printk("scsi%ld: %s: Delay halted. Devices " | ||
951 | "Ready.\n", ha->host_no, __func__)); | ||
952 | return QLA_SUCCESS; | ||
953 | } | ||
954 | |||
955 | msleep(2000); | ||
956 | } while (!time_after_eq(jiffies, discovery_wtime)); | ||
957 | |||
958 | DEBUG3(qla4xxx_get_conn_event_log(ha)); | ||
959 | |||
960 | return QLA_SUCCESS; | ||
961 | } | ||
962 | |||
963 | static void qla4xxx_flush_AENS(struct scsi_qla_host *ha) | ||
964 | { | ||
965 | unsigned long wtime; | ||
966 | |||
967 | /* Flush the 0x8014 AEN from the firmware as a result of | ||
968 | * Auto connect. We are basically doing get_firmware_ddb() | ||
969 | * to determine whether we need to log back in or not. | ||
970 | * Trying to do a set ddb before we have processed 0x8014 | ||
971 | * will result in another set_ddb() for the same ddb. In other | ||
972 | * words there will be stale entries in the aen_q. | ||
973 | */ | ||
974 | wtime = jiffies + (2 * HZ); | ||
975 | do { | ||
976 | if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) | ||
977 | if (ha->firmware_state & (BIT_2 | BIT_0)) | ||
978 | return; | ||
979 | |||
980 | if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) | ||
981 | qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); | ||
982 | |||
983 | msleep(1000); | ||
984 | } while (!time_after_eq(jiffies, wtime)); | ||
985 | |||
986 | } | ||
987 | |||
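Both qla4xxx_flush_AENS() and qla4xxx_devices_ready() above use the same bounded-polling idiom: compute a deadline in jiffies, poll, sleep, and give up once time_after_eq() fires. A minimal sketch of that idiom, kept separate from the driver (event_ready() is a hypothetical callback standing in for the AEN/firmware-state check):

    #include <linux/jiffies.h>
    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    /* Poll event_ready() about once per second for up to timeout_secs. */
    static int wait_for_event_example(bool (*event_ready)(void),
                                      unsigned int timeout_secs)
    {
            unsigned long deadline = jiffies + timeout_secs * HZ;

            do {
                    if (event_ready())
                            return 0;       /* condition met before the deadline */
                    msleep(1000);           /* sleep rather than busy-wait */
            } while (!time_after_eq(jiffies, deadline));

            return -ETIMEDOUT;              /* deadline expired */
    }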
988 | static int qla4xxx_initialize_ddb_list(struct scsi_qla_host *ha) | 877 | static int qla4xxx_initialize_ddb_list(struct scsi_qla_host *ha) |
989 | { | 878 | { |
990 | uint16_t fw_ddb_index; | 879 | uint16_t fw_ddb_index; |
@@ -996,29 +885,12 @@ static int qla4xxx_initialize_ddb_list(struct scsi_qla_host *ha) | |||
996 | 885 | ||
997 | for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES; fw_ddb_index++) | 886 | for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES; fw_ddb_index++) |
998 | ha->fw_ddb_index_map[fw_ddb_index] = | 887 | ha->fw_ddb_index_map[fw_ddb_index] = |
999 | (struct ddb_entry *)INVALID_ENTRY; | 888 | (struct ddb_entry *)INVALID_ENTRY; |
1000 | 889 | ||
1001 | ha->tot_ddbs = 0; | 890 | ha->tot_ddbs = 0; |
1002 | 891 | ||
1003 | qla4xxx_flush_AENS(ha); | 892 | /* Perform device discovery and build ddb list. */ |
1004 | 893 | status = qla4xxx_build_ddb_list(ha); | |
1005 | /* Wait for an AEN */ | ||
1006 | qla4xxx_devices_ready(ha); | ||
1007 | |||
1008 | /* | ||
1009 | * First perform device discovery for active | ||
1010 | * fw ddb indexes and build | ||
1011 | * ddb list. | ||
1012 | */ | ||
1013 | if ((status = qla4xxx_build_ddb_list(ha)) == QLA_ERROR) | ||
1014 | return status; | ||
1015 | |||
1016 | /* | ||
1017 | * Targets can come online after the initial discovery, so processing | ||
1018 | * the aens here will catch them. | ||
1019 | */ | ||
1020 | if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) | ||
1021 | qla4xxx_process_aen(ha, PROCESS_ALL_AENS); | ||
1022 | 894 | ||
1023 | return status; | 895 | return status; |
1024 | } | 896 | } |
@@ -1537,7 +1409,6 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index, | |||
1537 | uint32_t state, uint32_t conn_err) | 1409 | uint32_t state, uint32_t conn_err) |
1538 | { | 1410 | { |
1539 | struct ddb_entry * ddb_entry; | 1411 | struct ddb_entry * ddb_entry; |
1540 | uint32_t old_fw_ddb_device_state; | ||
1541 | 1412 | ||
1542 | /* check for out of range index */ | 1413 | /* check for out of range index */ |
1543 | if (fw_ddb_index >= MAX_DDB_ENTRIES) | 1414 | if (fw_ddb_index >= MAX_DDB_ENTRIES) |
@@ -1553,27 +1424,18 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index, | |||
1553 | } | 1424 | } |
1554 | 1425 | ||
1555 | /* Device already exists in our database. */ | 1426 | /* Device already exists in our database. */ |
1556 | old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state; | ||
1557 | DEBUG2(printk("scsi%ld: %s DDB - old state= 0x%x, new state=0x%x for " | 1427 | DEBUG2(printk("scsi%ld: %s DDB - old state= 0x%x, new state=0x%x for " |
1558 | "index [%d]\n", ha->host_no, __func__, | 1428 | "index [%d]\n", ha->host_no, __func__, |
1559 | ddb_entry->fw_ddb_device_state, state, fw_ddb_index)); | 1429 | ddb_entry->fw_ddb_device_state, state, fw_ddb_index)); |
1560 | if (old_fw_ddb_device_state == state && | ||
1561 | state == DDB_DS_SESSION_ACTIVE) { | ||
1562 | if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) { | ||
1563 | atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); | ||
1564 | iscsi_unblock_session(ddb_entry->sess); | ||
1565 | } | ||
1566 | return QLA_SUCCESS; | ||
1567 | } | ||
1568 | 1430 | ||
1569 | ddb_entry->fw_ddb_device_state = state; | 1431 | ddb_entry->fw_ddb_device_state = state; |
1570 | /* Device is back online. */ | 1432 | /* Device is back online. */ |
1571 | if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) { | 1433 | if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) && |
1434 | (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) { | ||
1572 | atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); | 1435 | atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); |
1573 | atomic_set(&ddb_entry->relogin_retry_count, 0); | 1436 | atomic_set(&ddb_entry->relogin_retry_count, 0); |
1574 | atomic_set(&ddb_entry->relogin_timer, 0); | 1437 | atomic_set(&ddb_entry->relogin_timer, 0); |
1575 | clear_bit(DF_RELOGIN, &ddb_entry->flags); | 1438 | clear_bit(DF_RELOGIN, &ddb_entry->flags); |
1576 | clear_bit(DF_NO_RELOGIN, &ddb_entry->flags); | ||
1577 | iscsi_unblock_session(ddb_entry->sess); | 1439 | iscsi_unblock_session(ddb_entry->sess); |
1578 | iscsi_session_event(ddb_entry->sess, | 1440 | iscsi_session_event(ddb_entry->sess, |
1579 | ISCSI_KEVENT_CREATE_SESSION); | 1441 | ISCSI_KEVENT_CREATE_SESSION); |
@@ -1581,7 +1443,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index, | |||
1581 | * Change the lun state to READY in case the lun TIMEOUT before | 1443 | * Change the lun state to READY in case the lun TIMEOUT before |
1582 | * the device came back. | 1444 | * the device came back. |
1583 | */ | 1445 | */ |
1584 | } else { | 1446 | } else if (ddb_entry->fw_ddb_device_state != DDB_DS_SESSION_ACTIVE) { |
1585 | /* Device went away, mark device missing */ | 1447 | /* Device went away, mark device missing */ |
1586 | if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) { | 1448 | if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) { |
1587 | DEBUG2(ql4_printk(KERN_INFO, ha, "%s mark missing " | 1449 | DEBUG2(ql4_printk(KERN_INFO, ha, "%s mark missing " |
@@ -1598,7 +1460,6 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index, | |||
1598 | */ | 1460 | */ |
1599 | if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_FAILED && | 1461 | if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_FAILED && |
1600 | !test_bit(DF_RELOGIN, &ddb_entry->flags) && | 1462 | !test_bit(DF_RELOGIN, &ddb_entry->flags) && |
1601 | !test_bit(DF_NO_RELOGIN, &ddb_entry->flags) && | ||
1602 | qla4_is_relogin_allowed(ha, conn_err)) { | 1463 | qla4_is_relogin_allowed(ha, conn_err)) { |
1603 | /* | 1464 | /* |
1604 | * This triggers a relogin. After the relogin_timer | 1465 | * This triggers a relogin. After the relogin_timer |
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c index 03e028e6e80..2f40ac761cd 100644 --- a/drivers/scsi/qla4xxx/ql4_isr.c +++ b/drivers/scsi/qla4xxx/ql4_isr.c | |||
@@ -801,7 +801,7 @@ irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id) | |||
801 | &ha->reg->ctrl_status); | 801 | &ha->reg->ctrl_status); |
802 | readl(&ha->reg->ctrl_status); | 802 | readl(&ha->reg->ctrl_status); |
803 | 803 | ||
804 | if (!test_bit(AF_HBA_GOING_AWAY, &ha->flags)) | 804 | if (!test_bit(AF_HA_REMOVAL, &ha->flags)) |
805 | set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); | 805 | set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); |
806 | 806 | ||
807 | break; | 807 | break; |
@@ -1008,34 +1008,9 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen) | |||
1008 | mbox_sts[0], mbox_sts[2], | 1008 | mbox_sts[0], mbox_sts[2], |
1009 | mbox_sts[3])); | 1009 | mbox_sts[3])); |
1010 | break; | 1010 | break; |
1011 | } else if (process_aen == RELOGIN_DDB_CHANGED_AENS) { | ||
1012 | /* for use during init time, we only want to | ||
1013 | * relogin non-active ddbs */ | ||
1014 | struct ddb_entry *ddb_entry; | ||
1015 | |||
1016 | ddb_entry = | ||
1017 | /* FIXME: name length? */ | ||
1018 | qla4xxx_lookup_ddb_by_fw_index(ha, | ||
1019 | mbox_sts[2]); | ||
1020 | if (!ddb_entry) | ||
1021 | break; | ||
1022 | |||
1023 | ddb_entry->dev_scan_wait_to_complete_relogin = | ||
1024 | 0; | ||
1025 | ddb_entry->dev_scan_wait_to_start_relogin = | ||
1026 | jiffies + | ||
1027 | ((ddb_entry->default_time2wait + | ||
1028 | 4) * HZ); | ||
1029 | |||
1030 | DEBUG2(printk("scsi%ld: ddb [%d] initiate" | ||
1031 | " RELOGIN after %d seconds\n", | ||
1032 | ha->host_no, | ||
1033 | ddb_entry->fw_ddb_index, | ||
1034 | ddb_entry->default_time2wait + | ||
1035 | 4)); | ||
1036 | break; | ||
1037 | } | 1011 | } |
1038 | 1012 | case PROCESS_ALL_AENS: | |
1013 | default: | ||
1039 | if (mbox_sts[1] == 0) { /* Global DB change. */ | 1014 | if (mbox_sts[1] == 0) { /* Global DB change. */ |
1040 | qla4xxx_reinitialize_ddb_list(ha); | 1015 | qla4xxx_reinitialize_ddb_list(ha); |
1041 | } else if (mbox_sts[1] == 1) { /* Specific device. */ | 1016 | } else if (mbox_sts[1] == 1) { /* Specific device. */ |
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index f65626aec7c..f9d81c8372c 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c | |||
@@ -32,6 +32,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, | |||
32 | u_long wait_count; | 32 | u_long wait_count; |
33 | uint32_t intr_status; | 33 | uint32_t intr_status; |
34 | unsigned long flags = 0; | 34 | unsigned long flags = 0; |
35 | uint32_t dev_state; | ||
35 | 36 | ||
36 | /* Make sure that pointers are valid */ | 37 | /* Make sure that pointers are valid */ |
37 | if (!mbx_cmd || !mbx_sts) { | 38 | if (!mbx_cmd || !mbx_sts) { |
@@ -40,12 +41,23 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, | |||
40 | return status; | 41 | return status; |
41 | } | 42 | } |
42 | 43 | ||
43 | if (is_qla8022(ha) && | 44 | if (is_qla8022(ha)) { |
44 | test_bit(AF_FW_RECOVERY, &ha->flags)) { | 45 | if (test_bit(AF_FW_RECOVERY, &ha->flags)) { |
45 | DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: prematurely " | 46 | DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: " |
46 | "completing mbx cmd as firmware recovery detected\n", | 47 | "prematurely completing mbx cmd as firmware " |
47 | ha->host_no, __func__)); | 48 | "recovery detected\n", ha->host_no, __func__)); |
48 | return status; | 49 | return status; |
50 | } | ||
51 | /* Do not send any mbx cmd if h/w is in failed state*/ | ||
52 | qla4_8xxx_idc_lock(ha); | ||
53 | dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); | ||
54 | qla4_8xxx_idc_unlock(ha); | ||
55 | if (dev_state == QLA82XX_DEV_FAILED) { | ||
56 | ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: H/W is in " | ||
57 | "failed state, do not send any mailbox commands\n", | ||
58 | ha->host_no, __func__); | ||
59 | return status; | ||
60 | } | ||
49 | } | 61 | } |
50 | 62 | ||
51 | if ((is_aer_supported(ha)) && | 63 | if ((is_aer_supported(ha)) && |
@@ -139,7 +151,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, | |||
139 | if (test_bit(AF_IRQ_ATTACHED, &ha->flags) && | 151 | if (test_bit(AF_IRQ_ATTACHED, &ha->flags) && |
140 | test_bit(AF_INTERRUPTS_ON, &ha->flags) && | 152 | test_bit(AF_INTERRUPTS_ON, &ha->flags) && |
141 | test_bit(AF_ONLINE, &ha->flags) && | 153 | test_bit(AF_ONLINE, &ha->flags) && |
142 | !test_bit(AF_HBA_GOING_AWAY, &ha->flags)) { | 154 | !test_bit(AF_HA_REMOVAL, &ha->flags)) { |
143 | /* Do not poll for completion. Use completion queue */ | 155 | /* Do not poll for completion. Use completion queue */ |
144 | set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags); | 156 | set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags); |
145 | wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ); | 157 | wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ); |
@@ -395,9 +407,6 @@ qla4xxx_update_local_ifcb(struct scsi_qla_host *ha, | |||
395 | /*memcpy(ha->alias, init_fw_cb->Alias, | 407 | /*memcpy(ha->alias, init_fw_cb->Alias, |
396 | min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/ | 408 | min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/ |
397 | 409 | ||
398 | /* Save Command Line Parameter info */ | ||
399 | ha->discovery_wait = ql4xdiscoverywait; | ||
400 | |||
401 | if (ha->acb_version == ACB_SUPPORTED) { | 410 | if (ha->acb_version == ACB_SUPPORTED) { |
402 | ha->ipv6_options = init_fw_cb->ipv6_opts; | 411 | ha->ipv6_options = init_fw_cb->ipv6_opts; |
403 | ha->ipv6_addl_options = init_fw_cb->ipv6_addtl_opts; | 412 | ha->ipv6_addl_options = init_fw_cb->ipv6_addtl_opts; |
@@ -467,6 +476,11 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha) | |||
467 | 476 | ||
468 | init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE); | 477 | init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE); |
469 | 478 | ||
479 | /* Set bit for "serialize task mgmt"; all other bits need to be zero */ | ||
480 | init_fw_cb->add_fw_options = 0; | ||
481 | init_fw_cb->add_fw_options |= | ||
482 | __constant_cpu_to_le16(SERIALIZE_TASK_MGMT); | ||
483 | |||
470 | if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) | 484 | if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) |
471 | != QLA_SUCCESS) { | 485 | != QLA_SUCCESS) { |
472 | DEBUG2(printk(KERN_WARNING | 486 | DEBUG2(printk(KERN_WARNING |
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c index 3d5ef2df413..35381cb0936 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.c +++ b/drivers/scsi/qla4xxx/ql4_nx.c | |||
@@ -2304,14 +2304,13 @@ qla4_8xxx_enable_intrs(struct scsi_qla_host *ha) | |||
2304 | void | 2304 | void |
2305 | qla4_8xxx_disable_intrs(struct scsi_qla_host *ha) | 2305 | qla4_8xxx_disable_intrs(struct scsi_qla_host *ha) |
2306 | { | 2306 | { |
2307 | if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) | 2307 | if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags)) |
2308 | qla4_8xxx_mbx_intr_disable(ha); | 2308 | qla4_8xxx_mbx_intr_disable(ha); |
2309 | 2309 | ||
2310 | spin_lock_irq(&ha->hardware_lock); | 2310 | spin_lock_irq(&ha->hardware_lock); |
2311 | /* BIT 10 - set */ | 2311 | /* BIT 10 - set */ |
2312 | qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400); | 2312 | qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400); |
2313 | spin_unlock_irq(&ha->hardware_lock); | 2313 | spin_unlock_irq(&ha->hardware_lock); |
2314 | clear_bit(AF_INTERRUPTS_ON, &ha->flags); | ||
2315 | } | 2314 | } |
2316 | 2315 | ||
2317 | struct ql4_init_msix_entry { | 2316 | struct ql4_init_msix_entry { |
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 967836ef5ab..a4acb0dd7be 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c | |||
@@ -29,10 +29,6 @@ static struct kmem_cache *srb_cachep; | |||
29 | /* | 29 | /* |
30 | * Module parameter information and variables | 30 | * Module parameter information and variables |
31 | */ | 31 | */ |
32 | int ql4xdiscoverywait = 60; | ||
33 | module_param(ql4xdiscoverywait, int, S_IRUGO | S_IWUSR); | ||
34 | MODULE_PARM_DESC(ql4xdiscoverywait, "Discovery wait time"); | ||
35 | |||
36 | int ql4xdontresethba = 0; | 32 | int ql4xdontresethba = 0; |
37 | module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR); | 33 | module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR); |
38 | MODULE_PARM_DESC(ql4xdontresethba, | 34 | MODULE_PARM_DESC(ql4xdontresethba, |
@@ -55,6 +51,17 @@ MODULE_PARM_DESC(ql4xenablemsix, | |||
55 | " 2 = enable MSI interrupt mechanism."); | 51 | " 2 = enable MSI interrupt mechanism."); |
56 | 52 | ||
57 | #define QL4_DEF_QDEPTH 32 | 53 | #define QL4_DEF_QDEPTH 32 |
54 | static int ql4xmaxqdepth = QL4_DEF_QDEPTH; | ||
55 | module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR); | ||
56 | MODULE_PARM_DESC(ql4xmaxqdepth, | ||
57 | "Maximum queue depth to report for target devices.\n" | ||
58 | " Default: 32."); | ||
59 | |||
60 | static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO; | ||
61 | module_param(ql4xsess_recovery_tmo, int, S_IRUGO); | ||
62 | MODULE_PARM_DESC(ql4xsess_recovery_tmo, | ||
63 | "Target Session Recovery Timeout.\n" | ||
64 | " Default: 30 sec."); | ||
58 | 65 | ||
59 | /* | 66 | /* |
60 | * SCSI host template entry points | 67 | * SCSI host template entry points |
@@ -165,7 +172,7 @@ static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session) | |||
165 | DEBUG2(printk("scsi%ld: %s: ddb [%d] session recovery timeout " | 172 | DEBUG2(printk("scsi%ld: %s: ddb [%d] session recovery timeout " |
166 | "of (%d) secs exhausted, marking device DEAD.\n", | 173 | "of (%d) secs exhausted, marking device DEAD.\n", |
167 | ha->host_no, __func__, ddb_entry->fw_ddb_index, | 174 | ha->host_no, __func__, ddb_entry->fw_ddb_index, |
168 | QL4_SESS_RECOVERY_TMO)); | 175 | ddb_entry->sess->recovery_tmo)); |
169 | } | 176 | } |
170 | } | 177 | } |
171 | 178 | ||
@@ -295,7 +302,7 @@ int qla4xxx_add_sess(struct ddb_entry *ddb_entry) | |||
295 | { | 302 | { |
296 | int err; | 303 | int err; |
297 | 304 | ||
298 | ddb_entry->sess->recovery_tmo = QL4_SESS_RECOVERY_TMO; | 305 | ddb_entry->sess->recovery_tmo = ql4xsess_recovery_tmo; |
299 | 306 | ||
300 | err = iscsi_add_session(ddb_entry->sess, ddb_entry->fw_ddb_index); | 307 | err = iscsi_add_session(ddb_entry->sess, ddb_entry->fw_ddb_index); |
301 | if (err) { | 308 | if (err) { |
@@ -753,12 +760,6 @@ static void qla4xxx_timer(struct scsi_qla_host *ha) | |||
753 | if (!pci_channel_offline(ha->pdev)) | 760 | if (!pci_channel_offline(ha->pdev)) |
754 | pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); | 761 | pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); |
755 | 762 | ||
756 | if (test_bit(AF_HBA_GOING_AWAY, &ha->flags)) { | ||
757 | DEBUG2(ql4_printk(KERN_INFO, ha, "%s exited. HBA GOING AWAY\n", | ||
758 | __func__)); | ||
759 | return; | ||
760 | } | ||
761 | |||
762 | if (is_qla8022(ha)) { | 763 | if (is_qla8022(ha)) { |
763 | qla4_8xxx_watchdog(ha); | 764 | qla4_8xxx_watchdog(ha); |
764 | } | 765 | } |
@@ -1067,7 +1068,6 @@ void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha) | |||
1067 | 1068 | ||
1068 | /* Disable the board */ | 1069 | /* Disable the board */ |
1069 | ql4_printk(KERN_INFO, ha, "Disabling the board\n"); | 1070 | ql4_printk(KERN_INFO, ha, "Disabling the board\n"); |
1070 | set_bit(AF_HBA_GOING_AWAY, &ha->flags); | ||
1071 | 1071 | ||
1072 | qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); | 1072 | qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); |
1073 | qla4xxx_mark_all_devices_missing(ha); | 1073 | qla4xxx_mark_all_devices_missing(ha); |
@@ -1218,6 +1218,27 @@ recover_ha_init_adapter: | |||
1218 | return status; | 1218 | return status; |
1219 | } | 1219 | } |
1220 | 1220 | ||
1221 | static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha) | ||
1222 | { | ||
1223 | struct ddb_entry *ddb_entry, *dtemp; | ||
1224 | |||
1225 | list_for_each_entry_safe(ddb_entry, dtemp, &ha->ddb_list, list) { | ||
1226 | if ((atomic_read(&ddb_entry->state) == DDB_STATE_MISSING) || | ||
1227 | (atomic_read(&ddb_entry->state) == DDB_STATE_DEAD)) { | ||
1228 | if (ddb_entry->fw_ddb_device_state == | ||
1229 | DDB_DS_SESSION_ACTIVE) { | ||
1230 | atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); | ||
1231 | ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" | ||
1232 | " marked ONLINE\n", ha->host_no, __func__, | ||
1233 | ddb_entry->fw_ddb_index); | ||
1234 | |||
1235 | iscsi_unblock_session(ddb_entry->sess); | ||
1236 | } else | ||
1237 | qla4xxx_relogin_device(ha, ddb_entry); | ||
1238 | } | ||
1239 | } | ||
1240 | } | ||
1241 | |||
1221 | void qla4xxx_wake_dpc(struct scsi_qla_host *ha) | 1242 | void qla4xxx_wake_dpc(struct scsi_qla_host *ha) |
1222 | { | 1243 | { |
1223 | if (ha->dpc_thread && | 1244 | if (ha->dpc_thread && |
@@ -1259,11 +1280,6 @@ static void qla4xxx_do_dpc(struct work_struct *work) | |||
1259 | goto do_dpc_exit; | 1280 | goto do_dpc_exit; |
1260 | } | 1281 | } |
1261 | 1282 | ||
1262 | /* HBA is in the process of being permanently disabled. | ||
1263 | * Don't process anything */ | ||
1264 | if (test_bit(AF_HBA_GOING_AWAY, &ha->flags)) | ||
1265 | return; | ||
1266 | |||
1267 | if (is_qla8022(ha)) { | 1283 | if (is_qla8022(ha)) { |
1268 | if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) { | 1284 | if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) { |
1269 | qla4_8xxx_idc_lock(ha); | 1285 | qla4_8xxx_idc_lock(ha); |
@@ -1331,13 +1347,7 @@ dpc_post_reset_ha: | |||
1331 | if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) { | 1347 | if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) { |
1332 | if (!test_bit(AF_LINK_UP, &ha->flags)) { | 1348 | if (!test_bit(AF_LINK_UP, &ha->flags)) { |
1333 | /* ---- link down? --- */ | 1349 | /* ---- link down? --- */ |
1334 | list_for_each_entry_safe(ddb_entry, dtemp, | 1350 | qla4xxx_mark_all_devices_missing(ha); |
1335 | &ha->ddb_list, list) { | ||
1336 | if (atomic_read(&ddb_entry->state) == | ||
1337 | DDB_STATE_ONLINE) | ||
1338 | qla4xxx_mark_device_missing(ha, | ||
1339 | ddb_entry); | ||
1340 | } | ||
1341 | } else { | 1351 | } else { |
1342 | /* ---- link up? --- * | 1352 | /* ---- link up? --- * |
1343 | * F/W will auto login to all devices ONLY ONCE after | 1353 | * F/W will auto login to all devices ONLY ONCE after |
@@ -1346,30 +1356,7 @@ dpc_post_reset_ha: | |||
1346 | * manually relogin to devices when recovering from | 1356 | * manually relogin to devices when recovering from |
1347 | * connection failures, logouts, expired KATO, etc. */ | 1357 | * connection failures, logouts, expired KATO, etc. */ |
1348 | 1358 | ||
1349 | list_for_each_entry_safe(ddb_entry, dtemp, | 1359 | qla4xxx_relogin_all_devices(ha); |
1350 | &ha->ddb_list, list) { | ||
1351 | if ((atomic_read(&ddb_entry->state) == | ||
1352 | DDB_STATE_MISSING) || | ||
1353 | (atomic_read(&ddb_entry->state) == | ||
1354 | DDB_STATE_DEAD)) { | ||
1355 | if (ddb_entry->fw_ddb_device_state == | ||
1356 | DDB_DS_SESSION_ACTIVE) { | ||
1357 | atomic_set(&ddb_entry->state, | ||
1358 | DDB_STATE_ONLINE); | ||
1359 | ql4_printk(KERN_INFO, ha, | ||
1360 | "scsi%ld: %s: ddb[%d]" | ||
1361 | " marked ONLINE\n", | ||
1362 | ha->host_no, __func__, | ||
1363 | ddb_entry->fw_ddb_index); | ||
1364 | |||
1365 | iscsi_unblock_session( | ||
1366 | ddb_entry->sess); | ||
1367 | } else | ||
1368 | qla4xxx_relogin_device( | ||
1369 | ha, ddb_entry); | ||
1370 | } | ||
1371 | |||
1372 | } | ||
1373 | } | 1360 | } |
1374 | } | 1361 | } |
1375 | 1362 | ||
@@ -1630,6 +1617,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, | |||
1630 | uint8_t init_retry_count = 0; | 1617 | uint8_t init_retry_count = 0; |
1631 | char buf[34]; | 1618 | char buf[34]; |
1632 | struct qla4_8xxx_legacy_intr_set *nx_legacy_intr; | 1619 | struct qla4_8xxx_legacy_intr_set *nx_legacy_intr; |
1620 | uint32_t dev_state; | ||
1633 | 1621 | ||
1634 | if (pci_enable_device(pdev)) | 1622 | if (pci_enable_device(pdev)) |
1635 | return -1; | 1623 | return -1; |
@@ -1713,6 +1701,18 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, | |||
1713 | status = qla4xxx_initialize_adapter(ha, REBUILD_DDB_LIST); | 1701 | status = qla4xxx_initialize_adapter(ha, REBUILD_DDB_LIST); |
1714 | while ((!test_bit(AF_ONLINE, &ha->flags)) && | 1702 | while ((!test_bit(AF_ONLINE, &ha->flags)) && |
1715 | init_retry_count++ < MAX_INIT_RETRIES) { | 1703 | init_retry_count++ < MAX_INIT_RETRIES) { |
1704 | |||
1705 | if (is_qla8022(ha)) { | ||
1706 | qla4_8xxx_idc_lock(ha); | ||
1707 | dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); | ||
1708 | qla4_8xxx_idc_unlock(ha); | ||
1709 | if (dev_state == QLA82XX_DEV_FAILED) { | ||
1710 | ql4_printk(KERN_WARNING, ha, "%s: don't retry " | ||
1711 | "initialize adapter. H/W is in failed state\n", | ||
1712 | __func__); | ||
1713 | break; | ||
1714 | } | ||
1715 | } | ||
1716 | DEBUG2(printk("scsi: %s: retrying adapter initialization " | 1716 | DEBUG2(printk("scsi: %s: retrying adapter initialization " |
1717 | "(%d)\n", __func__, init_retry_count)); | 1717 | "(%d)\n", __func__, init_retry_count)); |
1718 | 1718 | ||
@@ -1815,6 +1815,44 @@ probe_disable_device: | |||
1815 | } | 1815 | } |
1816 | 1816 | ||
1817 | /** | 1817 | /** |
1818 | * qla4xxx_prevent_other_port_reinit - prevent other port from re-initializing | ||
1819 | * @ha: pointer to adapter structure | ||
1820 | * | ||
1821 | * Mark the other ISP-4xxx port to indicate that the driver is being removed, | ||
1822 | * so that the other port will not re-initialize while in the process of | ||
1823 | * removing the ha due to driver unload or hba hotplug. | ||
1824 | **/ | ||
1825 | static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha) | ||
1826 | { | ||
1827 | struct scsi_qla_host *other_ha = NULL; | ||
1828 | struct pci_dev *other_pdev = NULL; | ||
1829 | int fn = ISP4XXX_PCI_FN_2; | ||
1830 | |||
1831 | /* iSCSI function numbers for ISP4xxx are 1 and 3 */ | ||
1832 | if (PCI_FUNC(ha->pdev->devfn) & BIT_1) | ||
1833 | fn = ISP4XXX_PCI_FN_1; | ||
1834 | |||
1835 | other_pdev = | ||
1836 | pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus), | ||
1837 | ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), | ||
1838 | fn)); | ||
1839 | |||
1840 | /* Get other_ha if other_pdev is valid and state is enabled */ | ||
1841 | if (other_pdev) { | ||
1842 | if (atomic_read(&other_pdev->enable_cnt)) { | ||
1843 | other_ha = pci_get_drvdata(other_pdev); | ||
1844 | if (other_ha) { | ||
1845 | set_bit(AF_HA_REMOVAL, &other_ha->flags); | ||
1846 | DEBUG2(ql4_printk(KERN_INFO, ha, "%s: " | ||
1847 | "Prevent %s reinit\n", __func__, | ||
1848 | dev_name(&other_ha->pdev->dev))); | ||
1849 | } | ||
1850 | } | ||
1851 | pci_dev_put(other_pdev); | ||
1852 | } | ||
1853 | } | ||
1854 | |||
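The helper above locates the sibling iSCSI function by toggling between PCI functions 1 and 3 on the same slot. As orientation only, a hedged sketch of the devfn arithmetic using the standard PCI_SLOT()/PCI_FUNC()/PCI_DEVFN() macros (show_sibling_devfn() is illustrative, not part of the driver):

    #include <linux/pci.h>
    #include <linux/printk.h>

    /* devfn packs a 5-bit slot number and a 3-bit function number. */
    static void show_sibling_devfn(struct pci_dev *pdev)
    {
            unsigned int slot = PCI_SLOT(pdev->devfn);
            unsigned int func = PCI_FUNC(pdev->devfn);
            /* ISP4xxx exposes iSCSI on functions 1 and 3; pick the other one. */
            unsigned int other = (func & 0x2) ? 1 : 3;

            pr_info("this function %u, sibling devfn %#x\n",
                    func, PCI_DEVFN(slot, other));
    }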
1855 | /** | ||
1818 | * qla4xxx_remove_adapter - callback function to remove adapter. | 1856 |
1819 | * @pci_dev: PCI device pointer | 1857 | * @pci_dev: PCI device pointer |
1820 | **/ | 1858 | **/ |
@@ -1824,7 +1862,8 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev) | |||
1824 | 1862 | ||
1825 | ha = pci_get_drvdata(pdev); | 1863 | ha = pci_get_drvdata(pdev); |
1826 | 1864 | ||
1827 | set_bit(AF_HBA_GOING_AWAY, &ha->flags); | 1865 | if (!is_qla8022(ha)) |
1866 | qla4xxx_prevent_other_port_reinit(ha); | ||
1828 | 1867 | ||
1829 | /* remove devs from iscsi_sessions to scsi_devices */ | 1868 | /* remove devs from iscsi_sessions to scsi_devices */ |
1830 | qla4xxx_free_ddb_list(ha); | 1869 | qla4xxx_free_ddb_list(ha); |
@@ -1868,10 +1907,15 @@ static int qla4xxx_slave_alloc(struct scsi_device *sdev) | |||
1868 | { | 1907 | { |
1869 | struct iscsi_cls_session *sess = starget_to_session(sdev->sdev_target); | 1908 | struct iscsi_cls_session *sess = starget_to_session(sdev->sdev_target); |
1870 | struct ddb_entry *ddb = sess->dd_data; | 1909 | struct ddb_entry *ddb = sess->dd_data; |
1910 | int queue_depth = QL4_DEF_QDEPTH; | ||
1871 | 1911 | ||
1872 | sdev->hostdata = ddb; | 1912 | sdev->hostdata = ddb; |
1873 | sdev->tagged_supported = 1; | 1913 | sdev->tagged_supported = 1; |
1874 | scsi_activate_tcq(sdev, QL4_DEF_QDEPTH); | 1914 | |
1915 | if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU) | ||
1916 | queue_depth = ql4xmaxqdepth; | ||
1917 | |||
1918 | scsi_activate_tcq(sdev, queue_depth); | ||
1875 | return 0; | 1919 | return 0; |
1876 | } | 1920 | } |
1877 | 1921 | ||
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h index 8475b308e01..60315576940 100644 --- a/drivers/scsi/qla4xxx/ql4_version.h +++ b/drivers/scsi/qla4xxx/ql4_version.h | |||
@@ -5,4 +5,4 @@ | |||
5 | * See LICENSE.qla4xxx for copyright and licensing details. | 5 | * See LICENSE.qla4xxx for copyright and licensing details. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #define QLA4XXX_DRIVER_VERSION "5.02.00-k5" | 8 | #define QLA4XXX_DRIVER_VERSION "5.02.00-k6" |
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index b4218390941..3fd16d7212d 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c | |||
@@ -1917,7 +1917,7 @@ store_priv_session_##field(struct device *dev, \ | |||
1917 | #define iscsi_priv_session_rw_attr(field, format) \ | 1917 | #define iscsi_priv_session_rw_attr(field, format) \ |
1918 | iscsi_priv_session_attr_show(field, format) \ | 1918 | iscsi_priv_session_attr_show(field, format) \ |
1919 | iscsi_priv_session_attr_store(field) \ | 1919 | iscsi_priv_session_attr_store(field) \ |
1920 | static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUGO, \ | 1920 | static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUSR, \ |
1921 | show_priv_session_##field, \ | 1921 | show_priv_session_##field, \ |
1922 | store_priv_session_##field) | 1922 | store_priv_session_##field) |
1923 | iscsi_priv_session_rw_attr(recovery_tmo, "%d"); | 1923 | iscsi_priv_session_rw_attr(recovery_tmo, "%d"); |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 7ff61d76b4c..b61ebec6bca 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -2027,14 +2027,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) | |||
2027 | int old_rcd = sdkp->RCD; | 2027 | int old_rcd = sdkp->RCD; |
2028 | int old_dpofua = sdkp->DPOFUA; | 2028 | int old_dpofua = sdkp->DPOFUA; |
2029 | 2029 | ||
2030 | if (sdp->skip_ms_page_8) { | 2030 | if (sdp->skip_ms_page_8) |
2031 | if (sdp->type == TYPE_RBC) | 2031 | goto defaults; |
2032 | goto defaults; | 2032 | |
2033 | else { | 2033 | if (sdp->type == TYPE_RBC) { |
2034 | modepage = 0x3F; | ||
2035 | dbd = 0; | ||
2036 | } | ||
2037 | } else if (sdp->type == TYPE_RBC) { | ||
2038 | modepage = 6; | 2034 | modepage = 6; |
2039 | dbd = 8; | 2035 | dbd = 8; |
2040 | } else { | 2036 | } else { |
@@ -2062,11 +2058,13 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) | |||
2062 | */ | 2058 | */ |
2063 | if (len < 3) | 2059 | if (len < 3) |
2064 | goto bad_sense; | 2060 | goto bad_sense; |
2065 | else if (len > SD_BUF_SIZE) { | 2061 | if (len > 20) |
2066 | sd_printk(KERN_NOTICE, sdkp, "Truncating mode parameter " | 2062 | len = 20; |
2067 | "data from %d to %d bytes\n", len, SD_BUF_SIZE); | 2063 | |
2068 | len = SD_BUF_SIZE; | 2064 | /* Take headers and block descriptors into account */ |
2069 | } | 2065 | len += data.header_length + data.block_descriptor_length; |
2066 | if (len > SD_BUF_SIZE) | ||
2067 | goto bad_sense; | ||
2070 | 2068 | ||
2071 | /* Get the data */ | 2069 | /* Get the data */ |
2072 | res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr); | 2070 | res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr); |
@@ -2074,45 +2072,16 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) | |||
2074 | if (scsi_status_is_good(res)) { | 2072 | if (scsi_status_is_good(res)) { |
2075 | int offset = data.header_length + data.block_descriptor_length; | 2073 | int offset = data.header_length + data.block_descriptor_length; |
2076 | 2074 | ||
2077 | while (offset < len) { | 2075 | if (offset >= SD_BUF_SIZE - 2) { |
2078 | u8 page_code = buffer[offset] & 0x3F; | 2076 | sd_printk(KERN_ERR, sdkp, "Malformed MODE SENSE response\n"); |
2079 | u8 spf = buffer[offset] & 0x40; | 2077 | goto defaults; |
2080 | |||
2081 | if (page_code == 8 || page_code == 6) { | ||
2082 | /* We're interested only in the first 3 bytes. | ||
2083 | */ | ||
2084 | if (len - offset <= 2) { | ||
2085 | sd_printk(KERN_ERR, sdkp, "Incomplete " | ||
2086 | "mode parameter data\n"); | ||
2087 | goto defaults; | ||
2088 | } else { | ||
2089 | modepage = page_code; | ||
2090 | goto Page_found; | ||
2091 | } | ||
2092 | } else { | ||
2093 | /* Go to the next page */ | ||
2094 | if (spf && len - offset > 3) | ||
2095 | offset += 4 + (buffer[offset+2] << 8) + | ||
2096 | buffer[offset+3]; | ||
2097 | else if (!spf && len - offset > 1) | ||
2098 | offset += 2 + buffer[offset+1]; | ||
2099 | else { | ||
2100 | sd_printk(KERN_ERR, sdkp, "Incomplete " | ||
2101 | "mode parameter data\n"); | ||
2102 | goto defaults; | ||
2103 | } | ||
2104 | } | ||
2105 | } | 2078 | } |
2106 | 2079 | ||
2107 | if (modepage == 0x3F) { | 2080 | if ((buffer[offset] & 0x3f) != modepage) { |
2108 | sd_printk(KERN_ERR, sdkp, "No Caching mode page " | ||
2109 | "present\n"); | ||
2110 | goto defaults; | ||
2111 | } else if ((buffer[offset] & 0x3f) != modepage) { | ||
2112 | sd_printk(KERN_ERR, sdkp, "Got wrong page\n"); | 2081 | sd_printk(KERN_ERR, sdkp, "Got wrong page\n"); |
2113 | goto defaults; | 2082 | goto defaults; |
2114 | } | 2083 | } |
2115 | Page_found: | 2084 | |
2116 | if (modepage == 8) { | 2085 | if (modepage == 8) { |
2117 | sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0); | 2086 | sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0); |
2118 | sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0); | 2087 | sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0); |
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 7f5a6a86f82..eb7a3e85304 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c | |||
@@ -35,9 +35,11 @@ | |||
35 | 35 | ||
36 | struct ses_device { | 36 | struct ses_device { |
37 | unsigned char *page1; | 37 | unsigned char *page1; |
38 | unsigned char *page1_types; | ||
38 | unsigned char *page2; | 39 | unsigned char *page2; |
39 | unsigned char *page10; | 40 | unsigned char *page10; |
40 | short page1_len; | 41 | short page1_len; |
42 | short page1_num_types; | ||
41 | short page2_len; | 43 | short page2_len; |
42 | short page10_len; | 44 | short page10_len; |
43 | }; | 45 | }; |
@@ -110,12 +112,12 @@ static int ses_set_page2_descriptor(struct enclosure_device *edev, | |||
110 | int i, j, count = 0, descriptor = ecomp->number; | 112 | int i, j, count = 0, descriptor = ecomp->number; |
111 | struct scsi_device *sdev = to_scsi_device(edev->edev.parent); | 113 | struct scsi_device *sdev = to_scsi_device(edev->edev.parent); |
112 | struct ses_device *ses_dev = edev->scratch; | 114 | struct ses_device *ses_dev = edev->scratch; |
113 | unsigned char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; | 115 | unsigned char *type_ptr = ses_dev->page1_types; |
114 | unsigned char *desc_ptr = ses_dev->page2 + 8; | 116 | unsigned char *desc_ptr = ses_dev->page2 + 8; |
115 | 117 | ||
116 | /* Clear everything */ | 118 | /* Clear everything */ |
117 | memset(desc_ptr, 0, ses_dev->page2_len - 8); | 119 | memset(desc_ptr, 0, ses_dev->page2_len - 8); |
118 | for (i = 0; i < ses_dev->page1[10]; i++, type_ptr += 4) { | 120 | for (i = 0; i < ses_dev->page1_num_types; i++, type_ptr += 4) { |
119 | for (j = 0; j < type_ptr[1]; j++) { | 121 | for (j = 0; j < type_ptr[1]; j++) { |
120 | desc_ptr += 4; | 122 | desc_ptr += 4; |
121 | if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE && | 123 | if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE && |
@@ -140,12 +142,12 @@ static unsigned char *ses_get_page2_descriptor(struct enclosure_device *edev, | |||
140 | int i, j, count = 0, descriptor = ecomp->number; | 142 | int i, j, count = 0, descriptor = ecomp->number; |
141 | struct scsi_device *sdev = to_scsi_device(edev->edev.parent); | 143 | struct scsi_device *sdev = to_scsi_device(edev->edev.parent); |
142 | struct ses_device *ses_dev = edev->scratch; | 144 | struct ses_device *ses_dev = edev->scratch; |
143 | unsigned char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; | 145 | unsigned char *type_ptr = ses_dev->page1_types; |
144 | unsigned char *desc_ptr = ses_dev->page2 + 8; | 146 | unsigned char *desc_ptr = ses_dev->page2 + 8; |
145 | 147 | ||
146 | ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len); | 148 | ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len); |
147 | 149 | ||
148 | for (i = 0; i < ses_dev->page1[10]; i++, type_ptr += 4) { | 150 | for (i = 0; i < ses_dev->page1_num_types; i++, type_ptr += 4) { |
149 | for (j = 0; j < type_ptr[1]; j++) { | 151 | for (j = 0; j < type_ptr[1]; j++) { |
150 | desc_ptr += 4; | 152 | desc_ptr += 4; |
151 | if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE && | 153 | if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE && |
@@ -358,7 +360,7 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, | |||
358 | unsigned char *buf = NULL, *type_ptr, *desc_ptr, *addl_desc_ptr = NULL; | 360 | unsigned char *buf = NULL, *type_ptr, *desc_ptr, *addl_desc_ptr = NULL; |
359 | int i, j, page7_len, len, components; | 361 | int i, j, page7_len, len, components; |
360 | struct ses_device *ses_dev = edev->scratch; | 362 | struct ses_device *ses_dev = edev->scratch; |
361 | int types = ses_dev->page1[10]; | 363 | int types = ses_dev->page1_num_types; |
362 | unsigned char *hdr_buf = kzalloc(INIT_ALLOC_SIZE, GFP_KERNEL); | 364 | unsigned char *hdr_buf = kzalloc(INIT_ALLOC_SIZE, GFP_KERNEL); |
363 | 365 | ||
364 | if (!hdr_buf) | 366 | if (!hdr_buf) |
@@ -390,10 +392,10 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, | |||
390 | len = (desc_ptr[2] << 8) + desc_ptr[3]; | 392 | len = (desc_ptr[2] << 8) + desc_ptr[3]; |
391 | /* skip past overall descriptor */ | 393 | /* skip past overall descriptor */ |
392 | desc_ptr += len + 4; | 394 | desc_ptr += len + 4; |
393 | if (ses_dev->page10) | ||
394 | addl_desc_ptr = ses_dev->page10 + 8; | ||
395 | } | 395 | } |
396 | type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; | 396 | if (ses_dev->page10) |
397 | addl_desc_ptr = ses_dev->page10 + 8; | ||
398 | type_ptr = ses_dev->page1_types; | ||
397 | components = 0; | 399 | components = 0; |
398 | for (i = 0; i < types; i++, type_ptr += 4) { | 400 | for (i = 0; i < types; i++, type_ptr += 4) { |
399 | for (j = 0; j < type_ptr[1]; j++) { | 401 | for (j = 0; j < type_ptr[1]; j++) { |
@@ -503,6 +505,7 @@ static int ses_intf_add(struct device *cdev, | |||
503 | u32 result; | 505 | u32 result; |
504 | int i, types, len, components = 0; | 506 | int i, types, len, components = 0; |
505 | int err = -ENOMEM; | 507 | int err = -ENOMEM; |
508 | int num_enclosures; | ||
506 | struct enclosure_device *edev; | 509 | struct enclosure_device *edev; |
507 | struct ses_component *scomp = NULL; | 510 | struct ses_component *scomp = NULL; |
508 | 511 | ||
@@ -530,16 +533,6 @@ static int ses_intf_add(struct device *cdev, | |||
530 | if (result) | 533 | if (result) |
531 | goto recv_failed; | 534 | goto recv_failed; |
532 | 535 | ||
533 | if (hdr_buf[1] != 0) { | ||
534 | /* FIXME: need subenclosure support; I've just never | ||
535 | * seen a device with subenclosures and it makes the | ||
536 | * traversal routines more complex */ | ||
537 | sdev_printk(KERN_ERR, sdev, | ||
538 | "FIXME driver has no support for subenclosures (%d)\n", | ||
539 | hdr_buf[1]); | ||
540 | goto err_free; | ||
541 | } | ||
542 | |||
543 | len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; | 536 | len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; |
544 | buf = kzalloc(len, GFP_KERNEL); | 537 | buf = kzalloc(len, GFP_KERNEL); |
545 | if (!buf) | 538 | if (!buf) |
@@ -549,11 +542,24 @@ static int ses_intf_add(struct device *cdev, | |||
549 | if (result) | 542 | if (result) |
550 | goto recv_failed; | 543 | goto recv_failed; |
551 | 544 | ||
552 | types = buf[10]; | 545 | types = 0; |
553 | 546 | ||
554 | type_ptr = buf + 12 + buf[11]; | 547 | /* we always have one main enclosure and the rest are referred |
548 | * to as secondary subenclosures */ | ||
549 | num_enclosures = buf[1] + 1; | ||
555 | 550 | ||
556 | for (i = 0; i < types; i++, type_ptr += 4) { | 551 | /* begin at the enclosure descriptor */ |
552 | type_ptr = buf + 8; | ||
553 | /* skip all the enclosure descriptors */ | ||
554 | for (i = 0; i < num_enclosures && type_ptr < buf + len; i++) { | ||
555 | types += type_ptr[2]; | ||
556 | type_ptr += type_ptr[3] + 4; | ||
557 | } | ||
558 | |||
559 | ses_dev->page1_types = type_ptr; | ||
560 | ses_dev->page1_num_types = types; | ||
561 | |||
562 | for (i = 0; i < types && type_ptr < buf + len; i++, type_ptr += 4) { | ||
557 | if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || | 563 | if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || |
558 | type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) | 564 | type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) |
559 | components += type_ptr[1]; | 565 | components += type_ptr[1]; |
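The subenclosure-aware parsing above walks the SES Configuration diagnostic page (page 1). As a rough orientation, based only on the fields the code actually touches (not a complete SES-2 definition), the layout it assumes is:

    #include <linux/types.h>

    /* Page header, bytes 0-7 of buf. */
    struct ses_cfg_page_hdr {
            u8 page_code;                           /* 0x01 */
            u8 num_secondary_subenclosures;         /* buf[1]; enclosures = this + 1 */
            u8 page_length[2];
            u8 generation_code[4];
    };

    /* One descriptor per (sub)enclosure, starting at buf + 8. */
    struct ses_enclosure_desc_hdr {
            u8 esp_fields;                          /* not used by the code above */
            u8 subenclosure_id;
            u8 num_type_descriptor_headers;         /* summed into "types" */
            u8 descriptor_len;                      /* descriptor bytes after this byte */
            /* logical id, vendor/product id, etc. follow */
    };

    /* After all enclosure descriptors: one 4-byte header per element type. */
    struct ses_type_desc_hdr {
            u8 element_type;                        /* type_ptr[0] */
            u8 num_possible_elements;               /* type_ptr[1] */
            u8 subenclosure_id;
            u8 type_desc_text_len;
    };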
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig index 2fac3be209a..9ef2dbbfa62 100644 --- a/drivers/target/Kconfig +++ b/drivers/target/Kconfig | |||
@@ -29,4 +29,6 @@ config TCM_PSCSI | |||
29 | Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered | 29 | Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered |
30 | passthrough access to Linux/SCSI device | 30 | passthrough access to Linux/SCSI device |
31 | 31 | ||
32 | source "drivers/target/loopback/Kconfig" | ||
33 | |||
32 | endif | 34 | endif |
diff --git a/drivers/target/Makefile b/drivers/target/Makefile index 973bb190ef5..1178bbfc68f 100644 --- a/drivers/target/Makefile +++ b/drivers/target/Makefile | |||
@@ -1,4 +1,3 @@ | |||
1 | EXTRA_CFLAGS += -I$(srctree)/drivers/target/ -I$(srctree)/drivers/scsi/ | ||
2 | 1 | ||
3 | target_core_mod-y := target_core_configfs.o \ | 2 | target_core_mod-y := target_core_configfs.o \ |
4 | target_core_device.o \ | 3 | target_core_device.o \ |
@@ -13,7 +12,8 @@ target_core_mod-y := target_core_configfs.o \ | |||
13 | target_core_transport.o \ | 12 | target_core_transport.o \ |
14 | target_core_cdb.o \ | 13 | target_core_cdb.o \ |
15 | target_core_ua.o \ | 14 | target_core_ua.o \ |
16 | target_core_rd.o | 15 | target_core_rd.o \ |
16 | target_core_stat.o | ||
17 | 17 | ||
18 | obj-$(CONFIG_TARGET_CORE) += target_core_mod.o | 18 | obj-$(CONFIG_TARGET_CORE) += target_core_mod.o |
19 | 19 | ||
@@ -21,3 +21,6 @@ obj-$(CONFIG_TARGET_CORE) += target_core_mod.o | |||
21 | obj-$(CONFIG_TCM_IBLOCK) += target_core_iblock.o | 21 | obj-$(CONFIG_TCM_IBLOCK) += target_core_iblock.o |
22 | obj-$(CONFIG_TCM_FILEIO) += target_core_file.o | 22 | obj-$(CONFIG_TCM_FILEIO) += target_core_file.o |
23 | obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o | 23 | obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o |
24 | |||
25 | # Fabric modules | ||
26 | obj-$(CONFIG_LOOPBACK_TARGET) += loopback/ | ||
diff --git a/drivers/target/loopback/Kconfig b/drivers/target/loopback/Kconfig new file mode 100644 index 00000000000..57dcbc2d711 --- /dev/null +++ b/drivers/target/loopback/Kconfig | |||
@@ -0,0 +1,11 @@ | |||
1 | config LOOPBACK_TARGET | ||
2 | tristate "TCM Virtual SAS target and Linux/SCSI LDD fabric loopback module" | ||
3 | help | ||
4 | Say Y here to enable the TCM Virtual SAS target and Linux/SCSI LLD | ||
5 | fabric loopback module. | ||
6 | |||
7 | config LOOPBACK_TARGET_CDB_DEBUG | ||
8 | bool "TCM loopback fabric module CDB debug code" | ||
9 | depends on LOOPBACK_TARGET | ||
10 | help | ||
11 | Say Y here to enable the TCM loopback fabric module CDB debug code | ||
diff --git a/drivers/target/loopback/Makefile b/drivers/target/loopback/Makefile new file mode 100644 index 00000000000..6abebdf9565 --- /dev/null +++ b/drivers/target/loopback/Makefile | |||
@@ -0,0 +1 @@ | |||
obj-$(CONFIG_LOOPBACK_TARGET) += tcm_loop.o | |||
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c new file mode 100644 index 00000000000..aed4e464d31 --- /dev/null +++ b/drivers/target/loopback/tcm_loop.c | |||
@@ -0,0 +1,1579 @@ | |||
1 | /******************************************************************************* | ||
2 | * | ||
3 | * This file contains the Linux/SCSI LLD virtual SCSI initiator driver | ||
4 | * for emulated SAS initiator ports | ||
5 | * | ||
6 | * © Copyright 2011 RisingTide Systems LLC. | ||
7 | * | ||
8 | * Licensed to the Linux Foundation under the General Public License (GPL) version 2. | ||
9 | * | ||
10 | * Author: Nicholas A. Bellinger <nab@risingtidesystems.com> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License as published by | ||
14 | * the Free Software Foundation; either version 2 of the License, or | ||
15 | * (at your option) any later version. | ||
16 | * | ||
17 | * This program is distributed in the hope that it will be useful, | ||
18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
20 | * GNU General Public License for more details. | ||
21 | ****************************************************************************/ | ||
22 | |||
23 | #include <linux/module.h> | ||
24 | #include <linux/moduleparam.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/types.h> | ||
28 | #include <linux/configfs.h> | ||
29 | #include <scsi/scsi.h> | ||
30 | #include <scsi/scsi_tcq.h> | ||
31 | #include <scsi/scsi_host.h> | ||
32 | #include <scsi/scsi_device.h> | ||
33 | #include <scsi/scsi_cmnd.h> | ||
34 | #include <scsi/libsas.h> /* For TASK_ATTR_* */ | ||
35 | |||
36 | #include <target/target_core_base.h> | ||
37 | #include <target/target_core_transport.h> | ||
38 | #include <target/target_core_fabric_ops.h> | ||
39 | #include <target/target_core_fabric_configfs.h> | ||
40 | #include <target/target_core_fabric_lib.h> | ||
41 | #include <target/target_core_configfs.h> | ||
42 | #include <target/target_core_device.h> | ||
43 | #include <target/target_core_tpg.h> | ||
44 | #include <target/target_core_tmr.h> | ||
45 | |||
46 | #include "tcm_loop.h" | ||
47 | |||
48 | #define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev) | ||
49 | |||
50 | /* Local pointer to allocated TCM configfs fabric module */ | ||
51 | static struct target_fabric_configfs *tcm_loop_fabric_configfs; | ||
52 | |||
53 | static struct kmem_cache *tcm_loop_cmd_cache; | ||
54 | |||
55 | static int tcm_loop_hba_no_cnt; | ||
56 | |||
57 | /* | ||
58 | * Allocate a tcm_loop cmd descriptor from target_core_mod code | ||
59 | * | ||
60 | * Can be called from interrupt context in tcm_loop_queuecommand() below | ||
61 | */ | ||
62 | static struct se_cmd *tcm_loop_allocate_core_cmd( | ||
63 | struct tcm_loop_hba *tl_hba, | ||
64 | struct se_portal_group *se_tpg, | ||
65 | struct scsi_cmnd *sc) | ||
66 | { | ||
67 | struct se_cmd *se_cmd; | ||
68 | struct se_session *se_sess; | ||
69 | struct tcm_loop_nexus *tl_nexus = tl_hba->tl_nexus; | ||
70 | struct tcm_loop_cmd *tl_cmd; | ||
71 | int sam_task_attr; | ||
72 | |||
73 | if (!tl_nexus) { | ||
74 | scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus" | ||
75 | " does not exist\n"); | ||
76 | set_host_byte(sc, DID_ERROR); | ||
77 | return NULL; | ||
78 | } | ||
79 | se_sess = tl_nexus->se_sess; | ||
80 | |||
81 | tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC); | ||
82 | if (!tl_cmd) { | ||
83 | printk(KERN_ERR "Unable to allocate struct tcm_loop_cmd\n"); | ||
84 | set_host_byte(sc, DID_ERROR); | ||
85 | return NULL; | ||
86 | } | ||
87 | se_cmd = &tl_cmd->tl_se_cmd; | ||
88 | /* | ||
89 | * Save the pointer to struct scsi_cmnd *sc | ||
90 | */ | ||
91 | tl_cmd->sc = sc; | ||
92 | /* | ||
93 | * Locate the SAM Task Attr from struct scsi_cmnd * | ||
94 | */ | ||
95 | if (sc->device->tagged_supported) { | ||
96 | switch (sc->tag) { | ||
97 | case HEAD_OF_QUEUE_TAG: | ||
98 | sam_task_attr = TASK_ATTR_HOQ; | ||
99 | break; | ||
100 | case ORDERED_QUEUE_TAG: | ||
101 | sam_task_attr = TASK_ATTR_ORDERED; | ||
102 | break; | ||
103 | default: | ||
104 | sam_task_attr = TASK_ATTR_SIMPLE; | ||
105 | break; | ||
106 | } | ||
107 | } else | ||
108 | sam_task_attr = TASK_ATTR_SIMPLE; | ||
109 | |||
110 | /* | ||
111 | * Initialize struct se_cmd descriptor from target_core_mod infrastructure | ||
112 | */ | ||
113 | transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, | ||
114 | scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr, | ||
115 | &tl_cmd->tl_sense_buf[0]); | ||
116 | |||
117 | /* | ||
118 | * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi | ||
119 | */ | ||
120 | if (scsi_bidi_cmnd(sc)) | ||
121 | T_TASK(se_cmd)->t_tasks_bidi = 1; | ||
122 | /* | ||
123 | * Locate the struct se_lun pointer and attach it to struct se_cmd | ||
124 | */ | ||
125 | if (transport_get_lun_for_cmd(se_cmd, NULL, tl_cmd->sc->device->lun) < 0) { | ||
126 | kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); | ||
127 | set_host_byte(sc, DID_NO_CONNECT); | ||
128 | return NULL; | ||
129 | } | ||
130 | |||
131 | transport_device_setup_cmd(se_cmd); | ||
132 | return se_cmd; | ||
133 | } | ||
134 | |||
135 | /* | ||
136 | * Called by struct target_core_fabric_ops->new_cmd_map() | ||
137 | * | ||
138 | * Always called in process context. A non-zero return value | ||
139 | * here signals that an exception should be handled based on the return code. | ||
140 | */ | ||
141 | static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd) | ||
142 | { | ||
143 | struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, | ||
144 | struct tcm_loop_cmd, tl_se_cmd); | ||
145 | struct scsi_cmnd *sc = tl_cmd->sc; | ||
146 | void *mem_ptr, *mem_bidi_ptr = NULL; | ||
147 | u32 sg_no_bidi = 0; | ||
148 | int ret; | ||
149 | /* | ||
150 | * Allocate the necessary tasks to complete the received CDB+data | ||
151 | */ | ||
152 | ret = transport_generic_allocate_tasks(se_cmd, tl_cmd->sc->cmnd); | ||
153 | if (ret == -1) { | ||
154 | /* Out of Resources */ | ||
155 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
156 | } else if (ret == -2) { | ||
157 | /* | ||
158 | * Handle case for SAM_STAT_RESERVATION_CONFLICT | ||
159 | */ | ||
160 | if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) | ||
161 | return PYX_TRANSPORT_RESERVATION_CONFLICT; | ||
162 | /* | ||
163 | * Otherwise, return SAM_STAT_CHECK_CONDITION and return | ||
164 | * sense data. | ||
165 | */ | ||
166 | return PYX_TRANSPORT_USE_SENSE_REASON; | ||
167 | } | ||
168 | /* | ||
169 | * Setup the struct scatterlist memory from the received | ||
170 | * struct scsi_cmnd. | ||
171 | */ | ||
172 | if (scsi_sg_count(sc)) { | ||
173 | se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM; | ||
174 | mem_ptr = (void *)scsi_sglist(sc); | ||
175 | /* | ||
176 | * For BIDI commands, pass in the extra READ buffer | ||
177 | * to transport_generic_map_mem_to_cmd() below.. | ||
178 | */ | ||
179 | if (T_TASK(se_cmd)->t_tasks_bidi) { | ||
180 | struct scsi_data_buffer *sdb = scsi_in(sc); | ||
181 | |||
182 | mem_bidi_ptr = (void *)sdb->table.sgl; | ||
183 | sg_no_bidi = sdb->table.nents; | ||
184 | } | ||
185 | } else { | ||
186 | /* | ||
187 | * Used for DMA_NONE | ||
188 | */ | ||
189 | mem_ptr = NULL; | ||
190 | } | ||
191 | /* | ||
192 | * Map the SG memory into struct se_mem->page linked list using the same | ||
193 | * physical memory at sg->page_link. | ||
194 | */ | ||
195 | ret = transport_generic_map_mem_to_cmd(se_cmd, mem_ptr, | ||
196 | scsi_sg_count(sc), mem_bidi_ptr, sg_no_bidi); | ||
197 | if (ret < 0) | ||
198 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
199 | |||
200 | return 0; | ||
201 | } | ||
202 | |||
203 | /* | ||
204 | * Called from struct target_core_fabric_ops->check_stop_free() | ||
205 | */ | ||
206 | static void tcm_loop_check_stop_free(struct se_cmd *se_cmd) | ||
207 | { | ||
208 | /* | ||
209 | * Do not release struct se_cmd's containing a valid TMR | ||
210 | * pointer. These will be released directly in tcm_loop_device_reset() | ||
211 | * with transport_generic_free_cmd(). | ||
212 | */ | ||
213 | if (se_cmd->se_tmr_req) | ||
214 | return; | ||
215 | /* | ||
216 | * Release the struct se_cmd, which will make a callback to release | ||
217 | * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd() | ||
218 | */ | ||
219 | transport_generic_free_cmd(se_cmd, 0, 1, 0); | ||
220 | } | ||
221 | |||
222 | /* | ||
223 | * Called from struct target_core_fabric_ops->release_cmd_to_pool() | ||
224 | */ | ||
225 | static void tcm_loop_deallocate_core_cmd(struct se_cmd *se_cmd) | ||
226 | { | ||
227 | struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, | ||
228 | struct tcm_loop_cmd, tl_se_cmd); | ||
229 | |||
230 | kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); | ||
231 | } | ||
232 | |||
233 | static int tcm_loop_proc_info(struct Scsi_Host *host, char *buffer, | ||
234 | char **start, off_t offset, | ||
235 | int length, int inout) | ||
236 | { | ||
237 | return sprintf(buffer, "tcm_loop_proc_info()\n"); | ||
238 | } | ||
239 | |||
240 | static int tcm_loop_driver_probe(struct device *); | ||
241 | static int tcm_loop_driver_remove(struct device *); | ||
242 | |||
243 | static int pseudo_lld_bus_match(struct device *dev, | ||
244 | struct device_driver *dev_driver) | ||
245 | { | ||
246 | return 1; | ||
247 | } | ||
248 | |||
249 | static struct bus_type tcm_loop_lld_bus = { | ||
250 | .name = "tcm_loop_bus", | ||
251 | .match = pseudo_lld_bus_match, | ||
252 | .probe = tcm_loop_driver_probe, | ||
253 | .remove = tcm_loop_driver_remove, | ||
254 | }; | ||
255 | |||
256 | static struct device_driver tcm_loop_driverfs = { | ||
257 | .name = "tcm_loop", | ||
258 | .bus = &tcm_loop_lld_bus, | ||
259 | }; | ||
260 | /* | ||
261 | * Used with root_device_register() in tcm_loop_alloc_core_bus() below | ||
262 | */ | ||
263 | struct device *tcm_loop_primary; | ||
264 | |||
265 | /* | ||
266 | * Copied from drivers/scsi/libfc/fc_fcp.c:fc_change_queue_depth() and | ||
267 | * drivers/scsi/libiscsi.c:iscsi_change_queue_depth() | ||
268 | */ | ||
269 | static int tcm_loop_change_queue_depth( | ||
270 | struct scsi_device *sdev, | ||
271 | int depth, | ||
272 | int reason) | ||
273 | { | ||
274 | switch (reason) { | ||
275 | case SCSI_QDEPTH_DEFAULT: | ||
276 | scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); | ||
277 | break; | ||
278 | case SCSI_QDEPTH_QFULL: | ||
279 | scsi_track_queue_full(sdev, depth); | ||
280 | break; | ||
281 | case SCSI_QDEPTH_RAMP_UP: | ||
282 | scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); | ||
283 | break; | ||
284 | default: | ||
285 | return -EOPNOTSUPP; | ||
286 | } | ||
287 | return sdev->queue_depth; | ||
288 | } | ||
289 | |||
290 | /* | ||
291 | * Main entry point from struct scsi_host_template for incoming SCSI CDB+Data | ||
292 | * from Linux/SCSI subsystem for SCSI low level device drivers (LLDs) | ||
293 | */ | ||
294 | static int tcm_loop_queuecommand( | ||
295 | struct Scsi_Host *sh, | ||
296 | struct scsi_cmnd *sc) | ||
297 | { | ||
298 | struct se_cmd *se_cmd; | ||
299 | struct se_portal_group *se_tpg; | ||
300 | struct tcm_loop_hba *tl_hba; | ||
301 | struct tcm_loop_tpg *tl_tpg; | ||
302 | |||
303 | TL_CDB_DEBUG("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x" | ||
304 | " scsi_buf_len: %u\n", sc->device->host->host_no, | ||
305 | sc->device->id, sc->device->channel, sc->device->lun, | ||
306 | sc->cmnd[0], scsi_bufflen(sc)); | ||
307 | /* | ||
308 | * Locate the tcm_loop_hba_t pointer | ||
309 | */ | ||
310 | tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); | ||
311 | tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; | ||
312 | se_tpg = &tl_tpg->tl_se_tpg; | ||
313 | /* | ||
314 | * Determine the SAM Task Attribute and allocate tl_cmd and | ||
315 | * tl_cmd->tl_se_cmd from TCM infrastructure | ||
316 | */ | ||
317 | se_cmd = tcm_loop_allocate_core_cmd(tl_hba, se_tpg, sc); | ||
318 | if (!se_cmd) { | ||
319 | sc->scsi_done(sc); | ||
320 | return 0; | ||
321 | } | ||
322 | /* | ||
323 | * Queue up the newly allocated command to be processed in TCM thread context. | ||
324 | */ | ||
325 | transport_generic_handle_cdb_map(se_cmd); | ||
326 | return 0; | ||
327 | } | ||
328 | |||
329 | /* | ||
330 | * Called from SCSI EH process context to issue a LUN_RESET TMR | ||
331 | * to struct scsi_device | ||
332 | */ | ||
333 | static int tcm_loop_device_reset(struct scsi_cmnd *sc) | ||
334 | { | ||
335 | struct se_cmd *se_cmd = NULL; | ||
336 | struct se_portal_group *se_tpg; | ||
337 | struct se_session *se_sess; | ||
338 | struct tcm_loop_cmd *tl_cmd = NULL; | ||
339 | struct tcm_loop_hba *tl_hba; | ||
340 | struct tcm_loop_nexus *tl_nexus; | ||
341 | struct tcm_loop_tmr *tl_tmr = NULL; | ||
342 | struct tcm_loop_tpg *tl_tpg; | ||
343 | int ret = FAILED; | ||
344 | /* | ||
345 | * Locate the tcm_loop_hba_t pointer | ||
346 | */ | ||
347 | tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); | ||
348 | /* | ||
349 | * Locate the tl_nexus and se_sess pointers | ||
350 | */ | ||
351 | tl_nexus = tl_hba->tl_nexus; | ||
352 | if (!tl_nexus) { | ||
353 | printk(KERN_ERR "Unable to perform device reset without" | ||
354 | " active I_T Nexus\n"); | ||
355 | return FAILED; | ||
356 | } | ||
357 | se_sess = tl_nexus->se_sess; | ||
358 | /* | ||
359 | * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id | ||
360 | */ | ||
361 | tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; | ||
362 | se_tpg = &tl_tpg->tl_se_tpg; | ||
363 | |||
364 | tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL); | ||
365 | if (!tl_cmd) { | ||
366 | printk(KERN_ERR "Unable to allocate memory for tl_cmd\n"); | ||
367 | return FAILED; | ||
368 | } | ||
369 | |||
370 | tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL); | ||
371 | if (!tl_tmr) { | ||
372 | printk(KERN_ERR "Unable to allocate memory for tl_tmr\n"); | ||
373 | goto release; | ||
374 | } | ||
375 | init_waitqueue_head(&tl_tmr->tl_tmr_wait); | ||
376 | |||
377 | se_cmd = &tl_cmd->tl_se_cmd; | ||
378 | /* | ||
379 | * Initialize struct se_cmd descriptor from target_core_mod infrastructure | ||
380 | */ | ||
381 | transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0, | ||
382 | DMA_NONE, TASK_ATTR_SIMPLE, | ||
383 | &tl_cmd->tl_sense_buf[0]); | ||
384 | /* | ||
385 | * Allocate the LUN_RESET TMR | ||
386 | */ | ||
387 | se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, (void *)tl_tmr, | ||
388 | TMR_LUN_RESET); | ||
389 | if (!se_cmd->se_tmr_req) | ||
390 | goto release; | ||
391 | /* | ||
392 | * Locate the underlying TCM struct se_lun from sc->device->lun | ||
393 | */ | ||
394 | if (transport_get_lun_for_tmr(se_cmd, sc->device->lun) < 0) | ||
395 | goto release; | ||
396 | /* | ||
397 | * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp() | ||
398 | * to wake us up. | ||
399 | */ | ||
400 | transport_generic_handle_tmr(se_cmd); | ||
401 | wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete)); | ||
402 | /* | ||
403 | * The TMR LUN_RESET has completed, check the response status and | ||
404 | * then release allocations. | ||
405 | */ | ||
406 | ret = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ? | ||
407 | SUCCESS : FAILED; | ||
408 | release: | ||
409 | if (se_cmd) | ||
410 | transport_generic_free_cmd(se_cmd, 1, 1, 0); | ||
411 | else | ||
412 | kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); | ||
413 | kfree(tl_tmr); | ||
414 | return ret; | ||
415 | } | ||
416 | |||
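tcm_loop_device_reset() queues the LUN_RESET TMR and then blocks in wait_event() until tcm_loop_queue_tm_rsp() (further down in this file) sets tmr_complete and calls wake_up(). A rough userspace analogy of that handshake, built on pthreads instead of the kernel wait-queue API, purely for illustration:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int tmr_complete;   /* plays the role of tl_tmr->tmr_complete */

/* plays the role of tcm_loop_queue_tm_rsp(): mark complete, wake the waiter */
static void *tmr_responder(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	tmr_complete = 1;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, tmr_responder, NULL);

	/* plays the role of wait_event() in tcm_loop_device_reset() */
	pthread_mutex_lock(&lock);
	while (!tmr_complete)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	printf("TMR LUN_RESET completed\n");
	return 0;
}
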
417 | static int tcm_loop_slave_alloc(struct scsi_device *sd) | ||
418 | { | ||
419 | set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags); | ||
420 | return 0; | ||
421 | } | ||
422 | |||
423 | static int tcm_loop_slave_configure(struct scsi_device *sd) | ||
424 | { | ||
425 | return 0; | ||
426 | } | ||
427 | |||
428 | static struct scsi_host_template tcm_loop_driver_template = { | ||
429 | .proc_info = tcm_loop_proc_info, | ||
430 | .proc_name = "tcm_loopback", | ||
431 | .name = "TCM_Loopback", | ||
432 | .queuecommand = tcm_loop_queuecommand, | ||
433 | .change_queue_depth = tcm_loop_change_queue_depth, | ||
434 | .eh_device_reset_handler = tcm_loop_device_reset, | ||
435 | .can_queue = TL_SCSI_CAN_QUEUE, | ||
436 | .this_id = -1, | ||
437 | .sg_tablesize = TL_SCSI_SG_TABLESIZE, | ||
438 | .cmd_per_lun = TL_SCSI_CMD_PER_LUN, | ||
439 | .max_sectors = TL_SCSI_MAX_SECTORS, | ||
440 | .use_clustering = DISABLE_CLUSTERING, | ||
441 | .slave_alloc = tcm_loop_slave_alloc, | ||
442 | .slave_configure = tcm_loop_slave_configure, | ||
443 | .module = THIS_MODULE, | ||
444 | }; | ||
445 | |||
446 | static int tcm_loop_driver_probe(struct device *dev) | ||
447 | { | ||
448 | struct tcm_loop_hba *tl_hba; | ||
449 | struct Scsi_Host *sh; | ||
450 | int error; | ||
451 | |||
452 | tl_hba = to_tcm_loop_hba(dev); | ||
453 | |||
454 | sh = scsi_host_alloc(&tcm_loop_driver_template, | ||
455 | sizeof(struct tcm_loop_hba)); | ||
456 | if (!sh) { | ||
457 | printk(KERN_ERR "Unable to allocate struct scsi_host\n"); | ||
458 | return -ENODEV; | ||
459 | } | ||
460 | tl_hba->sh = sh; | ||
461 | |||
462 | /* | ||
463 | * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata | ||
464 | */ | ||
465 | *((struct tcm_loop_hba **)sh->hostdata) = tl_hba; | ||
466 | /* | ||
467 | * Setup single ID, Channel and LUN for now.. | ||
468 | */ | ||
469 | sh->max_id = 2; | ||
470 | sh->max_lun = 0; | ||
471 | sh->max_channel = 0; | ||
472 | sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN; | ||
473 | |||
474 | error = scsi_add_host(sh, &tl_hba->dev); | ||
475 | if (error) { | ||
476 | printk(KERN_ERR "%s: scsi_add_host failed\n", __func__); | ||
477 | scsi_host_put(sh); | ||
478 | return -ENODEV; | ||
479 | } | ||
480 | return 0; | ||
481 | } | ||
482 | |||
483 | static int tcm_loop_driver_remove(struct device *dev) | ||
484 | { | ||
485 | struct tcm_loop_hba *tl_hba; | ||
486 | struct Scsi_Host *sh; | ||
487 | |||
488 | tl_hba = to_tcm_loop_hba(dev); | ||
489 | sh = tl_hba->sh; | ||
490 | |||
491 | scsi_remove_host(sh); | ||
492 | scsi_host_put(sh); | ||
493 | return 0; | ||
494 | } | ||
495 | |||
496 | static void tcm_loop_release_adapter(struct device *dev) | ||
497 | { | ||
498 | struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev); | ||
499 | |||
500 | kfree(tl_hba); | ||
501 | } | ||
502 | |||
503 | /* | ||
504 | * Called from tcm_loop_make_scsi_hba() in tcm_loop_configfs.c | ||
505 | */ | ||
506 | static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id) | ||
507 | { | ||
508 | int ret; | ||
509 | |||
510 | tl_hba->dev.bus = &tcm_loop_lld_bus; | ||
511 | tl_hba->dev.parent = tcm_loop_primary; | ||
512 | tl_hba->dev.release = &tcm_loop_release_adapter; | ||
513 | dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id); | ||
514 | |||
515 | ret = device_register(&tl_hba->dev); | ||
516 | if (ret) { | ||
517 | printk(KERN_ERR "device_register() failed for" | ||
518 | " tl_hba->dev: %d\n", ret); | ||
519 | return -ENODEV; | ||
520 | } | ||
521 | |||
522 | return 0; | ||
523 | } | ||
524 | |||
525 | /* | ||
526 | * Called from tcm_loop_fabric_init() in tcm_loop.c to load the emulated | ||
527 | * tcm_loop SCSI bus. | ||
528 | */ | ||
529 | static int tcm_loop_alloc_core_bus(void) | ||
530 | { | ||
531 | int ret; | ||
532 | |||
533 | tcm_loop_primary = root_device_register("tcm_loop_0"); | ||
534 | if (IS_ERR(tcm_loop_primary)) { | ||
535 | printk(KERN_ERR "Unable to allocate tcm_loop_primary\n"); | ||
536 | return PTR_ERR(tcm_loop_primary); | ||
537 | } | ||
538 | |||
539 | ret = bus_register(&tcm_loop_lld_bus); | ||
540 | if (ret) { | ||
541 | printk(KERN_ERR "bus_register() failed for tcm_loop_lld_bus\n"); | ||
542 | goto dev_unreg; | ||
543 | } | ||
544 | |||
545 | ret = driver_register(&tcm_loop_driverfs); | ||
546 | if (ret) { | ||
547 | printk(KERN_ERR "driver_register() failed for" | ||
548 | " tcm_loop_driverfs\n"); | ||
549 | goto bus_unreg; | ||
550 | } | ||
551 | |||
552 | printk(KERN_INFO "Initialized TCM Loop Core Bus\n"); | ||
553 | return ret; | ||
554 | |||
555 | bus_unreg: | ||
556 | bus_unregister(&tcm_loop_lld_bus); | ||
557 | dev_unreg: | ||
558 | root_device_unregister(tcm_loop_primary); | ||
559 | return ret; | ||
560 | } | ||
561 | |||
562 | static void tcm_loop_release_core_bus(void) | ||
563 | { | ||
564 | driver_unregister(&tcm_loop_driverfs); | ||
565 | bus_unregister(&tcm_loop_lld_bus); | ||
566 | root_device_unregister(tcm_loop_primary); | ||
567 | |||
568 | printk(KERN_INFO "Releasing TCM Loop Core BUS\n"); | ||
569 | } | ||
570 | |||
571 | static char *tcm_loop_get_fabric_name(void) | ||
572 | { | ||
573 | return "loopback"; | ||
574 | } | ||
575 | |||
576 | static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg) | ||
577 | { | ||
578 | struct tcm_loop_tpg *tl_tpg = | ||
579 | (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; | ||
580 | struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; | ||
581 | /* | ||
582 | * tl_proto_id is set at tcm_loop_configfs.c:tcm_loop_make_scsi_hba() | ||
583 | * time based on the protocol dependent prefix of the passed configfs group. | ||
584 | * | ||
585 | * Based upon tl_proto_id, TCM_Loop emulates the requested fabric | ||
586 | * ProtocolID using target_core_fabric_lib.c symbols. | ||
587 | */ | ||
588 | switch (tl_hba->tl_proto_id) { | ||
589 | case SCSI_PROTOCOL_SAS: | ||
590 | return sas_get_fabric_proto_ident(se_tpg); | ||
591 | case SCSI_PROTOCOL_FCP: | ||
592 | return fc_get_fabric_proto_ident(se_tpg); | ||
593 | case SCSI_PROTOCOL_ISCSI: | ||
594 | return iscsi_get_fabric_proto_ident(se_tpg); | ||
595 | default: | ||
596 | printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using" | ||
597 | " SAS emulation\n", tl_hba->tl_proto_id); | ||
598 | break; | ||
599 | } | ||
600 | |||
601 | return sas_get_fabric_proto_ident(se_tpg); | ||
602 | } | ||
603 | |||
604 | static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg) | ||
605 | { | ||
606 | struct tcm_loop_tpg *tl_tpg = | ||
607 | (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; | ||
608 | /* | ||
609 | * Return the passed NAA identifier for the SAS Target Port | ||
610 | */ | ||
611 | return &tl_tpg->tl_hba->tl_wwn_address[0]; | ||
612 | } | ||
613 | |||
614 | static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg) | ||
615 | { | ||
616 | struct tcm_loop_tpg *tl_tpg = | ||
617 | (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; | ||
618 | /* | ||
619 | * This Tag is used when forming the SCSI Name identifier in the INQUIRY | ||
620 | * EVPD=1 page 0x83 response to represent the SCSI Target Port. | ||
621 | */ | ||
622 | return tl_tpg->tl_tpgt; | ||
623 | } | ||
624 | |||
625 | static u32 tcm_loop_get_default_depth(struct se_portal_group *se_tpg) | ||
626 | { | ||
627 | return 1; | ||
628 | } | ||
629 | |||
630 | static u32 tcm_loop_get_pr_transport_id( | ||
631 | struct se_portal_group *se_tpg, | ||
632 | struct se_node_acl *se_nacl, | ||
633 | struct t10_pr_registration *pr_reg, | ||
634 | int *format_code, | ||
635 | unsigned char *buf) | ||
636 | { | ||
637 | struct tcm_loop_tpg *tl_tpg = | ||
638 | (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; | ||
639 | struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; | ||
640 | |||
641 | switch (tl_hba->tl_proto_id) { | ||
642 | case SCSI_PROTOCOL_SAS: | ||
643 | return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg, | ||
644 | format_code, buf); | ||
645 | case SCSI_PROTOCOL_FCP: | ||
646 | return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg, | ||
647 | format_code, buf); | ||
648 | case SCSI_PROTOCOL_ISCSI: | ||
649 | return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg, | ||
650 | format_code, buf); | ||
651 | default: | ||
652 | printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using" | ||
653 | " SAS emulation\n", tl_hba->tl_proto_id); | ||
654 | break; | ||
655 | } | ||
656 | |||
657 | return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg, | ||
658 | format_code, buf); | ||
659 | } | ||
660 | |||
661 | static u32 tcm_loop_get_pr_transport_id_len( | ||
662 | struct se_portal_group *se_tpg, | ||
663 | struct se_node_acl *se_nacl, | ||
664 | struct t10_pr_registration *pr_reg, | ||
665 | int *format_code) | ||
666 | { | ||
667 | struct tcm_loop_tpg *tl_tpg = | ||
668 | (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; | ||
669 | struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; | ||
670 | |||
671 | switch (tl_hba->tl_proto_id) { | ||
672 | case SCSI_PROTOCOL_SAS: | ||
673 | return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, | ||
674 | format_code); | ||
675 | case SCSI_PROTOCOL_FCP: | ||
676 | return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, | ||
677 | format_code); | ||
678 | case SCSI_PROTOCOL_ISCSI: | ||
679 | return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, | ||
680 | format_code); | ||
681 | default: | ||
682 | printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using" | ||
683 | " SAS emulation\n", tl_hba->tl_proto_id); | ||
684 | break; | ||
685 | } | ||
686 | |||
687 | return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, | ||
688 | format_code); | ||
689 | } | ||
690 | |||
691 | /* | ||
692 | * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above | ||
693 | * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations. | ||
694 | */ | ||
695 | static char *tcm_loop_parse_pr_out_transport_id( | ||
696 | struct se_portal_group *se_tpg, | ||
697 | const char *buf, | ||
698 | u32 *out_tid_len, | ||
699 | char **port_nexus_ptr) | ||
700 | { | ||
701 | struct tcm_loop_tpg *tl_tpg = | ||
702 | (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; | ||
703 | struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; | ||
704 | |||
705 | switch (tl_hba->tl_proto_id) { | ||
706 | case SCSI_PROTOCOL_SAS: | ||
707 | return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, | ||
708 | port_nexus_ptr); | ||
709 | case SCSI_PROTOCOL_FCP: | ||
710 | return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, | ||
711 | port_nexus_ptr); | ||
712 | case SCSI_PROTOCOL_ISCSI: | ||
713 | return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, | ||
714 | port_nexus_ptr); | ||
715 | default: | ||
716 | printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using" | ||
717 | " SAS emulation\n", tl_hba->tl_proto_id); | ||
718 | break; | ||
719 | } | ||
720 | |||
721 | return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, | ||
722 | port_nexus_ptr); | ||
723 | } | ||
724 | |||
725 | /* | ||
726 | * Returning 1 here allows a target_core_mod struct se_node_acl to be generated | ||
727 | * based upon the incoming fabric dependent SCSI Initiator Port. | ||
728 | */ | ||
729 | static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg) | ||
730 | { | ||
731 | return 1; | ||
732 | } | ||
733 | |||
734 | static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg) | ||
735 | { | ||
736 | return 0; | ||
737 | } | ||
738 | |||
739 | /* | ||
740 | * Allow I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs for | ||
741 | * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest | ||
742 | */ | ||
743 | static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg) | ||
744 | { | ||
745 | return 0; | ||
746 | } | ||
747 | |||
748 | /* | ||
749 | * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will | ||
750 | * never be called for TCM_Loop by target_core_fabric_configfs.c code. | ||
751 | * It has been added here as a nop for target_fabric_tf_ops_check() | ||
752 | */ | ||
753 | static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg) | ||
754 | { | ||
755 | return 0; | ||
756 | } | ||
757 | |||
758 | static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl( | ||
759 | struct se_portal_group *se_tpg) | ||
760 | { | ||
761 | struct tcm_loop_nacl *tl_nacl; | ||
762 | |||
763 | tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL); | ||
764 | if (!tl_nacl) { | ||
765 | printk(KERN_ERR "Unable to allocate struct tcm_loop_nacl\n"); | ||
766 | return NULL; | ||
767 | } | ||
768 | |||
769 | return &tl_nacl->se_node_acl; | ||
770 | } | ||
771 | |||
772 | static void tcm_loop_tpg_release_fabric_acl( | ||
773 | struct se_portal_group *se_tpg, | ||
774 | struct se_node_acl *se_nacl) | ||
775 | { | ||
776 | struct tcm_loop_nacl *tl_nacl = container_of(se_nacl, | ||
777 | struct tcm_loop_nacl, se_node_acl); | ||
778 | |||
779 | kfree(tl_nacl); | ||
780 | } | ||
781 | |||
782 | static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg) | ||
783 | { | ||
784 | return 1; | ||
785 | } | ||
786 | |||
787 | static void tcm_loop_new_cmd_failure(struct se_cmd *se_cmd) | ||
788 | { | ||
789 | /* | ||
790 | * Since TCM_loop is already passing struct scatterlist data from | ||
791 | * struct scsi_cmnd, no additional Linux/SCSI failure dependent state needs | ||
792 | * to be handled here. | ||
793 | */ | ||
794 | return; | ||
795 | } | ||
796 | |||
797 | static int tcm_loop_is_state_remove(struct se_cmd *se_cmd) | ||
798 | { | ||
799 | /* | ||
800 | * Assume struct scsi_cmnd is not in remove state.. | ||
801 | */ | ||
802 | return 0; | ||
803 | } | ||
804 | |||
805 | static int tcm_loop_sess_logged_in(struct se_session *se_sess) | ||
806 | { | ||
807 | /* | ||
808 | * Assume that TL Nexus is always active | ||
809 | */ | ||
810 | return 1; | ||
811 | } | ||
812 | |||
813 | static u32 tcm_loop_sess_get_index(struct se_session *se_sess) | ||
814 | { | ||
815 | return 1; | ||
816 | } | ||
817 | |||
818 | static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl) | ||
819 | { | ||
820 | return; | ||
821 | } | ||
822 | |||
823 | static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd) | ||
824 | { | ||
825 | return 1; | ||
826 | } | ||
827 | |||
828 | static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd) | ||
829 | { | ||
830 | struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, | ||
831 | struct tcm_loop_cmd, tl_se_cmd); | ||
832 | |||
833 | return tl_cmd->sc_cmd_state; | ||
834 | } | ||
835 | |||
836 | static int tcm_loop_shutdown_session(struct se_session *se_sess) | ||
837 | { | ||
838 | return 0; | ||
839 | } | ||
840 | |||
841 | static void tcm_loop_close_session(struct se_session *se_sess) | ||
842 | { | ||
843 | return; | ||
844 | } | ||
845 | |||
846 | static void tcm_loop_stop_session( | ||
847 | struct se_session *se_sess, | ||
848 | int sess_sleep, | ||
849 | int conn_sleep) | ||
850 | { | ||
851 | return; | ||
852 | } | ||
853 | |||
854 | static void tcm_loop_fall_back_to_erl0(struct se_session *se_sess) | ||
855 | { | ||
856 | return; | ||
857 | } | ||
858 | |||
859 | static int tcm_loop_write_pending(struct se_cmd *se_cmd) | ||
860 | { | ||
861 | /* | ||
862 | * Since Linux/SCSI has already sent down a struct scsi_cmnd with | ||
863 | * sc->sc_data_direction of DMA_TO_DEVICE and struct scatterlist array | ||
864 | * memory, that memory has already been mapped into struct | ||
865 | * se_cmd->t_mem_list format with transport_generic_map_mem_to_cmd(). | ||
866 | * | ||
867 | * We now tell TCM to add this WRITE CDB directly into the TCM storage | ||
868 | * object execution queue. | ||
869 | */ | ||
870 | transport_generic_process_write(se_cmd); | ||
871 | return 0; | ||
872 | } | ||
873 | |||
874 | static int tcm_loop_write_pending_status(struct se_cmd *se_cmd) | ||
875 | { | ||
876 | return 0; | ||
877 | } | ||
878 | |||
879 | static int tcm_loop_queue_data_in(struct se_cmd *se_cmd) | ||
880 | { | ||
881 | struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, | ||
882 | struct tcm_loop_cmd, tl_se_cmd); | ||
883 | struct scsi_cmnd *sc = tl_cmd->sc; | ||
884 | |||
885 | TL_CDB_DEBUG("tcm_loop_queue_data_in() called for scsi_cmnd: %p" | ||
886 | " cdb: 0x%02x\n", sc, sc->cmnd[0]); | ||
887 | |||
888 | sc->result = SAM_STAT_GOOD; | ||
889 | set_host_byte(sc, DID_OK); | ||
890 | sc->scsi_done(sc); | ||
891 | return 0; | ||
892 | } | ||
893 | |||
894 | static int tcm_loop_queue_status(struct se_cmd *se_cmd) | ||
895 | { | ||
896 | struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, | ||
897 | struct tcm_loop_cmd, tl_se_cmd); | ||
898 | struct scsi_cmnd *sc = tl_cmd->sc; | ||
899 | |||
900 | TL_CDB_DEBUG("tcm_loop_queue_status() called for scsi_cmnd: %p" | ||
901 | " cdb: 0x%02x\n", sc, sc->cmnd[0]); | ||
902 | |||
903 | if (se_cmd->sense_buffer && | ||
904 | ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || | ||
905 | (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) { | ||
906 | |||
907 | memcpy((void *)sc->sense_buffer, (void *)se_cmd->sense_buffer, | ||
908 | SCSI_SENSE_BUFFERSIZE); | ||
909 | sc->result = SAM_STAT_CHECK_CONDITION; | ||
910 | set_driver_byte(sc, DRIVER_SENSE); | ||
911 | } else | ||
912 | sc->result = se_cmd->scsi_status; | ||
913 | |||
914 | set_host_byte(sc, DID_OK); | ||
915 | sc->scsi_done(sc); | ||
916 | return 0; | ||
917 | } | ||
918 | |||
919 | static int tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd) | ||
920 | { | ||
921 | struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; | ||
922 | struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr; | ||
923 | /* | ||
924 | * The SCSI EH thread will be sleeping on tl_tmr->tl_tmr_wait, go ahead | ||
925 | * and wake up the wait_queue_head_t in tcm_loop_device_reset() | ||
926 | */ | ||
927 | atomic_set(&tl_tmr->tmr_complete, 1); | ||
928 | wake_up(&tl_tmr->tl_tmr_wait); | ||
929 | return 0; | ||
930 | } | ||
931 | |||
932 | static u16 tcm_loop_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length) | ||
933 | { | ||
934 | return 0; | ||
935 | } | ||
936 | |||
937 | static u16 tcm_loop_get_fabric_sense_len(void) | ||
938 | { | ||
939 | return 0; | ||
940 | } | ||
941 | |||
942 | static u64 tcm_loop_pack_lun(unsigned int lun) | ||
943 | { | ||
944 | u64 result; | ||
945 | |||
946 | /* LSB of lun into byte 1 big-endian */ | ||
947 | result = ((lun & 0xff) << 8); | ||
948 | /* use flat space addressing method */ | ||
949 | result |= 0x40 | ((lun >> 8) & 0x3f); | ||
950 | |||
951 | return cpu_to_le64(result); | ||
952 | } | ||
953 | |||
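To make the flat space encoding in tcm_loop_pack_lun() concrete, here is a standalone userspace copy of the packing logic with two worked examples (the real function additionally passes the result through cpu_to_le64(); this sketch omits that step):

#include <stdio.h>
#include <stdint.h>

/* Standalone copy of the packing logic, without the cpu_to_le64() step */
static uint64_t pack_lun(unsigned int lun)
{
	uint64_t result;

	result = (lun & 0xff) << 8;           /* LSB of lun into byte 1 */
	result |= 0x40 | ((lun >> 8) & 0x3f); /* flat space addressing method */
	return result;
}

int main(void)
{
	/* lun 0x0005: byte 0 = 0x40, byte 1 = 0x05 -> 0x0540 */
	printf("lun 0x0005 -> 0x%04llx\n", (unsigned long long)pack_lun(0x0005));
	/* lun 0x0123: byte 0 = 0x40 | 0x01, byte 1 = 0x23 -> 0x2341 */
	printf("lun 0x0123 -> 0x%04llx\n", (unsigned long long)pack_lun(0x0123));
	return 0;
}
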
954 | static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba) | ||
955 | { | ||
956 | switch (tl_hba->tl_proto_id) { | ||
957 | case SCSI_PROTOCOL_SAS: | ||
958 | return "SAS"; | ||
959 | case SCSI_PROTOCOL_FCP: | ||
960 | return "FCP"; | ||
961 | case SCSI_PROTOCOL_ISCSI: | ||
962 | return "iSCSI"; | ||
963 | default: | ||
964 | break; | ||
965 | } | ||
966 | |||
967 | return "Unknown"; | ||
968 | } | ||
969 | |||
970 | /* Start items for tcm_loop_port_cit */ | ||
971 | |||
972 | static int tcm_loop_port_link( | ||
973 | struct se_portal_group *se_tpg, | ||
974 | struct se_lun *lun) | ||
975 | { | ||
976 | struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, | ||
977 | struct tcm_loop_tpg, tl_se_tpg); | ||
978 | struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; | ||
979 | |||
980 | atomic_inc(&tl_tpg->tl_tpg_port_count); | ||
981 | smp_mb__after_atomic_inc(); | ||
982 | /* | ||
983 | * Add Linux/SCSI struct scsi_device by HCTL | ||
984 | */ | ||
985 | scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun); | ||
986 | |||
987 | printk(KERN_INFO "TCM_Loop_ConfigFS: Port Link Successful\n"); | ||
988 | return 0; | ||
989 | } | ||
990 | |||
991 | static void tcm_loop_port_unlink( | ||
992 | struct se_portal_group *se_tpg, | ||
993 | struct se_lun *se_lun) | ||
994 | { | ||
995 | struct scsi_device *sd; | ||
996 | struct tcm_loop_hba *tl_hba; | ||
997 | struct tcm_loop_tpg *tl_tpg; | ||
998 | |||
999 | tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg); | ||
1000 | tl_hba = tl_tpg->tl_hba; | ||
1001 | |||
1002 | sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt, | ||
1003 | se_lun->unpacked_lun); | ||
1004 | if (!sd) { | ||
1005 | printk(KERN_ERR "Unable to locate struct scsi_device for %d:%d:" | ||
1006 | "%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun); | ||
1007 | return; | ||
1008 | } | ||
1009 | /* | ||
1010 | * Remove Linux/SCSI struct scsi_device by HCTL | ||
1011 | */ | ||
1012 | scsi_remove_device(sd); | ||
1013 | scsi_device_put(sd); | ||
1014 | |||
1015 | atomic_dec(&tl_tpg->tl_tpg_port_count); | ||
1016 | smp_mb__after_atomic_dec(); | ||
1017 | |||
1018 | printk(KERN_INFO "TCM_Loop_ConfigFS: Port Unlink Successful\n"); | ||
1019 | } | ||
1020 | |||
1021 | /* End items for tcm_loop_port_cit */ | ||
1022 | |||
1023 | /* Start items for tcm_loop_nexus_cit */ | ||
1024 | |||
1025 | static int tcm_loop_make_nexus( | ||
1026 | struct tcm_loop_tpg *tl_tpg, | ||
1027 | const char *name) | ||
1028 | { | ||
1029 | struct se_portal_group *se_tpg; | ||
1030 | struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; | ||
1031 | struct tcm_loop_nexus *tl_nexus; | ||
1032 | |||
1033 | if (tl_tpg->tl_hba->tl_nexus) { | ||
1034 | printk(KERN_INFO "tl_tpg->tl_hba->tl_nexus already exists\n"); | ||
1035 | return -EEXIST; | ||
1036 | } | ||
1037 | se_tpg = &tl_tpg->tl_se_tpg; | ||
1038 | |||
1039 | tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL); | ||
1040 | if (!tl_nexus) { | ||
1041 | printk(KERN_ERR "Unable to allocate struct tcm_loop_nexus\n"); | ||
1042 | return -ENOMEM; | ||
1043 | } | ||
1044 | /* | ||
1045 | * Initialize the struct se_session pointer | ||
1046 | */ | ||
1047 | tl_nexus->se_sess = transport_init_session(); | ||
1048 | if (!tl_nexus->se_sess) | ||
1049 | goto out; | ||
1050 | /* | ||
1051 | * Since we are running in 'demo mode' this call will generate a | ||
1052 | * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI | ||
1053 | * Initiator port name of the passed configfs group 'name'. | ||
1054 | */ | ||
1055 | tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl( | ||
1056 | se_tpg, (unsigned char *)name); | ||
1057 | if (!tl_nexus->se_sess->se_node_acl) { | ||
1058 | transport_free_session(tl_nexus->se_sess); | ||
1059 | goto out; | ||
1060 | } | ||
1061 | /* | ||
1062 | * Now, register the SAS I_T Nexus as active with the call to | ||
1063 | * transport_register_session() | ||
1064 | */ | ||
1065 | __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl, | ||
1066 | tl_nexus->se_sess, (void *)tl_nexus); | ||
1067 | tl_tpg->tl_hba->tl_nexus = tl_nexus; | ||
1068 | printk(KERN_INFO "TCM_Loop_ConfigFS: Established I_T Nexus to emulated" | ||
1069 | " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), | ||
1070 | name); | ||
1071 | return 0; | ||
1072 | |||
1073 | out: | ||
1074 | kfree(tl_nexus); | ||
1075 | return -ENOMEM; | ||
1076 | } | ||
1077 | |||
1078 | static int tcm_loop_drop_nexus( | ||
1079 | struct tcm_loop_tpg *tpg) | ||
1080 | { | ||
1081 | struct se_session *se_sess; | ||
1082 | struct tcm_loop_nexus *tl_nexus; | ||
1083 | struct tcm_loop_hba *tl_hba = tpg->tl_hba; | ||
1084 | |||
1085 | tl_nexus = tpg->tl_hba->tl_nexus; | ||
1086 | if (!tl_nexus) | ||
1087 | return -ENODEV; | ||
1088 | |||
1089 | se_sess = tl_nexus->se_sess; | ||
1090 | if (!se_sess) | ||
1091 | return -ENODEV; | ||
1092 | |||
1093 | if (atomic_read(&tpg->tl_tpg_port_count)) { | ||
1094 | printk(KERN_ERR "Unable to remove TCM_Loop I_T Nexus with" | ||
1095 | " active TPG port count: %d\n", | ||
1096 | atomic_read(&tpg->tl_tpg_port_count)); | ||
1097 | return -EPERM; | ||
1098 | } | ||
1099 | |||
1100 | printk(KERN_INFO "TCM_Loop_ConfigFS: Removing I_T Nexus to emulated" | ||
1101 | " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), | ||
1102 | tl_nexus->se_sess->se_node_acl->initiatorname); | ||
1103 | /* | ||
1104 | * Release the SCSI I_T Nexus to the emulated SAS Target Port | ||
1105 | */ | ||
1106 | transport_deregister_session(tl_nexus->se_sess); | ||
1107 | tpg->tl_hba->tl_nexus = NULL; | ||
1108 | kfree(tl_nexus); | ||
1109 | return 0; | ||
1110 | } | ||
1111 | |||
1112 | /* End items for tcm_loop_nexus_cit */ | ||
1113 | |||
1114 | static ssize_t tcm_loop_tpg_show_nexus( | ||
1115 | struct se_portal_group *se_tpg, | ||
1116 | char *page) | ||
1117 | { | ||
1118 | struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, | ||
1119 | struct tcm_loop_tpg, tl_se_tpg); | ||
1120 | struct tcm_loop_nexus *tl_nexus; | ||
1121 | ssize_t ret; | ||
1122 | |||
1123 | tl_nexus = tl_tpg->tl_hba->tl_nexus; | ||
1124 | if (!tl_nexus) | ||
1125 | return -ENODEV; | ||
1126 | |||
1127 | ret = snprintf(page, PAGE_SIZE, "%s\n", | ||
1128 | tl_nexus->se_sess->se_node_acl->initiatorname); | ||
1129 | |||
1130 | return ret; | ||
1131 | } | ||
1132 | |||
1133 | static ssize_t tcm_loop_tpg_store_nexus( | ||
1134 | struct se_portal_group *se_tpg, | ||
1135 | const char *page, | ||
1136 | size_t count) | ||
1137 | { | ||
1138 | struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, | ||
1139 | struct tcm_loop_tpg, tl_se_tpg); | ||
1140 | struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; | ||
1141 | unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr; | ||
1142 | int ret; | ||
1143 | /* | ||
1144 | * Shutdown the active I_T nexus if 'NULL' is passed.. | ||
1145 | */ | ||
1146 | if (!strncmp(page, "NULL", 4)) { | ||
1147 | ret = tcm_loop_drop_nexus(tl_tpg); | ||
1148 | return (!ret) ? count : ret; | ||
1149 | } | ||
1150 | /* | ||
1151 | * Otherwise make sure the passed virtual Initiator port WWN matches | ||
1152 | * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call | ||
1153 | * tcm_loop_make_nexus() | ||
1154 | */ | ||
1155 | if (strlen(page) >= TL_WWN_ADDR_LEN) { | ||
1156 | printk(KERN_ERR "Emulated NAA SAS Address: %s, exceeds" | ||
1157 | " max: %d\n", page, TL_WWN_ADDR_LEN); | ||
1158 | return -EINVAL; | ||
1159 | } | ||
1160 | snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page); | ||
1161 | |||
1162 | ptr = strstr(i_port, "naa."); | ||
1163 | if (ptr) { | ||
1164 | if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) { | ||
1165 | printk(KERN_ERR "Passed SAS Initiator Port %s does not" | ||
1166 | " match target port protoid: %s\n", i_port, | ||
1167 | tcm_loop_dump_proto_id(tl_hba)); | ||
1168 | return -EINVAL; | ||
1169 | } | ||
1170 | port_ptr = &i_port[0]; | ||
1171 | goto check_newline; | ||
1172 | } | ||
1173 | ptr = strstr(i_port, "fc."); | ||
1174 | if (ptr) { | ||
1175 | if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) { | ||
1176 | printk(KERN_ERR "Passed FCP Initiator Port %s does not" | ||
1177 | " match target port protoid: %s\n", i_port, | ||
1178 | tcm_loop_dump_proto_id(tl_hba)); | ||
1179 | return -EINVAL; | ||
1180 | } | ||
1181 | port_ptr = &i_port[3]; /* Skip over "fc." */ | ||
1182 | goto check_newline; | ||
1183 | } | ||
1184 | ptr = strstr(i_port, "iqn."); | ||
1185 | if (ptr) { | ||
1186 | if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) { | ||
1187 | printk(KERN_ERR "Passed iSCSI Initiator Port %s does not" | ||
1188 | " match target port protoid: %s\n", i_port, | ||
1189 | tcm_loop_dump_proto_id(tl_hba)); | ||
1190 | return -EINVAL; | ||
1191 | } | ||
1192 | port_ptr = &i_port[0]; | ||
1193 | goto check_newline; | ||
1194 | } | ||
1195 | printk(KERN_ERR "Unable to locate prefix for emulated Initiator Port:" | ||
1196 | " %s\n", i_port); | ||
1197 | return -EINVAL; | ||
1198 | /* | ||
1199 | * Clear any trailing newline for the NAA WWN | ||
1200 | */ | ||
1201 | check_newline: | ||
1202 | if (i_port[strlen(i_port)-1] == '\n') | ||
1203 | i_port[strlen(i_port)-1] = '\0'; | ||
1204 | |||
1205 | ret = tcm_loop_make_nexus(tl_tpg, port_ptr); | ||
1206 | if (ret < 0) | ||
1207 | return ret; | ||
1208 | |||
1209 | return count; | ||
1210 | } | ||
1211 | |||
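tcm_loop_tpg_store_nexus() dispatches on the "naa."/"fc."/"iqn." prefix of the written string and strips a trailing newline before calling tcm_loop_make_nexus(). A compact userspace sketch of just that parsing step; classify_wwn() and the example WWN value are made up for illustration and are not part of the driver:

#include <stdio.h>
#include <string.h>

/* Classify an initiator-port string the same way the store handler does. */
static const char *classify_wwn(char *i_port, const char **port_ptr)
{
	size_t len = strlen(i_port);

	/* Clear any trailing newline, as written via configfs with echo(1) */
	if (len && i_port[len - 1] == '\n')
		i_port[len - 1] = '\0';

	if (!strncmp(i_port, "naa.", 4)) {
		*port_ptr = i_port;        /* SAS keeps the full string */
		return "SAS";
	}
	if (!strncmp(i_port, "fc.", 3)) {
		*port_ptr = i_port + 3;    /* FCP skips over "fc." */
		return "FCP";
	}
	if (!strncmp(i_port, "iqn.", 4)) {
		*port_ptr = i_port;
		return "iSCSI";
	}
	return NULL;
}

int main(void)
{
	char buf[] = "fc.20000000c9a1b2c3\n"; /* made-up example WWN */
	const char *port = buf;
	const char *proto = classify_wwn(buf, &port);

	printf("proto: %s port: %s\n", proto ? proto : "unknown", port);
	return 0;
}
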
1212 | TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR); | ||
1213 | |||
1214 | static struct configfs_attribute *tcm_loop_tpg_attrs[] = { | ||
1215 | &tcm_loop_tpg_nexus.attr, | ||
1216 | NULL, | ||
1217 | }; | ||
1218 | |||
1219 | /* Start items for tcm_loop_naa_cit */ | ||
1220 | |||
1221 | struct se_portal_group *tcm_loop_make_naa_tpg( | ||
1222 | struct se_wwn *wwn, | ||
1223 | struct config_group *group, | ||
1224 | const char *name) | ||
1225 | { | ||
1226 | struct tcm_loop_hba *tl_hba = container_of(wwn, | ||
1227 | struct tcm_loop_hba, tl_hba_wwn); | ||
1228 | struct tcm_loop_tpg *tl_tpg; | ||
1229 | char *tpgt_str, *end_ptr; | ||
1230 | int ret; | ||
1231 | unsigned short int tpgt; | ||
1232 | |||
1233 | tpgt_str = strstr(name, "tpgt_"); | ||
1234 | if (!tpgt_str) { | ||
1235 | printk(KERN_ERR "Unable to locate \"tpgt_#\" directory" | ||
1236 | " group\n"); | ||
1237 | return ERR_PTR(-EINVAL); | ||
1238 | } | ||
1239 | tpgt_str += 5; /* Skip ahead of "tpgt_" */ | ||
1240 | tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0); | ||
1241 | |||
1242 | if (tpgt >= TL_TPGS_PER_HBA) { | ||
1243 | printk(KERN_ERR "Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:" | ||
1244 | " %u\n", tpgt, TL_TPGS_PER_HBA); | ||
1245 | return ERR_PTR(-EINVAL); | ||
1246 | } | ||
1247 | tl_tpg = &tl_hba->tl_hba_tpgs[tpgt]; | ||
1248 | tl_tpg->tl_hba = tl_hba; | ||
1249 | tl_tpg->tl_tpgt = tpgt; | ||
1250 | /* | ||
1251 | * Register the tl_tpg as an emulated SAS TCM Target Endpoint | ||
1252 | */ | ||
1253 | ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops, | ||
1254 | wwn, &tl_tpg->tl_se_tpg, (void *)tl_tpg, | ||
1255 | TRANSPORT_TPG_TYPE_NORMAL); | ||
1256 | if (ret < 0) | ||
1257 | return ERR_PTR(-ENOMEM); | ||
1258 | |||
1259 | printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated Emulated %s" | ||
1260 | " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba), | ||
1261 | config_item_name(&wwn->wwn_group.cg_item), tpgt); | ||
1262 | |||
1263 | return &tl_tpg->tl_se_tpg; | ||
1264 | } | ||
1265 | |||
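tcm_loop_make_naa_tpg() derives the TPG tag from the configfs directory name by skipping the "tpgt_" prefix and converting the remainder with simple_strtoul(). A minimal userspace equivalent of that conversion (sketch only; strtoul() stands in for simple_strtoul()):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *name = "tpgt_1";        /* configfs group name, e.g. .../tpgt_1 */
	const char *tpgt_str = strstr(name, "tpgt_");
	unsigned long tpgt;
	char *end;

	if (!tpgt_str)
		return 1;
	tpgt_str += 5;                      /* skip ahead of "tpgt_" */
	tpgt = strtoul(tpgt_str, &end, 0);
	printf("tpgt: %lu\n", tpgt);
	return 0;
}
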
1266 | void tcm_loop_drop_naa_tpg( | ||
1267 | struct se_portal_group *se_tpg) | ||
1268 | { | ||
1269 | struct se_wwn *wwn = se_tpg->se_tpg_wwn; | ||
1270 | struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, | ||
1271 | struct tcm_loop_tpg, tl_se_tpg); | ||
1272 | struct tcm_loop_hba *tl_hba; | ||
1273 | unsigned short tpgt; | ||
1274 | |||
1275 | tl_hba = tl_tpg->tl_hba; | ||
1276 | tpgt = tl_tpg->tl_tpgt; | ||
1277 | /* | ||
1278 | * Release the I_T Nexus for the Virtual SAS link if present | ||
1279 | */ | ||
1280 | tcm_loop_drop_nexus(tl_tpg); | ||
1281 | /* | ||
1282 | * Deregister the tl_tpg as an emulated SAS TCM Target Endpoint | ||
1283 | */ | ||
1284 | core_tpg_deregister(se_tpg); | ||
1285 | |||
1286 | printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated Emulated %s" | ||
1287 | " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba), | ||
1288 | config_item_name(&wwn->wwn_group.cg_item), tpgt); | ||
1289 | } | ||
1290 | |||
1291 | /* End items for tcm_loop_naa_cit */ | ||
1292 | |||
1293 | /* Start items for tcm_loop_cit */ | ||
1294 | |||
1295 | struct se_wwn *tcm_loop_make_scsi_hba( | ||
1296 | struct target_fabric_configfs *tf, | ||
1297 | struct config_group *group, | ||
1298 | const char *name) | ||
1299 | { | ||
1300 | struct tcm_loop_hba *tl_hba; | ||
1301 | struct Scsi_Host *sh; | ||
1302 | char *ptr; | ||
1303 | int ret, off = 0; | ||
1304 | |||
1305 | tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL); | ||
1306 | if (!tl_hba) { | ||
1307 | printk(KERN_ERR "Unable to allocate struct tcm_loop_hba\n"); | ||
1308 | return ERR_PTR(-ENOMEM); | ||
1309 | } | ||
1310 | /* | ||
1311 | * Determine the emulated Protocol Identifier and Target Port Name | ||
1312 | * based on the incoming configfs directory name. | ||
1313 | */ | ||
1314 | ptr = strstr(name, "naa."); | ||
1315 | if (ptr) { | ||
1316 | tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS; | ||
1317 | goto check_len; | ||
1318 | } | ||
1319 | ptr = strstr(name, "fc."); | ||
1320 | if (ptr) { | ||
1321 | tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP; | ||
1322 | off = 3; /* Skip over "fc." */ | ||
1323 | goto check_len; | ||
1324 | } | ||
1325 | ptr = strstr(name, "iqn."); | ||
1326 | if (ptr) { | ||
1327 | tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI; | ||
1328 | goto check_len; | ||
1329 | } | ||
1330 | |||
1331 | printk(KERN_ERR "Unable to locate prefix for emulated Target Port:" | ||
1332 | " %s\n", name); | ||
1333 | return ERR_PTR(-EINVAL); | ||
1334 | |||
1335 | check_len: | ||
1336 | if (strlen(name) >= TL_WWN_ADDR_LEN) { | ||
1337 | printk(KERN_ERR "Emulated NAA %s Address: %s, exceeds" | ||
1338 | " max: %d\n", tcm_loop_dump_proto_id(tl_hba), name, | ||
1339 | TL_WWN_ADDR_LEN); | ||
1340 | kfree(tl_hba); | ||
1341 | return ERR_PTR(-EINVAL); | ||
1342 | } | ||
1343 | snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]); | ||
1344 | |||
1345 | /* | ||
1346 | * Call device_register() on &tl_hba->dev to register the emulated | ||
1347 | * Linux/SCSI LLD; the struct Scsi_Host at tl_hba->sh is set up by the | ||
1348 | * tcm_loop_driver_probe() callback invoked from device_register() | ||
1349 | */ | ||
1350 | ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt); | ||
1351 | if (ret) | ||
1352 | goto out; | ||
1353 | |||
1354 | sh = tl_hba->sh; | ||
1355 | tcm_loop_hba_no_cnt++; | ||
1356 | printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated emulated Target" | ||
1357 | " %s Address: %s at Linux/SCSI Host ID: %d\n", | ||
1358 | tcm_loop_dump_proto_id(tl_hba), name, sh->host_no); | ||
1359 | |||
1360 | return &tl_hba->tl_hba_wwn; | ||
1361 | out: | ||
1362 | kfree(tl_hba); | ||
1363 | return ERR_PTR(ret); | ||
1364 | } | ||
1365 | |||
1366 | void tcm_loop_drop_scsi_hba( | ||
1367 | struct se_wwn *wwn) | ||
1368 | { | ||
1369 | struct tcm_loop_hba *tl_hba = container_of(wwn, | ||
1370 | struct tcm_loop_hba, tl_hba_wwn); | ||
1371 | int host_no = tl_hba->sh->host_no; | ||
1372 | /* | ||
1373 | * Call device_unregister() on the original tl_hba->dev. | ||
1374 | * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will | ||
1375 | * release *tl_hba; | ||
1376 | */ | ||
1377 | device_unregister(&tl_hba->dev); | ||
1378 | |||
1379 | printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated emulated Target" | ||
1380 | " SAS Address: %s at Linux/SCSI Host ID: %d\n", | ||
1381 | config_item_name(&wwn->wwn_group.cg_item), host_no); | ||
1382 | } | ||
1383 | |||
1384 | /* Start items for tcm_loop_cit */ | ||
1385 | static ssize_t tcm_loop_wwn_show_attr_version( | ||
1386 | struct target_fabric_configfs *tf, | ||
1387 | char *page) | ||
1388 | { | ||
1389 | return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION); | ||
1390 | } | ||
1391 | |||
1392 | TF_WWN_ATTR_RO(tcm_loop, version); | ||
1393 | |||
1394 | static struct configfs_attribute *tcm_loop_wwn_attrs[] = { | ||
1395 | &tcm_loop_wwn_version.attr, | ||
1396 | NULL, | ||
1397 | }; | ||
1398 | |||
1399 | /* End items for tcm_loop_cit */ | ||
1400 | |||
1401 | static int tcm_loop_register_configfs(void) | ||
1402 | { | ||
1403 | struct target_fabric_configfs *fabric; | ||
1404 | struct config_group *tf_cg; | ||
1405 | int ret; | ||
1406 | /* | ||
1407 | * Set the TCM Loop HBA counter to zero | ||
1408 | */ | ||
1409 | tcm_loop_hba_no_cnt = 0; | ||
1410 | /* | ||
1411 | * Register the top level struct config_item_type with TCM core | ||
1412 | */ | ||
1413 | fabric = target_fabric_configfs_init(THIS_MODULE, "loopback"); | ||
1414 | if (!fabric) { | ||
1415 | printk(KERN_ERR "tcm_loop_register_configfs() failed!\n"); | ||
1416 | return -1; | ||
1417 | } | ||
1418 | /* | ||
1419 | * Setup the fabric API of function pointers used by target_core_mod | ||
1420 | */ | ||
1421 | fabric->tf_ops.get_fabric_name = &tcm_loop_get_fabric_name; | ||
1422 | fabric->tf_ops.get_fabric_proto_ident = &tcm_loop_get_fabric_proto_ident; | ||
1423 | fabric->tf_ops.tpg_get_wwn = &tcm_loop_get_endpoint_wwn; | ||
1424 | fabric->tf_ops.tpg_get_tag = &tcm_loop_get_tag; | ||
1425 | fabric->tf_ops.tpg_get_default_depth = &tcm_loop_get_default_depth; | ||
1426 | fabric->tf_ops.tpg_get_pr_transport_id = &tcm_loop_get_pr_transport_id; | ||
1427 | fabric->tf_ops.tpg_get_pr_transport_id_len = | ||
1428 | &tcm_loop_get_pr_transport_id_len; | ||
1429 | fabric->tf_ops.tpg_parse_pr_out_transport_id = | ||
1430 | &tcm_loop_parse_pr_out_transport_id; | ||
1431 | fabric->tf_ops.tpg_check_demo_mode = &tcm_loop_check_demo_mode; | ||
1432 | fabric->tf_ops.tpg_check_demo_mode_cache = | ||
1433 | &tcm_loop_check_demo_mode_cache; | ||
1434 | fabric->tf_ops.tpg_check_demo_mode_write_protect = | ||
1435 | &tcm_loop_check_demo_mode_write_protect; | ||
1436 | fabric->tf_ops.tpg_check_prod_mode_write_protect = | ||
1437 | &tcm_loop_check_prod_mode_write_protect; | ||
1438 | /* | ||
1439 | * The TCM loopback fabric module runs in demo-mode to a local | ||
1440 | * virtual SCSI device, so fabric dependent initiator ACLs are | ||
1441 | * not required. | ||
1442 | */ | ||
1443 | fabric->tf_ops.tpg_alloc_fabric_acl = &tcm_loop_tpg_alloc_fabric_acl; | ||
1444 | fabric->tf_ops.tpg_release_fabric_acl = | ||
1445 | &tcm_loop_tpg_release_fabric_acl; | ||
1446 | fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index; | ||
1447 | /* | ||
1448 | * Since tcm_loop is mapping physical memory from Linux/SCSI | ||
1449 | * struct scatterlist arrays for each struct scsi_cmnd I/O, | ||
1450 | * we do not need TCM to allocate an iovec array for | ||
1451 | * virtual memory address mappings | ||
1452 | */ | ||
1453 | fabric->tf_ops.alloc_cmd_iovecs = NULL; | ||
1454 | /* | ||
1455 | * Used for setting up remaining TCM resources in process context | ||
1456 | */ | ||
1457 | fabric->tf_ops.new_cmd_map = &tcm_loop_new_cmd_map; | ||
1458 | fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free; | ||
1459 | fabric->tf_ops.release_cmd_to_pool = &tcm_loop_deallocate_core_cmd; | ||
1460 | fabric->tf_ops.release_cmd_direct = &tcm_loop_deallocate_core_cmd; | ||
1461 | fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session; | ||
1462 | fabric->tf_ops.close_session = &tcm_loop_close_session; | ||
1463 | fabric->tf_ops.stop_session = &tcm_loop_stop_session; | ||
1464 | fabric->tf_ops.fall_back_to_erl0 = &tcm_loop_fall_back_to_erl0; | ||
1465 | fabric->tf_ops.sess_logged_in = &tcm_loop_sess_logged_in; | ||
1466 | fabric->tf_ops.sess_get_index = &tcm_loop_sess_get_index; | ||
1467 | fabric->tf_ops.sess_get_initiator_sid = NULL; | ||
1468 | fabric->tf_ops.write_pending = &tcm_loop_write_pending; | ||
1469 | fabric->tf_ops.write_pending_status = &tcm_loop_write_pending_status; | ||
1470 | /* | ||
1471 | * Not used for TCM loopback | ||
1472 | */ | ||
1473 | fabric->tf_ops.set_default_node_attributes = | ||
1474 | &tcm_loop_set_default_node_attributes; | ||
1475 | fabric->tf_ops.get_task_tag = &tcm_loop_get_task_tag; | ||
1476 | fabric->tf_ops.get_cmd_state = &tcm_loop_get_cmd_state; | ||
1477 | fabric->tf_ops.new_cmd_failure = &tcm_loop_new_cmd_failure; | ||
1478 | fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in; | ||
1479 | fabric->tf_ops.queue_status = &tcm_loop_queue_status; | ||
1480 | fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp; | ||
1481 | fabric->tf_ops.set_fabric_sense_len = &tcm_loop_set_fabric_sense_len; | ||
1482 | fabric->tf_ops.get_fabric_sense_len = &tcm_loop_get_fabric_sense_len; | ||
1483 | fabric->tf_ops.is_state_remove = &tcm_loop_is_state_remove; | ||
1484 | fabric->tf_ops.pack_lun = &tcm_loop_pack_lun; | ||
1485 | |||
1486 | tf_cg = &fabric->tf_group; | ||
1487 | /* | ||
1488 | * Setup function pointers for generic logic in target_core_fabric_configfs.c | ||
1489 | */ | ||
1490 | fabric->tf_ops.fabric_make_wwn = &tcm_loop_make_scsi_hba; | ||
1491 | fabric->tf_ops.fabric_drop_wwn = &tcm_loop_drop_scsi_hba; | ||
1492 | fabric->tf_ops.fabric_make_tpg = &tcm_loop_make_naa_tpg; | ||
1493 | fabric->tf_ops.fabric_drop_tpg = &tcm_loop_drop_naa_tpg; | ||
1494 | /* | ||
1495 | * fabric_post_link() and fabric_pre_unlink() are used for | ||
1496 | * registration and release of TCM Loop Virtual SCSI LUNs. | ||
1497 | */ | ||
1498 | fabric->tf_ops.fabric_post_link = &tcm_loop_port_link; | ||
1499 | fabric->tf_ops.fabric_pre_unlink = &tcm_loop_port_unlink; | ||
1500 | fabric->tf_ops.fabric_make_np = NULL; | ||
1501 | fabric->tf_ops.fabric_drop_np = NULL; | ||
1502 | /* | ||
1503 | * Setup default attribute lists for various fabric->tf_cit_tmpl | ||
1504 | */ | ||
1505 | TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs; | ||
1506 | TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs; | ||
1507 | TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; | ||
1508 | TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; | ||
1509 | TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; | ||
1510 | /* | ||
1511 | * Once fabric->tf_ops has been setup, now register the fabric for | ||
1512 | * use within TCM | ||
1513 | */ | ||
1514 | ret = target_fabric_configfs_register(fabric); | ||
1515 | if (ret < 0) { | ||
1516 | printk(KERN_ERR "target_fabric_configfs_register() for" | ||
1517 | " TCM_Loop failed!\n"); | ||
1518 | target_fabric_configfs_free(fabric); | ||
1519 | return -1; | ||
1520 | } | ||
1521 | /* | ||
1522 | * Setup our local pointer to *fabric. | ||
1523 | */ | ||
1524 | tcm_loop_fabric_configfs = fabric; | ||
1525 | printk(KERN_INFO "TCM_LOOP[0] - Set fabric ->" | ||
1526 | " tcm_loop_fabric_configfs\n"); | ||
1527 | return 0; | ||
1528 | } | ||
1529 | |||
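Most of tcm_loop_register_configfs() is filling in a table of function pointers and handing it to the target core, which later calls back through those pointers. A toy userspace version of that ops-table pattern, with invented names that only illustrate the shape of the design:

#include <stdio.h>

/* Toy ops table in the same spirit as fabric->tf_ops */
struct fabric_ops {
	const char *(*get_fabric_name)(void);
	unsigned int (*get_default_depth)(void);
};

static const char *loop_get_fabric_name(void) { return "loopback"; }
static unsigned int loop_get_default_depth(void) { return 1; }

static int register_fabric(const struct fabric_ops *ops)
{
	/* the real core stores the table and calls back through it later */
	printf("registered fabric: %s (depth %u)\n",
	       ops->get_fabric_name(), ops->get_default_depth());
	return 0;
}

int main(void)
{
	struct fabric_ops ops = {
		.get_fabric_name = loop_get_fabric_name,
		.get_default_depth = loop_get_default_depth,
	};

	return register_fabric(&ops);
}
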
1530 | static void tcm_loop_deregister_configfs(void) | ||
1531 | { | ||
1532 | if (!tcm_loop_fabric_configfs) | ||
1533 | return; | ||
1534 | |||
1535 | target_fabric_configfs_deregister(tcm_loop_fabric_configfs); | ||
1536 | tcm_loop_fabric_configfs = NULL; | ||
1537 | printk(KERN_INFO "TCM_LOOP[0] - Cleared" | ||
1538 | " tcm_loop_fabric_configfs\n"); | ||
1539 | } | ||
1540 | |||
1541 | static int __init tcm_loop_fabric_init(void) | ||
1542 | { | ||
1543 | int ret; | ||
1544 | |||
1545 | tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache", | ||
1546 | sizeof(struct tcm_loop_cmd), | ||
1547 | __alignof__(struct tcm_loop_cmd), | ||
1548 | 0, NULL); | ||
1549 | if (!tcm_loop_cmd_cache) { | ||
1550 | printk(KERN_ERR "kmem_cache_create() for" | ||
1551 | " tcm_loop_cmd_cache failed\n"); | ||
1552 | return -ENOMEM; | ||
1553 | } | ||
1554 | |||
1555 | ret = tcm_loop_alloc_core_bus(); | ||
1556 | if (ret) | ||
1557 | return ret; | ||
1558 | |||
1559 | ret = tcm_loop_register_configfs(); | ||
1560 | if (ret) { | ||
1561 | tcm_loop_release_core_bus(); | ||
1562 | return ret; | ||
1563 | } | ||
1564 | |||
1565 | return 0; | ||
1566 | } | ||
1567 | |||
1568 | static void __exit tcm_loop_fabric_exit(void) | ||
1569 | { | ||
1570 | tcm_loop_deregister_configfs(); | ||
1571 | tcm_loop_release_core_bus(); | ||
1572 | kmem_cache_destroy(tcm_loop_cmd_cache); | ||
1573 | } | ||
1574 | |||
1575 | MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module"); | ||
1576 | MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>"); | ||
1577 | MODULE_LICENSE("GPL"); | ||
1578 | module_init(tcm_loop_fabric_init); | ||
1579 | module_exit(tcm_loop_fabric_exit); | ||
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h new file mode 100644 index 00000000000..7e9f7ab4554 --- /dev/null +++ b/drivers/target/loopback/tcm_loop.h | |||
@@ -0,0 +1,77 @@ | |||
1 | #define TCM_LOOP_VERSION "v2.1-rc1" | ||
2 | #define TL_WWN_ADDR_LEN 256 | ||
3 | #define TL_TPGS_PER_HBA 32 | ||
4 | /* | ||
5 | * Defaults for struct scsi_host_template tcm_loop_driver_template | ||
6 | * | ||
7 | * We use large can_queue and cmd_per_lun here and let TCM enforce | ||
8 | * the underlying se_device_t->queue_depth. | ||
9 | */ | ||
10 | #define TL_SCSI_CAN_QUEUE 1024 | ||
11 | #define TL_SCSI_CMD_PER_LUN 1024 | ||
12 | #define TL_SCSI_MAX_SECTORS 1024 | ||
13 | #define TL_SCSI_SG_TABLESIZE 256 | ||
14 | /* | ||
15 | * Used in tcm_loop_driver_probe() for struct Scsi_Host->max_cmd_len | ||
16 | */ | ||
17 | #define TL_SCSI_MAX_CMD_LEN 32 | ||
18 | |||
19 | #ifdef CONFIG_LOOPBACK_TARGET_CDB_DEBUG | ||
20 | # define TL_CDB_DEBUG(x...) printk(KERN_INFO x) | ||
21 | #else | ||
22 | # define TL_CDB_DEBUG(x...) | ||
23 | #endif | ||
24 | |||
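TL_CDB_DEBUG() compiles away entirely unless CONFIG_LOOPBACK_TARGET_CDB_DEBUG is enabled. The same conditional debug-macro pattern in a small userspace form (sketch; it mirrors the GNU named-variadic syntax used by the kernel macro):

#include <stdio.h>

/* build with -DDEBUG_CDB to enable the trace output */
#ifdef DEBUG_CDB
# define CDB_DEBUG(x...) printf(x)
#else
# define CDB_DEBUG(x...) do { } while (0)
#endif

int main(void)
{
	CDB_DEBUG("got CDB: 0x%02x\n", 0x28); /* prints only when DEBUG_CDB is set */
	return 0;
}
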
25 | struct tcm_loop_cmd { | ||
26 | /* State of Linux/SCSI CDB+Data descriptor */ | ||
27 | u32 sc_cmd_state; | ||
28 | /* Pointer to the CDB+Data descriptor from Linux/SCSI subsystem */ | ||
29 | struct scsi_cmnd *sc; | ||
30 | struct list_head *tl_cmd_list; | ||
31 | /* The TCM I/O descriptor that is accessed via container_of() */ | ||
32 | struct se_cmd tl_se_cmd; | ||
33 | /* Sense buffer that will be mapped into outgoing status */ | ||
34 | unsigned char tl_sense_buf[TRANSPORT_SENSE_BUFFER]; | ||
35 | }; | ||
36 | |||
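Callbacks such as tcm_loop_get_cmd_state() and tcm_loop_queue_data_in() recover the tcm_loop_cmd from the embedded tl_se_cmd member with container_of(). A self-contained userspace illustration of that embedding, using stand-in types and a simplified container_of() macro:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct se_cmd_stub { int state; };      /* stand-in for struct se_cmd */

struct loop_cmd_stub {                  /* stand-in for struct tcm_loop_cmd */
	unsigned int sc_cmd_state;
	struct se_cmd_stub tl_se_cmd;   /* embedded descriptor */
};

int main(void)
{
	struct loop_cmd_stub cmd = { .sc_cmd_state = 3 };
	struct se_cmd_stub *se = &cmd.tl_se_cmd;

	/* recover the outer structure from the embedded member */
	struct loop_cmd_stub *back = container_of(se, struct loop_cmd_stub, tl_se_cmd);

	printf("sc_cmd_state: %u\n", back->sc_cmd_state);
	return 0;
}
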
37 | struct tcm_loop_tmr { | ||
38 | atomic_t tmr_complete; | ||
39 | wait_queue_head_t tl_tmr_wait; | ||
40 | }; | ||
41 | |||
42 | struct tcm_loop_nexus { | ||
43 | int it_nexus_active; | ||
44 | /* | ||
45 | * Pointer to Linux/SCSI HBA from include/scsi/scsi_host.h | ||
46 | */ | ||
47 | struct Scsi_Host *sh; | ||
48 | /* | ||
49 | * Pointer to TCM session for I_T Nexus | ||
50 | */ | ||
51 | struct se_session *se_sess; | ||
52 | }; | ||
53 | |||
54 | struct tcm_loop_nacl { | ||
55 | struct se_node_acl se_node_acl; | ||
56 | }; | ||
57 | |||
58 | struct tcm_loop_tpg { | ||
59 | unsigned short tl_tpgt; | ||
60 | atomic_t tl_tpg_port_count; | ||
61 | struct se_portal_group tl_se_tpg; | ||
62 | struct tcm_loop_hba *tl_hba; | ||
63 | }; | ||
64 | |||
65 | struct tcm_loop_hba { | ||
66 | u8 tl_proto_id; | ||
67 | unsigned char tl_wwn_address[TL_WWN_ADDR_LEN]; | ||
68 | struct se_hba_s *se_hba; | ||
69 | struct se_lun *tl_hba_lun; | ||
70 | struct se_port *tl_hba_lun_sep; | ||
71 | struct se_device_s *se_dev_hba_ptr; | ||
72 | struct tcm_loop_nexus *tl_nexus; | ||
73 | struct device dev; | ||
74 | struct Scsi_Host *sh; | ||
75 | struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA]; | ||
76 | struct se_wwn tl_hba_wwn; | ||
77 | }; | ||
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index caf8dc18ee0..a5f44a6e6e1 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c | |||
@@ -3,8 +3,8 @@ | |||
3 | * | 3 | * |
4 | * This file contains ConfigFS logic for the Generic Target Engine project. | 4 | * This file contains ConfigFS logic for the Generic Target Engine project. |
5 | * | 5 | * |
6 | * Copyright (c) 2008-2010 Rising Tide Systems | 6 | * Copyright (c) 2008-2011 Rising Tide Systems |
7 | * Copyright (c) 2008-2010 Linux-iSCSI.org | 7 | * Copyright (c) 2008-2011 Linux-iSCSI.org |
8 | * | 8 | * |
9 | * Nicholas A. Bellinger <nab@kernel.org> | 9 | * Nicholas A. Bellinger <nab@kernel.org> |
10 | * | 10 | * |
@@ -50,6 +50,7 @@ | |||
50 | #include "target_core_hba.h" | 50 | #include "target_core_hba.h" |
51 | #include "target_core_pr.h" | 51 | #include "target_core_pr.h" |
52 | #include "target_core_rd.h" | 52 | #include "target_core_rd.h" |
53 | #include "target_core_stat.h" | ||
53 | 54 | ||
54 | static struct list_head g_tf_list; | 55 | static struct list_head g_tf_list; |
55 | static struct mutex g_tf_lock; | 56 | static struct mutex g_tf_lock; |
@@ -1451,8 +1452,8 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( | |||
1451 | size_t count) | 1452 | size_t count) |
1452 | { | 1453 | { |
1453 | struct se_device *dev; | 1454 | struct se_device *dev; |
1454 | unsigned char *i_fabric, *t_fabric, *i_port = NULL, *t_port = NULL; | 1455 | unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL; |
1455 | unsigned char *isid = NULL; | 1456 | unsigned char *t_fabric = NULL, *t_port = NULL; |
1456 | char *orig, *ptr, *arg_p, *opts; | 1457 | char *orig, *ptr, *arg_p, *opts; |
1457 | substring_t args[MAX_OPT_ARGS]; | 1458 | substring_t args[MAX_OPT_ARGS]; |
1458 | unsigned long long tmp_ll; | 1459 | unsigned long long tmp_ll; |
@@ -1488,9 +1489,17 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( | |||
1488 | switch (token) { | 1489 | switch (token) { |
1489 | case Opt_initiator_fabric: | 1490 | case Opt_initiator_fabric: |
1490 | i_fabric = match_strdup(&args[0]); | 1491 | i_fabric = match_strdup(&args[0]); |
1492 | if (!i_fabric) { | ||
1493 | ret = -ENOMEM; | ||
1494 | goto out; | ||
1495 | } | ||
1491 | break; | 1496 | break; |
1492 | case Opt_initiator_node: | 1497 | case Opt_initiator_node: |
1493 | i_port = match_strdup(&args[0]); | 1498 | i_port = match_strdup(&args[0]); |
1499 | if (!i_port) { | ||
1500 | ret = -ENOMEM; | ||
1501 | goto out; | ||
1502 | } | ||
1494 | if (strlen(i_port) > PR_APTPL_MAX_IPORT_LEN) { | 1503 | if (strlen(i_port) > PR_APTPL_MAX_IPORT_LEN) { |
1495 | printk(KERN_ERR "APTPL metadata initiator_node=" | 1504 | printk(KERN_ERR "APTPL metadata initiator_node=" |
1496 | " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n", | 1505 | " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n", |
@@ -1501,6 +1510,10 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( | |||
1501 | break; | 1510 | break; |
1502 | case Opt_initiator_sid: | 1511 | case Opt_initiator_sid: |
1503 | isid = match_strdup(&args[0]); | 1512 | isid = match_strdup(&args[0]); |
1513 | if (!isid) { | ||
1514 | ret = -ENOMEM; | ||
1515 | goto out; | ||
1516 | } | ||
1504 | if (strlen(isid) > PR_REG_ISID_LEN) { | 1517 | if (strlen(isid) > PR_REG_ISID_LEN) { |
1505 | printk(KERN_ERR "APTPL metadata initiator_isid" | 1518 | printk(KERN_ERR "APTPL metadata initiator_isid" |
1506 | "= exceeds PR_REG_ISID_LEN: %d\n", | 1519 | "= exceeds PR_REG_ISID_LEN: %d\n", |
@@ -1511,6 +1524,10 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( | |||
1511 | break; | 1524 | break; |
1512 | case Opt_sa_res_key: | 1525 | case Opt_sa_res_key: |
1513 | arg_p = match_strdup(&args[0]); | 1526 | arg_p = match_strdup(&args[0]); |
1527 | if (!arg_p) { | ||
1528 | ret = -ENOMEM; | ||
1529 | goto out; | ||
1530 | } | ||
1514 | ret = strict_strtoull(arg_p, 0, &tmp_ll); | 1531 | ret = strict_strtoull(arg_p, 0, &tmp_ll); |
1515 | if (ret < 0) { | 1532 | if (ret < 0) { |
1516 | printk(KERN_ERR "strict_strtoull() failed for" | 1533 | printk(KERN_ERR "strict_strtoull() failed for" |
@@ -1547,9 +1564,17 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( | |||
1547 | */ | 1564 | */ |
1548 | case Opt_target_fabric: | 1565 | case Opt_target_fabric: |
1549 | t_fabric = match_strdup(&args[0]); | 1566 | t_fabric = match_strdup(&args[0]); |
1567 | if (!t_fabric) { | ||
1568 | ret = -ENOMEM; | ||
1569 | goto out; | ||
1570 | } | ||
1550 | break; | 1571 | break; |
1551 | case Opt_target_node: | 1572 | case Opt_target_node: |
1552 | t_port = match_strdup(&args[0]); | 1573 | t_port = match_strdup(&args[0]); |
1574 | if (!t_port) { | ||
1575 | ret = -ENOMEM; | ||
1576 | goto out; | ||
1577 | } | ||
1553 | if (strlen(t_port) > PR_APTPL_MAX_TPORT_LEN) { | 1578 | if (strlen(t_port) > PR_APTPL_MAX_TPORT_LEN) { |
1554 | printk(KERN_ERR "APTPL metadata target_node=" | 1579 | printk(KERN_ERR "APTPL metadata target_node=" |
1555 | " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n", | 1580 | " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n", |
@@ -1592,6 +1617,11 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( | |||
1592 | i_port, isid, mapped_lun, t_port, tpgt, target_lun, | 1617 | i_port, isid, mapped_lun, t_port, tpgt, target_lun, |
1593 | res_holder, all_tg_pt, type); | 1618 | res_holder, all_tg_pt, type); |
1594 | out: | 1619 | out: |
1620 | kfree(i_fabric); | ||
1621 | kfree(i_port); | ||
1622 | kfree(isid); | ||
1623 | kfree(t_fabric); | ||
1624 | kfree(t_port); | ||
1595 | kfree(orig); | 1625 | kfree(orig); |
1596 | return (ret == 0) ? count : ret; | 1626 | return (ret == 0) ? count : ret; |
1597 | } | 1627 | } |
@@ -1798,7 +1828,9 @@ static ssize_t target_core_store_dev_enable( | |||
1798 | return -EINVAL; | 1828 | return -EINVAL; |
1799 | 1829 | ||
1800 | dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr); | 1830 | dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr); |
1801 | if (!(dev) || IS_ERR(dev)) | 1831 | if (IS_ERR(dev)) |
1832 | return PTR_ERR(dev); | ||
1833 | else if (!dev) | ||
1802 | return -EINVAL; | 1834 | return -EINVAL; |
1803 | 1835 | ||
1804 | se_dev->se_dev_ptr = dev; | 1836 | se_dev->se_dev_ptr = dev; |
@@ -2678,6 +2710,34 @@ static struct config_item_type target_core_alua_cit = { | |||
2678 | 2710 | ||
2679 | /* End functions for struct config_item_type target_core_alua_cit */ | 2711 | /* End functions for struct config_item_type target_core_alua_cit */ |
2680 | 2712 | ||
2713 | /* Start functions for struct config_item_type target_core_stat_cit */ | ||
2714 | |||
2715 | static struct config_group *target_core_stat_mkdir( | ||
2716 | struct config_group *group, | ||
2717 | const char *name) | ||
2718 | { | ||
2719 | return ERR_PTR(-ENOSYS); | ||
2720 | } | ||
2721 | |||
2722 | static void target_core_stat_rmdir( | ||
2723 | struct config_group *group, | ||
2724 | struct config_item *item) | ||
2725 | { | ||
2726 | return; | ||
2727 | } | ||
2728 | |||
2729 | static struct configfs_group_operations target_core_stat_group_ops = { | ||
2730 | .make_group = &target_core_stat_mkdir, | ||
2731 | .drop_item = &target_core_stat_rmdir, | ||
2732 | }; | ||
2733 | |||
2734 | static struct config_item_type target_core_stat_cit = { | ||
2735 | .ct_group_ops = &target_core_stat_group_ops, | ||
2736 | .ct_owner = THIS_MODULE, | ||
2737 | }; | ||
2738 | |||
2739 | /* End functions for struct config_item_type target_core_stat_cit */ | ||
2740 | |||
2681 | /* Start functions for struct config_item_type target_core_hba_cit */ | 2741 | /* Start functions for struct config_item_type target_core_hba_cit */ |
2682 | 2742 | ||
2683 | static struct config_group *target_core_make_subdev( | 2743 | static struct config_group *target_core_make_subdev( |
@@ -2690,10 +2750,12 @@ static struct config_group *target_core_make_subdev( | |||
2690 | struct config_item *hba_ci = &group->cg_item; | 2750 | struct config_item *hba_ci = &group->cg_item; |
2691 | struct se_hba *hba = item_to_hba(hba_ci); | 2751 | struct se_hba *hba = item_to_hba(hba_ci); |
2692 | struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL; | 2752 | struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL; |
2753 | struct config_group *dev_stat_grp = NULL; | ||
2754 | int errno = -ENOMEM, ret; | ||
2693 | 2755 | ||
2694 | if (mutex_lock_interruptible(&hba->hba_access_mutex)) | 2756 | ret = mutex_lock_interruptible(&hba->hba_access_mutex); |
2695 | return NULL; | 2757 | if (ret) |
2696 | 2758 | return ERR_PTR(ret); | |
2697 | /* | 2759 | /* |
2698 | * Locate the struct se_subsystem_api from parent's struct se_hba. | 2760 | * Locate the struct se_subsystem_api from parent's struct se_hba. |
2699 | */ | 2761 | */ |
@@ -2723,7 +2785,7 @@ static struct config_group *target_core_make_subdev( | |||
2723 | se_dev->se_dev_hba = hba; | 2785 | se_dev->se_dev_hba = hba; |
2724 | dev_cg = &se_dev->se_dev_group; | 2786 | dev_cg = &se_dev->se_dev_group; |
2725 | 2787 | ||
2726 | dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 6, | 2788 | dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 7, |
2727 | GFP_KERNEL); | 2789 | GFP_KERNEL); |
2728 | if (!(dev_cg->default_groups)) | 2790 | if (!(dev_cg->default_groups)) |
2729 | goto out; | 2791 | goto out; |
@@ -2755,13 +2817,17 @@ static struct config_group *target_core_make_subdev( | |||
2755 | &target_core_dev_wwn_cit); | 2817 | &target_core_dev_wwn_cit); |
2756 | config_group_init_type_name(&se_dev->t10_alua.alua_tg_pt_gps_group, | 2818 | config_group_init_type_name(&se_dev->t10_alua.alua_tg_pt_gps_group, |
2757 | "alua", &target_core_alua_tg_pt_gps_cit); | 2819 | "alua", &target_core_alua_tg_pt_gps_cit); |
2820 | config_group_init_type_name(&se_dev->dev_stat_grps.stat_group, | ||
2821 | "statistics", &target_core_stat_cit); | ||
2822 | |||
2758 | dev_cg->default_groups[0] = &se_dev->se_dev_attrib.da_group; | 2823 | dev_cg->default_groups[0] = &se_dev->se_dev_attrib.da_group; |
2759 | dev_cg->default_groups[1] = &se_dev->se_dev_pr_group; | 2824 | dev_cg->default_groups[1] = &se_dev->se_dev_pr_group; |
2760 | dev_cg->default_groups[2] = &se_dev->t10_wwn.t10_wwn_group; | 2825 | dev_cg->default_groups[2] = &se_dev->t10_wwn.t10_wwn_group; |
2761 | dev_cg->default_groups[3] = &se_dev->t10_alua.alua_tg_pt_gps_group; | 2826 | dev_cg->default_groups[3] = &se_dev->t10_alua.alua_tg_pt_gps_group; |
2762 | dev_cg->default_groups[4] = NULL; | 2827 | dev_cg->default_groups[4] = &se_dev->dev_stat_grps.stat_group; |
2828 | dev_cg->default_groups[5] = NULL; | ||
2763 | /* | 2829 | /* |
2764 | * Add core/$HBA/$DEV/alua/tg_pt_gps/default_tg_pt_gp | 2830 | * Add core/$HBA/$DEV/alua/default_tg_pt_gp |
2765 | */ | 2831 | */ |
2766 | tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1); | 2832 | tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1); |
2767 | if (!(tg_pt_gp)) | 2833 | if (!(tg_pt_gp)) |
@@ -2781,6 +2847,17 @@ static struct config_group *target_core_make_subdev( | |||
2781 | tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group; | 2847 | tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group; |
2782 | tg_pt_gp_cg->default_groups[1] = NULL; | 2848 | tg_pt_gp_cg->default_groups[1] = NULL; |
2783 | T10_ALUA(se_dev)->default_tg_pt_gp = tg_pt_gp; | 2849 | T10_ALUA(se_dev)->default_tg_pt_gp = tg_pt_gp; |
2850 | /* | ||
2851 | * Add core/$HBA/$DEV/statistics/ default groups | ||
2852 | */ | ||
2853 | dev_stat_grp = &DEV_STAT_GRP(se_dev)->stat_group; | ||
2854 | dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4, | ||
2855 | GFP_KERNEL); | ||
2856 | if (!dev_stat_grp->default_groups) { | ||
2857 | printk(KERN_ERR "Unable to allocate dev_stat_grp->default_groups\n"); | ||
2858 | goto out; | ||
2859 | } | ||
2860 | target_stat_setup_dev_default_groups(se_dev); | ||
2784 | 2861 | ||
2785 | printk(KERN_INFO "Target_Core_ConfigFS: Allocated struct se_subsystem_dev:" | 2862 | printk(KERN_INFO "Target_Core_ConfigFS: Allocated struct se_subsystem_dev:" |
2786 | " %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr); | 2863 | " %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr); |
@@ -2792,6 +2869,8 @@ out: | |||
2792 | core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp); | 2869 | core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp); |
2793 | T10_ALUA(se_dev)->default_tg_pt_gp = NULL; | 2870 | T10_ALUA(se_dev)->default_tg_pt_gp = NULL; |
2794 | } | 2871 | } |
2872 | if (dev_stat_grp) | ||
2873 | kfree(dev_stat_grp->default_groups); | ||
2795 | if (tg_pt_gp_cg) | 2874 | if (tg_pt_gp_cg) |
2796 | kfree(tg_pt_gp_cg->default_groups); | 2875 | kfree(tg_pt_gp_cg->default_groups); |
2797 | if (dev_cg) | 2876 | if (dev_cg) |
@@ -2801,7 +2880,7 @@ out: | |||
2801 | kfree(se_dev); | 2880 | kfree(se_dev); |
2802 | unlock: | 2881 | unlock: |
2803 | mutex_unlock(&hba->hba_access_mutex); | 2882 | mutex_unlock(&hba->hba_access_mutex); |
2804 | return NULL; | 2883 | return ERR_PTR(errno); |
2805 | } | 2884 | } |
2806 | 2885 | ||
2807 | static void target_core_drop_subdev( | 2886 | static void target_core_drop_subdev( |
@@ -2813,7 +2892,7 @@ static void target_core_drop_subdev( | |||
2813 | struct se_hba *hba; | 2892 | struct se_hba *hba; |
2814 | struct se_subsystem_api *t; | 2893 | struct se_subsystem_api *t; |
2815 | struct config_item *df_item; | 2894 | struct config_item *df_item; |
2816 | struct config_group *dev_cg, *tg_pt_gp_cg; | 2895 | struct config_group *dev_cg, *tg_pt_gp_cg, *dev_stat_grp; |
2817 | int i; | 2896 | int i; |
2818 | 2897 | ||
2819 | hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); | 2898 | hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); |
@@ -2825,6 +2904,14 @@ static void target_core_drop_subdev( | |||
2825 | list_del(&se_dev->g_se_dev_list); | 2904 | list_del(&se_dev->g_se_dev_list); |
2826 | spin_unlock(&se_global->g_device_lock); | 2905 | spin_unlock(&se_global->g_device_lock); |
2827 | 2906 | ||
2907 | dev_stat_grp = &DEV_STAT_GRP(se_dev)->stat_group; | ||
2908 | for (i = 0; dev_stat_grp->default_groups[i]; i++) { | ||
2909 | df_item = &dev_stat_grp->default_groups[i]->cg_item; | ||
2910 | dev_stat_grp->default_groups[i] = NULL; | ||
2911 | config_item_put(df_item); | ||
2912 | } | ||
2913 | kfree(dev_stat_grp->default_groups); | ||
2914 | |||
2828 | tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group; | 2915 | tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group; |
2829 | for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) { | 2916 | for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) { |
2830 | df_item = &tg_pt_gp_cg->default_groups[i]->cg_item; | 2917 | df_item = &tg_pt_gp_cg->default_groups[i]->cg_item; |
@@ -3044,7 +3131,7 @@ static struct config_item_type target_core_cit = { | |||
3044 | 3131 | ||
3045 | /* Stop functions for struct config_item_type target_core_hba_cit */ | 3132 | /* Stop functions for struct config_item_type target_core_hba_cit */ |
3046 | 3133 | ||
3047 | static int target_core_init_configfs(void) | 3134 | static int __init target_core_init_configfs(void) |
3048 | { | 3135 | { |
3049 | struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; | 3136 | struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; |
3050 | struct config_group *lu_gp_cg = NULL; | 3137 | struct config_group *lu_gp_cg = NULL; |
@@ -3176,7 +3263,7 @@ out_global: | |||
3176 | return -1; | 3263 | return -1; |
3177 | } | 3264 | } |
3178 | 3265 | ||
3179 | static void target_core_exit_configfs(void) | 3266 | static void __exit target_core_exit_configfs(void) |
3180 | { | 3267 | { |
3181 | struct configfs_subsystem *subsys; | 3268 | struct configfs_subsystem *subsys; |
3182 | struct config_group *hba_cg, *alua_cg, *lu_gp_cg; | 3269 | struct config_group *hba_cg, *alua_cg, *lu_gp_cg; |
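
The target_core_configfs.c hunks above make two related changes: configfs ->make_group() style callbacks (target_core_make_subdev() here) now report failure with ERR_PTR() codes instead of a bare NULL, and each struct se_subsystem_dev gains a "statistics" default group that is allocated in target_core_make_subdev() and torn down in target_core_drop_subdev(). The sketch below shows that general configfs pattern in isolation; struct my_dev, my_dev_cit and my_stat_cit are hypothetical placeholders, not symbols from the target core.

    #include <linux/configfs.h>
    #include <linux/err.h>
    #include <linux/slab.h>

    struct my_dev {
            struct config_group group;      /* the $DEV directory itself */
            struct config_group stat_group; /* its "statistics" child */
    };

    static struct config_item_type my_dev_cit;   /* attribute tables omitted */
    static struct config_item_type my_stat_cit;

    static struct config_group *my_make_group(struct config_group *parent,
                                              const char *name)
    {
            struct my_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

            if (!dev)
                    return ERR_PTR(-ENOMEM);  /* caller checks IS_ERR(), not NULL */

            /* one slot per default group plus a NULL terminator */
            dev->group.default_groups = kzalloc(2 * sizeof(struct config_group *),
                                                GFP_KERNEL);
            if (!dev->group.default_groups) {
                    kfree(dev);
                    return ERR_PTR(-ENOMEM);
            }

            config_group_init_type_name(&dev->group, name, &my_dev_cit);
            config_group_init_type_name(&dev->stat_group, "statistics",
                                        &my_stat_cit);
            dev->group.default_groups[0] = &dev->stat_group;
            dev->group.default_groups[1] = NULL;

            return &dev->group;
    }

Since default_groups holds an array of pointers, pointer-sized slots (as in the sketch) are all that is strictly required; the hunks above allocate sizeof(struct config_group) per slot, which is larger than necessary but harmless.
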
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 350ed401544..3fb8e32506e 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c | |||
@@ -589,6 +589,7 @@ static void core_export_port( | |||
589 | * Called with struct se_device->se_port_lock spinlock held. | 589 | * Called with struct se_device->se_port_lock spinlock held. |
590 | */ | 590 | */ |
591 | static void core_release_port(struct se_device *dev, struct se_port *port) | 591 | static void core_release_port(struct se_device *dev, struct se_port *port) |
592 | __releases(&dev->se_port_lock) __acquires(&dev->se_port_lock) | ||
592 | { | 593 | { |
593 | /* | 594 | /* |
594 | * Wait for any port reference for PR ALL_TG_PT=1 operation | 595 | * Wait for any port reference for PR ALL_TG_PT=1 operation |
@@ -779,49 +780,14 @@ void se_release_vpd_for_dev(struct se_device *dev) | |||
779 | return; | 780 | return; |
780 | } | 781 | } |
781 | 782 | ||
782 | /* | ||
783 | * Called with struct se_hba->device_lock held. | ||
784 | */ | ||
785 | void se_clear_dev_ports(struct se_device *dev) | ||
786 | { | ||
787 | struct se_hba *hba = dev->se_hba; | ||
788 | struct se_lun *lun; | ||
789 | struct se_portal_group *tpg; | ||
790 | struct se_port *sep, *sep_tmp; | ||
791 | |||
792 | spin_lock(&dev->se_port_lock); | ||
793 | list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) { | ||
794 | spin_unlock(&dev->se_port_lock); | ||
795 | spin_unlock(&hba->device_lock); | ||
796 | |||
797 | lun = sep->sep_lun; | ||
798 | tpg = sep->sep_tpg; | ||
799 | spin_lock(&lun->lun_sep_lock); | ||
800 | if (lun->lun_se_dev == NULL) { | ||
801 | spin_unlock(&lun->lun_sep_lock); | ||
802 | continue; | ||
803 | } | ||
804 | spin_unlock(&lun->lun_sep_lock); | ||
805 | |||
806 | core_dev_del_lun(tpg, lun->unpacked_lun); | ||
807 | |||
808 | spin_lock(&hba->device_lock); | ||
809 | spin_lock(&dev->se_port_lock); | ||
810 | } | ||
811 | spin_unlock(&dev->se_port_lock); | ||
812 | |||
813 | return; | ||
814 | } | ||
815 | |||
816 | /* se_free_virtual_device(): | 783 | /* se_free_virtual_device(): |
817 | * | 784 | * |
818 | * Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers. | 785 | * Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers. |
819 | */ | 786 | */ |
820 | int se_free_virtual_device(struct se_device *dev, struct se_hba *hba) | 787 | int se_free_virtual_device(struct se_device *dev, struct se_hba *hba) |
821 | { | 788 | { |
822 | spin_lock(&hba->device_lock); | 789 | if (!list_empty(&dev->dev_sep_list)) |
823 | se_clear_dev_ports(dev); | 790 | dump_stack(); |
824 | spin_unlock(&hba->device_lock); | ||
825 | 791 | ||
826 | core_alua_free_lu_gp_mem(dev); | 792 | core_alua_free_lu_gp_mem(dev); |
827 | se_release_device_for_hba(dev); | 793 | se_release_device_for_hba(dev); |
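
In the target_core_device.c hunks, the open-coded se_clear_dev_ports() loop is deleted and se_free_virtual_device() merely asserts that the port list is already empty, which suggests port teardown is now expected to have completed at the configfs layer before the device is released. The surviving idiom is just a sanity check, sketched here with an assumed list-head argument:

    #include <linux/kernel.h>
    #include <linux/list.h>

    /* Warn loudly if ports were not torn down before device release. */
    static void my_check_ports_gone(struct list_head *sep_list)
    {
            if (!list_empty(sep_list))
                    dump_stack();   /* leftover ports indicate an ordering bug */
    }
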
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index b65d1c8e774..07ab5a3bb8e 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c | |||
@@ -4,10 +4,10 @@ | |||
4 | * This file contains generic fabric module configfs infrastructure for | 4 | * This file contains generic fabric module configfs infrastructure for |
5 | * TCM v4.x code | 5 | * TCM v4.x code |
6 | * | 6 | * |
7 | * Copyright (c) 2010 Rising Tide Systems | 7 | * Copyright (c) 2010,2011 Rising Tide Systems |
8 | * Copyright (c) 2010 Linux-iSCSI.org | 8 | * Copyright (c) 2010,2011 Linux-iSCSI.org |
9 | * | 9 | * |
10 | * Copyright (c) 2010 Nicholas A. Bellinger <nab@linux-iscsi.org> | 10 | * Copyright (c) Nicholas A. Bellinger <nab@linux-iscsi.org> |
11 | * | 11 | * |
12 | * This program is free software; you can redistribute it and/or modify | 12 | * This program is free software; you can redistribute it and/or modify |
13 | * it under the terms of the GNU General Public License as published by | 13 | * it under the terms of the GNU General Public License as published by |
@@ -48,6 +48,7 @@ | |||
48 | #include "target_core_alua.h" | 48 | #include "target_core_alua.h" |
49 | #include "target_core_hba.h" | 49 | #include "target_core_hba.h" |
50 | #include "target_core_pr.h" | 50 | #include "target_core_pr.h" |
51 | #include "target_core_stat.h" | ||
51 | 52 | ||
52 | #define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \ | 53 | #define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \ |
53 | static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \ | 54 | static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \ |
@@ -241,6 +242,32 @@ TF_CIT_SETUP(tpg_mappedlun, &target_fabric_mappedlun_item_ops, NULL, | |||
241 | 242 | ||
242 | /* End of tfc_tpg_mappedlun_cit */ | 243 | /* End of tfc_tpg_mappedlun_cit */ |
243 | 244 | ||
245 | /* Start of tfc_tpg_mappedlun_port_cit */ | ||
246 | |||
247 | static struct config_group *target_core_mappedlun_stat_mkdir( | ||
248 | struct config_group *group, | ||
249 | const char *name) | ||
250 | { | ||
251 | return ERR_PTR(-ENOSYS); | ||
252 | } | ||
253 | |||
254 | static void target_core_mappedlun_stat_rmdir( | ||
255 | struct config_group *group, | ||
256 | struct config_item *item) | ||
257 | { | ||
258 | return; | ||
259 | } | ||
260 | |||
261 | static struct configfs_group_operations target_fabric_mappedlun_stat_group_ops = { | ||
262 | .make_group = target_core_mappedlun_stat_mkdir, | ||
263 | .drop_item = target_core_mappedlun_stat_rmdir, | ||
264 | }; | ||
265 | |||
266 | TF_CIT_SETUP(tpg_mappedlun_stat, NULL, &target_fabric_mappedlun_stat_group_ops, | ||
267 | NULL); | ||
268 | |||
269 | /* End of tfc_tpg_mappedlun_port_cit */ | ||
270 | |||
244 | /* Start of tfc_tpg_nacl_attrib_cit */ | 271 | /* Start of tfc_tpg_nacl_attrib_cit */ |
245 | 272 | ||
246 | CONFIGFS_EATTR_OPS(target_fabric_nacl_attrib, se_node_acl, acl_attrib_group); | 273 | CONFIGFS_EATTR_OPS(target_fabric_nacl_attrib, se_node_acl, acl_attrib_group); |
@@ -294,6 +321,7 @@ static struct config_group *target_fabric_make_mappedlun( | |||
294 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; | 321 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; |
295 | struct se_lun_acl *lacl; | 322 | struct se_lun_acl *lacl; |
296 | struct config_item *acl_ci; | 323 | struct config_item *acl_ci; |
324 | struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL; | ||
297 | char *buf; | 325 | char *buf; |
298 | unsigned long mapped_lun; | 326 | unsigned long mapped_lun; |
299 | int ret = 0; | 327 | int ret = 0; |
@@ -330,15 +358,42 @@ static struct config_group *target_fabric_make_mappedlun( | |||
330 | 358 | ||
331 | lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun, | 359 | lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun, |
332 | config_item_name(acl_ci), &ret); | 360 | config_item_name(acl_ci), &ret); |
333 | if (!(lacl)) | 361 | if (!(lacl)) { |
362 | ret = -EINVAL; | ||
334 | goto out; | 363 | goto out; |
364 | } | ||
365 | |||
366 | lacl_cg = &lacl->se_lun_group; | ||
367 | lacl_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, | ||
368 | GFP_KERNEL); | ||
369 | if (!lacl_cg->default_groups) { | ||
370 | printk(KERN_ERR "Unable to allocate lacl_cg->default_groups\n"); | ||
371 | ret = -ENOMEM; | ||
372 | goto out; | ||
373 | } | ||
335 | 374 | ||
336 | config_group_init_type_name(&lacl->se_lun_group, name, | 375 | config_group_init_type_name(&lacl->se_lun_group, name, |
337 | &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit); | 376 | &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit); |
377 | config_group_init_type_name(&lacl->ml_stat_grps.stat_group, | ||
378 | "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_stat_cit); | ||
379 | lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group; | ||
380 | lacl_cg->default_groups[1] = NULL; | ||
381 | |||
382 | ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group; | ||
383 | ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, | ||
384 | GFP_KERNEL); | ||
385 | if (!ml_stat_grp->default_groups) { | ||
386 | printk(KERN_ERR "Unable to allocate ml_stat_grp->default_groups\n"); | ||
387 | ret = -ENOMEM; | ||
388 | goto out; | ||
389 | } | ||
390 | target_stat_setup_mappedlun_default_groups(lacl); | ||
338 | 391 | ||
339 | kfree(buf); | 392 | kfree(buf); |
340 | return &lacl->se_lun_group; | 393 | return &lacl->se_lun_group; |
341 | out: | 394 | out: |
395 | if (lacl_cg) | ||
396 | kfree(lacl_cg->default_groups); | ||
342 | kfree(buf); | 397 | kfree(buf); |
343 | return ERR_PTR(ret); | 398 | return ERR_PTR(ret); |
344 | } | 399 | } |
@@ -347,6 +402,28 @@ static void target_fabric_drop_mappedlun( | |||
347 | struct config_group *group, | 402 | struct config_group *group, |
348 | struct config_item *item) | 403 | struct config_item *item) |
349 | { | 404 | { |
405 | struct se_lun_acl *lacl = container_of(to_config_group(item), | ||
406 | struct se_lun_acl, se_lun_group); | ||
407 | struct config_item *df_item; | ||
408 | struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL; | ||
409 | int i; | ||
410 | |||
411 | ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group; | ||
412 | for (i = 0; ml_stat_grp->default_groups[i]; i++) { | ||
413 | df_item = &ml_stat_grp->default_groups[i]->cg_item; | ||
414 | ml_stat_grp->default_groups[i] = NULL; | ||
415 | config_item_put(df_item); | ||
416 | } | ||
417 | kfree(ml_stat_grp->default_groups); | ||
418 | |||
419 | lacl_cg = &lacl->se_lun_group; | ||
420 | for (i = 0; lacl_cg->default_groups[i]; i++) { | ||
421 | df_item = &lacl_cg->default_groups[i]->cg_item; | ||
422 | lacl_cg->default_groups[i] = NULL; | ||
423 | config_item_put(df_item); | ||
424 | } | ||
425 | kfree(lacl_cg->default_groups); | ||
426 | |||
350 | config_item_put(item); | 427 | config_item_put(item); |
351 | } | 428 | } |
352 | 429 | ||
@@ -376,6 +453,15 @@ TF_CIT_SETUP(tpg_nacl_base, &target_fabric_nacl_base_item_ops, | |||
376 | 453 | ||
377 | /* End of tfc_tpg_nacl_base_cit */ | 454 | /* End of tfc_tpg_nacl_base_cit */ |
378 | 455 | ||
456 | /* Start of tfc_node_fabric_stats_cit */ | ||
457 | /* | ||
458 | * This is used as a placeholder for struct se_node_acl->acl_fabric_stat_group | ||
459 | * to allow fabrics access to ->acl_fabric_stat_group->default_groups[] | ||
460 | */ | ||
461 | TF_CIT_SETUP(tpg_nacl_stat, NULL, NULL, NULL); | ||
462 | |||
463 | /* End of tfc_wwn_fabric_stats_cit */ | ||
464 | |||
379 | /* Start of tfc_tpg_nacl_cit */ | 465 | /* Start of tfc_tpg_nacl_cit */ |
380 | 466 | ||
381 | static struct config_group *target_fabric_make_nodeacl( | 467 | static struct config_group *target_fabric_make_nodeacl( |
@@ -402,7 +488,8 @@ static struct config_group *target_fabric_make_nodeacl( | |||
402 | nacl_cg->default_groups[0] = &se_nacl->acl_attrib_group; | 488 | nacl_cg->default_groups[0] = &se_nacl->acl_attrib_group; |
403 | nacl_cg->default_groups[1] = &se_nacl->acl_auth_group; | 489 | nacl_cg->default_groups[1] = &se_nacl->acl_auth_group; |
404 | nacl_cg->default_groups[2] = &se_nacl->acl_param_group; | 490 | nacl_cg->default_groups[2] = &se_nacl->acl_param_group; |
405 | nacl_cg->default_groups[3] = NULL; | 491 | nacl_cg->default_groups[3] = &se_nacl->acl_fabric_stat_group; |
492 | nacl_cg->default_groups[4] = NULL; | ||
406 | 493 | ||
407 | config_group_init_type_name(&se_nacl->acl_group, name, | 494 | config_group_init_type_name(&se_nacl->acl_group, name, |
408 | &TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit); | 495 | &TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit); |
@@ -412,6 +499,9 @@ static struct config_group *target_fabric_make_nodeacl( | |||
412 | &TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit); | 499 | &TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit); |
413 | config_group_init_type_name(&se_nacl->acl_param_group, "param", | 500 | config_group_init_type_name(&se_nacl->acl_param_group, "param", |
414 | &TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit); | 501 | &TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit); |
502 | config_group_init_type_name(&se_nacl->acl_fabric_stat_group, | ||
503 | "fabric_statistics", | ||
504 | &TF_CIT_TMPL(tf)->tfc_tpg_nacl_stat_cit); | ||
415 | 505 | ||
416 | return &se_nacl->acl_group; | 506 | return &se_nacl->acl_group; |
417 | } | 507 | } |
@@ -758,6 +848,31 @@ TF_CIT_SETUP(tpg_port, &target_fabric_port_item_ops, NULL, target_fabric_port_at | |||
758 | 848 | ||
759 | /* End of tfc_tpg_port_cit */ | 849 | /* End of tfc_tpg_port_cit */ |
760 | 850 | ||
851 | /* Start of tfc_tpg_port_stat_cit */ | ||
852 | |||
853 | static struct config_group *target_core_port_stat_mkdir( | ||
854 | struct config_group *group, | ||
855 | const char *name) | ||
856 | { | ||
857 | return ERR_PTR(-ENOSYS); | ||
858 | } | ||
859 | |||
860 | static void target_core_port_stat_rmdir( | ||
861 | struct config_group *group, | ||
862 | struct config_item *item) | ||
863 | { | ||
864 | return; | ||
865 | } | ||
866 | |||
867 | static struct configfs_group_operations target_fabric_port_stat_group_ops = { | ||
868 | .make_group = target_core_port_stat_mkdir, | ||
869 | .drop_item = target_core_port_stat_rmdir, | ||
870 | }; | ||
871 | |||
872 | TF_CIT_SETUP(tpg_port_stat, NULL, &target_fabric_port_stat_group_ops, NULL); | ||
873 | |||
874 | /* End of tfc_tpg_port_stat_cit */ | ||
875 | |||
761 | /* Start of tfc_tpg_lun_cit */ | 876 | /* Start of tfc_tpg_lun_cit */ |
762 | 877 | ||
763 | static struct config_group *target_fabric_make_lun( | 878 | static struct config_group *target_fabric_make_lun( |
@@ -768,7 +883,9 @@ static struct config_group *target_fabric_make_lun( | |||
768 | struct se_portal_group *se_tpg = container_of(group, | 883 | struct se_portal_group *se_tpg = container_of(group, |
769 | struct se_portal_group, tpg_lun_group); | 884 | struct se_portal_group, tpg_lun_group); |
770 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; | 885 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; |
886 | struct config_group *lun_cg = NULL, *port_stat_grp = NULL; | ||
771 | unsigned long unpacked_lun; | 887 | unsigned long unpacked_lun; |
888 | int errno; | ||
772 | 889 | ||
773 | if (strstr(name, "lun_") != name) { | 890 | if (strstr(name, "lun_") != name) { |
774 | printk(KERN_ERR "Unable to locate \'_\" in" | 891 | printk(KERN_ERR "Unable to locate \'_\" in" |
@@ -782,16 +899,64 @@ static struct config_group *target_fabric_make_lun( | |||
782 | if (!(lun)) | 899 | if (!(lun)) |
783 | return ERR_PTR(-EINVAL); | 900 | return ERR_PTR(-EINVAL); |
784 | 901 | ||
902 | lun_cg = &lun->lun_group; | ||
903 | lun_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, | ||
904 | GFP_KERNEL); | ||
905 | if (!lun_cg->default_groups) { | ||
906 | printk(KERN_ERR "Unable to allocate lun_cg->default_groups\n"); | ||
907 | return ERR_PTR(-ENOMEM); | ||
908 | } | ||
909 | |||
785 | config_group_init_type_name(&lun->lun_group, name, | 910 | config_group_init_type_name(&lun->lun_group, name, |
786 | &TF_CIT_TMPL(tf)->tfc_tpg_port_cit); | 911 | &TF_CIT_TMPL(tf)->tfc_tpg_port_cit); |
912 | config_group_init_type_name(&lun->port_stat_grps.stat_group, | ||
913 | "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_port_stat_cit); | ||
914 | lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group; | ||
915 | lun_cg->default_groups[1] = NULL; | ||
916 | |||
917 | port_stat_grp = &PORT_STAT_GRP(lun)->stat_group; | ||
918 | port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, | ||
919 | GFP_KERNEL); | ||
920 | if (!port_stat_grp->default_groups) { | ||
921 | printk(KERN_ERR "Unable to allocate port_stat_grp->default_groups\n"); | ||
922 | errno = -ENOMEM; | ||
923 | goto out; | ||
924 | } | ||
925 | target_stat_setup_port_default_groups(lun); | ||
787 | 926 | ||
788 | return &lun->lun_group; | 927 | return &lun->lun_group; |
928 | out: | ||
929 | if (lun_cg) | ||
930 | kfree(lun_cg->default_groups); | ||
931 | return ERR_PTR(errno); | ||
789 | } | 932 | } |
790 | 933 | ||
791 | static void target_fabric_drop_lun( | 934 | static void target_fabric_drop_lun( |
792 | struct config_group *group, | 935 | struct config_group *group, |
793 | struct config_item *item) | 936 | struct config_item *item) |
794 | { | 937 | { |
938 | struct se_lun *lun = container_of(to_config_group(item), | ||
939 | struct se_lun, lun_group); | ||
940 | struct config_item *df_item; | ||
941 | struct config_group *lun_cg, *port_stat_grp; | ||
942 | int i; | ||
943 | |||
944 | port_stat_grp = &PORT_STAT_GRP(lun)->stat_group; | ||
945 | for (i = 0; port_stat_grp->default_groups[i]; i++) { | ||
946 | df_item = &port_stat_grp->default_groups[i]->cg_item; | ||
947 | port_stat_grp->default_groups[i] = NULL; | ||
948 | config_item_put(df_item); | ||
949 | } | ||
950 | kfree(port_stat_grp->default_groups); | ||
951 | |||
952 | lun_cg = &lun->lun_group; | ||
953 | for (i = 0; lun_cg->default_groups[i]; i++) { | ||
954 | df_item = &lun_cg->default_groups[i]->cg_item; | ||
955 | lun_cg->default_groups[i] = NULL; | ||
956 | config_item_put(df_item); | ||
957 | } | ||
958 | kfree(lun_cg->default_groups); | ||
959 | |||
795 | config_item_put(item); | 960 | config_item_put(item); |
796 | } | 961 | } |
797 | 962 | ||
@@ -946,6 +1111,15 @@ TF_CIT_SETUP(tpg, &target_fabric_tpg_item_ops, &target_fabric_tpg_group_ops, | |||
946 | 1111 | ||
947 | /* End of tfc_tpg_cit */ | 1112 | /* End of tfc_tpg_cit */ |
948 | 1113 | ||
1114 | /* Start of tfc_wwn_fabric_stats_cit */ | ||
1115 | /* | ||
1116 | * This is used as a placeholder for struct se_wwn->fabric_stat_group | ||
1117 | * to allow fabrics access to ->fabric_stat_group->default_groups[] | ||
1118 | */ | ||
1119 | TF_CIT_SETUP(wwn_fabric_stats, NULL, NULL, NULL); | ||
1120 | |||
1121 | /* End of tfc_wwn_fabric_stats_cit */ | ||
1122 | |||
949 | /* Start of tfc_wwn_cit */ | 1123 | /* Start of tfc_wwn_cit */ |
950 | 1124 | ||
951 | static struct config_group *target_fabric_make_wwn( | 1125 | static struct config_group *target_fabric_make_wwn( |
@@ -966,8 +1140,17 @@ static struct config_group *target_fabric_make_wwn( | |||
966 | return ERR_PTR(-EINVAL); | 1140 | return ERR_PTR(-EINVAL); |
967 | 1141 | ||
968 | wwn->wwn_tf = tf; | 1142 | wwn->wwn_tf = tf; |
1143 | /* | ||
1144 | * Setup default groups from pre-allocated wwn->wwn_default_groups | ||
1145 | */ | ||
1146 | wwn->wwn_group.default_groups = wwn->wwn_default_groups; | ||
1147 | wwn->wwn_group.default_groups[0] = &wwn->fabric_stat_group; | ||
1148 | wwn->wwn_group.default_groups[1] = NULL; | ||
1149 | |||
969 | config_group_init_type_name(&wwn->wwn_group, name, | 1150 | config_group_init_type_name(&wwn->wwn_group, name, |
970 | &TF_CIT_TMPL(tf)->tfc_tpg_cit); | 1151 | &TF_CIT_TMPL(tf)->tfc_tpg_cit); |
1152 | config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics", | ||
1153 | &TF_CIT_TMPL(tf)->tfc_wwn_fabric_stats_cit); | ||
971 | 1154 | ||
972 | return &wwn->wwn_group; | 1155 | return &wwn->wwn_group; |
973 | } | 1156 | } |
@@ -976,6 +1159,18 @@ static void target_fabric_drop_wwn( | |||
976 | struct config_group *group, | 1159 | struct config_group *group, |
977 | struct config_item *item) | 1160 | struct config_item *item) |
978 | { | 1161 | { |
1162 | struct se_wwn *wwn = container_of(to_config_group(item), | ||
1163 | struct se_wwn, wwn_group); | ||
1164 | struct config_item *df_item; | ||
1165 | struct config_group *cg = &wwn->wwn_group; | ||
1166 | int i; | ||
1167 | |||
1168 | for (i = 0; cg->default_groups[i]; i++) { | ||
1169 | df_item = &cg->default_groups[i]->cg_item; | ||
1170 | cg->default_groups[i] = NULL; | ||
1171 | config_item_put(df_item); | ||
1172 | } | ||
1173 | |||
979 | config_item_put(item); | 1174 | config_item_put(item); |
980 | } | 1175 | } |
981 | 1176 | ||
@@ -1015,9 +1210,11 @@ int target_fabric_setup_cits(struct target_fabric_configfs *tf) | |||
1015 | { | 1210 | { |
1016 | target_fabric_setup_discovery_cit(tf); | 1211 | target_fabric_setup_discovery_cit(tf); |
1017 | target_fabric_setup_wwn_cit(tf); | 1212 | target_fabric_setup_wwn_cit(tf); |
1213 | target_fabric_setup_wwn_fabric_stats_cit(tf); | ||
1018 | target_fabric_setup_tpg_cit(tf); | 1214 | target_fabric_setup_tpg_cit(tf); |
1019 | target_fabric_setup_tpg_base_cit(tf); | 1215 | target_fabric_setup_tpg_base_cit(tf); |
1020 | target_fabric_setup_tpg_port_cit(tf); | 1216 | target_fabric_setup_tpg_port_cit(tf); |
1217 | target_fabric_setup_tpg_port_stat_cit(tf); | ||
1021 | target_fabric_setup_tpg_lun_cit(tf); | 1218 | target_fabric_setup_tpg_lun_cit(tf); |
1022 | target_fabric_setup_tpg_np_cit(tf); | 1219 | target_fabric_setup_tpg_np_cit(tf); |
1023 | target_fabric_setup_tpg_np_base_cit(tf); | 1220 | target_fabric_setup_tpg_np_base_cit(tf); |
@@ -1028,7 +1225,9 @@ int target_fabric_setup_cits(struct target_fabric_configfs *tf) | |||
1028 | target_fabric_setup_tpg_nacl_attrib_cit(tf); | 1225 | target_fabric_setup_tpg_nacl_attrib_cit(tf); |
1029 | target_fabric_setup_tpg_nacl_auth_cit(tf); | 1226 | target_fabric_setup_tpg_nacl_auth_cit(tf); |
1030 | target_fabric_setup_tpg_nacl_param_cit(tf); | 1227 | target_fabric_setup_tpg_nacl_param_cit(tf); |
1228 | target_fabric_setup_tpg_nacl_stat_cit(tf); | ||
1031 | target_fabric_setup_tpg_mappedlun_cit(tf); | 1229 | target_fabric_setup_tpg_mappedlun_cit(tf); |
1230 | target_fabric_setup_tpg_mappedlun_stat_cit(tf); | ||
1032 | 1231 | ||
1033 | return 0; | 1232 | return 0; |
1034 | } | 1233 | } |
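
The target_core_fabric_configfs.c hunks repeat one lifecycle pattern for each new per-object "statistics" or "fabric_statistics" group: the ->make_*() path sets up a NULL-terminated default_groups pointer array (kzalloc()ed, or pre-allocated as in target_fabric_make_wwn()), and the matching ->drop_*() path walks that array, drops each child with config_item_put(), frees the array when it was dynamically allocated, and only then releases the item itself. A condensed sketch of the drop side, using a placeholder struct my_obj:

    #include <linux/configfs.h>
    #include <linux/slab.h>

    struct my_obj {
            struct config_group group;  /* parent directory holding default_groups[] */
    };

    static void my_drop_item(struct config_group *parent, struct config_item *item)
    {
            struct my_obj *obj = container_of(to_config_group(item),
                                              struct my_obj, group);
            struct config_group *cg = &obj->group;
            struct config_item *df_item;
            int i;

            for (i = 0; cg->default_groups[i]; i++) {
                    df_item = &cg->default_groups[i]->cg_item;
                    cg->default_groups[i] = NULL;   /* detach before dropping */
                    config_item_put(df_item);
            }
            kfree(cg->default_groups);

            config_item_put(item);                  /* finally release the object */
    }
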
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c index a3c695adabe..d57ad672677 100644 --- a/drivers/target/target_core_fabric_lib.c +++ b/drivers/target/target_core_fabric_lib.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <target/target_core_base.h> | 34 | #include <target/target_core_base.h> |
35 | #include <target/target_core_device.h> | 35 | #include <target/target_core_device.h> |
36 | #include <target/target_core_transport.h> | 36 | #include <target/target_core_transport.h> |
37 | #include <target/target_core_fabric_lib.h> | ||
37 | #include <target/target_core_fabric_ops.h> | 38 | #include <target/target_core_fabric_ops.h> |
38 | #include <target/target_core_configfs.h> | 39 | #include <target/target_core_configfs.h> |
39 | 40 | ||
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 190ca8ac249..02f553aef43 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c | |||
@@ -134,7 +134,7 @@ static struct se_device *fd_create_virtdevice( | |||
134 | mm_segment_t old_fs; | 134 | mm_segment_t old_fs; |
135 | struct file *file; | 135 | struct file *file; |
136 | struct inode *inode = NULL; | 136 | struct inode *inode = NULL; |
137 | int dev_flags = 0, flags; | 137 | int dev_flags = 0, flags, ret = -EINVAL; |
138 | 138 | ||
139 | memset(&dev_limits, 0, sizeof(struct se_dev_limits)); | 139 | memset(&dev_limits, 0, sizeof(struct se_dev_limits)); |
140 | 140 | ||
@@ -146,6 +146,7 @@ static struct se_device *fd_create_virtdevice( | |||
146 | if (IS_ERR(dev_p)) { | 146 | if (IS_ERR(dev_p)) { |
147 | printk(KERN_ERR "getname(%s) failed: %lu\n", | 147 | printk(KERN_ERR "getname(%s) failed: %lu\n", |
148 | fd_dev->fd_dev_name, IS_ERR(dev_p)); | 148 | fd_dev->fd_dev_name, IS_ERR(dev_p)); |
149 | ret = PTR_ERR(dev_p); | ||
149 | goto fail; | 150 | goto fail; |
150 | } | 151 | } |
151 | #if 0 | 152 | #if 0 |
@@ -165,8 +166,12 @@ static struct se_device *fd_create_virtdevice( | |||
165 | flags |= O_SYNC; | 166 | flags |= O_SYNC; |
166 | 167 | ||
167 | file = filp_open(dev_p, flags, 0600); | 168 | file = filp_open(dev_p, flags, 0600); |
168 | 169 | if (IS_ERR(file)) { | |
169 | if (IS_ERR(file) || !file || !file->f_dentry) { | 170 | printk(KERN_ERR "filp_open(%s) failed\n", dev_p); |
171 | ret = PTR_ERR(file); | ||
172 | goto fail; | ||
173 | } | ||
174 | if (!file || !file->f_dentry) { | ||
170 | printk(KERN_ERR "filp_open(%s) failed\n", dev_p); | 175 | printk(KERN_ERR "filp_open(%s) failed\n", dev_p); |
171 | goto fail; | 176 | goto fail; |
172 | } | 177 | } |
@@ -241,7 +246,7 @@ fail: | |||
241 | fd_dev->fd_file = NULL; | 246 | fd_dev->fd_file = NULL; |
242 | } | 247 | } |
243 | putname(dev_p); | 248 | putname(dev_p); |
244 | return NULL; | 249 | return ERR_PTR(ret); |
245 | } | 250 | } |
246 | 251 | ||
247 | /* fd_free_device(): (Part of se_subsystem_api_t template) | 252 | /* fd_free_device(): (Part of se_subsystem_api_t template) |
@@ -509,7 +514,7 @@ enum { | |||
509 | static match_table_t tokens = { | 514 | static match_table_t tokens = { |
510 | {Opt_fd_dev_name, "fd_dev_name=%s"}, | 515 | {Opt_fd_dev_name, "fd_dev_name=%s"}, |
511 | {Opt_fd_dev_size, "fd_dev_size=%s"}, | 516 | {Opt_fd_dev_size, "fd_dev_size=%s"}, |
512 | {Opt_fd_buffered_io, "fd_buffered_id=%d"}, | 517 | {Opt_fd_buffered_io, "fd_buffered_io=%d"}, |
513 | {Opt_err, NULL} | 518 | {Opt_err, NULL} |
514 | }; | 519 | }; |
515 | 520 | ||
@@ -536,15 +541,26 @@ static ssize_t fd_set_configfs_dev_params( | |||
536 | token = match_token(ptr, tokens, args); | 541 | token = match_token(ptr, tokens, args); |
537 | switch (token) { | 542 | switch (token) { |
538 | case Opt_fd_dev_name: | 543 | case Opt_fd_dev_name: |
544 | arg_p = match_strdup(&args[0]); | ||
545 | if (!arg_p) { | ||
546 | ret = -ENOMEM; | ||
547 | break; | ||
548 | } | ||
539 | snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME, | 549 | snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME, |
540 | "%s", match_strdup(&args[0])); | 550 | "%s", arg_p); |
551 | kfree(arg_p); | ||
541 | printk(KERN_INFO "FILEIO: Referencing Path: %s\n", | 552 | printk(KERN_INFO "FILEIO: Referencing Path: %s\n", |
542 | fd_dev->fd_dev_name); | 553 | fd_dev->fd_dev_name); |
543 | fd_dev->fbd_flags |= FBDF_HAS_PATH; | 554 | fd_dev->fbd_flags |= FBDF_HAS_PATH; |
544 | break; | 555 | break; |
545 | case Opt_fd_dev_size: | 556 | case Opt_fd_dev_size: |
546 | arg_p = match_strdup(&args[0]); | 557 | arg_p = match_strdup(&args[0]); |
558 | if (!arg_p) { | ||
559 | ret = -ENOMEM; | ||
560 | break; | ||
561 | } | ||
547 | ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size); | 562 | ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size); |
563 | kfree(arg_p); | ||
548 | if (ret < 0) { | 564 | if (ret < 0) { |
549 | printk(KERN_ERR "strict_strtoull() failed for" | 565 | printk(KERN_ERR "strict_strtoull() failed for" |
550 | " fd_dev_size=\n"); | 566 | " fd_dev_size=\n"); |
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c index 6ec51cbc018..0b8f8da8901 100644 --- a/drivers/target/target_core_hba.c +++ b/drivers/target/target_core_hba.c | |||
@@ -151,19 +151,8 @@ out_free_hba: | |||
151 | int | 151 | int |
152 | core_delete_hba(struct se_hba *hba) | 152 | core_delete_hba(struct se_hba *hba) |
153 | { | 153 | { |
154 | struct se_device *dev, *dev_tmp; | 154 | if (!list_empty(&hba->hba_dev_list)) |
155 | 155 | dump_stack(); | |
156 | spin_lock(&hba->device_lock); | ||
157 | list_for_each_entry_safe(dev, dev_tmp, &hba->hba_dev_list, dev_list) { | ||
158 | |||
159 | se_clear_dev_ports(dev); | ||
160 | spin_unlock(&hba->device_lock); | ||
161 | |||
162 | se_release_device_for_hba(dev); | ||
163 | |||
164 | spin_lock(&hba->device_lock); | ||
165 | } | ||
166 | spin_unlock(&hba->device_lock); | ||
167 | 156 | ||
168 | hba->transport->detach_hba(hba); | 157 | hba->transport->detach_hba(hba); |
169 | 158 | ||
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index eb0afec046e..86639004af9 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c | |||
@@ -129,10 +129,11 @@ static struct se_device *iblock_create_virtdevice( | |||
129 | struct request_queue *q; | 129 | struct request_queue *q; |
130 | struct queue_limits *limits; | 130 | struct queue_limits *limits; |
131 | u32 dev_flags = 0; | 131 | u32 dev_flags = 0; |
132 | int ret = -EINVAL; | ||
132 | 133 | ||
133 | if (!(ib_dev)) { | 134 | if (!(ib_dev)) { |
134 | printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n"); | 135 | printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n"); |
135 | return 0; | 136 | return ERR_PTR(ret); |
136 | } | 137 | } |
137 | memset(&dev_limits, 0, sizeof(struct se_dev_limits)); | 138 | memset(&dev_limits, 0, sizeof(struct se_dev_limits)); |
138 | /* | 139 | /* |
@@ -141,7 +142,7 @@ static struct se_device *iblock_create_virtdevice( | |||
141 | ib_dev->ibd_bio_set = bioset_create(32, 64); | 142 | ib_dev->ibd_bio_set = bioset_create(32, 64); |
142 | if (!(ib_dev->ibd_bio_set)) { | 143 | if (!(ib_dev->ibd_bio_set)) { |
143 | printk(KERN_ERR "IBLOCK: Unable to create bioset()\n"); | 144 | printk(KERN_ERR "IBLOCK: Unable to create bioset()\n"); |
144 | return 0; | 145 | return ERR_PTR(-ENOMEM); |
145 | } | 146 | } |
146 | printk(KERN_INFO "IBLOCK: Created bio_set()\n"); | 147 | printk(KERN_INFO "IBLOCK: Created bio_set()\n"); |
147 | /* | 148 | /* |
@@ -153,8 +154,10 @@ static struct se_device *iblock_create_virtdevice( | |||
153 | 154 | ||
154 | bd = blkdev_get_by_path(ib_dev->ibd_udev_path, | 155 | bd = blkdev_get_by_path(ib_dev->ibd_udev_path, |
155 | FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev); | 156 | FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev); |
156 | if (IS_ERR(bd)) | 157 | if (IS_ERR(bd)) { |
158 | ret = PTR_ERR(bd); | ||
157 | goto failed; | 159 | goto failed; |
160 | } | ||
158 | /* | 161 | /* |
159 | * Setup the local scope queue_limits from struct request_queue->limits | 162 | * Setup the local scope queue_limits from struct request_queue->limits |
160 | * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. | 163 | * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. |
@@ -184,9 +187,7 @@ static struct se_device *iblock_create_virtdevice( | |||
184 | * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM | 187 | * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM |
185 | * in ATA and we need to set TPE=1 | 188 | * in ATA and we need to set TPE=1 |
186 | */ | 189 | */ |
187 | if (blk_queue_discard(bdev_get_queue(bd))) { | 190 | if (blk_queue_discard(q)) { |
188 | struct request_queue *q = bdev_get_queue(bd); | ||
189 | |||
190 | DEV_ATTRIB(dev)->max_unmap_lba_count = | 191 | DEV_ATTRIB(dev)->max_unmap_lba_count = |
191 | q->limits.max_discard_sectors; | 192 | q->limits.max_discard_sectors; |
192 | /* | 193 | /* |
@@ -212,7 +213,7 @@ failed: | |||
212 | ib_dev->ibd_bd = NULL; | 213 | ib_dev->ibd_bd = NULL; |
213 | ib_dev->ibd_major = 0; | 214 | ib_dev->ibd_major = 0; |
214 | ib_dev->ibd_minor = 0; | 215 | ib_dev->ibd_minor = 0; |
215 | return NULL; | 216 | return ERR_PTR(ret); |
216 | } | 217 | } |
217 | 218 | ||
218 | static void iblock_free_device(void *p) | 219 | static void iblock_free_device(void *p) |
@@ -467,7 +468,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba, | |||
467 | const char *page, ssize_t count) | 468 | const char *page, ssize_t count) |
468 | { | 469 | { |
469 | struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr; | 470 | struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr; |
470 | char *orig, *ptr, *opts; | 471 | char *orig, *ptr, *arg_p, *opts; |
471 | substring_t args[MAX_OPT_ARGS]; | 472 | substring_t args[MAX_OPT_ARGS]; |
472 | int ret = 0, arg, token; | 473 | int ret = 0, arg, token; |
473 | 474 | ||
@@ -490,9 +491,14 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba, | |||
490 | ret = -EEXIST; | 491 | ret = -EEXIST; |
491 | goto out; | 492 | goto out; |
492 | } | 493 | } |
493 | 494 | arg_p = match_strdup(&args[0]); | |
494 | ret = snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN, | 495 | if (!arg_p) { |
495 | "%s", match_strdup(&args[0])); | 496 | ret = -ENOMEM; |
497 | break; | ||
498 | } | ||
499 | snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN, | ||
500 | "%s", arg_p); | ||
501 | kfree(arg_p); | ||
496 | printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n", | 502 | printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n", |
497 | ib_dev->ibd_udev_path); | 503 | ib_dev->ibd_udev_path); |
498 | ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH; | 504 | ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH; |
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 5a9d2ba4b60..7ff6a35f26a 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c | |||
@@ -441,6 +441,7 @@ static struct se_device *pscsi_create_type_disk( | |||
441 | struct pscsi_dev_virt *pdv, | 441 | struct pscsi_dev_virt *pdv, |
442 | struct se_subsystem_dev *se_dev, | 442 | struct se_subsystem_dev *se_dev, |
443 | struct se_hba *hba) | 443 | struct se_hba *hba) |
444 | __releases(sh->host_lock) | ||
444 | { | 445 | { |
445 | struct se_device *dev; | 446 | struct se_device *dev; |
446 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; | 447 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; |
@@ -488,6 +489,7 @@ static struct se_device *pscsi_create_type_rom( | |||
488 | struct pscsi_dev_virt *pdv, | 489 | struct pscsi_dev_virt *pdv, |
489 | struct se_subsystem_dev *se_dev, | 490 | struct se_subsystem_dev *se_dev, |
490 | struct se_hba *hba) | 491 | struct se_hba *hba) |
492 | __releases(sh->host_lock) | ||
491 | { | 493 | { |
492 | struct se_device *dev; | 494 | struct se_device *dev; |
493 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; | 495 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; |
@@ -522,6 +524,7 @@ static struct se_device *pscsi_create_type_other( | |||
522 | struct pscsi_dev_virt *pdv, | 524 | struct pscsi_dev_virt *pdv, |
523 | struct se_subsystem_dev *se_dev, | 525 | struct se_subsystem_dev *se_dev, |
524 | struct se_hba *hba) | 526 | struct se_hba *hba) |
527 | __releases(sh->host_lock) | ||
525 | { | 528 | { |
526 | struct se_device *dev; | 529 | struct se_device *dev; |
527 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; | 530 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; |
@@ -555,7 +558,7 @@ static struct se_device *pscsi_create_virtdevice( | |||
555 | if (!(pdv)) { | 558 | if (!(pdv)) { |
556 | printk(KERN_ERR "Unable to locate struct pscsi_dev_virt" | 559 | printk(KERN_ERR "Unable to locate struct pscsi_dev_virt" |
557 | " parameter\n"); | 560 | " parameter\n"); |
558 | return NULL; | 561 | return ERR_PTR(-EINVAL); |
559 | } | 562 | } |
560 | /* | 563 | /* |
561 | * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the | 564 | * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the |
@@ -565,7 +568,7 @@ static struct se_device *pscsi_create_virtdevice( | |||
565 | if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { | 568 | if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { |
566 | printk(KERN_ERR "pSCSI: Unable to locate struct" | 569 | printk(KERN_ERR "pSCSI: Unable to locate struct" |
567 | " Scsi_Host for PHV_LLD_SCSI_HOST_NO\n"); | 570 | " Scsi_Host for PHV_LLD_SCSI_HOST_NO\n"); |
568 | return NULL; | 571 | return ERR_PTR(-ENODEV); |
569 | } | 572 | } |
570 | /* | 573 | /* |
571 | * For the newer PHV_VIRUTAL_HOST_ID struct scsi_device | 574 | * For the newer PHV_VIRUTAL_HOST_ID struct scsi_device |
@@ -574,7 +577,7 @@ static struct se_device *pscsi_create_virtdevice( | |||
574 | if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) { | 577 | if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) { |
575 | printk(KERN_ERR "pSCSI: udev_path attribute has not" | 578 | printk(KERN_ERR "pSCSI: udev_path attribute has not" |
576 | " been set before ENABLE=1\n"); | 579 | " been set before ENABLE=1\n"); |
577 | return NULL; | 580 | return ERR_PTR(-EINVAL); |
578 | } | 581 | } |
579 | /* | 582 | /* |
580 | * If no scsi_host_id= was passed for PHV_VIRUTAL_HOST_ID, | 583 | * If no scsi_host_id= was passed for PHV_VIRUTAL_HOST_ID, |
@@ -587,12 +590,12 @@ static struct se_device *pscsi_create_virtdevice( | |||
587 | printk(KERN_ERR "pSCSI: Unable to set hba_mode" | 590 | printk(KERN_ERR "pSCSI: Unable to set hba_mode" |
588 | " with active devices\n"); | 591 | " with active devices\n"); |
589 | spin_unlock(&hba->device_lock); | 592 | spin_unlock(&hba->device_lock); |
590 | return NULL; | 593 | return ERR_PTR(-EEXIST); |
591 | } | 594 | } |
592 | spin_unlock(&hba->device_lock); | 595 | spin_unlock(&hba->device_lock); |
593 | 596 | ||
594 | if (pscsi_pmode_enable_hba(hba, 1) != 1) | 597 | if (pscsi_pmode_enable_hba(hba, 1) != 1) |
595 | return NULL; | 598 | return ERR_PTR(-ENODEV); |
596 | 599 | ||
597 | legacy_mode_enable = 1; | 600 | legacy_mode_enable = 1; |
598 | hba->hba_flags |= HBA_FLAGS_PSCSI_MODE; | 601 | hba->hba_flags |= HBA_FLAGS_PSCSI_MODE; |
@@ -602,14 +605,14 @@ static struct se_device *pscsi_create_virtdevice( | |||
602 | if (!(sh)) { | 605 | if (!(sh)) { |
603 | printk(KERN_ERR "pSCSI: Unable to locate" | 606 | printk(KERN_ERR "pSCSI: Unable to locate" |
604 | " pdv_host_id: %d\n", pdv->pdv_host_id); | 607 | " pdv_host_id: %d\n", pdv->pdv_host_id); |
605 | return NULL; | 608 | return ERR_PTR(-ENODEV); |
606 | } | 609 | } |
607 | } | 610 | } |
608 | } else { | 611 | } else { |
609 | if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) { | 612 | if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) { |
610 | printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while" | 613 | printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while" |
611 | " struct Scsi_Host exists\n"); | 614 | " struct Scsi_Host exists\n"); |
612 | return NULL; | 615 | return ERR_PTR(-EEXIST); |
613 | } | 616 | } |
614 | } | 617 | } |
615 | 618 | ||
@@ -644,7 +647,7 @@ static struct se_device *pscsi_create_virtdevice( | |||
644 | hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; | 647 | hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; |
645 | } | 648 | } |
646 | pdv->pdv_sd = NULL; | 649 | pdv->pdv_sd = NULL; |
647 | return NULL; | 650 | return ERR_PTR(-ENODEV); |
648 | } | 651 | } |
649 | return dev; | 652 | return dev; |
650 | } | 653 | } |
@@ -660,7 +663,7 @@ static struct se_device *pscsi_create_virtdevice( | |||
660 | hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; | 663 | hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; |
661 | } | 664 | } |
662 | 665 | ||
663 | return NULL; | 666 | return ERR_PTR(-ENODEV); |
664 | } | 667 | } |
665 | 668 | ||
666 | /* pscsi_free_device(): (Part of se_subsystem_api_t template) | 669 | /* pscsi_free_device(): (Part of se_subsystem_api_t template) |
@@ -816,6 +819,7 @@ pscsi_alloc_task(struct se_cmd *cmd) | |||
816 | if (!(pt->pscsi_cdb)) { | 819 | if (!(pt->pscsi_cdb)) { |
817 | printk(KERN_ERR "pSCSI: Unable to allocate extended" | 820 | printk(KERN_ERR "pSCSI: Unable to allocate extended" |
818 | " pt->pscsi_cdb\n"); | 821 | " pt->pscsi_cdb\n"); |
822 | kfree(pt); | ||
819 | return NULL; | 823 | return NULL; |
820 | } | 824 | } |
821 | } else | 825 | } else |
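
The short pscsi_alloc_task() hunk above plugs a leak: when the extended CDB buffer cannot be allocated, the already-allocated task descriptor must be freed before bailing out. The general shape of that cleanup, with hypothetical names:

    #include <linux/slab.h>

    struct my_task {
            unsigned char *cdb;
    };

    static struct my_task *my_alloc_task(size_t cdb_len)
    {
            struct my_task *t = kzalloc(sizeof(*t), GFP_KERNEL);

            if (!t)
                    return NULL;

            t->cdb = kzalloc(cdb_len, GFP_KERNEL);
            if (!t->cdb) {
                    kfree(t);       /* do not leak the partially built task */
                    return NULL;
            }
            return t;
    }
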
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 8dc6d74c1d4..7837dd365a9 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c | |||
@@ -150,7 +150,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev) | |||
150 | if (rd_dev->rd_page_count <= 0) { | 150 | if (rd_dev->rd_page_count <= 0) { |
151 | printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n", | 151 | printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n", |
152 | rd_dev->rd_page_count); | 152 | rd_dev->rd_page_count); |
153 | return -1; | 153 | return -EINVAL; |
154 | } | 154 | } |
155 | total_sg_needed = rd_dev->rd_page_count; | 155 | total_sg_needed = rd_dev->rd_page_count; |
156 | 156 | ||
@@ -160,7 +160,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev) | |||
160 | if (!(sg_table)) { | 160 | if (!(sg_table)) { |
161 | printk(KERN_ERR "Unable to allocate memory for Ramdisk" | 161 | printk(KERN_ERR "Unable to allocate memory for Ramdisk" |
162 | " scatterlist tables\n"); | 162 | " scatterlist tables\n"); |
163 | return -1; | 163 | return -ENOMEM; |
164 | } | 164 | } |
165 | 165 | ||
166 | rd_dev->sg_table_array = sg_table; | 166 | rd_dev->sg_table_array = sg_table; |
@@ -175,7 +175,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev) | |||
175 | if (!(sg)) { | 175 | if (!(sg)) { |
176 | printk(KERN_ERR "Unable to allocate scatterlist array" | 176 | printk(KERN_ERR "Unable to allocate scatterlist array" |
177 | " for struct rd_dev\n"); | 177 | " for struct rd_dev\n"); |
178 | return -1; | 178 | return -ENOMEM; |
179 | } | 179 | } |
180 | 180 | ||
181 | sg_init_table((struct scatterlist *)&sg[0], sg_per_table); | 181 | sg_init_table((struct scatterlist *)&sg[0], sg_per_table); |
@@ -191,7 +191,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev) | |||
191 | if (!(pg)) { | 191 | if (!(pg)) { |
192 | printk(KERN_ERR "Unable to allocate scatterlist" | 192 | printk(KERN_ERR "Unable to allocate scatterlist" |
193 | " pages for struct rd_dev_sg_table\n"); | 193 | " pages for struct rd_dev_sg_table\n"); |
194 | return -1; | 194 | return -ENOMEM; |
195 | } | 195 | } |
196 | sg_assign_page(&sg[j], pg); | 196 | sg_assign_page(&sg[j], pg); |
197 | sg[j].length = PAGE_SIZE; | 197 | sg[j].length = PAGE_SIZE; |
@@ -253,12 +253,13 @@ static struct se_device *rd_create_virtdevice( | |||
253 | struct se_dev_limits dev_limits; | 253 | struct se_dev_limits dev_limits; |
254 | struct rd_dev *rd_dev = p; | 254 | struct rd_dev *rd_dev = p; |
255 | struct rd_host *rd_host = hba->hba_ptr; | 255 | struct rd_host *rd_host = hba->hba_ptr; |
256 | int dev_flags = 0; | 256 | int dev_flags = 0, ret; |
257 | char prod[16], rev[4]; | 257 | char prod[16], rev[4]; |
258 | 258 | ||
259 | memset(&dev_limits, 0, sizeof(struct se_dev_limits)); | 259 | memset(&dev_limits, 0, sizeof(struct se_dev_limits)); |
260 | 260 | ||
261 | if (rd_build_device_space(rd_dev) < 0) | 261 | ret = rd_build_device_space(rd_dev); |
262 | if (ret < 0) | ||
262 | goto fail; | 263 | goto fail; |
263 | 264 | ||
264 | snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP"); | 265 | snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP"); |
@@ -292,7 +293,7 @@ static struct se_device *rd_create_virtdevice( | |||
292 | 293 | ||
293 | fail: | 294 | fail: |
294 | rd_release_device_space(rd_dev); | 295 | rd_release_device_space(rd_dev); |
295 | return NULL; | 296 | return ERR_PTR(ret); |
296 | } | 297 | } |
297 | 298 | ||
298 | static struct se_device *rd_DIRECT_create_virtdevice( | 299 | static struct se_device *rd_DIRECT_create_virtdevice( |
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h index 13badfbaf9c..3ea19e29d8e 100644 --- a/drivers/target/target_core_rd.h +++ b/drivers/target/target_core_rd.h | |||
@@ -14,8 +14,6 @@ | |||
14 | #define RD_BLOCKSIZE 512 | 14 | #define RD_BLOCKSIZE 512 |
15 | #define RD_MAX_SECTORS 1024 | 15 | #define RD_MAX_SECTORS 1024 |
16 | 16 | ||
17 | extern struct kmem_cache *se_mem_cache; | ||
18 | |||
19 | /* Used in target_core_init_configfs() for virtual LUN 0 access */ | 17 | /* Used in target_core_init_configfs() for virtual LUN 0 access */ |
20 | int __init rd_module_init(void); | 18 | int __init rd_module_init(void); |
21 | void rd_module_exit(void); | 19 | void rd_module_exit(void); |
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c new file mode 100644 index 00000000000..5e3a067a747 --- /dev/null +++ b/drivers/target/target_core_stat.c | |||
@@ -0,0 +1,1810 @@ | |||
1 | /******************************************************************************* | ||
2 | * Filename: target_core_stat.c | ||
3 | * | ||
4 | * Copyright (c) 2011 Rising Tide Systems | ||
5 | * Copyright (c) 2011 Linux-iSCSI.org | ||
6 | * | ||
7 | * Modern ConfigFS group context specific statistics based on original | ||
8 | * target_core_mib.c code | ||
9 | * | ||
10 | * Copyright (c) 2006-2007 SBE, Inc. All Rights Reserved. | ||
11 | * | ||
12 | * Nicholas A. Bellinger <nab@linux-iscsi.org> | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify | ||
15 | * it under the terms of the GNU General Public License as published by | ||
16 | * the Free Software Foundation; either version 2 of the License, or | ||
17 | * (at your option) any later version. | ||
18 | * | ||
19 | * This program is distributed in the hope that it will be useful, | ||
20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
22 | * GNU General Public License for more details. | ||
23 | * | ||
24 | * You should have received a copy of the GNU General Public License | ||
25 | * along with this program; if not, write to the Free Software | ||
26 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
27 | * | ||
28 | ******************************************************************************/ | ||
29 | |||
30 | #include <linux/kernel.h> | ||
31 | #include <linux/module.h> | ||
32 | #include <linux/delay.h> | ||
33 | #include <linux/timer.h> | ||
34 | #include <linux/string.h> | ||
35 | #include <linux/version.h> | ||
36 | #include <generated/utsrelease.h> | ||
37 | #include <linux/utsname.h> | ||
38 | #include <linux/proc_fs.h> | ||
39 | #include <linux/seq_file.h> | ||
40 | #include <linux/blkdev.h> | ||
41 | #include <linux/configfs.h> | ||
42 | #include <scsi/scsi.h> | ||
43 | #include <scsi/scsi_device.h> | ||
44 | #include <scsi/scsi_host.h> | ||
45 | |||
46 | #include <target/target_core_base.h> | ||
47 | #include <target/target_core_transport.h> | ||
48 | #include <target/target_core_fabric_ops.h> | ||
49 | #include <target/target_core_configfs.h> | ||
50 | #include <target/configfs_macros.h> | ||
51 | |||
52 | #include "target_core_hba.h" | ||
53 | |||
54 | #ifndef INITIAL_JIFFIES | ||
55 | #define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) | ||
56 | #endif | ||
57 | |||
58 | #define NONE "None" | ||
59 | #define ISPRINT(a) ((a >= ' ') && (a <= '~')) | ||
60 | |||
61 | #define SCSI_LU_INDEX 1 | ||
62 | #define LU_COUNT 1 | ||
63 | |||
64 | /* | ||
65 | * SCSI Device Table | ||
66 | */ | ||
67 | |||
68 | CONFIGFS_EATTR_STRUCT(target_stat_scsi_dev, se_dev_stat_grps); | ||
69 | #define DEV_STAT_SCSI_DEV_ATTR(_name, _mode) \ | ||
70 | static struct target_stat_scsi_dev_attribute \ | ||
71 | target_stat_scsi_dev_##_name = \ | ||
72 | __CONFIGFS_EATTR(_name, _mode, \ | ||
73 | target_stat_scsi_dev_show_attr_##_name, \ | ||
74 | target_stat_scsi_dev_store_attr_##_name); | ||
75 | |||
76 | #define DEV_STAT_SCSI_DEV_ATTR_RO(_name) \ | ||
77 | static struct target_stat_scsi_dev_attribute \ | ||
78 | target_stat_scsi_dev_##_name = \ | ||
79 | __CONFIGFS_EATTR_RO(_name, \ | ||
80 | target_stat_scsi_dev_show_attr_##_name); | ||
81 | |||
82 | static ssize_t target_stat_scsi_dev_show_attr_inst( | ||
83 | struct se_dev_stat_grps *sgrps, char *page) | ||
84 | { | ||
85 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
86 | struct se_subsystem_dev, dev_stat_grps); | ||
87 | struct se_hba *hba = se_subdev->se_dev_hba; | ||
88 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
89 | |||
90 | if (!dev) | ||
91 | return -ENODEV; | ||
92 | |||
93 | return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); | ||
94 | } | ||
95 | DEV_STAT_SCSI_DEV_ATTR_RO(inst); | ||
96 | |||
97 | static ssize_t target_stat_scsi_dev_show_attr_indx( | ||
98 | struct se_dev_stat_grps *sgrps, char *page) | ||
99 | { | ||
100 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
101 | struct se_subsystem_dev, dev_stat_grps); | ||
102 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
103 | |||
104 | if (!dev) | ||
105 | return -ENODEV; | ||
106 | |||
107 | return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); | ||
108 | } | ||
109 | DEV_STAT_SCSI_DEV_ATTR_RO(indx); | ||
110 | |||
111 | static ssize_t target_stat_scsi_dev_show_attr_role( | ||
112 | struct se_dev_stat_grps *sgrps, char *page) | ||
113 | { | ||
114 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
115 | struct se_subsystem_dev, dev_stat_grps); | ||
116 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
117 | |||
118 | if (!dev) | ||
119 | return -ENODEV; | ||
120 | |||
121 | return snprintf(page, PAGE_SIZE, "Target\n"); | ||
122 | } | ||
123 | DEV_STAT_SCSI_DEV_ATTR_RO(role); | ||
124 | |||
125 | static ssize_t target_stat_scsi_dev_show_attr_ports( | ||
126 | struct se_dev_stat_grps *sgrps, char *page) | ||
127 | { | ||
128 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
129 | struct se_subsystem_dev, dev_stat_grps); | ||
130 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
131 | |||
132 | if (!dev) | ||
133 | return -ENODEV; | ||
134 | |||
135 | return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_port_count); | ||
136 | } | ||
137 | DEV_STAT_SCSI_DEV_ATTR_RO(ports); | ||
138 | |||
139 | CONFIGFS_EATTR_OPS(target_stat_scsi_dev, se_dev_stat_grps, scsi_dev_group); | ||
140 | |||
141 | static struct configfs_attribute *target_stat_scsi_dev_attrs[] = { | ||
142 | &target_stat_scsi_dev_inst.attr, | ||
143 | &target_stat_scsi_dev_indx.attr, | ||
144 | &target_stat_scsi_dev_role.attr, | ||
145 | &target_stat_scsi_dev_ports.attr, | ||
146 | NULL, | ||
147 | }; | ||
148 | |||
149 | static struct configfs_item_operations target_stat_scsi_dev_attrib_ops = { | ||
150 | .show_attribute = target_stat_scsi_dev_attr_show, | ||
151 | .store_attribute = target_stat_scsi_dev_attr_store, | ||
152 | }; | ||
153 | |||
154 | static struct config_item_type target_stat_scsi_dev_cit = { | ||
155 | .ct_item_ops = &target_stat_scsi_dev_attrib_ops, | ||
156 | .ct_attrs = target_stat_scsi_dev_attrs, | ||
157 | .ct_owner = THIS_MODULE, | ||
158 | }; | ||
159 | |||
160 | /* | ||
161 | * SCSI Target Device Table | ||
162 | */ | ||
163 | |||
164 | CONFIGFS_EATTR_STRUCT(target_stat_scsi_tgt_dev, se_dev_stat_grps); | ||
165 | #define DEV_STAT_SCSI_TGT_DEV_ATTR(_name, _mode) \ | ||
166 | static struct target_stat_scsi_tgt_dev_attribute \ | ||
167 | target_stat_scsi_tgt_dev_##_name = \ | ||
168 | __CONFIGFS_EATTR(_name, _mode, \ | ||
169 | target_stat_scsi_tgt_dev_show_attr_##_name, \ | ||
170 | target_stat_scsi_tgt_dev_store_attr_##_name); | ||
171 | |||
172 | #define DEV_STAT_SCSI_TGT_DEV_ATTR_RO(_name) \ | ||
173 | static struct target_stat_scsi_tgt_dev_attribute \ | ||
174 | target_stat_scsi_tgt_dev_##_name = \ | ||
175 | __CONFIGFS_EATTR_RO(_name, \ | ||
176 | target_stat_scsi_tgt_dev_show_attr_##_name); | ||
177 | |||
178 | static ssize_t target_stat_scsi_tgt_dev_show_attr_inst( | ||
179 | struct se_dev_stat_grps *sgrps, char *page) | ||
180 | { | ||
181 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
182 | struct se_subsystem_dev, dev_stat_grps); | ||
183 | struct se_hba *hba = se_subdev->se_dev_hba; | ||
184 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
185 | |||
186 | if (!dev) | ||
187 | return -ENODEV; | ||
188 | |||
189 | return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); | ||
190 | } | ||
191 | DEV_STAT_SCSI_TGT_DEV_ATTR_RO(inst); | ||
192 | |||
193 | static ssize_t target_stat_scsi_tgt_dev_show_attr_indx( | ||
194 | struct se_dev_stat_grps *sgrps, char *page) | ||
195 | { | ||
196 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
197 | struct se_subsystem_dev, dev_stat_grps); | ||
198 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
199 | |||
200 | if (!dev) | ||
201 | return -ENODEV; | ||
202 | |||
203 | return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); | ||
204 | } | ||
205 | DEV_STAT_SCSI_TGT_DEV_ATTR_RO(indx); | ||
206 | |||
207 | static ssize_t target_stat_scsi_tgt_dev_show_attr_num_lus( | ||
208 | struct se_dev_stat_grps *sgrps, char *page) | ||
209 | { | ||
210 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
211 | struct se_subsystem_dev, dev_stat_grps); | ||
212 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
213 | |||
214 | if (!dev) | ||
215 | return -ENODEV; | ||
216 | |||
217 | return snprintf(page, PAGE_SIZE, "%u\n", LU_COUNT); | ||
218 | } | ||
219 | DEV_STAT_SCSI_TGT_DEV_ATTR_RO(num_lus); | ||
220 | |||
221 | static ssize_t target_stat_scsi_tgt_dev_show_attr_status( | ||
222 | struct se_dev_stat_grps *sgrps, char *page) | ||
223 | { | ||
224 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
225 | struct se_subsystem_dev, dev_stat_grps); | ||
226 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
227 | char status[16]; | ||
228 | |||
229 | if (!dev) | ||
230 | return -ENODEV; | ||
231 | |||
232 | switch (dev->dev_status) { | ||
233 | case TRANSPORT_DEVICE_ACTIVATED: | ||
234 | strcpy(status, "activated"); | ||
235 | break; | ||
236 | case TRANSPORT_DEVICE_DEACTIVATED: | ||
237 | strcpy(status, "deactivated"); | ||
238 | break; | ||
239 | case TRANSPORT_DEVICE_SHUTDOWN: | ||
240 | strcpy(status, "shutdown"); | ||
241 | break; | ||
242 | case TRANSPORT_DEVICE_OFFLINE_ACTIVATED: | ||
243 | case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED: | ||
244 | strcpy(status, "offline"); | ||
245 | break; | ||
246 | default: | ||
247 | sprintf(status, "unknown(%d)", dev->dev_status); | ||
248 | break; | ||
249 | } | ||
250 | |||
251 | return snprintf(page, PAGE_SIZE, "%s\n", status); | ||
252 | } | ||
253 | DEV_STAT_SCSI_TGT_DEV_ATTR_RO(status); | ||
254 | |||
255 | static ssize_t target_stat_scsi_tgt_dev_show_attr_non_access_lus( | ||
256 | struct se_dev_stat_grps *sgrps, char *page) | ||
257 | { | ||
258 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
259 | struct se_subsystem_dev, dev_stat_grps); | ||
260 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
261 | int non_accessible_lus; | ||
262 | |||
263 | if (!dev) | ||
264 | return -ENODEV; | ||
265 | |||
266 | switch (dev->dev_status) { | ||
267 | case TRANSPORT_DEVICE_ACTIVATED: | ||
268 | non_accessible_lus = 0; | ||
269 | break; | ||
270 | case TRANSPORT_DEVICE_DEACTIVATED: | ||
271 | case TRANSPORT_DEVICE_SHUTDOWN: | ||
272 | case TRANSPORT_DEVICE_OFFLINE_ACTIVATED: | ||
273 | case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED: | ||
274 | default: | ||
275 | non_accessible_lus = 1; | ||
276 | break; | ||
277 | } | ||
278 | |||
279 | return snprintf(page, PAGE_SIZE, "%u\n", non_accessible_lus); | ||
280 | } | ||
281 | DEV_STAT_SCSI_TGT_DEV_ATTR_RO(non_access_lus); | ||
282 | |||
283 | static ssize_t target_stat_scsi_tgt_dev_show_attr_resets( | ||
284 | struct se_dev_stat_grps *sgrps, char *page) | ||
285 | { | ||
286 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
287 | struct se_subsystem_dev, dev_stat_grps); | ||
288 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
289 | |||
290 | if (!dev) | ||
291 | return -ENODEV; | ||
292 | |||
293 | return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets); | ||
294 | } | ||
295 | DEV_STAT_SCSI_TGT_DEV_ATTR_RO(resets); | ||
296 | |||
297 | |||
298 | CONFIGFS_EATTR_OPS(target_stat_scsi_tgt_dev, se_dev_stat_grps, scsi_tgt_dev_group); | ||
299 | |||
300 | static struct configfs_attribute *target_stat_scsi_tgt_dev_attrs[] = { | ||
301 | &target_stat_scsi_tgt_dev_inst.attr, | ||
302 | &target_stat_scsi_tgt_dev_indx.attr, | ||
303 | &target_stat_scsi_tgt_dev_num_lus.attr, | ||
304 | &target_stat_scsi_tgt_dev_status.attr, | ||
305 | &target_stat_scsi_tgt_dev_non_access_lus.attr, | ||
306 | &target_stat_scsi_tgt_dev_resets.attr, | ||
307 | NULL, | ||
308 | }; | ||
309 | |||
310 | static struct configfs_item_operations target_stat_scsi_tgt_dev_attrib_ops = { | ||
311 | .show_attribute = target_stat_scsi_tgt_dev_attr_show, | ||
312 | .store_attribute = target_stat_scsi_tgt_dev_attr_store, | ||
313 | }; | ||
314 | |||
315 | static struct config_item_type target_stat_scsi_tgt_dev_cit = { | ||
316 | .ct_item_ops = &target_stat_scsi_tgt_dev_attrib_ops, | ||
317 | .ct_attrs = target_stat_scsi_tgt_dev_attrs, | ||
318 | .ct_owner = THIS_MODULE, | ||
319 | }; | ||
320 | |||
321 | /* | ||
322 | * SCSI Logical Unit Table | ||
323 | */ | ||
324 | |||
325 | CONFIGFS_EATTR_STRUCT(target_stat_scsi_lu, se_dev_stat_grps); | ||
326 | #define DEV_STAT_SCSI_LU_ATTR(_name, _mode) \ | ||
327 | static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_##_name = \ | ||
328 | __CONFIGFS_EATTR(_name, _mode, \ | ||
329 | target_stat_scsi_lu_show_attr_##_name, \ | ||
330 | target_stat_scsi_lu_store_attr_##_name); | ||
331 | |||
332 | #define DEV_STAT_SCSI_LU_ATTR_RO(_name) \ | ||
333 | static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_##_name = \ | ||
334 | __CONFIGFS_EATTR_RO(_name, \ | ||
335 | target_stat_scsi_lu_show_attr_##_name); | ||
336 | |||
337 | static ssize_t target_stat_scsi_lu_show_attr_inst( | ||
338 | struct se_dev_stat_grps *sgrps, char *page) | ||
339 | { | ||
340 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
341 | struct se_subsystem_dev, dev_stat_grps); | ||
342 | struct se_hba *hba = se_subdev->se_dev_hba; | ||
343 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
344 | |||
345 | if (!dev) | ||
346 | return -ENODEV; | ||
347 | |||
348 | return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); | ||
349 | } | ||
350 | DEV_STAT_SCSI_LU_ATTR_RO(inst); | ||
351 | |||
352 | static ssize_t target_stat_scsi_lu_show_attr_dev( | ||
353 | struct se_dev_stat_grps *sgrps, char *page) | ||
354 | { | ||
355 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
356 | struct se_subsystem_dev, dev_stat_grps); | ||
357 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
358 | |||
359 | if (!dev) | ||
360 | return -ENODEV; | ||
361 | |||
362 | return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); | ||
363 | } | ||
364 | DEV_STAT_SCSI_LU_ATTR_RO(dev); | ||
365 | |||
366 | static ssize_t target_stat_scsi_lu_show_attr_indx( | ||
367 | struct se_dev_stat_grps *sgrps, char *page) | ||
368 | { | ||
369 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
370 | struct se_subsystem_dev, dev_stat_grps); | ||
371 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
372 | |||
373 | if (!dev) | ||
374 | return -ENODEV; | ||
375 | |||
376 | return snprintf(page, PAGE_SIZE, "%u\n", SCSI_LU_INDEX); | ||
377 | } | ||
378 | DEV_STAT_SCSI_LU_ATTR_RO(indx); | ||
379 | |||
380 | static ssize_t target_stat_scsi_lu_show_attr_lun( | ||
381 | struct se_dev_stat_grps *sgrps, char *page) | ||
382 | { | ||
383 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
384 | struct se_subsystem_dev, dev_stat_grps); | ||
385 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
386 | |||
387 | if (!dev) | ||
388 | return -ENODEV; | ||
389 | /* FIXME: scsiLuDefaultLun */ | ||
390 | return snprintf(page, PAGE_SIZE, "%llu\n", (unsigned long long)0); | ||
391 | } | ||
392 | DEV_STAT_SCSI_LU_ATTR_RO(lun); | ||
393 | |||
394 | static ssize_t target_stat_scsi_lu_show_attr_lu_name( | ||
395 | struct se_dev_stat_grps *sgrps, char *page) | ||
396 | { | ||
397 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
398 | struct se_subsystem_dev, dev_stat_grps); | ||
399 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
400 | |||
401 | if (!dev) | ||
402 | return -ENODEV; | ||
403 | /* scsiLuWwnName */ | ||
404 | return snprintf(page, PAGE_SIZE, "%s\n", | ||
405 | (strlen(DEV_T10_WWN(dev)->unit_serial)) ? | ||
406 | (char *)&DEV_T10_WWN(dev)->unit_serial[0] : "None"); | ||
407 | } | ||
408 | DEV_STAT_SCSI_LU_ATTR_RO(lu_name); | ||
409 | |||
410 | static ssize_t target_stat_scsi_lu_show_attr_vend( | ||
411 | struct se_dev_stat_grps *sgrps, char *page) | ||
412 | { | ||
413 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
414 | struct se_subsystem_dev, dev_stat_grps); | ||
415 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
416 | int j; | ||
417 | char str[28]; | ||
418 | |||
419 | if (!dev) | ||
420 | return -ENODEV; | ||
421 | /* scsiLuVendorId */ | ||
422 | memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); | ||
423 | for (j = 0; j < 8; j++) | ||
424 | str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ? | ||
425 | DEV_T10_WWN(dev)->vendor[j] : 0x20; | ||
426 | str[8] = 0; | ||
427 | return snprintf(page, PAGE_SIZE, "%s\n", str); | ||
428 | } | ||
429 | DEV_STAT_SCSI_LU_ATTR_RO(vend); | ||
430 | |||
431 | static ssize_t target_stat_scsi_lu_show_attr_prod( | ||
432 | struct se_dev_stat_grps *sgrps, char *page) | ||
433 | { | ||
434 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
435 | struct se_subsystem_dev, dev_stat_grps); | ||
436 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
437 | int j; | ||
438 | char str[28]; | ||
439 | |||
440 | if (!dev) | ||
441 | return -ENODEV; | ||
442 | |||
443 | /* scsiLuProductId */ | ||
444 | memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); | ||
445 | for (j = 0; j < 16; j++) | ||
446 | str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ? | ||
447 | DEV_T10_WWN(dev)->model[j] : 0x20; | ||
448 | str[16] = 0; | ||
449 | return snprintf(page, PAGE_SIZE, "%s\n", str); | ||
450 | } | ||
451 | DEV_STAT_SCSI_LU_ATTR_RO(prod); | ||
452 | |||
453 | static ssize_t target_stat_scsi_lu_show_attr_rev( | ||
454 | struct se_dev_stat_grps *sgrps, char *page) | ||
455 | { | ||
456 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
457 | struct se_subsystem_dev, dev_stat_grps); | ||
458 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
459 | int j; | ||
460 | char str[28]; | ||
461 | |||
462 | if (!dev) | ||
463 | return -ENODEV; | ||
464 | |||
465 | /* scsiLuRevisionId */ | ||
466 | memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); | ||
467 | for (j = 0; j < 4; j++) | ||
468 | str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ? | ||
469 | DEV_T10_WWN(dev)->revision[j] : 0x20; | ||
470 | str[4] = 0; | ||
471 | return snprintf(page, PAGE_SIZE, "%s\n", str); | ||
472 | } | ||
473 | DEV_STAT_SCSI_LU_ATTR_RO(rev); | ||
474 | |||
475 | static ssize_t target_stat_scsi_lu_show_attr_dev_type( | ||
476 | struct se_dev_stat_grps *sgrps, char *page) | ||
477 | { | ||
478 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
479 | struct se_subsystem_dev, dev_stat_grps); | ||
480 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
481 | |||
482 | if (!dev) | ||
483 | return -ENODEV; | ||
484 | |||
485 | /* scsiLuPeripheralType */ | ||
486 | return snprintf(page, PAGE_SIZE, "%u\n", | ||
487 | TRANSPORT(dev)->get_device_type(dev)); | ||
488 | } | ||
489 | DEV_STAT_SCSI_LU_ATTR_RO(dev_type); | ||
490 | |||
491 | static ssize_t target_stat_scsi_lu_show_attr_status( | ||
492 | struct se_dev_stat_grps *sgrps, char *page) | ||
493 | { | ||
494 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
495 | struct se_subsystem_dev, dev_stat_grps); | ||
496 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
497 | |||
498 | if (!dev) | ||
499 | return -ENODEV; | ||
500 | |||
501 | /* scsiLuStatus */ | ||
502 | return snprintf(page, PAGE_SIZE, "%s\n", | ||
503 | (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ? | ||
504 | "available" : "notavailable"); | ||
505 | } | ||
506 | DEV_STAT_SCSI_LU_ATTR_RO(status); | ||
507 | |||
508 | static ssize_t target_stat_scsi_lu_show_attr_state_bit( | ||
509 | struct se_dev_stat_grps *sgrps, char *page) | ||
510 | { | ||
511 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
512 | struct se_subsystem_dev, dev_stat_grps); | ||
513 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
514 | |||
515 | if (!dev) | ||
516 | return -ENODEV; | ||
517 | |||
518 | /* scsiLuState */ | ||
519 | return snprintf(page, PAGE_SIZE, "exposed\n"); | ||
520 | } | ||
521 | DEV_STAT_SCSI_LU_ATTR_RO(state_bit); | ||
522 | |||
523 | static ssize_t target_stat_scsi_lu_show_attr_num_cmds( | ||
524 | struct se_dev_stat_grps *sgrps, char *page) | ||
525 | { | ||
526 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
527 | struct se_subsystem_dev, dev_stat_grps); | ||
528 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
529 | |||
530 | if (!dev) | ||
531 | return -ENODEV; | ||
532 | |||
533 | /* scsiLuNumCommands */ | ||
534 | return snprintf(page, PAGE_SIZE, "%llu\n", | ||
535 | (unsigned long long)dev->num_cmds); | ||
536 | } | ||
537 | DEV_STAT_SCSI_LU_ATTR_RO(num_cmds); | ||
538 | |||
539 | static ssize_t target_stat_scsi_lu_show_attr_read_mbytes( | ||
540 | struct se_dev_stat_grps *sgrps, char *page) | ||
541 | { | ||
542 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
543 | struct se_subsystem_dev, dev_stat_grps); | ||
544 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
545 | |||
546 | if (!dev) | ||
547 | return -ENODEV; | ||
548 | |||
549 | /* scsiLuReadMegaBytes */ | ||
550 | return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->read_bytes >> 20)); | ||
551 | } | ||
552 | DEV_STAT_SCSI_LU_ATTR_RO(read_mbytes); | ||
553 | |||
554 | static ssize_t target_stat_scsi_lu_show_attr_write_mbytes( | ||
555 | struct se_dev_stat_grps *sgrps, char *page) | ||
556 | { | ||
557 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
558 | struct se_subsystem_dev, dev_stat_grps); | ||
559 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
560 | |||
561 | if (!dev) | ||
562 | return -ENODEV; | ||
563 | |||
564 | /* scsiLuWrittenMegaBytes */ | ||
565 | return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->write_bytes >> 20)); | ||
566 | } | ||
567 | DEV_STAT_SCSI_LU_ATTR_RO(write_mbytes); | ||
568 | |||
569 | static ssize_t target_stat_scsi_lu_show_attr_resets( | ||
570 | struct se_dev_stat_grps *sgrps, char *page) | ||
571 | { | ||
572 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
573 | struct se_subsystem_dev, dev_stat_grps); | ||
574 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
575 | |||
576 | if (!dev) | ||
577 | return -ENODEV; | ||
578 | |||
579 | /* scsiLuInResets */ | ||
580 | return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets); | ||
581 | } | ||
582 | DEV_STAT_SCSI_LU_ATTR_RO(resets); | ||
583 | |||
584 | static ssize_t target_stat_scsi_lu_show_attr_full_stat( | ||
585 | struct se_dev_stat_grps *sgrps, char *page) | ||
586 | { | ||
587 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
588 | struct se_subsystem_dev, dev_stat_grps); | ||
589 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
590 | |||
591 | if (!dev) | ||
592 | return -ENODEV; | ||
593 | |||
594 | /* FIXME: scsiLuOutTaskSetFullStatus */ | ||
595 | return snprintf(page, PAGE_SIZE, "%u\n", 0); | ||
596 | } | ||
597 | DEV_STAT_SCSI_LU_ATTR_RO(full_stat); | ||
598 | |||
599 | static ssize_t target_stat_scsi_lu_show_attr_hs_num_cmds( | ||
600 | struct se_dev_stat_grps *sgrps, char *page) | ||
601 | { | ||
602 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
603 | struct se_subsystem_dev, dev_stat_grps); | ||
604 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
605 | |||
606 | if (!dev) | ||
607 | return -ENODEV; | ||
608 | |||
609 | /* FIXME: scsiLuHSInCommands */ | ||
610 | return snprintf(page, PAGE_SIZE, "%u\n", 0); | ||
611 | } | ||
612 | DEV_STAT_SCSI_LU_ATTR_RO(hs_num_cmds); | ||
613 | |||
614 | static ssize_t target_stat_scsi_lu_show_attr_creation_time( | ||
615 | struct se_dev_stat_grps *sgrps, char *page) | ||
616 | { | ||
617 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
618 | struct se_subsystem_dev, dev_stat_grps); | ||
619 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
620 | |||
621 | if (!dev) | ||
622 | return -ENODEV; | ||
623 | |||
624 | /* scsiLuCreationTime */ | ||
625 | return snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)dev->creation_time - | ||
626 | INITIAL_JIFFIES) * 100 / HZ)); | ||
627 | } | ||
628 | DEV_STAT_SCSI_LU_ATTR_RO(creation_time); | ||
629 | |||
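The creation_time math above converts a raw jiffies timestamp into hundredths of a second since boot: subtracting INITIAL_JIFFIES removes the kernel's deliberate near-wrap starting offset, and the "* 100 / HZ" step rescales ticks to centiseconds, i.e. the SNMP TimeTicks granularity the comment's scsiLuCreationTime name refers to. A stand-alone illustration of the same arithmetic, with the HZ value assumed for the example:

	#include <stdio.h>

	int main(void)
	{
		unsigned int hz = 250;			/* assumed CONFIG_HZ for this example */
		unsigned int ticks_since_boot = 5000;	/* (u32)jiffies - INITIAL_JIFFIES */
		unsigned int centisecs = ticks_since_boot * 100 / hz;

		printf("%u\n", centisecs);		/* prints 2000, i.e. 20.00 seconds */
		return 0;
	}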
630 | CONFIGFS_EATTR_OPS(target_stat_scsi_lu, se_dev_stat_grps, scsi_lu_group); | ||
631 | |||
632 | static struct configfs_attribute *target_stat_scsi_lu_attrs[] = { | ||
633 | &target_stat_scsi_lu_inst.attr, | ||
634 | &target_stat_scsi_lu_dev.attr, | ||
635 | &target_stat_scsi_lu_indx.attr, | ||
636 | &target_stat_scsi_lu_lun.attr, | ||
637 | &target_stat_scsi_lu_lu_name.attr, | ||
638 | &target_stat_scsi_lu_vend.attr, | ||
639 | &target_stat_scsi_lu_prod.attr, | ||
640 | &target_stat_scsi_lu_rev.attr, | ||
641 | &target_stat_scsi_lu_dev_type.attr, | ||
642 | &target_stat_scsi_lu_status.attr, | ||
643 | &target_stat_scsi_lu_state_bit.attr, | ||
644 | &target_stat_scsi_lu_num_cmds.attr, | ||
645 | &target_stat_scsi_lu_read_mbytes.attr, | ||
646 | &target_stat_scsi_lu_write_mbytes.attr, | ||
647 | &target_stat_scsi_lu_resets.attr, | ||
648 | &target_stat_scsi_lu_full_stat.attr, | ||
649 | &target_stat_scsi_lu_hs_num_cmds.attr, | ||
650 | &target_stat_scsi_lu_creation_time.attr, | ||
651 | NULL, | ||
652 | }; | ||
653 | |||
654 | static struct configfs_item_operations target_stat_scsi_lu_attrib_ops = { | ||
655 | .show_attribute = target_stat_scsi_lu_attr_show, | ||
656 | .store_attribute = target_stat_scsi_lu_attr_store, | ||
657 | }; | ||
658 | |||
659 | static struct config_item_type target_stat_scsi_lu_cit = { | ||
660 | .ct_item_ops = &target_stat_scsi_lu_attrib_ops, | ||
661 | .ct_attrs = target_stat_scsi_lu_attrs, | ||
662 | .ct_owner = THIS_MODULE, | ||
663 | }; | ||
664 | |||
665 | /* | ||
666 | * Called from target_core_configfs.c:target_core_make_subdev() to setup | ||
667 | * the target statistics groups + configfs CITs located in target_core_stat.c | ||
668 | */ | ||
669 | void target_stat_setup_dev_default_groups(struct se_subsystem_dev *se_subdev) | ||
670 | { | ||
671 | struct config_group *dev_stat_grp = &DEV_STAT_GRP(se_subdev)->stat_group; | ||
672 | |||
673 | config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_dev_group, | ||
674 | "scsi_dev", &target_stat_scsi_dev_cit); | ||
675 | config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_tgt_dev_group, | ||
676 | "scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit); | ||
677 | config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_lu_group, | ||
678 | "scsi_lu", &target_stat_scsi_lu_cit); | ||
679 | |||
680 | dev_stat_grp->default_groups[0] = &DEV_STAT_GRP(se_subdev)->scsi_dev_group; | ||
681 | dev_stat_grp->default_groups[1] = &DEV_STAT_GRP(se_subdev)->scsi_tgt_dev_group; | ||
682 | dev_stat_grp->default_groups[2] = &DEV_STAT_GRP(se_subdev)->scsi_lu_group; | ||
683 | dev_stat_grp->default_groups[3] = NULL; | ||
684 | } | ||
685 | |||
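After target_stat_setup_dev_default_groups() runs, the three groups registered here show up as scsi_dev/, scsi_tgt_dev/ and scsi_lu/ directories under the backstore device's statistics group in configfs, and each attribute above becomes a read-only file there. A minimal user-space sketch for reading one of them; the mount point and the backstore path components are assumptions for illustration (adjust them to the actual HBA/device names), not something defined by this code:

	/* stat_read.c: print a single target statistics attribute.
	 * Build: cc -o stat_read stat_read.c
	 */
	#include <stdio.h>
	#include <stdlib.h>

	int main(int argc, char **argv)
	{
		const char *path = (argc > 1) ? argv[1] :
			"/sys/kernel/config/target/core/iblock_0/disk0/statistics/scsi_tgt_dev/status";
		char buf[64];
		FILE *f = fopen(path, "r");

		if (!f) {
			perror(path);
			return EXIT_FAILURE;
		}
		if (fgets(buf, sizeof(buf), f))
			fputs(buf, stdout);	/* e.g. "activated" */
		fclose(f);
		return EXIT_SUCCESS;
	}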
686 | /* | ||
687 | * SCSI Port Table | ||
688 | */ | ||
689 | |||
690 | CONFIGFS_EATTR_STRUCT(target_stat_scsi_port, se_port_stat_grps); | ||
691 | #define DEV_STAT_SCSI_PORT_ATTR(_name, _mode) \ | ||
692 | static struct target_stat_scsi_port_attribute \ | ||
693 | target_stat_scsi_port_##_name = \ | ||
694 | __CONFIGFS_EATTR(_name, _mode, \ | ||
695 | target_stat_scsi_port_show_attr_##_name, \ | ||
696 | target_stat_scsi_port_store_attr_##_name); | ||
697 | |||
698 | #define DEV_STAT_SCSI_PORT_ATTR_RO(_name) \ | ||
699 | static struct target_stat_scsi_port_attribute \ | ||
700 | target_stat_scsi_port_##_name = \ | ||
701 | __CONFIGFS_EATTR_RO(_name, \ | ||
702 | target_stat_scsi_port_show_attr_##_name); | ||
703 | |||
704 | static ssize_t target_stat_scsi_port_show_attr_inst( | ||
705 | struct se_port_stat_grps *pgrps, char *page) | ||
706 | { | ||
707 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
708 | struct se_port *sep; | ||
709 | struct se_device *dev = lun->lun_se_dev; | ||
710 | struct se_hba *hba; | ||
711 | ssize_t ret; | ||
712 | |||
713 | spin_lock(&lun->lun_sep_lock); | ||
714 | sep = lun->lun_sep; | ||
715 | if (!sep) { | ||
716 | spin_unlock(&lun->lun_sep_lock); | ||
717 | return -ENODEV; | ||
718 | } | ||
719 | hba = dev->se_hba; | ||
720 | ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); | ||
721 | spin_unlock(&lun->lun_sep_lock); | ||
722 | return ret; | ||
723 | } | ||
724 | DEV_STAT_SCSI_PORT_ATTR_RO(inst); | ||
725 | |||
726 | static ssize_t target_stat_scsi_port_show_attr_dev( | ||
727 | struct se_port_stat_grps *pgrps, char *page) | ||
728 | { | ||
729 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
730 | struct se_port *sep; | ||
731 | struct se_device *dev = lun->lun_se_dev; | ||
732 | ssize_t ret; | ||
733 | |||
734 | spin_lock(&lun->lun_sep_lock); | ||
735 | sep = lun->lun_sep; | ||
736 | if (!sep) { | ||
737 | spin_unlock(&lun->lun_sep_lock); | ||
738 | return -ENODEV; | ||
739 | } | ||
740 | ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); | ||
741 | spin_unlock(&lun->lun_sep_lock); | ||
742 | return ret; | ||
743 | } | ||
744 | DEV_STAT_SCSI_PORT_ATTR_RO(dev); | ||
745 | |||
746 | static ssize_t target_stat_scsi_port_show_attr_indx( | ||
747 | struct se_port_stat_grps *pgrps, char *page) | ||
748 | { | ||
749 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
750 | struct se_port *sep; | ||
751 | ssize_t ret; | ||
752 | |||
753 | spin_lock(&lun->lun_sep_lock); | ||
754 | sep = lun->lun_sep; | ||
755 | if (!sep) { | ||
756 | spin_unlock(&lun->lun_sep_lock); | ||
757 | return -ENODEV; | ||
758 | } | ||
759 | ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index); | ||
760 | spin_unlock(&lun->lun_sep_lock); | ||
761 | return ret; | ||
762 | } | ||
763 | DEV_STAT_SCSI_PORT_ATTR_RO(indx); | ||
764 | |||
765 | static ssize_t target_stat_scsi_port_show_attr_role( | ||
766 | struct se_port_stat_grps *pgrps, char *page) | ||
767 | { | ||
768 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
769 | struct se_device *dev = lun->lun_se_dev; | ||
770 | struct se_port *sep; | ||
771 | ssize_t ret; | ||
772 | |||
773 | if (!dev) | ||
774 | return -ENODEV; | ||
775 | |||
776 | spin_lock(&lun->lun_sep_lock); | ||
777 | sep = lun->lun_sep; | ||
778 | if (!sep) { | ||
779 | spin_unlock(&lun->lun_sep_lock); | ||
780 | return -ENODEV; | ||
781 | } | ||
782 | ret = snprintf(page, PAGE_SIZE, "%s%u\n", "Device", dev->dev_index); | ||
783 | spin_unlock(&lun->lun_sep_lock); | ||
784 | return ret; | ||
785 | } | ||
786 | DEV_STAT_SCSI_PORT_ATTR_RO(role); | ||
787 | |||
788 | static ssize_t target_stat_scsi_port_show_attr_busy_count( | ||
789 | struct se_port_stat_grps *pgrps, char *page) | ||
790 | { | ||
791 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
792 | struct se_port *sep; | ||
793 | ssize_t ret; | ||
794 | |||
795 | spin_lock(&lun->lun_sep_lock); | ||
796 | sep = lun->lun_sep; | ||
797 | if (!sep) { | ||
798 | spin_unlock(&lun->lun_sep_lock); | ||
799 | return -ENODEV; | ||
800 | } | ||
801 | /* FIXME: scsiPortBusyStatuses */ | ||
802 | ret = snprintf(page, PAGE_SIZE, "%u\n", 0); | ||
803 | spin_unlock(&lun->lun_sep_lock); | ||
804 | return ret; | ||
805 | } | ||
806 | DEV_STAT_SCSI_PORT_ATTR_RO(busy_count); | ||
807 | |||
808 | CONFIGFS_EATTR_OPS(target_stat_scsi_port, se_port_stat_grps, scsi_port_group); | ||
809 | |||
810 | static struct configfs_attribute *target_stat_scsi_port_attrs[] = { | ||
811 | &target_stat_scsi_port_inst.attr, | ||
812 | &target_stat_scsi_port_dev.attr, | ||
813 | &target_stat_scsi_port_indx.attr, | ||
814 | &target_stat_scsi_port_role.attr, | ||
815 | &target_stat_scsi_port_busy_count.attr, | ||
816 | NULL, | ||
817 | }; | ||
818 | |||
819 | static struct configfs_item_operations target_stat_scsi_port_attrib_ops = { | ||
820 | .show_attribute = target_stat_scsi_port_attr_show, | ||
821 | .store_attribute = target_stat_scsi_port_attr_store, | ||
822 | }; | ||
823 | |||
824 | static struct config_item_type target_stat_scsi_port_cit = { | ||
825 | .ct_item_ops = &target_stat_scsi_port_attrib_ops, | ||
826 | .ct_attrs = target_stat_scsi_port_attrs, | ||
827 | .ct_owner = THIS_MODULE, | ||
828 | }; | ||
829 | |||
830 | /* | ||
831 | * SCSI Target Port Table | ||
832 | */ | ||
833 | CONFIGFS_EATTR_STRUCT(target_stat_scsi_tgt_port, se_port_stat_grps); | ||
834 | #define DEV_STAT_SCSI_TGT_PORT_ATTR(_name, _mode) \ | ||
835 | static struct target_stat_scsi_tgt_port_attribute \ | ||
836 | target_stat_scsi_tgt_port_##_name = \ | ||
837 | __CONFIGFS_EATTR(_name, _mode, \ | ||
838 | target_stat_scsi_tgt_port_show_attr_##_name, \ | ||
839 | target_stat_scsi_tgt_port_store_attr_##_name); | ||
840 | |||
841 | #define DEV_STAT_SCSI_TGT_PORT_ATTR_RO(_name) \ | ||
842 | static struct target_stat_scsi_tgt_port_attribute \ | ||
843 | target_stat_scsi_tgt_port_##_name = \ | ||
844 | __CONFIGFS_EATTR_RO(_name, \ | ||
845 | target_stat_scsi_tgt_port_show_attr_##_name); | ||
846 | |||
847 | static ssize_t target_stat_scsi_tgt_port_show_attr_inst( | ||
848 | struct se_port_stat_grps *pgrps, char *page) | ||
849 | { | ||
850 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
851 | struct se_device *dev = lun->lun_se_dev; | ||
852 | struct se_port *sep; | ||
853 | struct se_hba *hba; | ||
854 | ssize_t ret; | ||
855 | |||
856 | spin_lock(&lun->lun_sep_lock); | ||
857 | sep = lun->lun_sep; | ||
858 | if (!sep) { | ||
859 | spin_unlock(&lun->lun_sep_lock); | ||
860 | return -ENODEV; | ||
861 | } | ||
862 | hba = dev->se_hba; | ||
863 | ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); | ||
864 | spin_unlock(&lun->lun_sep_lock); | ||
865 | return ret; | ||
866 | } | ||
867 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(inst); | ||
868 | |||
869 | static ssize_t target_stat_scsi_tgt_port_show_attr_dev( | ||
870 | struct se_port_stat_grps *pgrps, char *page) | ||
871 | { | ||
872 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
873 | struct se_device *dev = lun->lun_se_dev; | ||
874 | struct se_port *sep; | ||
875 | ssize_t ret; | ||
876 | |||
877 | spin_lock(&lun->lun_sep_lock); | ||
878 | sep = lun->lun_sep; | ||
879 | if (!sep) { | ||
880 | spin_unlock(&lun->lun_sep_lock); | ||
881 | return -ENODEV; | ||
882 | } | ||
883 | ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); | ||
884 | spin_unlock(&lun->lun_sep_lock); | ||
885 | return ret; | ||
886 | } | ||
887 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(dev); | ||
888 | |||
889 | static ssize_t target_stat_scsi_tgt_port_show_attr_indx( | ||
890 | struct se_port_stat_grps *pgrps, char *page) | ||
891 | { | ||
892 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
893 | struct se_port *sep; | ||
894 | ssize_t ret; | ||
895 | |||
896 | spin_lock(&lun->lun_sep_lock); | ||
897 | sep = lun->lun_sep; | ||
898 | if (!sep) { | ||
899 | spin_unlock(&lun->lun_sep_lock); | ||
900 | return -ENODEV; | ||
901 | } | ||
902 | ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index); | ||
903 | spin_unlock(&lun->lun_sep_lock); | ||
904 | return ret; | ||
905 | } | ||
906 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(indx); | ||
907 | |||
908 | static ssize_t target_stat_scsi_tgt_port_show_attr_name( | ||
909 | struct se_port_stat_grps *pgrps, char *page) | ||
910 | { | ||
911 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
912 | struct se_port *sep; | ||
913 | struct se_portal_group *tpg; | ||
914 | ssize_t ret; | ||
915 | |||
916 | spin_lock(&lun->lun_sep_lock); | ||
917 | sep = lun->lun_sep; | ||
918 | if (!sep) { | ||
919 | spin_unlock(&lun->lun_sep_lock); | ||
920 | return -ENODEV; | ||
921 | } | ||
922 | tpg = sep->sep_tpg; | ||
923 | |||
924 | ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n", | ||
925 | TPG_TFO(tpg)->get_fabric_name(), sep->sep_index); | ||
926 | spin_unlock(&lun->lun_sep_lock); | ||
927 | return ret; | ||
928 | } | ||
929 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(name); | ||
930 | |||
931 | static ssize_t target_stat_scsi_tgt_port_show_attr_port_index( | ||
932 | struct se_port_stat_grps *pgrps, char *page) | ||
933 | { | ||
934 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
935 | struct se_port *sep; | ||
936 | struct se_portal_group *tpg; | ||
937 | ssize_t ret; | ||
938 | |||
939 | spin_lock(&lun->lun_sep_lock); | ||
940 | sep = lun->lun_sep; | ||
941 | if (!sep) { | ||
942 | spin_unlock(&lun->lun_sep_lock); | ||
943 | return -ENODEV; | ||
944 | } | ||
945 | tpg = sep->sep_tpg; | ||
946 | |||
947 | ret = snprintf(page, PAGE_SIZE, "%s%s%d\n", | ||
948 | TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+", | ||
949 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | ||
950 | spin_unlock(&lun->lun_sep_lock); | ||
951 | return ret; | ||
952 | } | ||
953 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(port_index); | ||
954 | |||
955 | static ssize_t target_stat_scsi_tgt_port_show_attr_in_cmds( | ||
956 | struct se_port_stat_grps *pgrps, char *page) | ||
957 | { | ||
958 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
959 | struct se_port *sep; | ||
960 | struct se_portal_group *tpg; | ||
961 | ssize_t ret; | ||
962 | |||
963 | spin_lock(&lun->lun_sep_lock); | ||
964 | sep = lun->lun_sep; | ||
965 | if (!sep) { | ||
966 | spin_unlock(&lun->lun_sep_lock); | ||
967 | return -ENODEV; | ||
968 | } | ||
969 | tpg = sep->sep_tpg; | ||
970 | |||
971 | ret = snprintf(page, PAGE_SIZE, "%llu\n", sep->sep_stats.cmd_pdus); | ||
972 | spin_unlock(&lun->lun_sep_lock); | ||
973 | return ret; | ||
974 | } | ||
975 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(in_cmds); | ||
976 | |||
977 | static ssize_t target_stat_scsi_tgt_port_show_attr_write_mbytes( | ||
978 | struct se_port_stat_grps *pgrps, char *page) | ||
979 | { | ||
980 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
981 | struct se_port *sep; | ||
982 | struct se_portal_group *tpg; | ||
983 | ssize_t ret; | ||
984 | |||
985 | spin_lock(&lun->lun_sep_lock); | ||
986 | sep = lun->lun_sep; | ||
987 | if (!sep) { | ||
988 | spin_unlock(&lun->lun_sep_lock); | ||
989 | return -ENODEV; | ||
990 | } | ||
991 | tpg = sep->sep_tpg; | ||
992 | |||
993 | ret = snprintf(page, PAGE_SIZE, "%u\n", | ||
994 | (u32)(sep->sep_stats.rx_data_octets >> 20)); | ||
995 | spin_unlock(&lun->lun_sep_lock); | ||
996 | return ret; | ||
997 | } | ||
998 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(write_mbytes); | ||
999 | |||
1000 | static ssize_t target_stat_scsi_tgt_port_show_attr_read_mbytes( | ||
1001 | struct se_port_stat_grps *pgrps, char *page) | ||
1002 | { | ||
1003 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
1004 | struct se_port *sep; | ||
1005 | struct se_portal_group *tpg; | ||
1006 | ssize_t ret; | ||
1007 | |||
1008 | spin_lock(&lun->lun_sep_lock); | ||
1009 | sep = lun->lun_sep; | ||
1010 | if (!sep) { | ||
1011 | spin_unlock(&lun->lun_sep_lock); | ||
1012 | return -ENODEV; | ||
1013 | } | ||
1014 | tpg = sep->sep_tpg; | ||
1015 | |||
1016 | ret = snprintf(page, PAGE_SIZE, "%u\n", | ||
1017 | (u32)(sep->sep_stats.tx_data_octets >> 20)); | ||
1018 | spin_unlock(&lun->lun_sep_lock); | ||
1019 | return ret; | ||
1020 | } | ||
1021 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(read_mbytes); | ||
1022 | |||
1023 | static ssize_t target_stat_scsi_tgt_port_show_attr_hs_in_cmds( | ||
1024 | struct se_port_stat_grps *pgrps, char *page) | ||
1025 | { | ||
1026 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
1027 | struct se_port *sep; | ||
1028 | struct se_portal_group *tpg; | ||
1029 | ssize_t ret; | ||
1030 | |||
1031 | spin_lock(&lun->lun_sep_lock); | ||
1032 | sep = lun->lun_sep; | ||
1033 | if (!sep) { | ||
1034 | spin_unlock(&lun->lun_sep_lock); | ||
1035 | return -ENODEV; | ||
1036 | } | ||
1037 | tpg = sep->sep_tpg; | ||
1038 | |||
1039 | /* FIXME: scsiTgtPortHsInCommands */ | ||
1040 | ret = snprintf(page, PAGE_SIZE, "%u\n", 0); | ||
1041 | spin_unlock(&lun->lun_sep_lock); | ||
1042 | return ret; | ||
1043 | } | ||
1044 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(hs_in_cmds); | ||
1045 | |||
1046 | CONFIGFS_EATTR_OPS(target_stat_scsi_tgt_port, se_port_stat_grps, | ||
1047 | scsi_tgt_port_group); | ||
1048 | |||
1049 | static struct configfs_attribute *target_stat_scsi_tgt_port_attrs[] = { | ||
1050 | &target_stat_scsi_tgt_port_inst.attr, | ||
1051 | &target_stat_scsi_tgt_port_dev.attr, | ||
1052 | &target_stat_scsi_tgt_port_indx.attr, | ||
1053 | &target_stat_scsi_tgt_port_name.attr, | ||
1054 | &target_stat_scsi_tgt_port_port_index.attr, | ||
1055 | &target_stat_scsi_tgt_port_in_cmds.attr, | ||
1056 | &target_stat_scsi_tgt_port_write_mbytes.attr, | ||
1057 | &target_stat_scsi_tgt_port_read_mbytes.attr, | ||
1058 | &target_stat_scsi_tgt_port_hs_in_cmds.attr, | ||
1059 | NULL, | ||
1060 | }; | ||
1061 | |||
1062 | static struct configfs_item_operations target_stat_scsi_tgt_port_attrib_ops = { | ||
1063 | .show_attribute = target_stat_scsi_tgt_port_attr_show, | ||
1064 | .store_attribute = target_stat_scsi_tgt_port_attr_store, | ||
1065 | }; | ||
1066 | |||
1067 | static struct config_item_type target_stat_scsi_tgt_port_cit = { | ||
1068 | .ct_item_ops = &target_stat_scsi_tgt_port_attrib_ops, | ||
1069 | .ct_attrs = target_stat_scsi_tgt_port_attrs, | ||
1070 | .ct_owner = THIS_MODULE, | ||
1071 | }; | ||
1072 | |||
1073 | /* | ||
1074 | * SCSI Transport Table | ||
1075 | */ | ||
1076 | |||
1077 | CONFIGFS_EATTR_STRUCT(target_stat_scsi_transport, se_port_stat_grps); | ||
1078 | #define DEV_STAT_SCSI_TRANSPORT_ATTR(_name, _mode) \ | ||
1079 | static struct target_stat_scsi_transport_attribute \ | ||
1080 | target_stat_scsi_transport_##_name = \ | ||
1081 | __CONFIGFS_EATTR(_name, _mode, \ | ||
1082 | target_stat_scsi_transport_show_attr_##_name, \ | ||
1083 | target_stat_scsi_transport_store_attr_##_name); | ||
1084 | |||
1085 | #define DEV_STAT_SCSI_TRANSPORT_ATTR_RO(_name) \ | ||
1086 | static struct target_stat_scsi_transport_attribute \ | ||
1087 | target_stat_scsi_transport_##_name = \ | ||
1088 | __CONFIGFS_EATTR_RO(_name, \ | ||
1089 | target_stat_scsi_transport_show_attr_##_name); | ||
1090 | |||
1091 | static ssize_t target_stat_scsi_transport_show_attr_inst( | ||
1092 | struct se_port_stat_grps *pgrps, char *page) | ||
1093 | { | ||
1094 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
1095 | struct se_device *dev = lun->lun_se_dev; | ||
1096 | struct se_port *sep; | ||
1097 | struct se_hba *hba; | ||
1098 | ssize_t ret; | ||
1099 | |||
1100 | spin_lock(&lun->lun_sep_lock); | ||
1101 | sep = lun->lun_sep; | ||
1102 | if (!sep) { | ||
1103 | spin_unlock(&lun->lun_sep_lock); | ||
1104 | return -ENODEV; | ||
1105 | } | ||
1106 | |||
1107 | hba = dev->se_hba; | ||
1108 | ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); | ||
1109 | spin_unlock(&lun->lun_sep_lock); | ||
1110 | return ret; | ||
1111 | } | ||
1112 | DEV_STAT_SCSI_TRANSPORT_ATTR_RO(inst); | ||
1113 | |||
1114 | static ssize_t target_stat_scsi_transport_show_attr_device( | ||
1115 | struct se_port_stat_grps *pgrps, char *page) | ||
1116 | { | ||
1117 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
1118 | struct se_port *sep; | ||
1119 | struct se_portal_group *tpg; | ||
1120 | ssize_t ret; | ||
1121 | |||
1122 | spin_lock(&lun->lun_sep_lock); | ||
1123 | sep = lun->lun_sep; | ||
1124 | if (!sep) { | ||
1125 | spin_unlock(&lun->lun_sep_lock); | ||
1126 | return -ENODEV; | ||
1127 | } | ||
1128 | tpg = sep->sep_tpg; | ||
1129 | /* scsiTransportType */ | ||
1130 | ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n", | ||
1131 | TPG_TFO(tpg)->get_fabric_name()); | ||
1132 | spin_unlock(&lun->lun_sep_lock); | ||
1133 | return ret; | ||
1134 | } | ||
1135 | DEV_STAT_SCSI_TRANSPORT_ATTR_RO(device); | ||
1136 | |||
1137 | static ssize_t target_stat_scsi_transport_show_attr_indx( | ||
1138 | struct se_port_stat_grps *pgrps, char *page) | ||
1139 | { | ||
1140 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
1141 | struct se_port *sep; | ||
1142 | struct se_portal_group *tpg; | ||
1143 | ssize_t ret; | ||
1144 | |||
1145 | spin_lock(&lun->lun_sep_lock); | ||
1146 | sep = lun->lun_sep; | ||
1147 | if (!sep) { | ||
1148 | spin_unlock(&lun->lun_sep_lock); | ||
1149 | return -ENODEV; | ||
1150 | } | ||
1151 | tpg = sep->sep_tpg; | ||
1152 | ret = snprintf(page, PAGE_SIZE, "%u\n", | ||
1153 | TPG_TFO(tpg)->tpg_get_inst_index(tpg)); | ||
1154 | spin_unlock(&lun->lun_sep_lock); | ||
1155 | return ret; | ||
1156 | } | ||
1157 | DEV_STAT_SCSI_TRANSPORT_ATTR_RO(indx); | ||
1158 | |||
1159 | static ssize_t target_stat_scsi_transport_show_attr_dev_name( | ||
1160 | struct se_port_stat_grps *pgrps, char *page) | ||
1161 | { | ||
1162 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
1163 | struct se_device *dev = lun->lun_se_dev; | ||
1164 | struct se_port *sep; | ||
1165 | struct se_portal_group *tpg; | ||
1166 | struct t10_wwn *wwn; | ||
1167 | ssize_t ret; | ||
1168 | |||
1169 | spin_lock(&lun->lun_sep_lock); | ||
1170 | sep = lun->lun_sep; | ||
1171 | if (!sep) { | ||
1172 | spin_unlock(&lun->lun_sep_lock); | ||
1173 | return -ENODEV; | ||
1174 | } | ||
1175 | tpg = sep->sep_tpg; | ||
1176 | wwn = DEV_T10_WWN(dev); | ||
1177 | /* scsiTransportDevName */ | ||
1178 | ret = snprintf(page, PAGE_SIZE, "%s+%s\n", | ||
1179 | TPG_TFO(tpg)->tpg_get_wwn(tpg), | ||
1180 | (strlen(wwn->unit_serial)) ? wwn->unit_serial : | ||
1181 | wwn->vendor); | ||
1182 | spin_unlock(&lun->lun_sep_lock); | ||
1183 | return ret; | ||
1184 | } | ||
1185 | DEV_STAT_SCSI_TRANSPORT_ATTR_RO(dev_name); | ||
1186 | |||
1187 | CONFIGFS_EATTR_OPS(target_stat_scsi_transport, se_port_stat_grps, | ||
1188 | scsi_transport_group); | ||
1189 | |||
1190 | static struct configfs_attribute *target_stat_scsi_transport_attrs[] = { | ||
1191 | &target_stat_scsi_transport_inst.attr, | ||
1192 | &target_stat_scsi_transport_device.attr, | ||
1193 | &target_stat_scsi_transport_indx.attr, | ||
1194 | &target_stat_scsi_transport_dev_name.attr, | ||
1195 | NULL, | ||
1196 | }; | ||
1197 | |||
1198 | static struct configfs_item_operations target_stat_scsi_transport_attrib_ops = { | ||
1199 | .show_attribute = target_stat_scsi_transport_attr_show, | ||
1200 | .store_attribute = target_stat_scsi_transport_attr_store, | ||
1201 | }; | ||
1202 | |||
1203 | static struct config_item_type target_stat_scsi_transport_cit = { | ||
1204 | .ct_item_ops = &target_stat_scsi_transport_attrib_ops, | ||
1205 | .ct_attrs = target_stat_scsi_transport_attrs, | ||
1206 | .ct_owner = THIS_MODULE, | ||
1207 | }; | ||
1208 | |||
1209 | /* | ||
1210 | * Called from target_core_fabric_configfs.c:target_fabric_make_lun() to setup | ||
1211 | * the target port statistics groups + configfs CITs located in target_core_stat.c | ||
1212 | */ | ||
1213 | void target_stat_setup_port_default_groups(struct se_lun *lun) | ||
1214 | { | ||
1215 | struct config_group *port_stat_grp = &PORT_STAT_GRP(lun)->stat_group; | ||
1216 | |||
1217 | config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_port_group, | ||
1218 | "scsi_port", &target_stat_scsi_port_cit); | ||
1219 | config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_tgt_port_group, | ||
1220 | "scsi_tgt_port", &target_stat_scsi_tgt_port_cit); | ||
1221 | config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_transport_group, | ||
1222 | "scsi_transport", &target_stat_scsi_transport_cit); | ||
1223 | |||
1224 | port_stat_grp->default_groups[0] = &PORT_STAT_GRP(lun)->scsi_port_group; | ||
1225 | port_stat_grp->default_groups[1] = &PORT_STAT_GRP(lun)->scsi_tgt_port_group; | ||
1226 | port_stat_grp->default_groups[2] = &PORT_STAT_GRP(lun)->scsi_transport_group; | ||
1227 | port_stat_grp->default_groups[3] = NULL; | ||
1228 | } | ||
1229 | |||
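Similarly, the port-level groups registered by target_stat_setup_port_default_groups() surface as scsi_port/, scsi_tgt_port/ and scsi_transport/ directories under a fabric LUN's statistics group. A small sketch that simply enumerates whatever is present; the loopback fabric path used as the default is an assumed example layout, not something this patch guarantees:

	/* lun_stat_ls.c: list the statistics groups below a fabric LUN.
	 * Build: cc -o lun_stat_ls lun_stat_ls.c
	 */
	#include <stdio.h>
	#include <dirent.h>

	int main(int argc, char **argv)
	{
		const char *dir = (argc > 1) ? argv[1] :
			"/sys/kernel/config/target/loopback/naa.6001405000000000/tpgt_1/lun/lun_0/statistics";
		DIR *d = opendir(dir);
		struct dirent *de;

		if (!d) {
			perror(dir);
			return 1;
		}
		while ((de = readdir(d)) != NULL)
			printf("%s\n", de->d_name);	/* expect scsi_port, scsi_tgt_port, scsi_transport */
		closedir(d);
		return 0;
	}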
1230 | /* | ||
1231 | * SCSI Authorized Initiator Table | ||
1232 | */ | ||
1233 | |||
1234 | CONFIGFS_EATTR_STRUCT(target_stat_scsi_auth_intr, se_ml_stat_grps); | ||
1235 | #define DEV_STAT_SCSI_AUTH_INTR_ATTR(_name, _mode) \ | ||
1236 | static struct target_stat_scsi_auth_intr_attribute \ | ||
1237 | target_stat_scsi_auth_intr_##_name = \ | ||
1238 | __CONFIGFS_EATTR(_name, _mode, \ | ||
1239 | target_stat_scsi_auth_intr_show_attr_##_name, \ | ||
1240 | target_stat_scsi_auth_intr_store_attr_##_name); | ||
1241 | |||
1242 | #define DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(_name) \ | ||
1243 | static struct target_stat_scsi_auth_intr_attribute \ | ||
1244 | target_stat_scsi_auth_intr_##_name = \ | ||
1245 | __CONFIGFS_EATTR_RO(_name, \ | ||
1246 | target_stat_scsi_auth_intr_show_attr_##_name); | ||
1247 | |||
1248 | static ssize_t target_stat_scsi_auth_intr_show_attr_inst( | ||
1249 | struct se_ml_stat_grps *lgrps, char *page) | ||
1250 | { | ||
1251 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1252 | struct se_lun_acl, ml_stat_grps); | ||
1253 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1254 | struct se_dev_entry *deve; | ||
1255 | struct se_portal_group *tpg; | ||
1256 | ssize_t ret; | ||
1257 | |||
1258 | spin_lock_irq(&nacl->device_list_lock); | ||
1259 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1260 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1261 | spin_unlock_irq(&nacl->device_list_lock); | ||
1262 | return -ENODEV; | ||
1263 | } | ||
1264 | tpg = nacl->se_tpg; | ||
1265 | /* scsiInstIndex */ | ||
1266 | ret = snprintf(page, PAGE_SIZE, "%u\n", | ||
1267 | TPG_TFO(tpg)->tpg_get_inst_index(tpg)); | ||
1268 | spin_unlock_irq(&nacl->device_list_lock); | ||
1269 | return ret; | ||
1270 | } | ||
1271 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(inst); | ||
1272 | |||
1273 | static ssize_t target_stat_scsi_auth_intr_show_attr_dev( | ||
1274 | struct se_ml_stat_grps *lgrps, char *page) | ||
1275 | { | ||
1276 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1277 | struct se_lun_acl, ml_stat_grps); | ||
1278 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1279 | struct se_dev_entry *deve; | ||
1280 | struct se_lun *lun; | ||
1281 | struct se_portal_group *tpg; | ||
1282 | ssize_t ret; | ||
1283 | |||
1284 | spin_lock_irq(&nacl->device_list_lock); | ||
1285 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1286 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1287 | spin_unlock_irq(&nacl->device_list_lock); | ||
1288 | return -ENODEV; | ||
1289 | } | ||
1290 | tpg = nacl->se_tpg; | ||
1291 | lun = deve->se_lun; | ||
1292 | /* scsiDeviceIndex */ | ||
1293 | ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index); | ||
1294 | spin_unlock_irq(&nacl->device_list_lock); | ||
1295 | return ret; | ||
1296 | } | ||
1297 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(dev); | ||
1298 | |||
1299 | static ssize_t target_stat_scsi_auth_intr_show_attr_port( | ||
1300 | struct se_ml_stat_grps *lgrps, char *page) | ||
1301 | { | ||
1302 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1303 | struct se_lun_acl, ml_stat_grps); | ||
1304 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1305 | struct se_dev_entry *deve; | ||
1306 | struct se_portal_group *tpg; | ||
1307 | ssize_t ret; | ||
1308 | |||
1309 | spin_lock_irq(&nacl->device_list_lock); | ||
1310 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1311 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1312 | spin_unlock_irq(&nacl->device_list_lock); | ||
1313 | return -ENODEV; | ||
1314 | } | ||
1315 | tpg = nacl->se_tpg; | ||
1316 | /* scsiAuthIntrTgtPortIndex */ | ||
1317 | ret = snprintf(page, PAGE_SIZE, "%u\n", TPG_TFO(tpg)->tpg_get_tag(tpg)); | ||
1318 | spin_unlock_irq(&nacl->device_list_lock); | ||
1319 | return ret; | ||
1320 | } | ||
1321 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(port); | ||
1322 | |||
1323 | static ssize_t target_stat_scsi_auth_intr_show_attr_indx( | ||
1324 | struct se_ml_stat_grps *lgrps, char *page) | ||
1325 | { | ||
1326 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1327 | struct se_lun_acl, ml_stat_grps); | ||
1328 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1329 | struct se_dev_entry *deve; | ||
1330 | ssize_t ret; | ||
1331 | |||
1332 | spin_lock_irq(&nacl->device_list_lock); | ||
1333 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1334 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1335 | spin_unlock_irq(&nacl->device_list_lock); | ||
1336 | return -ENODEV; | ||
1337 | } | ||
1338 | /* scsiAuthIntrIndex */ | ||
1339 | ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index); | ||
1340 | spin_unlock_irq(&nacl->device_list_lock); | ||
1341 | return ret; | ||
1342 | } | ||
1343 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(indx); | ||
1344 | |||
1345 | static ssize_t target_stat_scsi_auth_intr_show_attr_dev_or_port( | ||
1346 | struct se_ml_stat_grps *lgrps, char *page) | ||
1347 | { | ||
1348 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1349 | struct se_lun_acl, ml_stat_grps); | ||
1350 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1351 | struct se_dev_entry *deve; | ||
1352 | ssize_t ret; | ||
1353 | |||
1354 | spin_lock_irq(&nacl->device_list_lock); | ||
1355 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1356 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1357 | spin_unlock_irq(&nacl->device_list_lock); | ||
1358 | return -ENODEV; | ||
1359 | } | ||
1360 | /* scsiAuthIntrDevOrPort */ | ||
1361 | ret = snprintf(page, PAGE_SIZE, "%u\n", 1); | ||
1362 | spin_unlock_irq(&nacl->device_list_lock); | ||
1363 | return ret; | ||
1364 | } | ||
1365 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(dev_or_port); | ||
1366 | |||
1367 | static ssize_t target_stat_scsi_auth_intr_show_attr_intr_name( | ||
1368 | struct se_ml_stat_grps *lgrps, char *page) | ||
1369 | { | ||
1370 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1371 | struct se_lun_acl, ml_stat_grps); | ||
1372 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1373 | struct se_dev_entry *deve; | ||
1374 | ssize_t ret; | ||
1375 | |||
1376 | spin_lock_irq(&nacl->device_list_lock); | ||
1377 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1378 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1379 | spin_unlock_irq(&nacl->device_list_lock); | ||
1380 | return -ENODEV; | ||
1381 | } | ||
1382 | /* scsiAuthIntrName */ | ||
1383 | ret = snprintf(page, PAGE_SIZE, "%s\n", nacl->initiatorname); | ||
1384 | spin_unlock_irq(&nacl->device_list_lock); | ||
1385 | return ret; | ||
1386 | } | ||
1387 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(intr_name); | ||
1388 | |||
1389 | static ssize_t target_stat_scsi_auth_intr_show_attr_map_indx( | ||
1390 | struct se_ml_stat_grps *lgrps, char *page) | ||
1391 | { | ||
1392 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1393 | struct se_lun_acl, ml_stat_grps); | ||
1394 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1395 | struct se_dev_entry *deve; | ||
1396 | ssize_t ret; | ||
1397 | |||
1398 | spin_lock_irq(&nacl->device_list_lock); | ||
1399 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1400 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1401 | spin_unlock_irq(&nacl->device_list_lock); | ||
1402 | return -ENODEV; | ||
1403 | } | ||
1404 | /* FIXME: scsiAuthIntrLunMapIndex */ | ||
1405 | ret = snprintf(page, PAGE_SIZE, "%u\n", 0); | ||
1406 | spin_unlock_irq(&nacl->device_list_lock); | ||
1407 | return ret; | ||
1408 | } | ||
1409 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(map_indx); | ||
1410 | |||
1411 | static ssize_t target_stat_scsi_auth_intr_show_attr_att_count( | ||
1412 | struct se_ml_stat_grps *lgrps, char *page) | ||
1413 | { | ||
1414 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1415 | struct se_lun_acl, ml_stat_grps); | ||
1416 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1417 | struct se_dev_entry *deve; | ||
1418 | ssize_t ret; | ||
1419 | |||
1420 | spin_lock_irq(&nacl->device_list_lock); | ||
1421 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1422 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1423 | spin_unlock_irq(&nacl->device_list_lock); | ||
1424 | return -ENODEV; | ||
1425 | } | ||
1426 | /* scsiAuthIntrAttachedTimes */ | ||
1427 | ret = snprintf(page, PAGE_SIZE, "%u\n", deve->attach_count); | ||
1428 | spin_unlock_irq(&nacl->device_list_lock); | ||
1429 | return ret; | ||
1430 | } | ||
1431 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(att_count); | ||
1432 | |||
1433 | static ssize_t target_stat_scsi_auth_intr_show_attr_num_cmds( | ||
1434 | struct se_ml_stat_grps *lgrps, char *page) | ||
1435 | { | ||
1436 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1437 | struct se_lun_acl, ml_stat_grps); | ||
1438 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1439 | struct se_dev_entry *deve; | ||
1440 | ssize_t ret; | ||
1441 | |||
1442 | spin_lock_irq(&nacl->device_list_lock); | ||
1443 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1444 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1445 | spin_unlock_irq(&nacl->device_list_lock); | ||
1446 | return -ENODEV; | ||
1447 | } | ||
1448 | /* scsiAuthIntrOutCommands */ | ||
1449 | ret = snprintf(page, PAGE_SIZE, "%u\n", deve->total_cmds); | ||
1450 | spin_unlock_irq(&nacl->device_list_lock); | ||
1451 | return ret; | ||
1452 | } | ||
1453 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(num_cmds); | ||
1454 | |||
1455 | static ssize_t target_stat_scsi_auth_intr_show_attr_read_mbytes( | ||
1456 | struct se_ml_stat_grps *lgrps, char *page) | ||
1457 | { | ||
1458 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1459 | struct se_lun_acl, ml_stat_grps); | ||
1460 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1461 | struct se_dev_entry *deve; | ||
1462 | ssize_t ret; | ||
1463 | |||
1464 | spin_lock_irq(&nacl->device_list_lock); | ||
1465 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1466 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1467 | spin_unlock_irq(&nacl->device_list_lock); | ||
1468 | return -ENODEV; | ||
1469 | } | ||
1470 | /* scsiAuthIntrReadMegaBytes */ | ||
1471 | ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(deve->read_bytes >> 20)); | ||
1472 | spin_unlock_irq(&nacl->device_list_lock); | ||
1473 | return ret; | ||
1474 | } | ||
1475 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(read_mbytes); | ||
1476 | |||
1477 | static ssize_t target_stat_scsi_auth_intr_show_attr_write_mbytes( | ||
1478 | struct se_ml_stat_grps *lgrps, char *page) | ||
1479 | { | ||
1480 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1481 | struct se_lun_acl, ml_stat_grps); | ||
1482 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1483 | struct se_dev_entry *deve; | ||
1484 | ssize_t ret; | ||
1485 | |||
1486 | spin_lock_irq(&nacl->device_list_lock); | ||
1487 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1488 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1489 | spin_unlock_irq(&nacl->device_list_lock); | ||
1490 | return -ENODEV; | ||
1491 | } | ||
1492 | /* scsiAuthIntrWrittenMegaBytes */ | ||
1493 | ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(deve->write_bytes >> 20)); | ||
1494 | spin_unlock_irq(&nacl->device_list_lock); | ||
1495 | return ret; | ||
1496 | } | ||
1497 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(write_mbytes); | ||
1498 | |||
1499 | static ssize_t target_stat_scsi_auth_intr_show_attr_hs_num_cmds( | ||
1500 | struct se_ml_stat_grps *lgrps, char *page) | ||
1501 | { | ||
1502 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1503 | struct se_lun_acl, ml_stat_grps); | ||
1504 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1505 | struct se_dev_entry *deve; | ||
1506 | ssize_t ret; | ||
1507 | |||
1508 | spin_lock_irq(&nacl->device_list_lock); | ||
1509 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1510 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1511 | spin_unlock_irq(&nacl->device_list_lock); | ||
1512 | return -ENODEV; | ||
1513 | } | ||
1514 | /* FIXME: scsiAuthIntrHSOutCommands */ | ||
1515 | ret = snprintf(page, PAGE_SIZE, "%u\n", 0); | ||
1516 | spin_unlock_irq(&nacl->device_list_lock); | ||
1517 | return ret; | ||
1518 | } | ||
1519 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(hs_num_cmds); | ||
1520 | |||
1521 | static ssize_t target_stat_scsi_auth_intr_show_attr_creation_time( | ||
1522 | struct se_ml_stat_grps *lgrps, char *page) | ||
1523 | { | ||
1524 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1525 | struct se_lun_acl, ml_stat_grps); | ||
1526 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1527 | struct se_dev_entry *deve; | ||
1528 | ssize_t ret; | ||
1529 | |||
1530 | spin_lock_irq(&nacl->device_list_lock); | ||
1531 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1532 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1533 | spin_unlock_irq(&nacl->device_list_lock); | ||
1534 | return -ENODEV; | ||
1535 | } | ||
1536 | /* scsiAuthIntrLastCreation */ | ||
1537 | ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)deve->creation_time - | ||
1538 | INITIAL_JIFFIES) * 100 / HZ)); | ||
1539 | spin_unlock_irq(&nacl->device_list_lock); | ||
1540 | return ret; | ||
1541 | } | ||
1542 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(creation_time); | ||
1543 | |||
1544 | static ssize_t target_stat_scsi_auth_intr_show_attr_row_status( | ||
1545 | struct se_ml_stat_grps *lgrps, char *page) | ||
1546 | { | ||
1547 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1548 | struct se_lun_acl, ml_stat_grps); | ||
1549 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1550 | struct se_dev_entry *deve; | ||
1551 | ssize_t ret; | ||
1552 | |||
1553 | spin_lock_irq(&nacl->device_list_lock); | ||
1554 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1555 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1556 | spin_unlock_irq(&nacl->device_list_lock); | ||
1557 | return -ENODEV; | ||
1558 | } | ||
1559 | /* FIXME: scsiAuthIntrRowStatus */ | ||
1560 | ret = snprintf(page, PAGE_SIZE, "Ready\n"); | ||
1561 | spin_unlock_irq(&nacl->device_list_lock); | ||
1562 | return ret; | ||
1563 | } | ||
1564 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(row_status); | ||
1565 | |||
1566 | CONFIGFS_EATTR_OPS(target_stat_scsi_auth_intr, se_ml_stat_grps, | ||
1567 | scsi_auth_intr_group); | ||
1568 | |||
1569 | static struct configfs_attribute *target_stat_scsi_auth_intr_attrs[] = { | ||
1570 | &target_stat_scsi_auth_intr_inst.attr, | ||
1571 | &target_stat_scsi_auth_intr_dev.attr, | ||
1572 | &target_stat_scsi_auth_intr_port.attr, | ||
1573 | &target_stat_scsi_auth_intr_indx.attr, | ||
1574 | &target_stat_scsi_auth_intr_dev_or_port.attr, | ||
1575 | &target_stat_scsi_auth_intr_intr_name.attr, | ||
1576 | &target_stat_scsi_auth_intr_map_indx.attr, | ||
1577 | &target_stat_scsi_auth_intr_att_count.attr, | ||
1578 | &target_stat_scsi_auth_intr_num_cmds.attr, | ||
1579 | &target_stat_scsi_auth_intr_read_mbytes.attr, | ||
1580 | &target_stat_scsi_auth_intr_write_mbytes.attr, | ||
1581 | &target_stat_scsi_auth_intr_hs_num_cmds.attr, | ||
1582 | &target_stat_scsi_auth_intr_creation_time.attr, | ||
1583 | &target_stat_scsi_auth_intr_row_status.attr, | ||
1584 | NULL, | ||
1585 | }; | ||
1586 | |||
1587 | static struct configfs_item_operations target_stat_scsi_auth_intr_attrib_ops = { | ||
1588 | .show_attribute = target_stat_scsi_auth_intr_attr_show, | ||
1589 | .store_attribute = target_stat_scsi_auth_intr_attr_store, | ||
1590 | }; | ||
1591 | |||
1592 | static struct config_item_type target_stat_scsi_auth_intr_cit = { | ||
1593 | .ct_item_ops = &target_stat_scsi_auth_intr_attrib_ops, | ||
1594 | .ct_attrs = target_stat_scsi_auth_intr_attrs, | ||
1595 | .ct_owner = THIS_MODULE, | ||
1596 | }; | ||
1597 | |||
1598 | /* | ||
1599 | * SCSI Attached Initiator Port Table | ||
1600 | */ | ||
1601 | |||
1602 | CONFIGFS_EATTR_STRUCT(target_stat_scsi_att_intr_port, se_ml_stat_grps); | ||
1603 | #define DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR(_name, _mode) \ | ||
1604 | static struct target_stat_scsi_att_intr_port_attribute \ | ||
1605 | target_stat_scsi_att_intr_port_##_name = \ | ||
1606 | __CONFIGFS_EATTR(_name, _mode, \ | ||
1607 | target_stat_scsi_att_intr_port_show_attr_##_name, \ | ||
1608 | target_stat_scsi_att_intr_port_store_attr_##_name); | ||
1609 | |||
1610 | #define DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(_name) \ | ||
1611 | static struct target_stat_scsi_att_intr_port_attribute \ | ||
1612 | target_stat_scsi_att_intr_port_##_name = \ | ||
1613 | __CONFIGFS_EATTR_RO(_name, \ | ||
1614 | target_stat_scsi_att_intr_port_show_attr_##_name); | ||
1615 | |||
1616 | static ssize_t target_stat_scsi_att_intr_port_show_attr_inst( | ||
1617 | struct se_ml_stat_grps *lgrps, char *page) | ||
1618 | { | ||
1619 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1620 | struct se_lun_acl, ml_stat_grps); | ||
1621 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1622 | struct se_dev_entry *deve; | ||
1623 | struct se_portal_group *tpg; | ||
1624 | ssize_t ret; | ||
1625 | |||
1626 | spin_lock_irq(&nacl->device_list_lock); | ||
1627 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1628 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1629 | spin_unlock_irq(&nacl->device_list_lock); | ||
1630 | return -ENODEV; | ||
1631 | } | ||
1632 | tpg = nacl->se_tpg; | ||
1633 | /* scsiInstIndex */ | ||
1634 | ret = snprintf(page, PAGE_SIZE, "%u\n", | ||
1635 | TPG_TFO(tpg)->tpg_get_inst_index(tpg)); | ||
1636 | spin_unlock_irq(&nacl->device_list_lock); | ||
1637 | return ret; | ||
1638 | } | ||
1639 | DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(inst); | ||
1640 | |||
1641 | static ssize_t target_stat_scsi_att_intr_port_show_attr_dev( | ||
1642 | struct se_ml_stat_grps *lgrps, char *page) | ||
1643 | { | ||
1644 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1645 | struct se_lun_acl, ml_stat_grps); | ||
1646 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1647 | struct se_dev_entry *deve; | ||
1648 | struct se_lun *lun; | ||
1649 | struct se_portal_group *tpg; | ||
1650 | ssize_t ret; | ||
1651 | |||
1652 | spin_lock_irq(&nacl->device_list_lock); | ||
1653 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1654 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1655 | spin_unlock_irq(&nacl->device_list_lock); | ||
1656 | return -ENODEV; | ||
1657 | } | ||
1658 | tpg = nacl->se_tpg; | ||
1659 | lun = deve->se_lun; | ||
1660 | /* scsiDeviceIndex */ | ||
1661 | ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index); | ||
1662 | spin_unlock_irq(&nacl->device_list_lock); | ||
1663 | return ret; | ||
1664 | } | ||
1665 | DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(dev); | ||
1666 | |||
1667 | static ssize_t target_stat_scsi_att_intr_port_show_attr_port( | ||
1668 | struct se_ml_stat_grps *lgrps, char *page) | ||
1669 | { | ||
1670 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1671 | struct se_lun_acl, ml_stat_grps); | ||
1672 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1673 | struct se_dev_entry *deve; | ||
1674 | struct se_portal_group *tpg; | ||
1675 | ssize_t ret; | ||
1676 | |||
1677 | spin_lock_irq(&nacl->device_list_lock); | ||
1678 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1679 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1680 | spin_unlock_irq(&nacl->device_list_lock); | ||
1681 | return -ENODEV; | ||
1682 | } | ||
1683 | tpg = nacl->se_tpg; | ||
1684 | /* scsiPortIndex */ | ||
1685 | ret = snprintf(page, PAGE_SIZE, "%u\n", TPG_TFO(tpg)->tpg_get_tag(tpg)); | ||
1686 | spin_unlock_irq(&nacl->device_list_lock); | ||
1687 | return ret; | ||
1688 | } | ||
1689 | DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port); | ||
1690 | |||
1691 | static ssize_t target_stat_scsi_att_intr_port_show_attr_indx( | ||
1692 | struct se_ml_stat_grps *lgrps, char *page) | ||
1693 | { | ||
1694 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1695 | struct se_lun_acl, ml_stat_grps); | ||
1696 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1697 | struct se_session *se_sess; | ||
1698 | struct se_portal_group *tpg; | ||
1699 | ssize_t ret; | ||
1700 | |||
1701 | spin_lock_irq(&nacl->nacl_sess_lock); | ||
1702 | se_sess = nacl->nacl_sess; | ||
1703 | if (!se_sess) { | ||
1704 | spin_unlock_irq(&nacl->nacl_sess_lock); | ||
1705 | return -ENODEV; | ||
1706 | } | ||
1707 | |||
1708 | tpg = nacl->se_tpg; | ||
1709 | /* scsiAttIntrPortIndex */ | ||
1710 | ret = snprintf(page, PAGE_SIZE, "%u\n", | ||
1711 | TPG_TFO(tpg)->sess_get_index(se_sess)); | ||
1712 | spin_unlock_irq(&nacl->nacl_sess_lock); | ||
1713 | return ret; | ||
1714 | } | ||
1715 | DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(indx); | ||
1716 | |||
1717 | static ssize_t target_stat_scsi_att_intr_port_show_attr_port_auth_indx( | ||
1718 | struct se_ml_stat_grps *lgrps, char *page) | ||
1719 | { | ||
1720 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1721 | struct se_lun_acl, ml_stat_grps); | ||
1722 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1723 | struct se_dev_entry *deve; | ||
1724 | ssize_t ret; | ||
1725 | |||
1726 | spin_lock_irq(&nacl->device_list_lock); | ||
1727 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1728 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1729 | spin_unlock_irq(&nacl->device_list_lock); | ||
1730 | return -ENODEV; | ||
1731 | } | ||
1732 | /* scsiAttIntrPortAuthIntrIdx */ | ||
1733 | ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index); | ||
1734 | spin_unlock_irq(&nacl->device_list_lock); | ||
1735 | return ret; | ||
1736 | } | ||
1737 | DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port_auth_indx); | ||
1738 | |||
1739 | static ssize_t target_stat_scsi_att_intr_port_show_attr_port_ident( | ||
1740 | struct se_ml_stat_grps *lgrps, char *page) | ||
1741 | { | ||
1742 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1743 | struct se_lun_acl, ml_stat_grps); | ||
1744 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1745 | struct se_session *se_sess; | ||
1746 | struct se_portal_group *tpg; | ||
1747 | ssize_t ret; | ||
1748 | unsigned char buf[64]; | ||
1749 | |||
1750 | spin_lock_irq(&nacl->nacl_sess_lock); | ||
1751 | se_sess = nacl->nacl_sess; | ||
1752 | if (!se_sess) { | ||
1753 | spin_unlock_irq(&nacl->nacl_sess_lock); | ||
1754 | return -ENODEV; | ||
1755 | } | ||
1756 | |||
1757 | tpg = nacl->se_tpg; | ||
1758 | /* scsiAttIntrPortName+scsiAttIntrPortIdentifier */ | ||
1759 | memset(buf, 0, 64); | ||
1760 | if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) | ||
1761 | TPG_TFO(tpg)->sess_get_initiator_sid(se_sess, | ||
1762 | (unsigned char *)&buf[0], 64); | ||
1763 | |||
1764 | ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf); | ||
1765 | spin_unlock_irq(&nacl->nacl_sess_lock); | ||
1766 | return ret; | ||
1767 | } | ||
1768 | DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port_ident); | ||
1769 | |||
1770 | CONFIGFS_EATTR_OPS(target_stat_scsi_att_intr_port, se_ml_stat_grps, | ||
1771 | scsi_att_intr_port_group); | ||
1772 | |||
1773 | static struct configfs_attribute *target_stat_scsi_ath_intr_port_attrs[] = { | ||
1774 | &target_stat_scsi_att_intr_port_inst.attr, | ||
1775 | &target_stat_scsi_att_intr_port_dev.attr, | ||
1776 | &target_stat_scsi_att_intr_port_port.attr, | ||
1777 | &target_stat_scsi_att_intr_port_indx.attr, | ||
1778 | &target_stat_scsi_att_intr_port_port_auth_indx.attr, | ||
1779 | &target_stat_scsi_att_intr_port_port_ident.attr, | ||
1780 | NULL, | ||
1781 | }; | ||
1782 | |||
1783 | static struct configfs_item_operations target_stat_scsi_att_intr_port_attrib_ops = { | ||
1784 | .show_attribute = target_stat_scsi_att_intr_port_attr_show, | ||
1785 | .store_attribute = target_stat_scsi_att_intr_port_attr_store, | ||
1786 | }; | ||
1787 | |||
1788 | static struct config_item_type target_stat_scsi_att_intr_port_cit = { | ||
1789 | .ct_item_ops = &target_stat_scsi_att_intr_port_attrib_ops, | ||
1790 | .ct_attrs = target_stat_scsi_ath_intr_port_attrs, | ||
1791 | .ct_owner = THIS_MODULE, | ||
1792 | }; | ||
1793 | |||
1794 | /* | ||
1795 | * Called from target_core_fabric_configfs.c:target_fabric_make_mappedlun() to setup | ||
1796 | * the target MappedLUN statistics groups + configfs CITs located in target_core_stat.c | ||
1797 | */ | ||
1798 | void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *lacl) | ||
1799 | { | ||
1800 | struct config_group *ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group; | ||
1801 | |||
1802 | config_group_init_type_name(&ML_STAT_GRPS(lacl)->scsi_auth_intr_group, | ||
1803 | "scsi_auth_intr", &target_stat_scsi_auth_intr_cit); | ||
1804 | config_group_init_type_name(&ML_STAT_GRPS(lacl)->scsi_att_intr_port_group, | ||
1805 | "scsi_att_intr_port", &target_stat_scsi_att_intr_port_cit); | ||
1806 | |||
1807 | ml_stat_grp->default_groups[0] = &ML_STAT_GRPS(lacl)->scsi_auth_intr_group; | ||
1808 | ml_stat_grp->default_groups[1] = &ML_STAT_GRPS(lacl)->scsi_att_intr_port_group; | ||
1809 | ml_stat_grp->default_groups[2] = NULL; | ||
1810 | } | ||
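Note that target_stat_setup_mappedlun_default_groups() only fills slots in an already-allocated default_groups array; configfs walks that array until it hits the NULL terminator in slot 2. A hedged sketch of the caller-side preparation this relies on (the real allocation lives in target_core_fabric_configfs.c, which is not part of this hunk, and the helper name here is invented):

static int example_prep_mappedlun_stats(struct se_lun_acl *lacl)
{
        struct config_group *ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group;

        /* 2 child groups + NULL terminator, matching slots 0..2 filled above */
        ml_stat_grp->default_groups = kzalloc(3 * sizeof(struct config_group *),
                        GFP_KERNEL);
        if (!ml_stat_grp->default_groups)
                return -ENOMEM;

        target_stat_setup_mappedlun_default_groups(lacl);
        return 0;
}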
diff --git a/drivers/target/target_core_stat.h b/drivers/target/target_core_stat.h new file mode 100644 index 00000000000..86c252f9ea4 --- /dev/null +++ b/drivers/target/target_core_stat.h | |||
@@ -0,0 +1,8 @@ | |||
1 | #ifndef TARGET_CORE_STAT_H | ||
2 | #define TARGET_CORE_STAT_H | ||
3 | |||
4 | extern void target_stat_setup_dev_default_groups(struct se_subsystem_dev *); | ||
5 | extern void target_stat_setup_port_default_groups(struct se_lun *); | ||
6 | extern void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *); | ||
7 | |||
8 | #endif /*** TARGET_CORE_STAT_H ***/ | ||
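The new header collects the statistics setup entry points in one place; each takes the object whose embedded stat groups are added to target_core_base.h later in this series. A brief usage sketch, assuming the call sites are the configfs glue that creates the corresponding objects (the MappedLUN case is the one named in the comment above target_stat_setup_mappedlun_default_groups(); the wrapper function here is hypothetical):

#include "target_core_stat.h"

/* hedged usage sketch: one call per object, once its config groups exist */
static void example_wire_up_stats(struct se_subsystem_dev *se_dev,
                struct se_lun *lun, struct se_lun_acl *lacl)
{
        target_stat_setup_dev_default_groups(se_dev);           /* device-level stats   */
        target_stat_setup_port_default_groups(lun);             /* port/LUN-level stats */
        target_stat_setup_mappedlun_default_groups(lacl);       /* MappedLUN stats      */
}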
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index ff9ace01e27..bf6aa8a9f1d 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -227,8 +227,6 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd, | |||
227 | static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); | 227 | static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); |
228 | static void transport_stop_all_task_timers(struct se_cmd *cmd); | 228 | static void transport_stop_all_task_timers(struct se_cmd *cmd); |
229 | 229 | ||
230 | int transport_emulate_control_cdb(struct se_task *task); | ||
231 | |||
232 | int init_se_global(void) | 230 | int init_se_global(void) |
233 | { | 231 | { |
234 | struct se_global *global; | 232 | struct se_global *global; |
@@ -1622,7 +1620,7 @@ struct se_device *transport_add_device_to_core_hba( | |||
1622 | const char *inquiry_prod, | 1620 | const char *inquiry_prod, |
1623 | const char *inquiry_rev) | 1621 | const char *inquiry_rev) |
1624 | { | 1622 | { |
1625 | int ret = 0, force_pt; | 1623 | int force_pt; |
1626 | struct se_device *dev; | 1624 | struct se_device *dev; |
1627 | 1625 | ||
1628 | dev = kzalloc(sizeof(struct se_device), GFP_KERNEL); | 1626 | dev = kzalloc(sizeof(struct se_device), GFP_KERNEL); |
@@ -1739,9 +1737,8 @@ struct se_device *transport_add_device_to_core_hba( | |||
1739 | } | 1737 | } |
1740 | scsi_dump_inquiry(dev); | 1738 | scsi_dump_inquiry(dev); |
1741 | 1739 | ||
1740 | return dev; | ||
1742 | out: | 1741 | out: |
1743 | if (!ret) | ||
1744 | return dev; | ||
1745 | kthread_stop(dev->process_thread); | 1742 | kthread_stop(dev->process_thread); |
1746 | 1743 | ||
1747 | spin_lock(&hba->device_lock); | 1744 | spin_lock(&hba->device_lock); |
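The transport_add_device_to_core_hba() hunks remove a `ret` variable that was initialized to zero and never changed, so the `if (!ret)` test at the shared `out:` label always took the success branch. The rework returns `dev` on success before the label and leaves `out:` strictly for error unwinding, which is the usual kernel goto-cleanup shape. An illustrative sketch of that idiom (function and helper names are made up):

struct foo *foo_create(void)
{
        struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

        if (!f)
                return NULL;
        if (foo_setup_a(f))             /* hypothetical setup steps */
                goto out_free;
        if (foo_setup_b(f))
                goto out_undo_a;

        return f;                       /* success never reaches the labels */

out_undo_a:
        foo_undo_a(f);
out_free:
        kfree(f);
        return NULL;
}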
@@ -4359,11 +4356,9 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) | |||
4359 | printk(KERN_ERR "Unable to allocate struct se_mem\n"); | 4356 | printk(KERN_ERR "Unable to allocate struct se_mem\n"); |
4360 | goto out; | 4357 | goto out; |
4361 | } | 4358 | } |
4362 | INIT_LIST_HEAD(&se_mem->se_list); | ||
4363 | se_mem->se_len = (length > dma_size) ? dma_size : length; | ||
4364 | 4359 | ||
4365 | /* #warning FIXME Allocate contiguous pages for struct se_mem elements */ | 4360 | /* #warning FIXME Allocate contiguous pages for struct se_mem elements */
4366 | se_mem->se_page = (struct page *) alloc_pages(GFP_KERNEL, 0); | 4361 | se_mem->se_page = alloc_pages(GFP_KERNEL, 0); |
4367 | if (!(se_mem->se_page)) { | 4362 | if (!(se_mem->se_page)) { |
4368 | printk(KERN_ERR "alloc_pages() failed\n"); | 4363 | printk(KERN_ERR "alloc_pages() failed\n"); |
4369 | goto out; | 4364 | goto out; |
@@ -4374,6 +4369,8 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) | |||
4374 | printk(KERN_ERR "kmap_atomic() failed\n"); | 4369 | printk(KERN_ERR "kmap_atomic() failed\n"); |
4375 | goto out; | 4370 | goto out; |
4376 | } | 4371 | } |
4372 | INIT_LIST_HEAD(&se_mem->se_list); | ||
4373 | se_mem->se_len = (length > dma_size) ? dma_size : length; | ||
4377 | memset(buf, 0, se_mem->se_len); | 4374 | memset(buf, 0, se_mem->se_len); |
4378 | kunmap_atomic(buf, KM_IRQ0); | 4375 | kunmap_atomic(buf, KM_IRQ0); |
4379 | 4376 | ||
@@ -4392,10 +4389,13 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) | |||
4392 | 4389 | ||
4393 | return 0; | 4390 | return 0; |
4394 | out: | 4391 | out: |
4392 | if (se_mem) | ||
4393 | __free_pages(se_mem->se_page, 0); | ||
4394 | kmem_cache_free(se_mem_cache, se_mem); | ||
4395 | return -1; | 4395 | return -1; |
4396 | } | 4396 | } |
4397 | 4397 | ||
4398 | extern u32 transport_calc_sg_num( | 4398 | u32 transport_calc_sg_num( |
4399 | struct se_task *task, | 4399 | struct se_task *task, |
4400 | struct se_mem *in_se_mem, | 4400 | struct se_mem *in_se_mem, |
4401 | u32 task_offset) | 4401 | u32 task_offset) |
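In transport_generic_get_mem(), the se_list/se_len initialization now happens only after both alloc_pages() and kmap_atomic() have succeeded, and the failure path frees the partially built element instead of leaking it. One caveat worth noting: when alloc_pages() itself is the step that failed, se_mem->se_page is still NULL by the time control reaches `out:`, so a more defensive variant of the cleanup would also test the page pointer and keep the kmem_cache_free() under the same NULL check, roughly:

out:
        if (se_mem) {
                if (se_mem->se_page)    /* alloc_pages() may be what failed */
                        __free_pages(se_mem->se_page, 0);
                kmem_cache_free(se_mem_cache, se_mem);
        }
        return -1;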
@@ -5834,31 +5834,26 @@ int transport_generic_do_tmr(struct se_cmd *cmd) | |||
5834 | int ret; | 5834 | int ret; |
5835 | 5835 | ||
5836 | switch (tmr->function) { | 5836 | switch (tmr->function) { |
5837 | case ABORT_TASK: | 5837 | case TMR_ABORT_TASK: |
5838 | ref_cmd = tmr->ref_cmd; | 5838 | ref_cmd = tmr->ref_cmd; |
5839 | tmr->response = TMR_FUNCTION_REJECTED; | 5839 | tmr->response = TMR_FUNCTION_REJECTED; |
5840 | break; | 5840 | break; |
5841 | case ABORT_TASK_SET: | 5841 | case TMR_ABORT_TASK_SET: |
5842 | case CLEAR_ACA: | 5842 | case TMR_CLEAR_ACA: |
5843 | case CLEAR_TASK_SET: | 5843 | case TMR_CLEAR_TASK_SET: |
5844 | tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; | 5844 | tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; |
5845 | break; | 5845 | break; |
5846 | case LUN_RESET: | 5846 | case TMR_LUN_RESET: |
5847 | ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); | 5847 | ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); |
5848 | tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE : | 5848 | tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE : |
5849 | TMR_FUNCTION_REJECTED; | 5849 | TMR_FUNCTION_REJECTED; |
5850 | break; | 5850 | break; |
5851 | #if 0 | 5851 | case TMR_TARGET_WARM_RESET: |
5852 | case TARGET_WARM_RESET: | ||
5853 | transport_generic_host_reset(dev->se_hba); | ||
5854 | tmr->response = TMR_FUNCTION_REJECTED; | 5852 | tmr->response = TMR_FUNCTION_REJECTED; |
5855 | break; | 5853 | break; |
5856 | case TARGET_COLD_RESET: | 5854 | case TMR_TARGET_COLD_RESET: |
5857 | transport_generic_host_reset(dev->se_hba); | ||
5858 | transport_generic_cold_reset(dev->se_hba); | ||
5859 | tmr->response = TMR_FUNCTION_REJECTED; | 5855 | tmr->response = TMR_FUNCTION_REJECTED; |
5860 | break; | 5856 | break; |
5861 | #endif | ||
5862 | default: | 5857 | default: |
5863 | printk(KERN_ERR "Unknown TMR function: 0x%02x.\n", | 5858 | printk(KERN_ERR "Unknown TMR function: 0x%02x.\n",
5864 | tmr->function); | 5859 | tmr->function); |
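The switch in transport_generic_do_tmr() now consumes the fabric-independent enum tcm_tmreq_table introduced in target_core_tmr.h further down, and the previously #if 0'd warm/cold reset cases are compiled in but simply rejected. Fabric modules are expected to translate their own wire-level task-management codes into these values before handing the TMR to the core; a purely illustrative translation helper (the wire codes below are invented for the example, not any fabric's real encoding):

#include <target/target_core_tmr.h>

static enum tcm_tmreq_table example_map_tmf(u8 wire_code)
{
        switch (wire_code) {
        case 0x01: return TMR_ABORT_TASK;
        case 0x02: return TMR_ABORT_TASK_SET;
        case 0x03: return TMR_CLEAR_ACA;
        case 0x04: return TMR_CLEAR_TASK_SET;
        case 0x05: return TMR_LUN_RESET;
        case 0x06: return TMR_TARGET_WARM_RESET;
        case 0x07: return TMR_TARGET_COLD_RESET;
        default:   return TMR_FABRIC_TMR;       /* leave it to the fabric */
        }
}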
diff --git a/include/scsi/libiscsi_tcp.h b/include/scsi/libiscsi_tcp.h index 741ae7ed439..e6b9fd2eea3 100644 --- a/include/scsi/libiscsi_tcp.h +++ b/include/scsi/libiscsi_tcp.h | |||
@@ -47,6 +47,7 @@ struct iscsi_segment { | |||
47 | struct scatterlist *sg; | 47 | struct scatterlist *sg; |
48 | void *sg_mapped; | 48 | void *sg_mapped; |
49 | unsigned int sg_offset; | 49 | unsigned int sg_offset; |
50 | bool atomic_mapped; | ||
50 | 51 | ||
51 | iscsi_segment_done_fn_t *done; | 52 | iscsi_segment_done_fn_t *done; |
52 | }; | 53 | }; |
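The new `atomic_mapped` flag in struct iscsi_segment records which kmap flavor was used when sg_mapped was set up, so the teardown path can call the matching unmap. A hedged sketch of the intended pairing (using the two-argument kunmap_atomic() of this kernel generation; the KM_SOFTIRQ0 slot is an assumption based on how libiscsi_tcp maps receive buffers):

/* hedged sketch: the unmap must mirror whichever mapping was chosen */
if (segment->sg_mapped) {
        if (segment->atomic_mapped)
                kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0);
        else
                kunmap(sg_page(segment->sg));
        segment->sg_mapped = NULL;
}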
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index f171c65dc5a..2d3ec509468 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h | |||
@@ -462,7 +462,7 @@ static inline int scsi_device_qas(struct scsi_device *sdev) | |||
462 | } | 462 | } |
463 | static inline int scsi_device_enclosure(struct scsi_device *sdev) | 463 | static inline int scsi_device_enclosure(struct scsi_device *sdev) |
464 | { | 464 | { |
465 | return sdev->inquiry[6] & (1<<6); | 465 | return sdev->inquiry ? (sdev->inquiry[6] & (1<<6)) : 1; |
466 | } | 466 | } |
467 | 467 | ||
468 | static inline int scsi_device_protection(struct scsi_device *sdev) | 468 | static inline int scsi_device_protection(struct scsi_device *sdev) |
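scsi_device_enclosure() tests the EncServ bit (standard INQUIRY data byte 6, bit 6); with the subenclosure work in this series it now also reports true when no INQUIRY data is cached, instead of dereferencing a NULL pointer. Written out long-hand (illustrative only; the macro and function names are invented):

/* EncServ = enclosure services, standard INQUIRY data byte 6, bit 6 (SPC) */
#define EXAMPLE_ENCSERV_BIT     (1 << 6)

static inline int example_has_enclosure_services(const unsigned char *inquiry)
{
        /* no cached INQUIRY data: be permissive, as the new helper is */
        return inquiry ? (inquiry[6] & EXAMPLE_ENCSERV_BIT) : 1;
}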
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 0828b6c8610..c15ed5026fb 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h | |||
@@ -9,7 +9,7 @@ | |||
9 | #include <net/sock.h> | 9 | #include <net/sock.h> |
10 | #include <net/tcp.h> | 10 | #include <net/tcp.h> |
11 | 11 | ||
12 | #define TARGET_CORE_MOD_VERSION "v4.0.0-rc6" | 12 | #define TARGET_CORE_MOD_VERSION "v4.0.0-rc7-ml" |
13 | #define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT)) | 13 | #define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT)) |
14 | 14 | ||
15 | /* Used by transport_generic_allocate_iovecs() */ | 15 | /* Used by transport_generic_allocate_iovecs() */ |
@@ -239,7 +239,7 @@ struct t10_alua_lu_gp { | |||
239 | } ____cacheline_aligned; | 239 | } ____cacheline_aligned; |
240 | 240 | ||
241 | struct t10_alua_lu_gp_member { | 241 | struct t10_alua_lu_gp_member { |
242 | int lu_gp_assoc:1; | 242 | bool lu_gp_assoc; |
243 | atomic_t lu_gp_mem_ref_cnt; | 243 | atomic_t lu_gp_mem_ref_cnt; |
244 | spinlock_t lu_gp_mem_lock; | 244 | spinlock_t lu_gp_mem_lock; |
245 | struct t10_alua_lu_gp *lu_gp; | 245 | struct t10_alua_lu_gp *lu_gp; |
@@ -271,7 +271,7 @@ struct t10_alua_tg_pt_gp { | |||
271 | } ____cacheline_aligned; | 271 | } ____cacheline_aligned; |
272 | 272 | ||
273 | struct t10_alua_tg_pt_gp_member { | 273 | struct t10_alua_tg_pt_gp_member { |
274 | int tg_pt_gp_assoc:1; | 274 | bool tg_pt_gp_assoc; |
275 | atomic_t tg_pt_gp_mem_ref_cnt; | 275 | atomic_t tg_pt_gp_mem_ref_cnt; |
276 | spinlock_t tg_pt_gp_mem_lock; | 276 | spinlock_t tg_pt_gp_mem_lock; |
277 | struct t10_alua_tg_pt_gp *tg_pt_gp; | 277 | struct t10_alua_tg_pt_gp *tg_pt_gp; |
@@ -336,7 +336,7 @@ struct t10_pr_registration { | |||
336 | int pr_res_type; | 336 | int pr_res_type; |
337 | int pr_res_scope; | 337 | int pr_res_scope; |
338 | /* Used for fabric initiator WWPNs using a ISID */ | 338 | /* Used for fabric initiator WWPNs using a ISID */ |
339 | int isid_present_at_reg:1; | 339 | bool isid_present_at_reg; |
340 | u32 pr_res_mapped_lun; | 340 | u32 pr_res_mapped_lun; |
341 | u32 pr_aptpl_target_lun; | 341 | u32 pr_aptpl_target_lun; |
342 | u32 pr_res_generation; | 342 | u32 pr_res_generation; |
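The `int x:1;` to `bool` conversions in this header (and in target_core_fabric_ops.h below) are more than cosmetic: a plain `int` bitfield is signed on the usual ABIs, so a one-bit field can only hold 0 and -1, and storing 1 silently reads back as -1. A small illustration of the trap the old declarations carried (the behaviour is implementation-defined, but this is what common compilers do):

#include <linux/kernel.h>

struct old_style {
        int flag:1;             /* signed one-bit field: values are 0 and -1 */
};

static void bitfield_demo(void)
{
        struct old_style o = { .flag = 1 };     /* typically stored as -1 */

        if (o.flag == 1)                        /* usually false */
                pr_info("not reached on most compilers\n");
        if (o.flag)                             /* truth tests still work, which is
                                                 * why the old code got away with it */
                pr_info("reached\n");
}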
@@ -418,7 +418,7 @@ struct se_transport_task { | |||
418 | unsigned long long t_task_lba; | 418 | unsigned long long t_task_lba; |
419 | int t_tasks_failed; | 419 | int t_tasks_failed; |
420 | int t_tasks_fua; | 420 | int t_tasks_fua; |
421 | int t_tasks_bidi:1; | 421 | bool t_tasks_bidi; |
422 | u32 t_task_cdbs; | 422 | u32 t_task_cdbs; |
423 | u32 t_tasks_check; | 423 | u32 t_tasks_check; |
424 | u32 t_tasks_no; | 424 | u32 t_tasks_no; |
@@ -470,7 +470,7 @@ struct se_task { | |||
470 | u8 task_flags; | 470 | u8 task_flags; |
471 | int task_error_status; | 471 | int task_error_status; |
472 | int task_state_flags; | 472 | int task_state_flags; |
473 | int task_padded_sg:1; | 473 | bool task_padded_sg; |
474 | unsigned long long task_lba; | 474 | unsigned long long task_lba; |
475 | u32 task_no; | 475 | u32 task_no; |
476 | u32 task_sectors; | 476 | u32 task_sectors; |
@@ -494,8 +494,8 @@ struct se_task { | |||
494 | struct list_head t_state_list; | 494 | struct list_head t_state_list; |
495 | } ____cacheline_aligned; | 495 | } ____cacheline_aligned; |
496 | 496 | ||
497 | #define TASK_CMD(task) ((struct se_cmd *)task->task_se_cmd) | 497 | #define TASK_CMD(task) ((task)->task_se_cmd) |
498 | #define TASK_DEV(task) ((struct se_device *)task->se_dev) | 498 | #define TASK_DEV(task) ((task)->se_dev) |
499 | 499 | ||
500 | struct se_cmd { | 500 | struct se_cmd { |
501 | /* SAM response code being sent to initiator */ | 501 | /* SAM response code being sent to initiator */ |
@@ -551,8 +551,8 @@ struct se_cmd { | |||
551 | void (*transport_complete_callback)(struct se_cmd *); | 551 | void (*transport_complete_callback)(struct se_cmd *); |
552 | } ____cacheline_aligned; | 552 | } ____cacheline_aligned; |
553 | 553 | ||
554 | #define T_TASK(cmd) ((struct se_transport_task *)(cmd->t_task)) | 554 | #define T_TASK(cmd) ((cmd)->t_task) |
555 | #define CMD_TFO(cmd) ((struct target_core_fabric_ops *)cmd->se_tfo) | 555 | #define CMD_TFO(cmd) ((cmd)->se_tfo) |
556 | 556 | ||
557 | struct se_tmr_req { | 557 | struct se_tmr_req { |
558 | /* Task Management function to be performed */ | 558 | /* Task Management function to be performed */
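The accessor-macro changes in this header (T_TASK(), CMD_TFO(), TASK_CMD(), TASK_DEV(), SE_DEV(), TPG_TFO() and friends) all drop casts that were only ever needed while the underlying members were loosely typed. With properly typed members, each macro can be a plain parenthesized member access and the compiler type-checks every user; the old form would happily cast whatever it was given. Illustrative before/after:

/* before: the cast defeats type checking -- any object with a t_task member
 * would be accepted and silently reinterpreted */
#define OLD_T_TASK(cmd) ((struct se_transport_task *)(cmd->t_task))

/* after: a plain member access; passing the wrong object type is now a
 * compile-time error instead of a runtime surprise */
#define NEW_T_TASK(cmd) ((cmd)->t_task)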
@@ -583,7 +583,7 @@ struct se_ua { | |||
583 | struct se_node_acl { | 583 | struct se_node_acl { |
584 | char initiatorname[TRANSPORT_IQN_LEN]; | 584 | char initiatorname[TRANSPORT_IQN_LEN]; |
585 | /* Used to signal demo mode created ACL, disabled by default */ | 585 | /* Used to signal demo mode created ACL, disabled by default */ |
586 | int dynamic_node_acl:1; | 586 | bool dynamic_node_acl; |
587 | u32 queue_depth; | 587 | u32 queue_depth; |
588 | u32 acl_index; | 588 | u32 acl_index; |
589 | u64 num_cmds; | 589 | u64 num_cmds; |
@@ -601,7 +601,8 @@ struct se_node_acl { | |||
601 | struct config_group acl_attrib_group; | 601 | struct config_group acl_attrib_group; |
602 | struct config_group acl_auth_group; | 602 | struct config_group acl_auth_group; |
603 | struct config_group acl_param_group; | 603 | struct config_group acl_param_group; |
604 | struct config_group *acl_default_groups[4]; | 604 | struct config_group acl_fabric_stat_group; |
605 | struct config_group *acl_default_groups[5]; | ||
605 | struct list_head acl_list; | 606 | struct list_head acl_list; |
606 | struct list_head acl_sess_list; | 607 | struct list_head acl_sess_list; |
607 | } ____cacheline_aligned; | 608 | } ____cacheline_aligned; |
@@ -615,13 +616,19 @@ struct se_session { | |||
615 | struct list_head sess_acl_list; | 616 | struct list_head sess_acl_list; |
616 | } ____cacheline_aligned; | 617 | } ____cacheline_aligned; |
617 | 618 | ||
618 | #define SE_SESS(cmd) ((struct se_session *)(cmd)->se_sess) | 619 | #define SE_SESS(cmd) ((cmd)->se_sess) |
619 | #define SE_NODE_ACL(sess) ((struct se_node_acl *)(sess)->se_node_acl) | 620 | #define SE_NODE_ACL(sess) ((sess)->se_node_acl) |
620 | 621 | ||
621 | struct se_device; | 622 | struct se_device; |
622 | struct se_transform_info; | 623 | struct se_transform_info; |
623 | struct scatterlist; | 624 | struct scatterlist; |
624 | 625 | ||
626 | struct se_ml_stat_grps { | ||
627 | struct config_group stat_group; | ||
628 | struct config_group scsi_auth_intr_group; | ||
629 | struct config_group scsi_att_intr_port_group; | ||
630 | }; | ||
631 | |||
625 | struct se_lun_acl { | 632 | struct se_lun_acl { |
626 | char initiatorname[TRANSPORT_IQN_LEN]; | 633 | char initiatorname[TRANSPORT_IQN_LEN]; |
627 | u32 mapped_lun; | 634 | u32 mapped_lun; |
@@ -629,10 +636,13 @@ struct se_lun_acl { | |||
629 | struct se_lun *se_lun; | 636 | struct se_lun *se_lun; |
630 | struct list_head lacl_list; | 637 | struct list_head lacl_list; |
631 | struct config_group se_lun_group; | 638 | struct config_group se_lun_group; |
639 | struct se_ml_stat_grps ml_stat_grps; | ||
632 | } ____cacheline_aligned; | 640 | } ____cacheline_aligned; |
633 | 641 | ||
642 | #define ML_STAT_GRPS(lacl) (&(lacl)->ml_stat_grps) | ||
643 | |||
634 | struct se_dev_entry { | 644 | struct se_dev_entry { |
635 | int def_pr_registered:1; | 645 | bool def_pr_registered; |
636 | /* See transport_lunflags_table */ | 646 | /* See transport_lunflags_table */ |
637 | u32 lun_flags; | 647 | u32 lun_flags; |
638 | u32 deve_cmds; | 648 | u32 deve_cmds; |
@@ -693,6 +703,13 @@ struct se_dev_attrib { | |||
693 | struct config_group da_group; | 703 | struct config_group da_group; |
694 | } ____cacheline_aligned; | 704 | } ____cacheline_aligned; |
695 | 705 | ||
706 | struct se_dev_stat_grps { | ||
707 | struct config_group stat_group; | ||
708 | struct config_group scsi_dev_group; | ||
709 | struct config_group scsi_tgt_dev_group; | ||
710 | struct config_group scsi_lu_group; | ||
711 | }; | ||
712 | |||
696 | struct se_subsystem_dev { | 713 | struct se_subsystem_dev { |
697 | /* Used for struct se_subsystem_dev-->se_dev_alias, must be less than PAGE_SIZE */ | 714 | /* Used for struct se_subsystem_dev-->se_dev_alias, must be less than PAGE_SIZE */ |
698 | #define SE_DEV_ALIAS_LEN 512 | 715 | #define SE_DEV_ALIAS_LEN 512 |
@@ -716,11 +733,14 @@ struct se_subsystem_dev { | |||
716 | struct config_group se_dev_group; | 733 | struct config_group se_dev_group; |
717 | /* For T10 Reservations */ | 734 | /* For T10 Reservations */ |
718 | struct config_group se_dev_pr_group; | 735 | struct config_group se_dev_pr_group; |
736 | /* For target_core_stat.c groups */ | ||
737 | struct se_dev_stat_grps dev_stat_grps; | ||
719 | } ____cacheline_aligned; | 738 | } ____cacheline_aligned; |
720 | 739 | ||
721 | #define T10_ALUA(su_dev) (&(su_dev)->t10_alua) | 740 | #define T10_ALUA(su_dev) (&(su_dev)->t10_alua) |
722 | #define T10_RES(su_dev) (&(su_dev)->t10_reservation) | 741 | #define T10_RES(su_dev) (&(su_dev)->t10_reservation) |
723 | #define T10_PR_OPS(su_dev) (&(su_dev)->t10_reservation.pr_ops) | 742 | #define T10_PR_OPS(su_dev) (&(su_dev)->t10_reservation.pr_ops) |
743 | #define DEV_STAT_GRP(dev) (&(dev)->dev_stat_grps) | ||
724 | 744 | ||
725 | struct se_device { | 745 | struct se_device { |
726 | /* Set to 1 if thread is NOT sleeping on thread_sem */ | 746 | /* Set to 1 if thread is NOT sleeping on thread_sem */ |
@@ -803,8 +823,8 @@ struct se_device { | |||
803 | struct list_head g_se_dev_list; | 823 | struct list_head g_se_dev_list; |
804 | } ____cacheline_aligned; | 824 | } ____cacheline_aligned; |
805 | 825 | ||
806 | #define SE_DEV(cmd) ((struct se_device *)(cmd)->se_lun->lun_se_dev) | 826 | #define SE_DEV(cmd) ((cmd)->se_lun->lun_se_dev) |
807 | #define SU_DEV(dev) ((struct se_subsystem_dev *)(dev)->se_sub_dev) | 827 | #define SU_DEV(dev) ((dev)->se_sub_dev) |
808 | #define DEV_ATTRIB(dev) (&(dev)->se_sub_dev->se_dev_attrib) | 828 | #define DEV_ATTRIB(dev) (&(dev)->se_sub_dev->se_dev_attrib) |
809 | #define DEV_T10_WWN(dev) (&(dev)->se_sub_dev->t10_wwn) | 829 | #define DEV_T10_WWN(dev) (&(dev)->se_sub_dev->t10_wwn) |
810 | 830 | ||
@@ -832,7 +852,14 @@ struct se_hba { | |||
832 | struct se_subsystem_api *transport; | 852 | struct se_subsystem_api *transport; |
833 | } ____cacheline_aligned; | 853 | } ____cacheline_aligned; |
834 | 854 | ||
835 | #define SE_HBA(d) ((struct se_hba *)(d)->se_hba) | 855 | #define SE_HBA(dev) ((dev)->se_hba) |
856 | |||
857 | struct se_port_stat_grps { | ||
858 | struct config_group stat_group; | ||
859 | struct config_group scsi_port_group; | ||
860 | struct config_group scsi_tgt_port_group; | ||
861 | struct config_group scsi_transport_group; | ||
862 | }; | ||
836 | 863 | ||
837 | struct se_lun { | 864 | struct se_lun { |
838 | /* See transport_lun_status_table */ | 865 | /* See transport_lun_status_table */ |
@@ -848,11 +875,13 @@ struct se_lun { | |||
848 | struct list_head lun_cmd_list; | 875 | struct list_head lun_cmd_list; |
849 | struct list_head lun_acl_list; | 876 | struct list_head lun_acl_list; |
850 | struct se_device *lun_se_dev; | 877 | struct se_device *lun_se_dev; |
878 | struct se_port *lun_sep; | ||
851 | struct config_group lun_group; | 879 | struct config_group lun_group; |
852 | struct se_port *lun_sep; | 880 | struct se_port_stat_grps port_stat_grps; |
853 | } ____cacheline_aligned; | 881 | } ____cacheline_aligned; |
854 | 882 | ||
855 | #define SE_LUN(c) ((struct se_lun *)(c)->se_lun) | 883 | #define SE_LUN(cmd) ((cmd)->se_lun) |
884 | #define PORT_STAT_GRP(lun) (&(lun)->port_stat_grps) | ||
856 | 885 | ||
857 | struct scsi_port_stats { | 886 | struct scsi_port_stats { |
858 | u64 cmd_pdus; | 887 | u64 cmd_pdus; |
@@ -919,11 +948,13 @@ struct se_portal_group { | |||
919 | struct config_group tpg_param_group; | 948 | struct config_group tpg_param_group; |
920 | } ____cacheline_aligned; | 949 | } ____cacheline_aligned; |
921 | 950 | ||
922 | #define TPG_TFO(se_tpg) ((struct target_core_fabric_ops *)(se_tpg)->se_tpg_tfo) | 951 | #define TPG_TFO(se_tpg) ((se_tpg)->se_tpg_tfo) |
923 | 952 | ||
924 | struct se_wwn { | 953 | struct se_wwn { |
925 | struct target_fabric_configfs *wwn_tf; | 954 | struct target_fabric_configfs *wwn_tf; |
926 | struct config_group wwn_group; | 955 | struct config_group wwn_group; |
956 | struct config_group *wwn_default_groups[2]; | ||
957 | struct config_group fabric_stat_group; | ||
927 | } ____cacheline_aligned; | 958 | } ____cacheline_aligned; |
928 | 959 | ||
929 | struct se_global { | 960 | struct se_global { |
diff --git a/include/target/target_core_configfs.h b/include/target/target_core_configfs.h index 40e6e740527..612509592ff 100644 --- a/include/target/target_core_configfs.h +++ b/include/target/target_core_configfs.h | |||
@@ -14,10 +14,12 @@ extern void target_fabric_configfs_deregister(struct target_fabric_configfs *); | |||
14 | struct target_fabric_configfs_template { | 14 | struct target_fabric_configfs_template { |
15 | struct config_item_type tfc_discovery_cit; | 15 | struct config_item_type tfc_discovery_cit; |
16 | struct config_item_type tfc_wwn_cit; | 16 | struct config_item_type tfc_wwn_cit; |
17 | struct config_item_type tfc_wwn_fabric_stats_cit; | ||
17 | struct config_item_type tfc_tpg_cit; | 18 | struct config_item_type tfc_tpg_cit; |
18 | struct config_item_type tfc_tpg_base_cit; | 19 | struct config_item_type tfc_tpg_base_cit; |
19 | struct config_item_type tfc_tpg_lun_cit; | 20 | struct config_item_type tfc_tpg_lun_cit; |
20 | struct config_item_type tfc_tpg_port_cit; | 21 | struct config_item_type tfc_tpg_port_cit; |
22 | struct config_item_type tfc_tpg_port_stat_cit; | ||
21 | struct config_item_type tfc_tpg_np_cit; | 23 | struct config_item_type tfc_tpg_np_cit; |
22 | struct config_item_type tfc_tpg_np_base_cit; | 24 | struct config_item_type tfc_tpg_np_base_cit; |
23 | struct config_item_type tfc_tpg_attrib_cit; | 25 | struct config_item_type tfc_tpg_attrib_cit; |
@@ -27,7 +29,9 @@ struct target_fabric_configfs_template { | |||
27 | struct config_item_type tfc_tpg_nacl_attrib_cit; | 29 | struct config_item_type tfc_tpg_nacl_attrib_cit; |
28 | struct config_item_type tfc_tpg_nacl_auth_cit; | 30 | struct config_item_type tfc_tpg_nacl_auth_cit; |
29 | struct config_item_type tfc_tpg_nacl_param_cit; | 31 | struct config_item_type tfc_tpg_nacl_param_cit; |
32 | struct config_item_type tfc_tpg_nacl_stat_cit; | ||
30 | struct config_item_type tfc_tpg_mappedlun_cit; | 33 | struct config_item_type tfc_tpg_mappedlun_cit; |
34 | struct config_item_type tfc_tpg_mappedlun_stat_cit; | ||
31 | }; | 35 | }; |
32 | 36 | ||
33 | struct target_fabric_configfs { | 37 | struct target_fabric_configfs { |
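target_fabric_configfs_template grows config_item_type slots for the new statistics directories (the per-WWN fabric statistics group added to se_wwn above, plus the per-port, per-NodeACL and per-MappedLUN groups). Presumably these slots are initialized by the generic code in target_core_fabric_configfs.c rather than by individual fabric modules, and then referenced when the corresponding "statistics" groups are created. A hedged sketch of such a use (the tf_cit_tmpl member name and the "statistics" group name are assumptions):

/* hedged sketch: roughly how the MappedLUN statistics group would be created
 * against the new template slot */
config_group_init_type_name(&ML_STAT_GRPS(lacl)->stat_group, "statistics",
                &tf->tf_cit_tmpl.tfc_tpg_mappedlun_stat_cit);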
diff --git a/include/target/target_core_fabric_ops.h b/include/target/target_core_fabric_ops.h index f3ac12b019c..5eb8b1ae59d 100644 --- a/include/target/target_core_fabric_ops.h +++ b/include/target/target_core_fabric_ops.h | |||
@@ -8,7 +8,7 @@ struct target_core_fabric_ops { | |||
8 | * for scatterlist chaining using transport_do_task_sg_link(), | 8 | * for scatterlist chaining using transport_do_task_sg_link(), |
9 | * disabled by default | 9 | * disabled by default |
10 | */ | 10 | */ |
11 | int task_sg_chaining:1; | 11 | bool task_sg_chaining; |
12 | char *(*get_fabric_name)(void); | 12 | char *(*get_fabric_name)(void); |
13 | u8 (*get_fabric_proto_ident)(struct se_portal_group *); | 13 | u8 (*get_fabric_proto_ident)(struct se_portal_group *); |
14 | char *(*tpg_get_wwn)(struct se_portal_group *); | 14 | char *(*tpg_get_wwn)(struct se_portal_group *); |
diff --git a/include/target/target_core_tmr.h b/include/target/target_core_tmr.h index 6c8248bc2c6..bd559680747 100644 --- a/include/target/target_core_tmr.h +++ b/include/target/target_core_tmr.h | |||
@@ -1,37 +1,29 @@ | |||
1 | #ifndef TARGET_CORE_TMR_H | 1 | #ifndef TARGET_CORE_TMR_H |
2 | #define TARGET_CORE_TMR_H | 2 | #define TARGET_CORE_TMR_H |
3 | 3 | ||
4 | /* task management function values */ | 4 | /* fabric independent task management function values */ |
5 | #ifdef ABORT_TASK | 5 | enum tcm_tmreq_table { |
6 | #undef ABORT_TASK | 6 | TMR_ABORT_TASK = 1, |
7 | #endif /* ABORT_TASK */ | 7 | TMR_ABORT_TASK_SET = 2, |
8 | #define ABORT_TASK 1 | 8 | TMR_CLEAR_ACA = 3, |
9 | #ifdef ABORT_TASK_SET | 9 | TMR_CLEAR_TASK_SET = 4, |
10 | #undef ABORT_TASK_SET | 10 | TMR_LUN_RESET = 5, |
11 | #endif /* ABORT_TASK_SET */ | 11 | TMR_TARGET_WARM_RESET = 6, |
12 | #define ABORT_TASK_SET 2 | 12 | TMR_TARGET_COLD_RESET = 7, |
13 | #ifdef CLEAR_ACA | 13 | TMR_FABRIC_TMR = 255, |
14 | #undef CLEAR_ACA | 14 | }; |
15 | #endif /* CLEAR_ACA */ | ||
16 | #define CLEAR_ACA 3 | ||
17 | #ifdef CLEAR_TASK_SET | ||
18 | #undef CLEAR_TASK_SET | ||
19 | #endif /* CLEAR_TASK_SET */ | ||
20 | #define CLEAR_TASK_SET 4 | ||
21 | #define LUN_RESET 5 | ||
22 | #define TARGET_WARM_RESET 6 | ||
23 | #define TARGET_COLD_RESET 7 | ||
24 | #define TASK_REASSIGN 8 | ||
25 | 15 | ||
26 | /* task management response values */ | 16 | /* fabric independent task management response values */ |
27 | #define TMR_FUNCTION_COMPLETE 0 | 17 | enum tcm_tmrsp_table { |
28 | #define TMR_TASK_DOES_NOT_EXIST 1 | 18 | TMR_FUNCTION_COMPLETE = 0, |
29 | #define TMR_LUN_DOES_NOT_EXIST 2 | 19 | TMR_TASK_DOES_NOT_EXIST = 1, |
30 | #define TMR_TASK_STILL_ALLEGIANT 3 | 20 | TMR_LUN_DOES_NOT_EXIST = 2, |
31 | #define TMR_TASK_FAILOVER_NOT_SUPPORTED 4 | 21 | TMR_TASK_STILL_ALLEGIANT = 3, |
32 | #define TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED 5 | 22 | TMR_TASK_FAILOVER_NOT_SUPPORTED = 4, |
33 | #define TMR_FUNCTION_AUTHORIZATION_FAILED 6 | 23 | TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED = 5, |
34 | #define TMR_FUNCTION_REJECTED 255 | 24 | TMR_FUNCTION_AUTHORIZATION_FAILED = 6, |
25 | TMR_FUNCTION_REJECTED = 255, | ||
26 | }; | ||
35 | 27 | ||
36 | extern struct kmem_cache *se_tmr_req_cache; | 28 | extern struct kmem_cache *se_tmr_req_cache; |
37 | 29 | ||
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index 2e8ec51f061..59aa464f6ee 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h | |||
@@ -109,6 +109,8 @@ | |||
109 | struct se_mem; | 109 | struct se_mem; |
110 | struct se_subsystem_api; | 110 | struct se_subsystem_api; |
111 | 111 | ||
112 | extern struct kmem_cache *se_mem_cache; | ||
113 | |||
112 | extern int init_se_global(void); | 114 | extern int init_se_global(void); |
113 | extern void release_se_global(void); | 115 | extern void release_se_global(void); |
114 | extern void init_scsi_index_table(void); | 116 | extern void init_scsi_index_table(void); |
@@ -190,6 +192,8 @@ extern void transport_generic_process_write(struct se_cmd *); | |||
190 | extern int transport_generic_do_tmr(struct se_cmd *); | 192 | extern int transport_generic_do_tmr(struct se_cmd *); |
191 | /* From target_core_alua.c */ | 193 | /* From target_core_alua.c */ |
192 | extern int core_alua_check_nonop_delay(struct se_cmd *); | 194 | extern int core_alua_check_nonop_delay(struct se_cmd *); |
195 | /* From target_core_cdb.c */ | ||
196 | extern int transport_emulate_control_cdb(struct se_task *); | ||
193 | 197 | ||
194 | /* | 198 | /* |
195 | * Each se_transport_task_t can have N number of possible struct se_task's | 199 | * Each se_transport_task_t can have N number of possible struct se_task's |
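The last two hunks move shared declarations into the header: se_mem_cache gains an extern so other target_core files can allocate and free struct se_mem against the same slab, and transport_emulate_control_cdb() gets a real prototype here in place of the file-local extern that the target_core_transport.c hunk above deleted. Keeping prototypes in one header lets the compiler verify each definition against its declaration. A hedged sketch of a consumer elsewhere in the core (the helper is hypothetical):

#include <target/target_core_base.h>
#include <target/target_core_transport.h>

static struct se_mem *example_alloc_se_mem(void)
{
        struct se_mem *se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);

        if (!se_mem)
                return NULL;
        INIT_LIST_HEAD(&se_mem->se_list);
        return se_mem;
}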