author     Linus Torvalds <torvalds@linux-foundation.org>  2011-02-16 12:07:00 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-02-16 12:07:00 -0500
commit     a2640111d5edb3f4e6dd6089c0dbddc7590110b4 (patch)
tree       2c7ddab035a844b77e03567035f25d8cfaadad6a
parent     0d6e82e7e0b6a192ec9c875d9ed08ad9e43c7c2f (diff)
parent     1621dbbdb90f42b7bd14aea1c44ee49b558d1b1a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6:
[SCSI] qla2xxx: Return DID_NO_CONNECT when FC device is lost.
[SCSI] mptfusion: Bump version 03.04.18
[SCSI] mptfusion: Fix Incorrect return value in mptscsih_dev_reset
[SCSI] mptfusion: mptctl_release is required in mptctl.c
[SCSI] target: fix use after free detected by SLUB poison
[SCSI] target: Remove procfs based target_core_mib.c code
[SCSI] target: Fix SCF_SCSI_CONTROL_SG_IO_CDB breakage
[SCSI] target: Fix top-level configfs_subsystem default_group shutdown breakage
[SCSI] target: fixed missing lock drop in error path
[SCSI] target: Fix demo-mode MappedLUN shutdown UA/PR breakage
[SCSI] target/iblock: Fix failed bd claim NULL pointer dereference
[SCSI] target: iblock/pscsi claim checking for NULL instead of IS_ERR
[SCSI] scsi_debug: Fix 32-bit overflow in do_device_access causing memory corruption
[SCSI] qla2xxx: Change from irq to irqsave with host_lock
[SCSI] qla2xxx: Fix race that could hang kthread_stop()
-rw-r--r--  drivers/message/fusion/mptbase.h | 4
-rw-r--r--  drivers/message/fusion/mptctl.c | 8
-rw-r--r--  drivers/message/fusion/mptscsih.c | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 10
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 10
-rw-r--r--  drivers/scsi/scsi_debug.c | 2
-rw-r--r--  drivers/target/Makefile | 3
-rw-r--r--  drivers/target/target_core_configfs.c | 155
-rw-r--r--  drivers/target/target_core_device.c | 13
-rw-r--r--  drivers/target/target_core_fabric_configfs.c | 92
-rw-r--r--  drivers/target/target_core_iblock.c | 8
-rw-r--r--  drivers/target/target_core_mib.c | 1078
-rw-r--r--  drivers/target/target_core_mib.h | 28
-rw-r--r--  drivers/target/target_core_pscsi.c | 4
-rw-r--r--  drivers/target/target_core_tpg.c | 29
-rw-r--r--  drivers/target/target_core_transport.c | 48
-rw-r--r--  include/target/target_core_base.h | 28
-rw-r--r--  include/target/target_core_transport.h | 2
19 files changed, 278 insertions, 1256 deletions
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index f71f22948477..1735c84ff757 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
 #define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR
 #endif
 
-#define MPT_LINUX_VERSION_COMMON "3.04.17"
-#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.17"
+#define MPT_LINUX_VERSION_COMMON "3.04.18"
+#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.18"
 #define WHAT_MAGIC_STRING "@" "(" "#" ")"
 
 #define show_mptmod_ver(s,ver) \
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index a3856ed90aef..e8deb8ed0499 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -597,6 +597,13 @@ mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
 }
 
 static int
+mptctl_release(struct inode *inode, struct file *filep)
+{
+	fasync_helper(-1, filep, 0, &async_queue);
+	return 0;
+}
+
+static int
 mptctl_fasync(int fd, struct file *filep, int mode)
 {
 	MPT_ADAPTER *ioc;
@@ -2815,6 +2822,7 @@ static const struct file_operations mptctl_fops = {
 	.llseek = no_llseek,
 	.fasync = mptctl_fasync,
 	.unlocked_ioctl = mptctl_ioctl,
+	.release = mptctl_release,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = compat_mpctl_ioctl,
 #endif
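For context on the mptctl change above: a driver that registers open files on an fasync notification queue is expected to remove them again when the file is closed, otherwise kill_fasync() can be called against a stale struct file. A minimal sketch of that pattern for a hypothetical character driver (the example_* names are illustrative, not taken from mptfusion):

	#include <linux/fs.h>

	static struct fasync_struct *example_async_queue;

	static int example_fasync(int fd, struct file *filp, int mode)
	{
		/* add or remove filp on the async queue per SIGIO requests */
		return fasync_helper(fd, filp, mode, &example_async_queue);
	}

	static int example_release(struct inode *inode, struct file *filp)
	{
		/* fd == -1, on == 0 drops filp from the async queue on close */
		fasync_helper(-1, filp, 0, &example_async_queue);
		return 0;
	}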
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 59b8f53d1ece..0d9b82a44540 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1873,8 +1873,9 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
 	}
 
 out:
-	printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n",
-	    ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), SCpnt);
+	printk(MYIOC_s_INFO_FMT "task abort: %s (rv=%04x) (sc=%p) (sn=%ld)\n",
+	    ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), retval,
+	    SCpnt, SCpnt->serial_number);
 
 	return retval;
 }
@@ -1911,7 +1912,7 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
 
 	vdevice = SCpnt->device->hostdata;
 	if (!vdevice || !vdevice->vtarget) {
-		retval = SUCCESS;
+		retval = 0;
 		goto out;
 	}
 
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 44578b56ad0a..d3e58d763b43 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1561,6 +1561,7 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
 {
 	struct Scsi_Host *host = rport_to_shost(rport);
 	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+	unsigned long flags;
 
 	if (!fcport)
 		return;
@@ -1573,10 +1574,10 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
 	 * Transport has effectively 'deleted' the rport, clear
 	 * all local references.
 	 */
-	spin_lock_irq(host->host_lock);
+	spin_lock_irqsave(host->host_lock, flags);
 	fcport->rport = fcport->drport = NULL;
 	*((fc_port_t **)rport->dd_data) = NULL;
-	spin_unlock_irq(host->host_lock);
+	spin_unlock_irqrestore(host->host_lock, flags);
 
 	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
 		return;
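The host_lock conversion above (and in the qla_init.c/qla_os.c hunks that follow) is the usual pattern for code that can be reached from contexts where interrupts may already be disabled: spin_lock_irqsave() records the caller's interrupt state and spin_unlock_irqrestore() puts it back, whereas spin_lock_irq()/spin_unlock_irq() would unconditionally re-enable interrupts on unlock. A minimal sketch of the pattern, with a hypothetical lock and function name for illustration only:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */

	static void example_update(void)
	{
		unsigned long flags;

		/*
		 * Safe whether or not the caller already disabled IRQs:
		 * 'flags' preserves the previous interrupt state.
		 */
		spin_lock_irqsave(&example_lock, flags);
		/* ... touch data shared with interrupt context ... */
		spin_unlock_irqrestore(&example_lock, flags);
	}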
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f948e1a73aec..d9479c3fe5f8 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2505,11 +2505,12 @@ qla2x00_rport_del(void *data)
 {
 	fc_port_t *fcport = data;
 	struct fc_rport *rport;
+	unsigned long flags;
 
-	spin_lock_irq(fcport->vha->host->host_lock);
+	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
 	rport = fcport->drport ? fcport->drport: fcport->rport;
 	fcport->drport = NULL;
-	spin_unlock_irq(fcport->vha->host->host_lock);
+	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
 	if (rport)
 		fc_remote_port_delete(rport);
 }
@@ -2879,6 +2880,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
 	struct fc_rport_identifiers rport_ids;
 	struct fc_rport *rport;
 	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags;
 
 	qla2x00_rport_del(fcport);
 
@@ -2893,9 +2895,9 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
 		    "Unable to allocate fc remote port!\n");
 		return;
 	}
-	spin_lock_irq(fcport->vha->host->host_lock);
+	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
 	*((fc_port_t **)rport->dd_data) = fcport;
-	spin_unlock_irq(fcport->vha->host->host_lock);
+	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
 
 	rport->supported_classes = fcport->supported_classes;
 
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index c194c23ca1fb..f27724d76cf6 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -562,7 +562,6 @@ qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)
 	}
 	if (atomic_read(&fcport->state) != FCS_ONLINE) {
 		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
-		    atomic_read(&fcport->state) == FCS_DEVICE_LOST ||
 		    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
 			cmd->result = DID_NO_CONNECT << 16;
 			goto qc24_fail_command;
@@ -2513,6 +2512,7 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
 {
 	struct fc_rport *rport;
 	scsi_qla_host_t *base_vha;
+	unsigned long flags;
 
 	if (!fcport->rport)
 		return;
@@ -2520,9 +2520,9 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
 	rport = fcport->rport;
 	if (defer) {
 		base_vha = pci_get_drvdata(vha->hw->pdev);
-		spin_lock_irq(vha->host->host_lock);
+		spin_lock_irqsave(vha->host->host_lock, flags);
 		fcport->drport = rport;
-		spin_unlock_irq(vha->host->host_lock);
+		spin_unlock_irqrestore(vha->host->host_lock, flags);
 		set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
 		qla2xxx_wake_dpc(base_vha);
 	} else
@@ -3282,10 +3282,10 @@ qla2x00_do_dpc(void *data)
 
 	set_user_nice(current, -20);
 
+	set_current_state(TASK_INTERRUPTIBLE);
 	while (!kthread_should_stop()) {
 		DEBUG3(printk("qla2x00: DPC handler sleeping\n"));
 
-		set_current_state(TASK_INTERRUPTIBLE);
 		schedule();
 		__set_current_state(TASK_RUNNING);
 
@@ -3454,7 +3454,9 @@ qla2x00_do_dpc(void *data)
 		qla2x00_do_dpc_all_vps(base_vha);
 
 		ha->dpc_active = 0;
+		set_current_state(TASK_INTERRUPTIBLE);
 	} /* End of while(1) */
+	__set_current_state(TASK_RUNNING);
 
 	DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no));
 
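The DPC hunks above fix the kthread_stop() hang by setting TASK_INTERRUPTIBLE before the kthread_should_stop() check rather than after it, so a wake-up issued by kthread_stop() between the check and schedule() cannot be lost. A minimal sketch of that ordering in a hypothetical worker thread (illustrative only, not the qla2xxx code):

	#include <linux/kthread.h>
	#include <linux/sched.h>

	static int example_thread(void *data)
	{
		set_current_state(TASK_INTERRUPTIBLE);
		while (!kthread_should_stop()) {
			schedule();
			__set_current_state(TASK_RUNNING);

			/* ... do one round of work ... */

			/*
			 * Go back to TASK_INTERRUPTIBLE before the next
			 * kthread_should_stop() check; otherwise a wake-up
			 * from kthread_stop() can slip in between the check
			 * and schedule() and the stopping task hangs.
			 */
			set_current_state(TASK_INTERRUPTIBLE);
		}
		__set_current_state(TASK_RUNNING);
		return 0;
	}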
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 7b310934efed..a6b2d72022fc 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1671,7 +1671,7 @@ static int do_device_access(struct scsi_cmnd *scmd,
 			unsigned long long lba, unsigned int num, int write)
 {
 	int ret;
-	unsigned int block, rest = 0;
+	unsigned long long block, rest = 0;
 	int (*func)(struct scsi_cmnd *, unsigned char *, int);
 
 	func = write ? fetch_to_dev_buffer : fill_from_dev_buffer;
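The one-line scsi_debug change widens block/rest to 64 bits because offsets derived from a large LBA silently wrap when stored in a 32-bit variable, and the wrapped value then indexes the wrong part of the backing store. A minimal userspace illustration of the truncation (assumed example, not driver code):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long lba = 0x100000000ULL;	/* 2^32 blocks */
		unsigned int block32 = lba;			/* truncates to 0 */
		unsigned long long block64 = lba;		/* value preserved */

		printf("32-bit: %u, 64-bit: %llu\n", block32, block64);
		return 0;
	}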
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 5cfd70819f08..973bb190ef57 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -13,8 +13,7 @@ target_core_mod-y := target_core_configfs.o \
 	target_core_transport.o \
 	target_core_cdb.o \
 	target_core_ua.o \
-	target_core_rd.o \
-	target_core_mib.o
+	target_core_rd.o
 
 obj-$(CONFIG_TARGET_CORE) += target_core_mod.o
 
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 2764510798b0..caf8dc18ee0a 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -37,7 +37,6 @@
 #include <linux/parser.h>
 #include <linux/syscalls.h>
 #include <linux/configfs.h>
-#include <linux/proc_fs.h>
 
 #include <target/target_core_base.h>
 #include <target/target_core_device.h>
@@ -1971,13 +1970,35 @@ static void target_core_dev_release(struct config_item *item)
 {
 	struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
 			struct se_subsystem_dev, se_dev_group);
-	struct config_group *dev_cg;
-
-	if (!(se_dev))
-		return;
+	struct se_hba *hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
+	struct se_subsystem_api *t = hba->transport;
+	struct config_group *dev_cg = &se_dev->se_dev_group;
 
-	dev_cg = &se_dev->se_dev_group;
 	kfree(dev_cg->default_groups);
+	/*
+	 * This pointer will set when the storage is enabled with:
+	 *`echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
+	 */
+	if (se_dev->se_dev_ptr) {
+		printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_"
+			"virtual_device() for se_dev_ptr: %p\n",
+			se_dev->se_dev_ptr);
+
+		se_free_virtual_device(se_dev->se_dev_ptr, hba);
+	} else {
+		/*
+		 * Release struct se_subsystem_dev->se_dev_su_ptr..
+		 */
+		printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_"
+			"device() for se_dev_su_ptr: %p\n",
+			se_dev->se_dev_su_ptr);
+
+		t->free_device(se_dev->se_dev_su_ptr);
+	}
+
+	printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem"
+		"_dev_t: %p\n", se_dev);
+	kfree(se_dev);
 }
 
 static ssize_t target_core_dev_show(struct config_item *item,
@@ -2140,7 +2161,16 @@ static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
 	NULL,
 };
 
+static void target_core_alua_lu_gp_release(struct config_item *item)
+{
+	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
+			struct t10_alua_lu_gp, lu_gp_group);
+
+	core_alua_free_lu_gp(lu_gp);
+}
+
 static struct configfs_item_operations target_core_alua_lu_gp_ops = {
+	.release = target_core_alua_lu_gp_release,
 	.show_attribute = target_core_alua_lu_gp_attr_show,
 	.store_attribute = target_core_alua_lu_gp_attr_store,
 };
@@ -2191,9 +2221,11 @@ static void target_core_alua_drop_lu_gp(
 	printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit"
 		" Group: core/alua/lu_gps/%s, ID: %hu\n",
 		config_item_name(item), lu_gp->lu_gp_id);
-
+	/*
+	 * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release()
+	 * -> target_core_alua_lu_gp_release()
+	 */
 	config_item_put(item);
-	core_alua_free_lu_gp(lu_gp);
 }
 
 static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
@@ -2549,7 +2581,16 @@ static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
 	NULL,
 };
 
+static void target_core_alua_tg_pt_gp_release(struct config_item *item)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
+			struct t10_alua_tg_pt_gp, tg_pt_gp_group);
+
+	core_alua_free_tg_pt_gp(tg_pt_gp);
+}
+
 static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
+	.release = target_core_alua_tg_pt_gp_release,
 	.show_attribute = target_core_alua_tg_pt_gp_attr_show,
 	.store_attribute = target_core_alua_tg_pt_gp_attr_store,
 };
@@ -2602,9 +2643,11 @@ static void target_core_alua_drop_tg_pt_gp(
 	printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port"
 		" Group: alua/tg_pt_gps/%s, ID: %hu\n",
 		config_item_name(item), tg_pt_gp->tg_pt_gp_id);
-
+	/*
+	 * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
+	 * -> target_core_alua_tg_pt_gp_release().
+	 */
 	config_item_put(item);
-	core_alua_free_tg_pt_gp(tg_pt_gp);
 }
 
 static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
@@ -2771,13 +2814,11 @@ static void target_core_drop_subdev(
 	struct se_subsystem_api *t;
 	struct config_item *df_item;
 	struct config_group *dev_cg, *tg_pt_gp_cg;
-	int i, ret;
+	int i;
 
 	hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
 
-	if (mutex_lock_interruptible(&hba->hba_access_mutex))
-		goto out;
-
+	mutex_lock(&hba->hba_access_mutex);
 	t = hba->transport;
 
 	spin_lock(&se_global->g_device_lock);
@@ -2791,7 +2832,10 @@
 		config_item_put(df_item);
 	}
 	kfree(tg_pt_gp_cg->default_groups);
-	core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp);
+	/*
+	 * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp
+	 * directly from target_core_alua_tg_pt_gp_release().
+	 */
 	T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
 
 	dev_cg = &se_dev->se_dev_group;
@@ -2800,38 +2844,12 @@ static void target_core_drop_subdev(
 		dev_cg->default_groups[i] = NULL;
 		config_item_put(df_item);
 	}
-
-	config_item_put(item);
 	/*
-	 * This pointer will set when the storage is enabled with:
-	 * `echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
+	 * The releasing of se_dev and associated se_dev->se_dev_ptr is done
+	 * from target_core_dev_item_ops->release() ->target_core_dev_release().
 	 */
-	if (se_dev->se_dev_ptr) {
-		printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_"
-			"virtual_device() for se_dev_ptr: %p\n",
-			se_dev->se_dev_ptr);
-
-		ret = se_free_virtual_device(se_dev->se_dev_ptr, hba);
-		if (ret < 0)
-			goto hba_out;
-	} else {
-		/*
-		 * Release struct se_subsystem_dev->se_dev_su_ptr..
-		 */
-		printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_"
-			"device() for se_dev_su_ptr: %p\n",
-			se_dev->se_dev_su_ptr);
-
-		t->free_device(se_dev->se_dev_su_ptr);
-	}
-
-	printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem"
-		"_dev_t: %p\n", se_dev);
-
-hba_out:
+	config_item_put(item);
 	mutex_unlock(&hba->hba_access_mutex);
-out:
-	kfree(se_dev);
 }
 
 static struct configfs_group_operations target_core_hba_group_ops = {
@@ -2914,6 +2932,13 @@ SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR);
 
 CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group);
 
+static void target_core_hba_release(struct config_item *item)
+{
+	struct se_hba *hba = container_of(to_config_group(item),
+			struct se_hba, hba_group);
+	core_delete_hba(hba);
+}
+
 static struct configfs_attribute *target_core_hba_attrs[] = {
 	&target_core_hba_hba_info.attr,
 	&target_core_hba_hba_mode.attr,
@@ -2921,6 +2946,7 @@ static struct configfs_attribute *target_core_hba_attrs[] = {
 };
 
 static struct configfs_item_operations target_core_hba_item_ops = {
+	.release = target_core_hba_release,
 	.show_attribute = target_core_hba_attr_show,
 	.store_attribute = target_core_hba_attr_store,
 };
@@ -2997,10 +3023,11 @@ static void target_core_call_delhbafromtarget(
 	struct config_group *group,
 	struct config_item *item)
 {
-	struct se_hba *hba = item_to_hba(item);
-
+	/*
+	 * core_delete_hba() is called from target_core_hba_item_ops->release()
+	 * -> target_core_hba_release()
+	 */
 	config_item_put(item);
-	core_delete_hba(hba);
 }
 
 static struct configfs_group_operations target_core_group_ops = {
@@ -3022,7 +3049,6 @@ static int target_core_init_configfs(void)
 	struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
 	struct config_group *lu_gp_cg = NULL;
 	struct configfs_subsystem *subsys;
-	struct proc_dir_entry *scsi_target_proc = NULL;
 	struct t10_alua_lu_gp *lu_gp;
 	int ret;
 
@@ -3128,21 +3154,10 @@ static int target_core_init_configfs(void)
 	if (core_dev_setup_virtual_lun0() < 0)
 		goto out;
 
-	scsi_target_proc = proc_mkdir("scsi_target", 0);
-	if (!(scsi_target_proc)) {
-		printk(KERN_ERR "proc_mkdir(scsi_target, 0) failed\n");
-		goto out;
-	}
-	ret = init_scsi_target_mib();
-	if (ret < 0)
-		goto out;
-
 	return 0;
 
 out:
 	configfs_unregister_subsystem(subsys);
-	if (scsi_target_proc)
-		remove_proc_entry("scsi_target", 0);
 	core_dev_release_virtual_lun0();
 	rd_module_exit();
 out_global:
@@ -3178,8 +3193,7 @@ static void target_core_exit_configfs(void)
 		config_item_put(item);
 	}
 	kfree(lu_gp_cg->default_groups);
-	core_alua_free_lu_gp(se_global->default_lu_gp);
-	se_global->default_lu_gp = NULL;
+	lu_gp_cg->default_groups = NULL;
 
 	alua_cg = &se_global->alua_group;
 	for (i = 0; alua_cg->default_groups[i]; i++) {
@@ -3188,6 +3202,7 @@ static void target_core_exit_configfs(void)
 		config_item_put(item);
 	}
 	kfree(alua_cg->default_groups);
+	alua_cg->default_groups = NULL;
 
 	hba_cg = &se_global->target_core_hbagroup;
 	for (i = 0; hba_cg->default_groups[i]; i++) {
@@ -3196,20 +3211,20 @@ static void target_core_exit_configfs(void)
 		config_item_put(item);
 	}
 	kfree(hba_cg->default_groups);
-
-	for (i = 0; subsys->su_group.default_groups[i]; i++) {
-		item = &subsys->su_group.default_groups[i]->cg_item;
-		subsys->su_group.default_groups[i] = NULL;
-		config_item_put(item);
-	}
-	kfree(subsys->su_group.default_groups);
-
+	hba_cg->default_groups = NULL;
+	/*
+	 * We expect subsys->su_group.default_groups to be released
+	 * by configfs subsystem provider logic..
+	 */
 	configfs_unregister_subsystem(subsys);
+	kfree(subsys->su_group.default_groups);
+
+	core_alua_free_lu_gp(se_global->default_lu_gp);
+	se_global->default_lu_gp = NULL;
+
 	printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric"
 		" Infrastructure\n");
 
-	remove_scsi_target_mib();
-	remove_proc_entry("scsi_target", 0);
 	core_dev_release_virtual_lun0();
 	rd_module_exit();
 	release_se_global();
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 317ce58d426d..5da051a07fa3 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -373,11 +373,11 @@ int core_update_device_list_for_node(
 		/*
 		 * deve->se_lun_acl will be NULL for demo-mode created LUNs
 		 * that have not been explictly concerted to MappedLUNs ->
-		 * struct se_lun_acl.
+		 * struct se_lun_acl, but we remove deve->alua_port_list from
+		 * port->sep_alua_list. This also means that active UAs and
+		 * NodeACL context specific PR metadata for demo-mode
+		 * MappedLUN *deve will be released below..
 		 */
-		if (!(deve->se_lun_acl))
-			return 0;
-
 		spin_lock_bh(&port->sep_alua_lock);
 		list_del(&deve->alua_port_list);
 		spin_unlock_bh(&port->sep_alua_lock);
@@ -395,12 +395,14 @@ int core_update_device_list_for_node(
 			printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
 				" already set for demo mode -> explict"
 				" LUN ACL transition\n");
+			spin_unlock_irq(&nacl->device_list_lock);
 			return -1;
 		}
 		if (deve->se_lun != lun) {
 			printk(KERN_ERR "struct se_dev_entry->se_lun does"
 				" match passed struct se_lun for demo mode"
 				" -> explict LUN ACL transition\n");
+			spin_unlock_irq(&nacl->device_list_lock);
 			return -1;
 		}
 		deve->se_lun_acl = lun_acl;
@@ -865,9 +867,6 @@ static void se_dev_stop(struct se_device *dev)
 		}
 	}
 	spin_unlock(&hba->device_lock);
-
-	while (atomic_read(&hba->dev_mib_access_count))
-		cpu_relax();
 }
 
 int se_dev_check_online(struct se_device *dev)
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 32b148d7e261..b65d1c8e7740 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -214,12 +214,22 @@ TCM_MAPPEDLUN_ATTR(write_protect, S_IRUGO | S_IWUSR);
 
 CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group);
 
+static void target_fabric_mappedlun_release(struct config_item *item)
+{
+	struct se_lun_acl *lacl = container_of(to_config_group(item),
+			struct se_lun_acl, se_lun_group);
+	struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg;
+
+	core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
+}
+
 static struct configfs_attribute *target_fabric_mappedlun_attrs[] = {
 	&target_fabric_mappedlun_write_protect.attr,
 	NULL,
 };
 
 static struct configfs_item_operations target_fabric_mappedlun_item_ops = {
+	.release = target_fabric_mappedlun_release,
 	.show_attribute = target_fabric_mappedlun_attr_show,
 	.store_attribute = target_fabric_mappedlun_attr_store,
 	.allow_link = target_fabric_mappedlun_link,
@@ -337,15 +347,21 @@ static void target_fabric_drop_mappedlun(
 	struct config_group *group,
 	struct config_item *item)
 {
-	struct se_lun_acl *lacl = container_of(to_config_group(item),
-			struct se_lun_acl, se_lun_group);
-	struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg;
-
 	config_item_put(item);
-	core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
+}
+
+static void target_fabric_nacl_base_release(struct config_item *item)
+{
+	struct se_node_acl *se_nacl = container_of(to_config_group(item),
+			struct se_node_acl, acl_group);
+	struct se_portal_group *se_tpg = se_nacl->se_tpg;
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+	tf->tf_ops.fabric_drop_nodeacl(se_nacl);
 }
 
 static struct configfs_item_operations target_fabric_nacl_base_item_ops = {
+	.release = target_fabric_nacl_base_release,
 	.show_attribute = target_fabric_nacl_base_attr_show,
 	.store_attribute = target_fabric_nacl_base_attr_store,
 };
@@ -404,9 +420,6 @@ static void target_fabric_drop_nodeacl(
 	struct config_group *group,
 	struct config_item *item)
 {
-	struct se_portal_group *se_tpg = container_of(group,
-			struct se_portal_group, tpg_acl_group);
-	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
 	struct se_node_acl *se_nacl = container_of(to_config_group(item),
 			struct se_node_acl, acl_group);
 	struct config_item *df_item;
@@ -419,9 +432,10 @@ static void target_fabric_drop_nodeacl(
 		nacl_cg->default_groups[i] = NULL;
 		config_item_put(df_item);
 	}
-
+	/*
+	 * struct se_node_acl free is done in target_fabric_nacl_base_release()
+	 */
 	config_item_put(item);
-	tf->tf_ops.fabric_drop_nodeacl(se_nacl);
 }
 
 static struct configfs_group_operations target_fabric_nacl_group_ops = {
@@ -437,7 +451,18 @@ TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL);
 
 CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group);
 
+static void target_fabric_np_base_release(struct config_item *item)
+{
+	struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
+			struct se_tpg_np, tpg_np_group);
+	struct se_portal_group *se_tpg = se_tpg_np->tpg_np_parent;
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+	tf->tf_ops.fabric_drop_np(se_tpg_np);
+}
+
 static struct configfs_item_operations target_fabric_np_base_item_ops = {
+	.release = target_fabric_np_base_release,
 	.show_attribute = target_fabric_np_base_attr_show,
 	.store_attribute = target_fabric_np_base_attr_store,
 };
@@ -466,6 +491,7 @@ static struct config_group *target_fabric_make_np(
 	if (!(se_tpg_np) || IS_ERR(se_tpg_np))
 		return ERR_PTR(-EINVAL);
 
+	se_tpg_np->tpg_np_parent = se_tpg;
 	config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
 			&TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit);
 
@@ -476,14 +502,10 @@ static void target_fabric_drop_np(
 	struct config_group *group,
 	struct config_item *item)
 {
-	struct se_portal_group *se_tpg = container_of(group,
-			struct se_portal_group, tpg_np_group);
-	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
-	struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
-			struct se_tpg_np, tpg_np_group);
-
+	/*
+	 * struct se_tpg_np is released via target_fabric_np_base_release()
+	 */
 	config_item_put(item);
-	tf->tf_ops.fabric_drop_np(se_tpg_np);
 }
 
 static struct configfs_group_operations target_fabric_np_group_ops = {
@@ -814,7 +836,18 @@ TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL);
 */
 CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group);
 
+static void target_fabric_tpg_release(struct config_item *item)
+{
+	struct se_portal_group *se_tpg = container_of(to_config_group(item),
+			struct se_portal_group, tpg_group);
+	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
+	struct target_fabric_configfs *tf = wwn->wwn_tf;
+
+	tf->tf_ops.fabric_drop_tpg(se_tpg);
+}
+
 static struct configfs_item_operations target_fabric_tpg_base_item_ops = {
+	.release = target_fabric_tpg_release,
 	.show_attribute = target_fabric_tpg_attr_show,
 	.store_attribute = target_fabric_tpg_attr_store,
 };
@@ -872,8 +905,6 @@ static void target_fabric_drop_tpg(
 	struct config_group *group,
 	struct config_item *item)
 {
-	struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group);
-	struct target_fabric_configfs *tf = wwn->wwn_tf;
 	struct se_portal_group *se_tpg = container_of(to_config_group(item),
 			struct se_portal_group, tpg_group);
 	struct config_group *tpg_cg = &se_tpg->tpg_group;
@@ -890,15 +921,28 @@ static void target_fabric_drop_tpg(
 	}
 
 	config_item_put(item);
-	tf->tf_ops.fabric_drop_tpg(se_tpg);
 }
 
+static void target_fabric_release_wwn(struct config_item *item)
+{
+	struct se_wwn *wwn = container_of(to_config_group(item),
+			struct se_wwn, wwn_group);
+	struct target_fabric_configfs *tf = wwn->wwn_tf;
+
+	tf->tf_ops.fabric_drop_wwn(wwn);
+}
+
+static struct configfs_item_operations target_fabric_tpg_item_ops = {
+	.release = target_fabric_release_wwn,
+};
+
 static struct configfs_group_operations target_fabric_tpg_group_ops = {
 	.make_group = target_fabric_make_tpg,
 	.drop_item = target_fabric_drop_tpg,
 };
 
-TF_CIT_SETUP(tpg, NULL, &target_fabric_tpg_group_ops, NULL);
+TF_CIT_SETUP(tpg, &target_fabric_tpg_item_ops, &target_fabric_tpg_group_ops,
+		NULL);
 
 /* End of tfc_tpg_cit */
 
@@ -932,13 +976,7 @@ static void target_fabric_drop_wwn(
 	struct config_group *group,
 	struct config_item *item)
 {
-	struct target_fabric_configfs *tf = container_of(group,
-			struct target_fabric_configfs, tf_group);
-	struct se_wwn *wwn = container_of(to_config_group(item),
-			struct se_wwn, wwn_group);
-
 	config_item_put(item);
-	tf->tf_ops.fabric_drop_wwn(wwn);
 }
 
 static struct configfs_group_operations target_fabric_wwn_group_ops = {
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index c6e0d757e76e..67f0c09983c8 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -154,7 +154,7 @@ static struct se_device *iblock_create_virtdevice(
 
 	bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
 			FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
-	if (!(bd))
+	if (IS_ERR(bd))
 		goto failed;
 	/*
 	 * Setup the local scope queue_limits from struct request_queue->limits
@@ -220,8 +220,10 @@ static void iblock_free_device(void *p)
 {
 	struct iblock_dev *ib_dev = p;
 
-	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
-	bioset_free(ib_dev->ibd_bio_set);
+	if (ib_dev->ibd_bd != NULL)
+		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+	if (ib_dev->ibd_bio_set != NULL)
+		bioset_free(ib_dev->ibd_bio_set);
 	kfree(ib_dev);
 }
 
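The iblock fix above relies on the kernel's ERR_PTR convention: blkdev_get_by_path() reports failure by encoding an errno into the returned pointer rather than returning NULL, so a !bd check never fires and the error pointer gets dereferenced later. A minimal sketch of how such a return value is meant to be checked (example_open and its parameters are hypothetical, for illustration only):

	#include <linux/err.h>
	#include <linux/fs.h>

	static int example_open(const char *path, void *holder,
				struct block_device **out)
	{
		struct block_device *bd;

		bd = blkdev_get_by_path(path,
				FMODE_WRITE|FMODE_READ|FMODE_EXCL, holder);
		if (IS_ERR(bd))
			return PTR_ERR(bd);	/* a NULL test would miss this */

		*out = bd;
		return 0;
	}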
diff --git a/drivers/target/target_core_mib.c b/drivers/target/target_core_mib.c
deleted file mode 100644
index d5a48aa0d2d1..000000000000
--- a/drivers/target/target_core_mib.c
+++ /dev/null
@@ -1,1078 +0,0 @@
-/*******************************************************************************
- * Filename:  target_core_mib.c
- *
- * Copyright (c) 2006-2007 SBE, Inc.  All Rights Reserved.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
- *
- * Nicholas A. Bellinger <nab@linux-iscsi.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- ******************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/string.h>
-#include <linux/version.h>
-#include <generated/utsrelease.h>
-#include <linux/utsname.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/blkdev.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
-
-#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
-#include <target/target_core_configfs.h>
-
-#include "target_core_hba.h"
-#include "target_core_mib.h"
-
-/* SCSI mib table index */
-static struct scsi_index_table scsi_index_table;
-
-#ifndef INITIAL_JIFFIES
-#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
-#endif
-
-/* SCSI Instance Table */
-#define SCSI_INST_SW_INDEX 1
-#define SCSI_TRANSPORT_INDEX 1
-
-#define NONE "None"
-#define ISPRINT(a) ((a >= ' ') && (a <= '~'))
-
-static inline int list_is_first(const struct list_head *list,
-		const struct list_head *head)
-{
-	return list->prev == head;
-}
-
-static void *locate_hba_start(
-	struct seq_file *seq,
-	loff_t *pos)
-{
-	spin_lock(&se_global->g_device_lock);
-	return seq_list_start(&se_global->g_se_dev_list, *pos);
-}
-
-static void *locate_hba_next(
-	struct seq_file *seq,
-	void *v,
-	loff_t *pos)
-{
-	return seq_list_next(v, &se_global->g_se_dev_list, pos);
-}
-
-static void locate_hba_stop(struct seq_file *seq, void *v)
-{
-	spin_unlock(&se_global->g_device_lock);
-}
-
-/****************************************************************************
- * SCSI MIB Tables
- ****************************************************************************/
-
-/*
- * SCSI Instance Table
- */
-static void *scsi_inst_seq_start(
-	struct seq_file *seq,
-	loff_t *pos)
-{
-	spin_lock(&se_global->hba_lock);
-	return seq_list_start(&se_global->g_hba_list, *pos);
-}
-
-static void *scsi_inst_seq_next(
-	struct seq_file *seq,
-	void *v,
-	loff_t *pos)
-{
-	return seq_list_next(v, &se_global->g_hba_list, pos);
-}
-
-static void scsi_inst_seq_stop(struct seq_file *seq, void *v)
-{
-	spin_unlock(&se_global->hba_lock);
-}
-
-static int scsi_inst_seq_show(struct seq_file *seq, void *v)
-{
-	struct se_hba *hba = list_entry(v, struct se_hba, hba_list);
-
-	if (list_is_first(&hba->hba_list, &se_global->g_hba_list))
-		seq_puts(seq, "inst sw_indx\n");
-
-	seq_printf(seq, "%u %u\n", hba->hba_index, SCSI_INST_SW_INDEX);
-	seq_printf(seq, "plugin: %s version: %s\n",
-		hba->transport->name, TARGET_CORE_VERSION);
-
-	return 0;
-}
-
-static const struct seq_operations scsi_inst_seq_ops = {
-	.start = scsi_inst_seq_start,
-	.next = scsi_inst_seq_next,
-	.stop = scsi_inst_seq_stop,
-	.show = scsi_inst_seq_show
-};
-
-static int scsi_inst_seq_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &scsi_inst_seq_ops);
-}
-
-static const struct file_operations scsi_inst_seq_fops = {
-	.owner = THIS_MODULE,
-	.open = scsi_inst_seq_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = seq_release,
-};
-
-/*
- * SCSI Device Table
- */
-static void *scsi_dev_seq_start(struct seq_file *seq, loff_t *pos)
-{
-	return locate_hba_start(seq, pos);
-}
-
-static void *scsi_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-	return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_dev_seq_stop(struct seq_file *seq, void *v)
-{
-	locate_hba_stop(seq, v);
-}
-
-static int scsi_dev_seq_show(struct seq_file *seq, void *v)
-{
-	struct se_hba *hba;
-	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
-			g_se_dev_list);
-	struct se_device *dev = se_dev->se_dev_ptr;
-	char str[28];
-	int k;
-
-	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
-		seq_puts(seq, "inst indx role ports\n");
-
-	if (!(dev))
-		return 0;
-
-	hba = dev->se_hba;
-	if (!(hba)) {
-		/* Log error ? */
-		return 0;
-	}
-
-	seq_printf(seq, "%u %u %s %u\n", hba->hba_index,
-		dev->dev_index, "Target", dev->dev_port_count);
-
-	memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
-
-	/* vendor */
-	for (k = 0; k < 8; k++)
-		str[k] = ISPRINT(DEV_T10_WWN(dev)->vendor[k]) ?
-			DEV_T10_WWN(dev)->vendor[k] : 0x20;
-	str[k] = 0x20;
-
-	/* model */
-	for (k = 0; k < 16; k++)
-		str[k+9] = ISPRINT(DEV_T10_WWN(dev)->model[k]) ?
-			DEV_T10_WWN(dev)->model[k] : 0x20;
-	str[k + 9] = 0;
-
-	seq_printf(seq, "dev_alias: %s\n", str);
-
-	return 0;
-}
-
-static const struct seq_operations scsi_dev_seq_ops = {
-	.start = scsi_dev_seq_start,
-	.next = scsi_dev_seq_next,
-	.stop = scsi_dev_seq_stop,
-	.show = scsi_dev_seq_show
-};
-
-static int scsi_dev_seq_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &scsi_dev_seq_ops);
-}
-
-static const struct file_operations scsi_dev_seq_fops = {
-	.owner = THIS_MODULE,
-	.open = scsi_dev_seq_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = seq_release,
-};
-
-/*
- * SCSI Port Table
- */
-static void *scsi_port_seq_start(struct seq_file *seq, loff_t *pos)
-{
-	return locate_hba_start(seq, pos);
-}
-
-static void *scsi_port_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-	return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_port_seq_stop(struct seq_file *seq, void *v)
-{
-	locate_hba_stop(seq, v);
-}
-
-static int scsi_port_seq_show(struct seq_file *seq, void *v)
-{
-	struct se_hba *hba;
-	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
-			g_se_dev_list);
-	struct se_device *dev = se_dev->se_dev_ptr;
-	struct se_port *sep, *sep_tmp;
-
-	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
-		seq_puts(seq, "inst device indx role busy_count\n");
-
-	if (!(dev))
-		return 0;
-
-	hba = dev->se_hba;
-	if (!(hba)) {
-		/* Log error ? */
-		return 0;
-	}
-
-	/* FIXME: scsiPortBusyStatuses count */
-	spin_lock(&dev->se_port_lock);
-	list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
-		seq_printf(seq, "%u %u %u %s%u %u\n", hba->hba_index,
-			dev->dev_index, sep->sep_index, "Device",
-			dev->dev_index, 0);
-	}
-	spin_unlock(&dev->se_port_lock);
-
-	return 0;
-}
-
-static const struct seq_operations scsi_port_seq_ops = {
-	.start = scsi_port_seq_start,
-	.next = scsi_port_seq_next,
-	.stop = scsi_port_seq_stop,
-	.show = scsi_port_seq_show
-};
-
-static int scsi_port_seq_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &scsi_port_seq_ops);
-}
-
-static const struct file_operations scsi_port_seq_fops = {
-	.owner = THIS_MODULE,
-	.open = scsi_port_seq_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = seq_release,
-};
-
-/*
- * SCSI Transport Table
- */
-static void *scsi_transport_seq_start(struct seq_file *seq, loff_t *pos)
-{
-	return locate_hba_start(seq, pos);
-}
-
-static void *scsi_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-	return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_transport_seq_stop(struct seq_file *seq, void *v)
-{
-	locate_hba_stop(seq, v);
-}
-
-static int scsi_transport_seq_show(struct seq_file *seq, void *v)
-{
-	struct se_hba *hba;
-	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
-			g_se_dev_list);
-	struct se_device *dev = se_dev->se_dev_ptr;
-	struct se_port *se, *se_tmp;
-	struct se_portal_group *tpg;
-	struct t10_wwn *wwn;
-	char buf[64];
-
-	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
-		seq_puts(seq, "inst device indx dev_name\n");
-
-	if (!(dev))
-		return 0;
-
-	hba = dev->se_hba;
-	if (!(hba)) {
-		/* Log error ? */
-		return 0;
-	}
-
-	wwn = DEV_T10_WWN(dev);
-
-	spin_lock(&dev->se_port_lock);
-	list_for_each_entry_safe(se, se_tmp, &dev->dev_sep_list, sep_list) {
-		tpg = se->sep_tpg;
-		sprintf(buf, "scsiTransport%s",
-			TPG_TFO(tpg)->get_fabric_name());
-
-		seq_printf(seq, "%u %s %u %s+%s\n",
-			hba->hba_index, /* scsiTransportIndex */
-			buf, /* scsiTransportType */
-			(TPG_TFO(tpg)->tpg_get_inst_index != NULL) ?
-			TPG_TFO(tpg)->tpg_get_inst_index(tpg) :
-			0,
-			TPG_TFO(tpg)->tpg_get_wwn(tpg),
-			(strlen(wwn->unit_serial)) ?
-			/* scsiTransportDevName */
-			wwn->unit_serial : wwn->vendor);
-	}
-	spin_unlock(&dev->se_port_lock);
-
-	return 0;
-}
-
-static const struct seq_operations scsi_transport_seq_ops = {
-	.start = scsi_transport_seq_start,
-	.next = scsi_transport_seq_next,
-	.stop = scsi_transport_seq_stop,
-	.show = scsi_transport_seq_show
-};
-
-static int scsi_transport_seq_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &scsi_transport_seq_ops);
-}
-
-static const struct file_operations scsi_transport_seq_fops = {
-	.owner = THIS_MODULE,
-	.open = scsi_transport_seq_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = seq_release,
-};
-
-/*
- * SCSI Target Device Table
- */
-static void *scsi_tgt_dev_seq_start(struct seq_file *seq, loff_t *pos)
-{
-	return locate_hba_start(seq, pos);
-}
-
-static void *scsi_tgt_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-	return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_tgt_dev_seq_stop(struct seq_file *seq, void *v)
-{
-	locate_hba_stop(seq, v);
-}
-
-
-#define LU_COUNT 1  /* for now */
-static int scsi_tgt_dev_seq_show(struct seq_file *seq, void *v)
-{
-	struct se_hba *hba;
-	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
-			g_se_dev_list);
-	struct se_device *dev = se_dev->se_dev_ptr;
-	int non_accessible_lus = 0;
-	char status[16];
-
-	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
-		seq_puts(seq, "inst indx num_LUs status non_access_LUs"
420 | " resets\n"); | ||
421 | |||
422 | if (!(dev)) | ||
423 | return 0; | ||
424 | |||
425 | hba = dev->se_hba; | ||
426 | if (!(hba)) { | ||
427 | /* Log error ? */ | ||
428 | return 0; | ||
429 | } | ||
430 | |||
431 | switch (dev->dev_status) { | ||
432 | case TRANSPORT_DEVICE_ACTIVATED: | ||
433 | strcpy(status, "activated"); | ||
434 | break; | ||
435 | case TRANSPORT_DEVICE_DEACTIVATED: | ||
436 | strcpy(status, "deactivated"); | ||
437 | non_accessible_lus = 1; | ||
438 | break; | ||
439 | case TRANSPORT_DEVICE_SHUTDOWN: | ||
440 | strcpy(status, "shutdown"); | ||
441 | non_accessible_lus = 1; | ||
442 | break; | ||
443 | case TRANSPORT_DEVICE_OFFLINE_ACTIVATED: | ||
444 | case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED: | ||
445 | strcpy(status, "offline"); | ||
446 | non_accessible_lus = 1; | ||
447 | break; | ||
448 | default: | ||
449 | sprintf(status, "unknown(%d)", dev->dev_status); | ||
450 | non_accessible_lus = 1; | ||
451 | } | ||
452 | |||
453 | seq_printf(seq, "%u %u %u %s %u %u\n", | ||
454 | hba->hba_index, dev->dev_index, LU_COUNT, | ||
455 | status, non_accessible_lus, dev->num_resets); | ||
456 | |||
457 | return 0; | ||
458 | } | ||
459 | |||
460 | static const struct seq_operations scsi_tgt_dev_seq_ops = { | ||
461 | .start = scsi_tgt_dev_seq_start, | ||
462 | .next = scsi_tgt_dev_seq_next, | ||
463 | .stop = scsi_tgt_dev_seq_stop, | ||
464 | .show = scsi_tgt_dev_seq_show | ||
465 | }; | ||
466 | |||
467 | static int scsi_tgt_dev_seq_open(struct inode *inode, struct file *file) | ||
468 | { | ||
469 | return seq_open(file, &scsi_tgt_dev_seq_ops); | ||
470 | } | ||
471 | |||
472 | static const struct file_operations scsi_tgt_dev_seq_fops = { | ||
473 | .owner = THIS_MODULE, | ||
474 | .open = scsi_tgt_dev_seq_open, | ||
475 | .read = seq_read, | ||
476 | .llseek = seq_lseek, | ||
477 | .release = seq_release, | ||
478 | }; | ||
479 | |||
480 | /* | ||
481 | * SCSI Target Port Table | ||
482 | */ | ||
483 | static void *scsi_tgt_port_seq_start(struct seq_file *seq, loff_t *pos) | ||
484 | { | ||
485 | return locate_hba_start(seq, pos); | ||
486 | } | ||
487 | |||
488 | static void *scsi_tgt_port_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
489 | { | ||
490 | return locate_hba_next(seq, v, pos); | ||
491 | } | ||
492 | |||
493 | static void scsi_tgt_port_seq_stop(struct seq_file *seq, void *v) | ||
494 | { | ||
495 | locate_hba_stop(seq, v); | ||
496 | } | ||
497 | |||
498 | static int scsi_tgt_port_seq_show(struct seq_file *seq, void *v) | ||
499 | { | ||
500 | struct se_hba *hba; | ||
501 | struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, | ||
502 | g_se_dev_list); | ||
503 | struct se_device *dev = se_dev->se_dev_ptr; | ||
504 | struct se_port *sep, *sep_tmp; | ||
505 | struct se_portal_group *tpg; | ||
506 | u32 rx_mbytes, tx_mbytes; | ||
507 | unsigned long long num_cmds; | ||
508 | char buf[64]; | ||
509 | |||
510 | if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) | ||
511 | seq_puts(seq, "inst device indx name port_index in_cmds" | ||
512 | " write_mbytes read_mbytes hs_in_cmds\n"); | ||
513 | |||
514 | if (!(dev)) | ||
515 | return 0; | ||
516 | |||
517 | hba = dev->se_hba; | ||
518 | if (!(hba)) { | ||
519 | /* Log error ? */ | ||
520 | return 0; | ||
521 | } | ||
522 | |||
523 | spin_lock(&dev->se_port_lock); | ||
524 | list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) { | ||
525 | tpg = sep->sep_tpg; | ||
526 | sprintf(buf, "%sPort#", | ||
527 | TPG_TFO(tpg)->get_fabric_name()); | ||
528 | |||
529 | seq_printf(seq, "%u %u %u %s%d %s%s%d ", | ||
530 | hba->hba_index, | ||
531 | dev->dev_index, | ||
532 | sep->sep_index, | ||
533 | buf, sep->sep_index, | ||
534 | TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+", | ||
535 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | ||
536 | |||
537 | spin_lock(&sep->sep_lun->lun_sep_lock); | ||
538 | num_cmds = sep->sep_stats.cmd_pdus; | ||
539 | rx_mbytes = (sep->sep_stats.rx_data_octets >> 20); | ||
540 | tx_mbytes = (sep->sep_stats.tx_data_octets >> 20); | ||
541 | spin_unlock(&sep->sep_lun->lun_sep_lock); | ||
542 | |||
543 | seq_printf(seq, "%llu %u %u %u\n", num_cmds, | ||
544 | rx_mbytes, tx_mbytes, 0); | ||
545 | } | ||
546 | spin_unlock(&dev->se_port_lock); | ||
547 | |||
548 | return 0; | ||
549 | } | ||
550 | |||
551 | static const struct seq_operations scsi_tgt_port_seq_ops = { | ||
552 | .start = scsi_tgt_port_seq_start, | ||
553 | .next = scsi_tgt_port_seq_next, | ||
554 | .stop = scsi_tgt_port_seq_stop, | ||
555 | .show = scsi_tgt_port_seq_show | ||
556 | }; | ||
557 | |||
558 | static int scsi_tgt_port_seq_open(struct inode *inode, struct file *file) | ||
559 | { | ||
560 | return seq_open(file, &scsi_tgt_port_seq_ops); | ||
561 | } | ||
562 | |||
563 | static const struct file_operations scsi_tgt_port_seq_fops = { | ||
564 | .owner = THIS_MODULE, | ||
565 | .open = scsi_tgt_port_seq_open, | ||
566 | .read = seq_read, | ||
567 | .llseek = seq_lseek, | ||
568 | .release = seq_release, | ||
569 | }; | ||
570 | |||
571 | /* | ||
572 | * SCSI Authorized Initiator Table: | ||
573 | * It contains the SCSI Initiators authorized to be attached to one of the | ||
574 | * local Target ports. | ||
575 | * Iterates through all active TPGs and extracts the info from the ACLs | ||
576 | */ | ||
577 | static void *scsi_auth_intr_seq_start(struct seq_file *seq, loff_t *pos) | ||
578 | { | ||
579 | spin_lock_bh(&se_global->se_tpg_lock); | ||
580 | return seq_list_start(&se_global->g_se_tpg_list, *pos); | ||
581 | } | ||
582 | |||
583 | static void *scsi_auth_intr_seq_next(struct seq_file *seq, void *v, | ||
584 | loff_t *pos) | ||
585 | { | ||
586 | return seq_list_next(v, &se_global->g_se_tpg_list, pos); | ||
587 | } | ||
588 | |||
589 | static void scsi_auth_intr_seq_stop(struct seq_file *seq, void *v) | ||
590 | { | ||
591 | spin_unlock_bh(&se_global->se_tpg_lock); | ||
592 | } | ||
593 | |||
594 | static int scsi_auth_intr_seq_show(struct seq_file *seq, void *v) | ||
595 | { | ||
596 | struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group, | ||
597 | se_tpg_list); | ||
598 | struct se_dev_entry *deve; | ||
599 | struct se_lun *lun; | ||
600 | struct se_node_acl *se_nacl; | ||
601 | int j; | ||
602 | |||
603 | if (list_is_first(&se_tpg->se_tpg_list, | ||
604 | &se_global->g_se_tpg_list)) | ||
605 | seq_puts(seq, "inst dev port indx dev_or_port intr_name " | ||
606 | "map_indx att_count num_cmds read_mbytes " | ||
607 | "write_mbytes hs_num_cmds creation_time row_status\n"); | ||
608 | |||
609 | if (!(se_tpg)) | ||
610 | return 0; | ||
611 | |||
612 | spin_lock(&se_tpg->acl_node_lock); | ||
613 | list_for_each_entry(se_nacl, &se_tpg->acl_node_list, acl_list) { | ||
614 | |||
615 | atomic_inc(&se_nacl->mib_ref_count); | ||
616 | smp_mb__after_atomic_inc(); | ||
617 | spin_unlock(&se_tpg->acl_node_lock); | ||
618 | |||
619 | spin_lock_irq(&se_nacl->device_list_lock); | ||
620 | for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) { | ||
621 | deve = &se_nacl->device_list[j]; | ||
622 | if (!(deve->lun_flags & | ||
623 | TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) || | ||
624 | (!deve->se_lun)) | ||
625 | continue; | ||
626 | lun = deve->se_lun; | ||
627 | if (!lun->lun_se_dev) | ||
628 | continue; | ||
629 | |||
630 | seq_printf(seq, "%u %u %u %u %u %s %u %u %u %u %u %u" | ||
631 | " %u %s\n", | ||
632 | /* scsiInstIndex */ | ||
633 | (TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ? | ||
634 | TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) : | ||
635 | 0, | ||
636 | /* scsiDeviceIndex */ | ||
637 | lun->lun_se_dev->dev_index, | ||
638 | /* scsiAuthIntrTgtPortIndex */ | ||
639 | TPG_TFO(se_tpg)->tpg_get_tag(se_tpg), | ||
640 | /* scsiAuthIntrIndex */ | ||
641 | se_nacl->acl_index, | ||
642 | /* scsiAuthIntrDevOrPort */ | ||
643 | 1, | ||
644 | /* scsiAuthIntrName */ | ||
645 | se_nacl->initiatorname[0] ? | ||
646 | se_nacl->initiatorname : NONE, | ||
647 | /* FIXME: scsiAuthIntrLunMapIndex */ | ||
648 | 0, | ||
649 | /* scsiAuthIntrAttachedTimes */ | ||
650 | deve->attach_count, | ||
651 | /* scsiAuthIntrOutCommands */ | ||
652 | deve->total_cmds, | ||
653 | /* scsiAuthIntrReadMegaBytes */ | ||
654 | (u32)(deve->read_bytes >> 20), | ||
655 | /* scsiAuthIntrWrittenMegaBytes */ | ||
656 | (u32)(deve->write_bytes >> 20), | ||
657 | /* FIXME: scsiAuthIntrHSOutCommands */ | ||
658 | 0, | ||
659 | /* scsiAuthIntrLastCreation */ | ||
660 | (u32)(((u32)deve->creation_time - | ||
661 | INITIAL_JIFFIES) * 100 / HZ), | ||
662 | /* FIXME: scsiAuthIntrRowStatus */ | ||
663 | "Ready"); | ||
664 | } | ||
665 | spin_unlock_irq(&se_nacl->device_list_lock); | ||
666 | |||
667 | spin_lock(&se_tpg->acl_node_lock); | ||
668 | atomic_dec(&se_nacl->mib_ref_count); | ||
669 | smp_mb__after_atomic_dec(); | ||
670 | } | ||
671 | spin_unlock(&se_tpg->acl_node_lock); | ||
672 | |||
673 | return 0; | ||
674 | } | ||
675 | |||
676 | static const struct seq_operations scsi_auth_intr_seq_ops = { | ||
677 | .start = scsi_auth_intr_seq_start, | ||
678 | .next = scsi_auth_intr_seq_next, | ||
679 | .stop = scsi_auth_intr_seq_stop, | ||
680 | .show = scsi_auth_intr_seq_show | ||
681 | }; | ||
682 | |||
683 | static int scsi_auth_intr_seq_open(struct inode *inode, struct file *file) | ||
684 | { | ||
685 | return seq_open(file, &scsi_auth_intr_seq_ops); | ||
686 | } | ||
687 | |||
688 | static const struct file_operations scsi_auth_intr_seq_fops = { | ||
689 | .owner = THIS_MODULE, | ||
690 | .open = scsi_auth_intr_seq_open, | ||
691 | .read = seq_read, | ||
692 | .llseek = seq_lseek, | ||
693 | .release = seq_release, | ||
694 | }; | ||
695 | |||
696 | /* | ||
697 | * SCSI Attached Initiator Port Table: | ||
698 | * It lists the SCSI Initiators attached to one of the local Target ports. | ||
699 | * Iterates through all active TPGs and uses active sessions from each TPG | ||
700 | * to list the info for this table. | ||
701 | */ | ||
702 | static void *scsi_att_intr_port_seq_start(struct seq_file *seq, loff_t *pos) | ||
703 | { | ||
704 | spin_lock_bh(&se_global->se_tpg_lock); | ||
705 | return seq_list_start(&se_global->g_se_tpg_list, *pos); | ||
706 | } | ||
707 | |||
708 | static void *scsi_att_intr_port_seq_next(struct seq_file *seq, void *v, | ||
709 | loff_t *pos) | ||
710 | { | ||
711 | return seq_list_next(v, &se_global->g_se_tpg_list, pos); | ||
712 | } | ||
713 | |||
714 | static void scsi_att_intr_port_seq_stop(struct seq_file *seq, void *v) | ||
715 | { | ||
716 | spin_unlock_bh(&se_global->se_tpg_lock); | ||
717 | } | ||
718 | |||
719 | static int scsi_att_intr_port_seq_show(struct seq_file *seq, void *v) | ||
720 | { | ||
721 | struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group, | ||
722 | se_tpg_list); | ||
723 | struct se_dev_entry *deve; | ||
724 | struct se_lun *lun; | ||
725 | struct se_node_acl *se_nacl; | ||
726 | struct se_session *se_sess; | ||
727 | unsigned char buf[64]; | ||
728 | int j; | ||
729 | |||
730 | if (list_is_first(&se_tpg->se_tpg_list, | ||
731 | &se_global->g_se_tpg_list)) | ||
732 | seq_puts(seq, "inst dev port indx port_auth_indx port_name" | ||
733 | " port_ident\n"); | ||
734 | |||
735 | if (!(se_tpg)) | ||
736 | return 0; | ||
737 | |||
738 | spin_lock(&se_tpg->session_lock); | ||
739 | list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) { | ||
740 | if ((TPG_TFO(se_tpg)->sess_logged_in(se_sess)) || | ||
741 | (!se_sess->se_node_acl) || | ||
742 | (!se_sess->se_node_acl->device_list)) | ||
743 | continue; | ||
744 | |||
745 | atomic_inc(&se_sess->mib_ref_count); | ||
746 | smp_mb__after_atomic_inc(); | ||
747 | se_nacl = se_sess->se_node_acl; | ||
748 | atomic_inc(&se_nacl->mib_ref_count); | ||
749 | smp_mb__after_atomic_inc(); | ||
750 | spin_unlock(&se_tpg->session_lock); | ||
751 | |||
752 | spin_lock_irq(&se_nacl->device_list_lock); | ||
753 | for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) { | ||
754 | deve = &se_nacl->device_list[j]; | ||
755 | if (!(deve->lun_flags & | ||
756 | TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) || | ||
757 | (!deve->se_lun)) | ||
758 | continue; | ||
759 | |||
760 | lun = deve->se_lun; | ||
761 | if (!lun->lun_se_dev) | ||
762 | continue; | ||
763 | |||
764 | memset(buf, 0, 64); | ||
765 | if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) | ||
766 | TPG_TFO(se_tpg)->sess_get_initiator_sid( | ||
767 | se_sess, (unsigned char *)&buf[0], 64); | ||
768 | |||
769 | seq_printf(seq, "%u %u %u %u %u %s+i+%s\n", | ||
770 | /* scsiInstIndex */ | ||
771 | (TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ? | ||
772 | TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) : | ||
773 | 0, | ||
774 | /* scsiDeviceIndex */ | ||
775 | lun->lun_se_dev->dev_index, | ||
776 | /* scsiPortIndex */ | ||
777 | TPG_TFO(se_tpg)->tpg_get_tag(se_tpg), | ||
778 | /* scsiAttIntrPortIndex */ | ||
779 | (TPG_TFO(se_tpg)->sess_get_index != NULL) ? | ||
780 | TPG_TFO(se_tpg)->sess_get_index(se_sess) : | ||
781 | 0, | ||
782 | /* scsiAttIntrPortAuthIntrIdx */ | ||
783 | se_nacl->acl_index, | ||
784 | /* scsiAttIntrPortName */ | ||
785 | se_nacl->initiatorname[0] ? | ||
786 | se_nacl->initiatorname : NONE, | ||
787 | /* scsiAttIntrPortIdentifier */ | ||
788 | buf); | ||
789 | } | ||
790 | spin_unlock_irq(&se_nacl->device_list_lock); | ||
791 | |||
792 | spin_lock(&se_tpg->session_lock); | ||
793 | atomic_dec(&se_nacl->mib_ref_count); | ||
794 | smp_mb__after_atomic_dec(); | ||
795 | atomic_dec(&se_sess->mib_ref_count); | ||
796 | smp_mb__after_atomic_dec(); | ||
797 | } | ||
798 | spin_unlock(&se_tpg->session_lock); | ||
799 | |||
800 | return 0; | ||
801 | } | ||
802 | |||
803 | static const struct seq_operations scsi_att_intr_port_seq_ops = { | ||
804 | .start = scsi_att_intr_port_seq_start, | ||
805 | .next = scsi_att_intr_port_seq_next, | ||
806 | .stop = scsi_att_intr_port_seq_stop, | ||
807 | .show = scsi_att_intr_port_seq_show | ||
808 | }; | ||
809 | |||
810 | static int scsi_att_intr_port_seq_open(struct inode *inode, struct file *file) | ||
811 | { | ||
812 | return seq_open(file, &scsi_att_intr_port_seq_ops); | ||
813 | } | ||
814 | |||
815 | static const struct file_operations scsi_att_intr_port_seq_fops = { | ||
816 | .owner = THIS_MODULE, | ||
817 | .open = scsi_att_intr_port_seq_open, | ||
818 | .read = seq_read, | ||
819 | .llseek = seq_lseek, | ||
820 | .release = seq_release, | ||
821 | }; | ||
822 | |||
823 | /* | ||
824 | * SCSI Logical Unit Table | ||
825 | */ | ||
826 | static void *scsi_lu_seq_start(struct seq_file *seq, loff_t *pos) | ||
827 | { | ||
828 | return locate_hba_start(seq, pos); | ||
829 | } | ||
830 | |||
831 | static void *scsi_lu_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
832 | { | ||
833 | return locate_hba_next(seq, v, pos); | ||
834 | } | ||
835 | |||
836 | static void scsi_lu_seq_stop(struct seq_file *seq, void *v) | ||
837 | { | ||
838 | locate_hba_stop(seq, v); | ||
839 | } | ||
840 | |||
841 | #define SCSI_LU_INDEX 1 | ||
842 | static int scsi_lu_seq_show(struct seq_file *seq, void *v) | ||
843 | { | ||
844 | struct se_hba *hba; | ||
845 | struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, | ||
846 | g_se_dev_list); | ||
847 | struct se_device *dev = se_dev->se_dev_ptr; | ||
848 | int j; | ||
849 | char str[28]; | ||
850 | |||
851 | if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) | ||
852 | seq_puts(seq, "inst dev indx LUN lu_name vend prod rev" | ||
853 | " dev_type status state-bit num_cmds read_mbytes" | ||
854 | " write_mbytes resets full_stat hs_num_cmds creation_time\n"); | ||
855 | |||
856 | if (!(dev)) | ||
857 | return 0; | ||
858 | |||
859 | hba = dev->se_hba; | ||
860 | if (!(hba)) { | ||
861 | /* Log error ? */ | ||
862 | return 0; | ||
863 | } | ||
864 | |||
865 | /* Fix LU state, if we can read it from the device */ | ||
866 | seq_printf(seq, "%u %u %u %llu %s", hba->hba_index, | ||
867 | dev->dev_index, SCSI_LU_INDEX, | ||
868 | (unsigned long long)0, /* FIXME: scsiLuDefaultLun */ | ||
869 | (strlen(DEV_T10_WWN(dev)->unit_serial)) ? | ||
870 | /* scsiLuWwnName */ | ||
871 | (char *)&DEV_T10_WWN(dev)->unit_serial[0] : | ||
872 | "None"); | ||
873 | |||
874 | memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); | ||
875 | /* scsiLuVendorId */ | ||
876 | for (j = 0; j < 8; j++) | ||
877 | str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ? | ||
878 | DEV_T10_WWN(dev)->vendor[j] : 0x20; | ||
879 | str[8] = 0; | ||
880 | seq_printf(seq, " %s", str); | ||
881 | |||
882 | /* scsiLuProductId */ | ||
883 | for (j = 0; j < 16; j++) | ||
884 | str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ? | ||
885 | DEV_T10_WWN(dev)->model[j] : 0x20; | ||
886 | str[16] = 0; | ||
887 | seq_printf(seq, " %s", str); | ||
888 | |||
889 | /* scsiLuRevisionId */ | ||
890 | for (j = 0; j < 4; j++) | ||
891 | str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ? | ||
892 | DEV_T10_WWN(dev)->revision[j] : 0x20; | ||
893 | str[4] = 0; | ||
894 | seq_printf(seq, " %s", str); | ||
895 | |||
896 | seq_printf(seq, " %u %s %s %llu %u %u %u %u %u %u\n", | ||
897 | /* scsiLuPeripheralType */ | ||
898 | TRANSPORT(dev)->get_device_type(dev), | ||
899 | (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ? | ||
900 | "available" : "notavailable", /* scsiLuStatus */ | ||
901 | "exposed", /* scsiLuState */ | ||
902 | (unsigned long long)dev->num_cmds, | ||
903 | /* scsiLuReadMegaBytes */ | ||
904 | (u32)(dev->read_bytes >> 20), | ||
905 | /* scsiLuWrittenMegaBytes */ | ||
906 | (u32)(dev->write_bytes >> 20), | ||
907 | dev->num_resets, /* scsiLuInResets */ | ||
908 | 0, /* scsiLuOutTaskSetFullStatus */ | ||
909 | 0, /* scsiLuHSInCommands */ | ||
910 | (u32)(((u32)dev->creation_time - INITIAL_JIFFIES) * | ||
911 | 100 / HZ)); | ||
912 | |||
913 | return 0; | ||
914 | } | ||
915 | |||
916 | static const struct seq_operations scsi_lu_seq_ops = { | ||
917 | .start = scsi_lu_seq_start, | ||
918 | .next = scsi_lu_seq_next, | ||
919 | .stop = scsi_lu_seq_stop, | ||
920 | .show = scsi_lu_seq_show | ||
921 | }; | ||
922 | |||
923 | static int scsi_lu_seq_open(struct inode *inode, struct file *file) | ||
924 | { | ||
925 | return seq_open(file, &scsi_lu_seq_ops); | ||
926 | } | ||
927 | |||
928 | static const struct file_operations scsi_lu_seq_fops = { | ||
929 | .owner = THIS_MODULE, | ||
930 | .open = scsi_lu_seq_open, | ||
931 | .read = seq_read, | ||
932 | .llseek = seq_lseek, | ||
933 | .release = seq_release, | ||
934 | }; | ||
935 | |||
936 | /****************************************************************************/ | ||
937 | |||
938 | /* | ||
939 | * Remove proc fs entries | ||
940 | */ | ||
941 | void remove_scsi_target_mib(void) | ||
942 | { | ||
943 | remove_proc_entry("scsi_target/mib/scsi_inst", NULL); | ||
944 | remove_proc_entry("scsi_target/mib/scsi_dev", NULL); | ||
945 | remove_proc_entry("scsi_target/mib/scsi_port", NULL); | ||
946 | remove_proc_entry("scsi_target/mib/scsi_transport", NULL); | ||
947 | remove_proc_entry("scsi_target/mib/scsi_tgt_dev", NULL); | ||
948 | remove_proc_entry("scsi_target/mib/scsi_tgt_port", NULL); | ||
949 | remove_proc_entry("scsi_target/mib/scsi_auth_intr", NULL); | ||
950 | remove_proc_entry("scsi_target/mib/scsi_att_intr_port", NULL); | ||
951 | remove_proc_entry("scsi_target/mib/scsi_lu", NULL); | ||
952 | remove_proc_entry("scsi_target/mib", NULL); | ||
953 | } | ||
954 | |||
955 | /* | ||
956 | * Create proc fs entries for the mib tables | ||
957 | */ | ||
958 | int init_scsi_target_mib(void) | ||
959 | { | ||
960 | struct proc_dir_entry *dir_entry; | ||
961 | struct proc_dir_entry *scsi_inst_entry; | ||
962 | struct proc_dir_entry *scsi_dev_entry; | ||
963 | struct proc_dir_entry *scsi_port_entry; | ||
964 | struct proc_dir_entry *scsi_transport_entry; | ||
965 | struct proc_dir_entry *scsi_tgt_dev_entry; | ||
966 | struct proc_dir_entry *scsi_tgt_port_entry; | ||
967 | struct proc_dir_entry *scsi_auth_intr_entry; | ||
968 | struct proc_dir_entry *scsi_att_intr_port_entry; | ||
969 | struct proc_dir_entry *scsi_lu_entry; | ||
970 | |||
971 | dir_entry = proc_mkdir("scsi_target/mib", NULL); | ||
972 | if (!(dir_entry)) { | ||
973 | printk(KERN_ERR "proc_mkdir() failed.\n"); | ||
974 | return -1; | ||
975 | } | ||
976 | |||
977 | scsi_inst_entry = | ||
978 | create_proc_entry("scsi_target/mib/scsi_inst", 0, NULL); | ||
979 | if (scsi_inst_entry) | ||
980 | scsi_inst_entry->proc_fops = &scsi_inst_seq_fops; | ||
981 | else | ||
982 | goto error; | ||
983 | |||
984 | scsi_dev_entry = | ||
985 | create_proc_entry("scsi_target/mib/scsi_dev", 0, NULL); | ||
986 | if (scsi_dev_entry) | ||
987 | scsi_dev_entry->proc_fops = &scsi_dev_seq_fops; | ||
988 | else | ||
989 | goto error; | ||
990 | |||
991 | scsi_port_entry = | ||
992 | create_proc_entry("scsi_target/mib/scsi_port", 0, NULL); | ||
993 | if (scsi_port_entry) | ||
994 | scsi_port_entry->proc_fops = &scsi_port_seq_fops; | ||
995 | else | ||
996 | goto error; | ||
997 | |||
998 | scsi_transport_entry = | ||
999 | create_proc_entry("scsi_target/mib/scsi_transport", 0, NULL); | ||
1000 | if (scsi_transport_entry) | ||
1001 | scsi_transport_entry->proc_fops = &scsi_transport_seq_fops; | ||
1002 | else | ||
1003 | goto error; | ||
1004 | |||
1005 | scsi_tgt_dev_entry = | ||
1006 | create_proc_entry("scsi_target/mib/scsi_tgt_dev", 0, NULL); | ||
1007 | if (scsi_tgt_dev_entry) | ||
1008 | scsi_tgt_dev_entry->proc_fops = &scsi_tgt_dev_seq_fops; | ||
1009 | else | ||
1010 | goto error; | ||
1011 | |||
1012 | scsi_tgt_port_entry = | ||
1013 | create_proc_entry("scsi_target/mib/scsi_tgt_port", 0, NULL); | ||
1014 | if (scsi_tgt_port_entry) | ||
1015 | scsi_tgt_port_entry->proc_fops = &scsi_tgt_port_seq_fops; | ||
1016 | else | ||
1017 | goto error; | ||
1018 | |||
1019 | scsi_auth_intr_entry = | ||
1020 | create_proc_entry("scsi_target/mib/scsi_auth_intr", 0, NULL); | ||
1021 | if (scsi_auth_intr_entry) | ||
1022 | scsi_auth_intr_entry->proc_fops = &scsi_auth_intr_seq_fops; | ||
1023 | else | ||
1024 | goto error; | ||
1025 | |||
1026 | scsi_att_intr_port_entry = | ||
1027 | create_proc_entry("scsi_target/mib/scsi_att_intr_port", 0, NULL); | ||
1028 | if (scsi_att_intr_port_entry) | ||
1029 | scsi_att_intr_port_entry->proc_fops = | ||
1030 | &scsi_att_intr_port_seq_fops; | ||
1031 | else | ||
1032 | goto error; | ||
1033 | |||
1034 | scsi_lu_entry = create_proc_entry("scsi_target/mib/scsi_lu", 0, NULL); | ||
1035 | if (scsi_lu_entry) | ||
1036 | scsi_lu_entry->proc_fops = &scsi_lu_seq_fops; | ||
1037 | else | ||
1038 | goto error; | ||
1039 | |||
1040 | return 0; | ||
1041 | |||
1042 | error: | ||
1043 | printk(KERN_ERR "create_proc_entry() failed.\n"); | ||
1044 | remove_scsi_target_mib(); | ||
1045 | return -1; | ||
1046 | } | ||
1047 | |||
1048 | /* | ||
1049 | * Initialize the index table for allocating unique row indexes to various mib | ||
1050 | * tables | ||
1051 | */ | ||
1052 | void init_scsi_index_table(void) | ||
1053 | { | ||
1054 | memset(&scsi_index_table, 0, sizeof(struct scsi_index_table)); | ||
1055 | spin_lock_init(&scsi_index_table.lock); | ||
1056 | } | ||
1057 | |||
1058 | /* | ||
1059 | * Allocate a new row index for the entry type specified | ||
1060 | */ | ||
1061 | u32 scsi_get_new_index(scsi_index_t type) | ||
1062 | { | ||
1063 | u32 new_index; | ||
1064 | |||
1065 | if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) { | ||
1066 | printk(KERN_ERR "Invalid index type %d\n", type); | ||
1067 | return -1; | ||
1068 | } | ||
1069 | |||
1070 | spin_lock(&scsi_index_table.lock); | ||
1071 | new_index = ++scsi_index_table.scsi_mib_index[type]; | ||
1072 | if (new_index == 0) | ||
1073 | new_index = ++scsi_index_table.scsi_mib_index[type]; | ||
1074 | spin_unlock(&scsi_index_table.lock); | ||
1075 | |||
1076 | return new_index; | ||
1077 | } | ||
1078 | EXPORT_SYMBOL(scsi_get_new_index); | ||
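scsi_get_new_index() above hands out a non-zero row index per index type from a counter protected by scsi_index_table.lock, bumping the counter a second time if it ever wraps to 0. A minimal user-space model of that behaviour (assumption: a pthread mutex stands in for the spinlock, and all names are illustrative rather than kernel API):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

enum demo_index_type { DEMO_INST_INDEX, DEMO_DEVICE_INDEX, DEMO_INDEX_TYPE_MAX };

static struct {
	pthread_mutex_t lock;                   /* models scsi_index_table.lock */
	uint32_t index[DEMO_INDEX_TYPE_MAX];
} demo_index_table = { .lock = PTHREAD_MUTEX_INITIALIZER };

static uint32_t demo_get_new_index(enum demo_index_type type)
{
	uint32_t new_index;

	pthread_mutex_lock(&demo_index_table.lock);
	new_index = ++demo_index_table.index[type];
	if (new_index == 0)                     /* skip 0 if the 32-bit counter wrapped */
		new_index = ++demo_index_table.index[type];
	pthread_mutex_unlock(&demo_index_table.lock);

	return new_index;
}

int main(void)
{
	uint32_t a = demo_get_new_index(DEMO_DEVICE_INDEX);
	uint32_t b = demo_get_new_index(DEMO_DEVICE_INDEX);

	printf("%u %u\n", a, b);                /* prints "1 2": indexes are never 0 */
	return 0;
}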
diff --git a/drivers/target/target_core_mib.h b/drivers/target/target_core_mib.h deleted file mode 100644 index 277204633850..000000000000 --- a/drivers/target/target_core_mib.h +++ /dev/null | |||
@@ -1,28 +0,0 @@ | |||
1 | #ifndef TARGET_CORE_MIB_H | ||
2 | #define TARGET_CORE_MIB_H | ||
3 | |||
4 | typedef enum { | ||
5 | SCSI_INST_INDEX, | ||
6 | SCSI_DEVICE_INDEX, | ||
7 | SCSI_AUTH_INTR_INDEX, | ||
8 | SCSI_INDEX_TYPE_MAX | ||
9 | } scsi_index_t; | ||
10 | |||
11 | struct scsi_index_table { | ||
12 | spinlock_t lock; | ||
13 | u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; | ||
14 | } ____cacheline_aligned; | ||
15 | |||
16 | /* SCSI Port stats */ | ||
17 | struct scsi_port_stats { | ||
18 | u64 cmd_pdus; | ||
19 | u64 tx_data_octets; | ||
20 | u64 rx_data_octets; | ||
21 | } ____cacheline_aligned; | ||
22 | |||
23 | extern int init_scsi_target_mib(void); | ||
24 | extern void remove_scsi_target_mib(void); | ||
25 | extern void init_scsi_index_table(void); | ||
26 | extern u32 scsi_get_new_index(scsi_index_t); | ||
27 | |||
28 | #endif /*** TARGET_CORE_MIB_H ***/ | ||
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 742d24609a9b..f2a08477a68c 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c | |||
@@ -462,8 +462,8 @@ static struct se_device *pscsi_create_type_disk( | |||
462 | */ | 462 | */ |
463 | bd = blkdev_get_by_path(se_dev->se_dev_udev_path, | 463 | bd = blkdev_get_by_path(se_dev->se_dev_udev_path, |
464 | FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv); | 464 | FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv); |
465 | if (!(bd)) { | 465 | if (IS_ERR(bd)) { |
466 | printk("pSCSI: blkdev_get_by_path() failed\n"); | 466 | printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n"); |
467 | scsi_device_put(sd); | 467 | scsi_device_put(sd); |
468 | return NULL; | 468 | return NULL; |
469 | } | 469 | } |
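This pscsi hunk is the "claim checking for NULL instead of IS_ERR" fix listed in the merge: blkdev_get_by_path() signals failure by encoding a negative errno into the returned pointer rather than returning NULL, so the old !(bd) test could never fire. A small self-contained model of the idiom (assumption: simplified user-space re-implementations of ERR_PTR/IS_ERR/PTR_ERR and a hypothetical fake_blkdev_get_by_path(), for illustration only):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical stand-in for blkdev_get_by_path(): always fails with -ENODEV. */
static void *fake_blkdev_get_by_path(const char *path)
{
	(void)path;
	return ERR_PTR(-ENODEV);
}

int main(void)
{
	void *bd = fake_blkdev_get_by_path("/dev/does-not-exist");

	if (!bd)
		printf("NULL test: never reached, the error is missed\n");
	if (IS_ERR(bd))
		printf("IS_ERR test: caught error %ld\n", PTR_ERR(bd));

	return 0;
}

Only the IS_ERR branch prints, which is why the hunk above switches the check before calling scsi_device_put() and bailing out.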
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index abfa81a57115..c26f67467623 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c | |||
@@ -275,7 +275,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( | |||
275 | spin_lock_init(&acl->device_list_lock); | 275 | spin_lock_init(&acl->device_list_lock); |
276 | spin_lock_init(&acl->nacl_sess_lock); | 276 | spin_lock_init(&acl->nacl_sess_lock); |
277 | atomic_set(&acl->acl_pr_ref_count, 0); | 277 | atomic_set(&acl->acl_pr_ref_count, 0); |
278 | atomic_set(&acl->mib_ref_count, 0); | ||
279 | acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg); | 278 | acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg); |
280 | snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); | 279 | snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); |
281 | acl->se_tpg = tpg; | 280 | acl->se_tpg = tpg; |
@@ -318,12 +317,6 @@ void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl) | |||
318 | cpu_relax(); | 317 | cpu_relax(); |
319 | } | 318 | } |
320 | 319 | ||
321 | void core_tpg_wait_for_mib_ref(struct se_node_acl *nacl) | ||
322 | { | ||
323 | while (atomic_read(&nacl->mib_ref_count) != 0) | ||
324 | cpu_relax(); | ||
325 | } | ||
326 | |||
327 | void core_tpg_clear_object_luns(struct se_portal_group *tpg) | 320 | void core_tpg_clear_object_luns(struct se_portal_group *tpg) |
328 | { | 321 | { |
329 | int i, ret; | 322 | int i, ret; |
@@ -480,7 +473,6 @@ int core_tpg_del_initiator_node_acl( | |||
480 | spin_unlock_bh(&tpg->session_lock); | 473 | spin_unlock_bh(&tpg->session_lock); |
481 | 474 | ||
482 | core_tpg_wait_for_nacl_pr_ref(acl); | 475 | core_tpg_wait_for_nacl_pr_ref(acl); |
483 | core_tpg_wait_for_mib_ref(acl); | ||
484 | core_clear_initiator_node_from_tpg(acl, tpg); | 476 | core_clear_initiator_node_from_tpg(acl, tpg); |
485 | core_free_device_list_for_node(acl, tpg); | 477 | core_free_device_list_for_node(acl, tpg); |
486 | 478 | ||
@@ -701,6 +693,8 @@ EXPORT_SYMBOL(core_tpg_register); | |||
701 | 693 | ||
702 | int core_tpg_deregister(struct se_portal_group *se_tpg) | 694 | int core_tpg_deregister(struct se_portal_group *se_tpg) |
703 | { | 695 | { |
696 | struct se_node_acl *nacl, *nacl_tmp; | ||
697 | |||
704 | printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group" | 698 | printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group" |
705 | " for endpoint: %s Portal Tag %u\n", | 699 | " for endpoint: %s Portal Tag %u\n", |
706 | (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? | 700 | (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? |
@@ -714,6 +708,25 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) | |||
714 | 708 | ||
715 | while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0) | 709 | while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0) |
716 | cpu_relax(); | 710 | cpu_relax(); |
711 | /* | ||
712 | * Release any remaining demo-mode generated se_node_acl that have | ||
713 | * not been released because of TFO->tpg_check_demo_mode_cache() == 1 | ||
714 | * in transport_deregister_session(). | ||
715 | */ | ||
716 | spin_lock_bh(&se_tpg->acl_node_lock); | ||
717 | list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list, | ||
718 | acl_list) { | ||
719 | list_del(&nacl->acl_list); | ||
720 | se_tpg->num_node_acls--; | ||
721 | spin_unlock_bh(&se_tpg->acl_node_lock); | ||
722 | |||
723 | core_tpg_wait_for_nacl_pr_ref(nacl); | ||
724 | core_free_device_list_for_node(nacl, se_tpg); | ||
725 | TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl); | ||
726 | |||
727 | spin_lock_bh(&se_tpg->acl_node_lock); | ||
728 | } | ||
729 | spin_unlock_bh(&se_tpg->acl_node_lock); | ||
717 | 730 | ||
718 | if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) | 731 | if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) |
719 | core_tpg_release_virtual_lun0(se_tpg); | 732 | core_tpg_release_virtual_lun0(se_tpg); |
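The loop added to core_tpg_deregister() above also illustrates the locking shape used for releasing the remaining demo-mode ACLs: each entry is unlinked from acl_node_list while holding the lock, the lock is dropped around the potentially blocking release calls, and it is re-taken before handling the next entry. A user-space sketch of that shape (assumption: a pthread mutex and a hand-rolled singly linked list stand in for the spinlock and list_head; names are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_acl {
	char name[16];
	struct demo_acl *next;
};

static pthread_mutex_t demo_acl_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_acl *demo_acl_list;

static void demo_add(const char *name)
{
	struct demo_acl *a = calloc(1, sizeof(*a));

	if (!a)
		return;
	strncpy(a->name, name, sizeof(a->name) - 1);
	pthread_mutex_lock(&demo_acl_lock);
	a->next = demo_acl_list;
	demo_acl_list = a;
	pthread_mutex_unlock(&demo_acl_lock);
}

/* Stands in for the blocking per-ACL release work done without the lock held. */
static void demo_release(struct demo_acl *a)
{
	printf("releasing %s\n", a->name);
	free(a);
}

static void demo_deregister_all(void)
{
	pthread_mutex_lock(&demo_acl_lock);
	while (demo_acl_list) {
		struct demo_acl *a = demo_acl_list;

		demo_acl_list = a->next;                /* unlink while holding the lock */
		pthread_mutex_unlock(&demo_acl_lock);   /* drop it around blocking work */
		demo_release(a);
		pthread_mutex_lock(&demo_acl_lock);     /* re-take before the next entry */
	}
	pthread_mutex_unlock(&demo_acl_lock);
}

int main(void)
{
	demo_add("acl-a");
	demo_add("acl-b");
	demo_deregister_all();
	return 0;
}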
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 28b6292ff298..236e22d8cfae 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -379,6 +379,40 @@ void release_se_global(void) | |||
379 | se_global = NULL; | 379 | se_global = NULL; |
380 | } | 380 | } |
381 | 381 | ||
382 | /* SCSI statistics table index */ | ||
383 | static struct scsi_index_table scsi_index_table; | ||
384 | |||
385 | /* | ||
386 | * Initialize the index table for allocating unique row indexes to various mib | ||
387 | * tables. | ||
388 | */ | ||
389 | void init_scsi_index_table(void) | ||
390 | { | ||
391 | memset(&scsi_index_table, 0, sizeof(struct scsi_index_table)); | ||
392 | spin_lock_init(&scsi_index_table.lock); | ||
393 | } | ||
394 | |||
395 | /* | ||
396 | * Allocate a new row index for the entry type specified | ||
397 | */ | ||
398 | u32 scsi_get_new_index(scsi_index_t type) | ||
399 | { | ||
400 | u32 new_index; | ||
401 | |||
402 | if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) { | ||
403 | printk(KERN_ERR "Invalid index type %d\n", type); | ||
404 | return -EINVAL; | ||
405 | } | ||
406 | |||
407 | spin_lock(&scsi_index_table.lock); | ||
408 | new_index = ++scsi_index_table.scsi_mib_index[type]; | ||
409 | if (new_index == 0) | ||
410 | new_index = ++scsi_index_table.scsi_mib_index[type]; | ||
411 | spin_unlock(&scsi_index_table.lock); | ||
412 | |||
413 | return new_index; | ||
414 | } | ||
415 | |||
382 | void transport_init_queue_obj(struct se_queue_obj *qobj) | 416 | void transport_init_queue_obj(struct se_queue_obj *qobj) |
383 | { | 417 | { |
384 | atomic_set(&qobj->queue_cnt, 0); | 418 | atomic_set(&qobj->queue_cnt, 0); |
@@ -437,7 +471,6 @@ struct se_session *transport_init_session(void) | |||
437 | } | 471 | } |
438 | INIT_LIST_HEAD(&se_sess->sess_list); | 472 | INIT_LIST_HEAD(&se_sess->sess_list); |
439 | INIT_LIST_HEAD(&se_sess->sess_acl_list); | 473 | INIT_LIST_HEAD(&se_sess->sess_acl_list); |
440 | atomic_set(&se_sess->mib_ref_count, 0); | ||
441 | 474 | ||
442 | return se_sess; | 475 | return se_sess; |
443 | } | 476 | } |
@@ -546,12 +579,6 @@ void transport_deregister_session(struct se_session *se_sess) | |||
546 | transport_free_session(se_sess); | 579 | transport_free_session(se_sess); |
547 | return; | 580 | return; |
548 | } | 581 | } |
549 | /* | ||
550 | * Wait for possible reference in drivers/target/target_core_mib.c: | ||
551 | * scsi_att_intr_port_seq_show() | ||
552 | */ | ||
553 | while (atomic_read(&se_sess->mib_ref_count) != 0) | ||
554 | cpu_relax(); | ||
555 | 582 | ||
556 | spin_lock_bh(&se_tpg->session_lock); | 583 | spin_lock_bh(&se_tpg->session_lock); |
557 | list_del(&se_sess->sess_list); | 584 | list_del(&se_sess->sess_list); |
@@ -574,7 +601,6 @@ void transport_deregister_session(struct se_session *se_sess) | |||
574 | spin_unlock_bh(&se_tpg->acl_node_lock); | 601 | spin_unlock_bh(&se_tpg->acl_node_lock); |
575 | 602 | ||
576 | core_tpg_wait_for_nacl_pr_ref(se_nacl); | 603 | core_tpg_wait_for_nacl_pr_ref(se_nacl); |
577 | core_tpg_wait_for_mib_ref(se_nacl); | ||
578 | core_free_device_list_for_node(se_nacl, se_tpg); | 604 | core_free_device_list_for_node(se_nacl, se_tpg); |
579 | TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, | 605 | TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, |
580 | se_nacl); | 606 | se_nacl); |
@@ -4827,6 +4853,8 @@ static int transport_do_se_mem_map( | |||
4827 | 4853 | ||
4828 | return ret; | 4854 | return ret; |
4829 | } | 4855 | } |
4856 | |||
4857 | BUG_ON(list_empty(se_mem_list)); | ||
4830 | /* | 4858 | /* |
4831 | * This is the normal path for all normal non BIDI and BIDI-COMMAND | 4859 | * This is the normal path for all normal non BIDI and BIDI-COMMAND |
4832 | * WRITE payloads.. If we need to do BIDI READ passthrough for | 4860 | * WRITE payloads.. If we need to do BIDI READ passthrough for |
@@ -5008,7 +5036,9 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd) | |||
5008 | struct se_mem *se_mem = NULL, *se_mem_lout = NULL; | 5036 | struct se_mem *se_mem = NULL, *se_mem_lout = NULL; |
5009 | u32 se_mem_cnt = 0, task_offset = 0; | 5037 | u32 se_mem_cnt = 0, task_offset = 0; |
5010 | 5038 | ||
5011 | BUG_ON(list_empty(cmd->t_task->t_mem_list)); | 5039 | if (!list_empty(T_TASK(cmd)->t_mem_list)) |
5040 | se_mem = list_entry(T_TASK(cmd)->t_mem_list->next, | ||
5041 | struct se_mem, se_list); | ||
5012 | 5042 | ||
5013 | ret = transport_do_se_mem_map(dev, task, | 5043 | ret = transport_do_se_mem_map(dev, task, |
5014 | cmd->t_task->t_mem_list, NULL, se_mem, | 5044 | cmd->t_task->t_mem_list, NULL, se_mem, |
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 07fdfb6b9a9a..0828b6c8610a 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h | |||
@@ -8,7 +8,6 @@ | |||
8 | #include <scsi/scsi_cmnd.h> | 8 | #include <scsi/scsi_cmnd.h> |
9 | #include <net/sock.h> | 9 | #include <net/sock.h> |
10 | #include <net/tcp.h> | 10 | #include <net/tcp.h> |
11 | #include "target_core_mib.h" | ||
12 | 11 | ||
13 | #define TARGET_CORE_MOD_VERSION "v4.0.0-rc6" | 12 | #define TARGET_CORE_MOD_VERSION "v4.0.0-rc6" |
14 | #define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT)) | 13 | #define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT)) |
@@ -195,6 +194,21 @@ typedef enum { | |||
195 | SAM_TASK_ATTR_EMULATED | 194 | SAM_TASK_ATTR_EMULATED |
196 | } t10_task_attr_index_t; | 195 | } t10_task_attr_index_t; |
197 | 196 | ||
197 | /* | ||
198 | * Used for target SCSI statistics | ||
199 | */ | ||
200 | typedef enum { | ||
201 | SCSI_INST_INDEX, | ||
202 | SCSI_DEVICE_INDEX, | ||
203 | SCSI_AUTH_INTR_INDEX, | ||
204 | SCSI_INDEX_TYPE_MAX | ||
205 | } scsi_index_t; | ||
206 | |||
207 | struct scsi_index_table { | ||
208 | spinlock_t lock; | ||
209 | u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; | ||
210 | } ____cacheline_aligned; | ||
211 | |||
198 | struct se_cmd; | 212 | struct se_cmd; |
199 | 213 | ||
200 | struct t10_alua { | 214 | struct t10_alua { |
@@ -578,8 +592,6 @@ struct se_node_acl { | |||
578 | spinlock_t stats_lock; | 592 | spinlock_t stats_lock; |
579 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ | 593 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ |
580 | atomic_t acl_pr_ref_count; | 594 | atomic_t acl_pr_ref_count; |
581 | /* Used for MIB access */ | ||
582 | atomic_t mib_ref_count; | ||
583 | struct se_dev_entry *device_list; | 595 | struct se_dev_entry *device_list; |
584 | struct se_session *nacl_sess; | 596 | struct se_session *nacl_sess; |
585 | struct se_portal_group *se_tpg; | 597 | struct se_portal_group *se_tpg; |
@@ -595,8 +607,6 @@ struct se_node_acl { | |||
595 | } ____cacheline_aligned; | 607 | } ____cacheline_aligned; |
596 | 608 | ||
597 | struct se_session { | 609 | struct se_session { |
598 | /* Used for MIB access */ | ||
599 | atomic_t mib_ref_count; | ||
600 | u64 sess_bin_isid; | 610 | u64 sess_bin_isid; |
601 | struct se_node_acl *se_node_acl; | 611 | struct se_node_acl *se_node_acl; |
602 | struct se_portal_group *se_tpg; | 612 | struct se_portal_group *se_tpg; |
@@ -806,7 +816,6 @@ struct se_hba { | |||
806 | /* Virtual iSCSI devices attached. */ | 816 | /* Virtual iSCSI devices attached. */ |
807 | u32 dev_count; | 817 | u32 dev_count; |
808 | u32 hba_index; | 818 | u32 hba_index; |
809 | atomic_t dev_mib_access_count; | ||
810 | atomic_t load_balance_queue; | 819 | atomic_t load_balance_queue; |
811 | atomic_t left_queue_depth; | 820 | atomic_t left_queue_depth; |
812 | /* Maximum queue depth the HBA can handle. */ | 821 | /* Maximum queue depth the HBA can handle. */ |
@@ -845,6 +854,12 @@ struct se_lun { | |||
845 | 854 | ||
846 | #define SE_LUN(c) ((struct se_lun *)(c)->se_lun) | 855 | #define SE_LUN(c) ((struct se_lun *)(c)->se_lun) |
847 | 856 | ||
857 | struct scsi_port_stats { | ||
858 | u64 cmd_pdus; | ||
859 | u64 tx_data_octets; | ||
860 | u64 rx_data_octets; | ||
861 | } ____cacheline_aligned; | ||
862 | |||
848 | struct se_port { | 863 | struct se_port { |
849 | /* RELATIVE TARGET PORT IDENTIFER */ | 864 | /* RELATIVE TARGET PORT IDENTIFER */ |
850 | u16 sep_rtpi; | 865 | u16 sep_rtpi; |
@@ -867,6 +882,7 @@ struct se_port { | |||
867 | } ____cacheline_aligned; | 882 | } ____cacheline_aligned; |
868 | 883 | ||
869 | struct se_tpg_np { | 884 | struct se_tpg_np { |
885 | struct se_portal_group *tpg_np_parent; | ||
870 | struct config_group tpg_np_group; | 886 | struct config_group tpg_np_group; |
871 | } ____cacheline_aligned; | 887 | } ____cacheline_aligned; |
872 | 888 | ||
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index 66f44e56eb80..246940511579 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h | |||
@@ -111,6 +111,8 @@ struct se_subsystem_api; | |||
111 | 111 | ||
112 | extern int init_se_global(void); | 112 | extern int init_se_global(void); |
113 | extern void release_se_global(void); | 113 | extern void release_se_global(void); |
114 | extern void init_scsi_index_table(void); | ||
115 | extern u32 scsi_get_new_index(scsi_index_t); | ||
114 | extern void transport_init_queue_obj(struct se_queue_obj *); | 116 | extern void transport_init_queue_obj(struct se_queue_obj *); |
115 | extern int transport_subsystem_check_init(void); | 117 | extern int transport_subsystem_check_init(void); |
116 | extern int transport_subsystem_register(struct se_subsystem_api *); | 118 | extern int transport_subsystem_register(struct se_subsystem_api *); |