Diffstat (limited to 'include/target')
-rw-r--r--  include/target/target_core_base.h        | 228
-rw-r--r--  include/target/target_core_device.h       |   6
-rw-r--r--  include/target/target_core_fabric_ops.h   |  15
-rw-r--r--  include/target/target_core_transport.h    |  43
4 files changed, 110 insertions(+), 182 deletions(-)
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 561ac99def5..27040653005 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -9,12 +9,13 @@
 #include <net/sock.h>
 #include <net/tcp.h>
 
-#define TARGET_CORE_MOD_VERSION "v4.0.0-rc7-ml"
+#define TARGET_CORE_MOD_VERSION "v4.1.0-rc1-ml"
 #define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT))
 
 /* Used by transport_generic_allocate_iovecs() */
 #define TRANSPORT_IOV_DATA_BUFFER 5
 /* Maximum Number of LUNs per Target Portal Group */
+/* Don't raise above 511 or REPORT_LUNS needs to handle >1 page */
 #define TRANSPORT_MAX_LUNS_PER_TPG 256
 /*
  * By default we use 32-byte CDBs in TCM Core and subsystem plugin code.
@@ -99,6 +100,7 @@ enum transport_state_table {
	TRANSPORT_FREE = 15,
	TRANSPORT_NEW_CMD_MAP = 16,
	TRANSPORT_FREE_CMD_INTR = 17,
+	TRANSPORT_COMPLETE_QF_WP = 18,
 };
 
 /* Used for struct se_cmd->se_cmd_flags */
@@ -108,27 +110,22 @@ enum se_cmd_flags_table {
	SCF_EMULATED_TASK_SENSE = 0x00000004,
	SCF_SCSI_DATA_SG_IO_CDB = 0x00000008,
	SCF_SCSI_CONTROL_SG_IO_CDB = 0x00000010,
-	SCF_SCSI_CONTROL_NONSG_IO_CDB = 0x00000020,
	SCF_SCSI_NON_DATA_CDB = 0x00000040,
	SCF_SCSI_CDB_EXCEPTION = 0x00000080,
	SCF_SCSI_RESERVATION_CONFLICT = 0x00000100,
-	SCF_CMD_PASSTHROUGH_NOALLOC = 0x00000200,
	SCF_SE_CMD_FAILED = 0x00000400,
	SCF_SE_LUN_CMD = 0x00000800,
	SCF_SE_ALLOW_EOO = 0x00001000,
-	SCF_SE_DISABLE_ONLINE_CHECK = 0x00002000,
	SCF_SENT_CHECK_CONDITION = 0x00004000,
	SCF_OVERFLOW_BIT = 0x00008000,
	SCF_UNDERFLOW_BIT = 0x00010000,
	SCF_SENT_DELAYED_TAS = 0x00020000,
	SCF_ALUA_NON_OPTIMIZED = 0x00040000,
	SCF_DELAYED_CMD_FROM_SAM_ATTR = 0x00080000,
-	SCF_PASSTHROUGH_SG_TO_MEM = 0x00100000,
-	SCF_PASSTHROUGH_CONTIG_TO_SG = 0x00200000,
+	SCF_UNUSED = 0x00100000,
	SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000,
-	SCF_EMULATE_SYNC_CACHE = 0x00800000,
	SCF_EMULATE_CDB_ASYNC = 0x01000000,
-	SCF_EMULATE_SYNC_UNMAP = 0x02000000
+	SCF_EMULATE_QUEUE_FULL = 0x02000000,
 };
 
 /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
@@ -205,11 +202,6 @@ typedef enum {
	SCSI_INDEX_TYPE_MAX
 } scsi_index_t;
 
-struct scsi_index_table {
-	spinlock_t lock;
-	u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
-} ____cacheline_aligned;
-
 struct se_cmd;
 
 struct t10_alua {
@@ -235,7 +227,7 @@ struct t10_alua_lu_gp {
	atomic_t lu_gp_ref_cnt;
	spinlock_t lu_gp_lock;
	struct config_group lu_gp_group;
-	struct list_head lu_gp_list;
+	struct list_head lu_gp_node;
	struct list_head lu_gp_mem_list;
 } ____cacheline_aligned;
 
@@ -291,10 +283,10 @@ struct t10_vpd {
 } ____cacheline_aligned;
 
 struct t10_wwn {
-	unsigned char vendor[8];
-	unsigned char model[16];
-	unsigned char revision[4];
-	unsigned char unit_serial[INQUIRY_VPD_SERIAL_LEN];
+	char vendor[8];
+	char model[16];
+	char revision[4];
+	char unit_serial[INQUIRY_VPD_SERIAL_LEN];
	spinlock_t t10_vpd_lock;
	struct se_subsystem_dev *t10_sub_dev;
	struct config_group t10_wwn_group;
@@ -366,13 +358,13 @@ struct t10_reservation_ops {
	int (*t10_pr_clear)(struct se_cmd *);
 };
 
-struct t10_reservation_template {
+struct t10_reservation {
	/* Reservation effects all target ports */
	int pr_all_tg_pt;
	/* Activate Persistence across Target Power Loss enabled
	 * for SCSI device */
	int pr_aptpl_active;
-	/* Used by struct t10_reservation_template->pr_aptpl_buf_len */
+	/* Used by struct t10_reservation->pr_aptpl_buf_len */
 #define PR_APTPL_BUF_LEN 8192
	u32 pr_aptpl_buf_len;
	u32 pr_generation;
@@ -397,7 +389,7 @@ struct t10_reservation_template {
 
 struct se_queue_req {
	int state;
-	void *cmd;
+	struct se_cmd *cmd;
	struct list_head qr_list;
 } ____cacheline_aligned;
 
@@ -408,64 +400,10 @@ struct se_queue_obj {
	wait_queue_head_t thread_wq;
 } ____cacheline_aligned;
 
-/*
- * Used one per struct se_cmd to hold all extra struct se_task
- * metadata. This structure is setup and allocated in
- * drivers/target/target_core_transport.c:__transport_alloc_se_cmd()
- */
-struct se_transport_task {
-	unsigned char *t_task_cdb;
-	unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
-	unsigned long long t_task_lba;
-	int t_tasks_failed;
-	int t_tasks_fua;
-	bool t_tasks_bidi;
-	u32 t_task_cdbs;
-	u32 t_tasks_check;
-	u32 t_tasks_no;
-	u32 t_tasks_sectors;
-	u32 t_tasks_se_num;
-	u32 t_tasks_se_bidi_num;
-	u32 t_tasks_sg_chained_no;
-	atomic_t t_fe_count;
-	atomic_t t_se_count;
-	atomic_t t_task_cdbs_left;
-	atomic_t t_task_cdbs_ex_left;
-	atomic_t t_task_cdbs_timeout_left;
-	atomic_t t_task_cdbs_sent;
-	atomic_t t_transport_aborted;
-	atomic_t t_transport_active;
-	atomic_t t_transport_complete;
-	atomic_t t_transport_queue_active;
-	atomic_t t_transport_sent;
-	atomic_t t_transport_stop;
-	atomic_t t_transport_timeout;
-	atomic_t transport_dev_active;
-	atomic_t transport_lun_active;
-	atomic_t transport_lun_fe_stop;
-	atomic_t transport_lun_stop;
-	spinlock_t t_state_lock;
-	struct completion t_transport_stop_comp;
-	struct completion transport_lun_fe_stop_comp;
-	struct completion transport_lun_stop_comp;
-	struct scatterlist *t_tasks_sg_chained;
-	struct scatterlist t_tasks_sg_bounce;
-	void *t_task_buf;
-	/*
-	 * Used for pre-registered fabric SGL passthrough WRITE and READ
-	 * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
-	 * and other HW target mode fabric modules.
-	 */
-	struct scatterlist *t_task_pt_sgl;
-	struct list_head *t_mem_list;
-	/* Used for BIDI READ */
-	struct list_head *t_mem_bidi_list;
-	struct list_head t_task_list;
-} ____cacheline_aligned;
-
 struct se_task {
	unsigned char task_sense;
	struct scatterlist *task_sg;
+	u32 task_sg_nents;
	struct scatterlist *task_sg_bidi;
	u8 task_scsi_status;
	u8 task_flags;
@@ -476,8 +414,6 @@ struct se_task {
	u32 task_no;
	u32 task_sectors;
	u32 task_size;
-	u32 task_sg_num;
-	u32 task_sg_offset;
	enum dma_data_direction task_data_direction;
	struct se_cmd *task_se_cmd;
	struct se_device *se_dev;
@@ -495,9 +431,6 @@ struct se_task {
	struct list_head t_state_list;
 } ____cacheline_aligned;
 
-#define TASK_CMD(task) ((task)->task_se_cmd)
-#define TASK_DEV(task) ((task)->se_dev)
-
 struct se_cmd {
	/* SAM response code being sent to initiator */
	u8 scsi_status;
@@ -531,9 +464,10 @@ struct se_cmd {
	atomic_t transport_sent;
	/* Used for sense data */
	void *sense_buffer;
-	struct list_head se_delayed_list;
-	struct list_head se_ordered_list;
-	struct list_head se_lun_list;
+	struct list_head se_delayed_node;
+	struct list_head se_ordered_node;
+	struct list_head se_lun_node;
+	struct list_head se_qf_node;
	struct se_device *se_dev;
	struct se_dev_entry *se_deve;
	struct se_device *se_obj_ptr;
@@ -542,18 +476,62 @@ struct se_cmd {
	/* Only used for internal passthrough and legacy TCM fabric modules */
	struct se_session *se_sess;
	struct se_tmr_req *se_tmr_req;
-	/* t_task is setup to t_task_backstore in transport_init_se_cmd() */
-	struct se_transport_task *t_task;
-	struct se_transport_task t_task_backstore;
+	struct list_head se_queue_node;
	struct target_core_fabric_ops *se_tfo;
	int (*transport_emulate_cdb)(struct se_cmd *);
-	void (*transport_split_cdb)(unsigned long long, u32 *, unsigned char *);
+	void (*transport_split_cdb)(unsigned long long, u32, unsigned char *);
	void (*transport_wait_for_tasks)(struct se_cmd *, int, int);
	void (*transport_complete_callback)(struct se_cmd *);
-} ____cacheline_aligned;
+	int (*transport_qf_callback)(struct se_cmd *);
 
-#define T_TASK(cmd) ((cmd)->t_task)
-#define CMD_TFO(cmd) ((cmd)->se_tfo)
+	unsigned char *t_task_cdb;
+	unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
+	unsigned long long t_task_lba;
+	int t_tasks_failed;
+	int t_tasks_fua;
+	bool t_tasks_bidi;
+	u32 t_tasks_sg_chained_no;
+	atomic_t t_fe_count;
+	atomic_t t_se_count;
+	atomic_t t_task_cdbs_left;
+	atomic_t t_task_cdbs_ex_left;
+	atomic_t t_task_cdbs_timeout_left;
+	atomic_t t_task_cdbs_sent;
+	atomic_t t_transport_aborted;
+	atomic_t t_transport_active;
+	atomic_t t_transport_complete;
+	atomic_t t_transport_queue_active;
+	atomic_t t_transport_sent;
+	atomic_t t_transport_stop;
+	atomic_t t_transport_timeout;
+	atomic_t transport_dev_active;
+	atomic_t transport_lun_active;
+	atomic_t transport_lun_fe_stop;
+	atomic_t transport_lun_stop;
+	spinlock_t t_state_lock;
+	struct completion t_transport_stop_comp;
+	struct completion transport_lun_fe_stop_comp;
+	struct completion transport_lun_stop_comp;
+	struct scatterlist *t_tasks_sg_chained;
+
+	/*
+	 * Used for pre-registered fabric SGL passthrough WRITE and READ
+	 * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
+	 * and other HW target mode fabric modules.
+	 */
+	struct scatterlist *t_task_pt_sgl;
+	u32 t_task_pt_sgl_num;
+
+	struct scatterlist *t_data_sg;
+	unsigned int t_data_nents;
+	struct scatterlist *t_bidi_data_sg;
+	unsigned int t_bidi_data_nents;
+
+	/* Used for BIDI READ */
+	struct list_head t_task_list;
+	u32 t_task_list_num;
+
+} ____cacheline_aligned;
 
 struct se_tmr_req {
	/* Task Management function to be preformed */
@@ -617,9 +595,6 @@ struct se_session {
	struct list_head sess_acl_list;
 } ____cacheline_aligned;
 
-#define SE_SESS(cmd) ((cmd)->se_sess)
-#define SE_NODE_ACL(sess) ((sess)->se_node_acl)
-
 struct se_device;
 struct se_transform_info;
 struct scatterlist;
@@ -640,8 +615,6 @@ struct se_lun_acl {
	struct se_ml_stat_grps ml_stat_grps;
 } ____cacheline_aligned;
 
-#define ML_STAT_GRPS(lacl) (&(lacl)->ml_stat_grps)
-
 struct se_dev_entry {
	bool def_pr_registered;
	/* See transport_lunflags_table */
@@ -688,6 +661,8 @@ struct se_dev_attrib {
	int emulate_reservations;
	int emulate_alua;
	int enforce_pr_isids;
+	int is_nonrot;
+	int emulate_rest_reord;
	u32 hw_block_size;
	u32 block_size;
	u32 hw_max_sectors;
@@ -727,10 +702,10 @@ struct se_subsystem_dev {
	/* T10 Inquiry and VPD WWN Information */
	struct t10_wwn t10_wwn;
	/* T10 SPC-2 + SPC-3 Reservations */
-	struct t10_reservation_template t10_reservation;
+	struct t10_reservation t10_pr;
	spinlock_t se_dev_lock;
	void *se_dev_su_ptr;
-	struct list_head g_se_dev_list;
+	struct list_head se_dev_node;
	struct config_group se_dev_group;
	/* For T10 Reservations */
	struct config_group se_dev_pr_group;
@@ -738,11 +713,6 @@ struct se_subsystem_dev {
	struct se_dev_stat_grps dev_stat_grps;
 } ____cacheline_aligned;
 
-#define T10_ALUA(su_dev) (&(su_dev)->t10_alua)
-#define T10_RES(su_dev) (&(su_dev)->t10_reservation)
-#define T10_PR_OPS(su_dev) (&(su_dev)->t10_reservation.pr_ops)
-#define DEV_STAT_GRP(dev) (&(dev)->dev_stat_grps)
-
 struct se_device {
	/* Set to 1 if thread is NOT sleeping on thread_sem */
	u8 thread_active;
@@ -780,11 +750,11 @@ struct se_device {
	atomic_t dev_status_thr_count;
	atomic_t dev_hoq_count;
	atomic_t dev_ordered_sync;
+	atomic_t dev_qf_count;
	struct se_obj dev_obj;
	struct se_obj dev_access_obj;
	struct se_obj dev_export_obj;
-	struct se_queue_obj *dev_queue_obj;
-	struct se_queue_obj *dev_status_queue_obj;
+	struct se_queue_obj dev_queue_obj;
	spinlock_t delayed_cmd_lock;
	spinlock_t ordered_cmd_lock;
	spinlock_t execute_task_lock;
@@ -796,6 +766,7 @@ struct se_device {
	spinlock_t dev_status_thr_lock;
	spinlock_t se_port_lock;
	spinlock_t se_tmr_lock;
+	spinlock_t qf_cmd_lock;
	/* Used for legacy SPC-2 reservationsa */
	struct se_node_acl *dev_reserved_node_acl;
	/* Used for ALUA Logical Unit Group membership */
@@ -809,10 +780,12 @@ struct se_device {
	struct task_struct *process_thread;
	pid_t process_thread_pid;
	struct task_struct *dev_mgmt_thread;
+	struct work_struct qf_work_queue;
	struct list_head delayed_cmd_list;
	struct list_head ordered_cmd_list;
	struct list_head execute_task_list;
	struct list_head state_task_list;
+	struct list_head qf_cmd_list;
	/* Pointer to associated SE HBA */
	struct se_hba *se_hba;
	struct se_subsystem_dev *se_sub_dev;
@@ -824,11 +797,6 @@ struct se_device {
	struct list_head g_se_dev_list;
 } ____cacheline_aligned;
 
-#define SE_DEV(cmd) ((cmd)->se_lun->lun_se_dev)
-#define SU_DEV(dev) ((dev)->se_sub_dev)
-#define DEV_ATTRIB(dev) (&(dev)->se_sub_dev->se_dev_attrib)
-#define DEV_T10_WWN(dev) (&(dev)->se_sub_dev->t10_wwn)
-
 struct se_hba {
	u16 hba_tpgt;
	u32 hba_id;
@@ -837,24 +805,17 @@ struct se_hba {
	/* Virtual iSCSI devices attached. */
	u32 dev_count;
	u32 hba_index;
-	atomic_t load_balance_queue;
-	atomic_t left_queue_depth;
-	/* Maximum queue depth the HBA can handle. */
-	atomic_t max_queue_depth;
	/* Pointer to transport specific host structure. */
	void *hba_ptr;
	/* Linked list for struct se_device */
	struct list_head hba_dev_list;
-	struct list_head hba_list;
+	struct list_head hba_node;
	spinlock_t device_lock;
-	spinlock_t hba_queue_lock;
	struct config_group hba_group;
	struct mutex hba_access_mutex;
	struct se_subsystem_api *transport;
 } ____cacheline_aligned;
 
-#define SE_HBA(dev) ((dev)->se_hba)
-
 struct se_port_stat_grps {
	struct config_group stat_group;
	struct config_group scsi_port_group;
@@ -881,9 +842,6 @@ struct se_lun {
	struct se_port_stat_grps port_stat_grps;
 } ____cacheline_aligned;
 
-#define SE_LUN(cmd) ((cmd)->se_lun)
-#define PORT_STAT_GRP(lun) (&(lun)->port_stat_grps)
-
 struct scsi_port_stats {
	u64 cmd_pdus;
	u64 tx_data_octets;
@@ -930,7 +888,7 @@ struct se_portal_group {
	spinlock_t tpg_lun_lock;
	/* Pointer to $FABRIC_MOD portal group */
	void *se_tpg_fabric_ptr;
-	struct list_head se_tpg_list;
+	struct list_head se_tpg_node;
	/* linked list for initiator ACL list */
	struct list_head acl_node_list;
	struct se_lun *tpg_lun_list;
@@ -949,8 +907,6 @@ struct se_portal_group {
	struct config_group tpg_param_group;
 } ____cacheline_aligned;
 
-#define TPG_TFO(se_tpg) ((se_tpg)->se_tpg_tfo)
-
 struct se_wwn {
	struct target_fabric_configfs *wwn_tf;
	struct config_group wwn_group;
@@ -958,28 +914,4 @@ struct se_wwn {
	struct config_group fabric_stat_group;
 } ____cacheline_aligned;
 
-struct se_global {
-	u16 alua_lu_gps_counter;
-	int g_sub_api_initialized;
-	u32 in_shutdown;
-	u32 alua_lu_gps_count;
-	u32 g_hba_id_counter;
-	struct config_group target_core_hbagroup;
-	struct config_group alua_group;
-	struct config_group alua_lu_gps_group;
-	struct list_head g_lu_gps_list;
-	struct list_head g_se_tpg_list;
-	struct list_head g_hba_list;
-	struct list_head g_se_dev_list;
-	struct se_hba *g_lun0_hba;
-	struct se_subsystem_dev *g_lun0_su_dev;
-	struct se_device *g_lun0_dev;
-	struct t10_alua_lu_gp *default_lu_gp;
-	spinlock_t g_device_lock;
-	spinlock_t hba_lock;
-	spinlock_t se_tpg_lock;
-	spinlock_t lu_gps_lock;
-	spinlock_t plugin_class_lock;
-} ____cacheline_aligned;
-
 #endif /* TARGET_CORE_BASE_H */
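Note: with struct se_transport_task folded into struct se_cmd and the T_TASK()/CMD_TFO()/SE_DEV() style accessor macros removed, callers now dereference the members directly. A minimal before/after sketch, assuming a hypothetical fabric completion helper (the function name is illustrative, not taken from an in-tree module):

/* Sketch only: shows the accessor-macro conversion implied by this header
 * change; example_fabric_complete() is hypothetical.
 */
static void example_fabric_complete(struct se_cmd *cmd)
{
	/* before: T_TASK(cmd)->t_task_cdb and CMD_TFO(cmd)->queue_status(cmd) */
	unsigned char *cdb = cmd->t_task_cdb;

	if (atomic_read(&cmd->t_transport_aborted))
		return;

	pr_debug("TARGET_CORE: completing CDB 0x%02x\n", cdb[0]);
	cmd->se_tfo->queue_status(cmd);
}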
diff --git a/include/target/target_core_device.h b/include/target/target_core_device.h
index 52b18a5752c..46571912086 100644
--- a/include/target/target_core_device.h
+++ b/include/target/target_core_device.h
@@ -1,8 +1,8 @@
 #ifndef TARGET_CORE_DEVICE_H
 #define TARGET_CORE_DEVICE_H
 
-extern int transport_get_lun_for_cmd(struct se_cmd *, unsigned char *, u32);
-extern int transport_get_lun_for_tmr(struct se_cmd *, u32);
+extern int transport_lookup_cmd_lun(struct se_cmd *, u32);
+extern int transport_lookup_tmr_lun(struct se_cmd *, u32);
 extern struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *, u16);
 extern int core_free_device_list_for_node(struct se_node_acl *,
@@ -39,6 +39,8 @@ extern int se_dev_set_emulate_tas(struct se_device *, int);
 extern int se_dev_set_emulate_tpu(struct se_device *, int);
 extern int se_dev_set_emulate_tpws(struct se_device *, int);
 extern int se_dev_set_enforce_pr_isids(struct se_device *, int);
+extern int se_dev_set_is_nonrot(struct se_device *, int);
+extern int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
 extern int se_dev_set_queue_depth(struct se_device *, u32);
 extern int se_dev_set_max_sectors(struct se_device *, u32);
 extern int se_dev_set_optimal_sectors(struct se_device *, u32);
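Note: transport_get_lun_for_cmd() loses its CDB argument and becomes transport_lookup_cmd_lun() (likewise for the TMR variant). A hedged sketch of a caller update, assuming a hypothetical fabric dispatch path and that a negative return still signals a failed lookup:

/* Sketch only: example_fabric_dispatch() is hypothetical; error handling
 * is reduced to a bare return for illustration.
 */
static int example_fabric_dispatch(struct se_cmd *se_cmd, unsigned char *cdb,
				   u32 unpacked_lun)
{
	/* old call: transport_get_lun_for_cmd(se_cmd, cdb, unpacked_lun) */
	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0)
		return -ENODEV;

	/* the CDB is still handed to the core separately */
	return transport_generic_allocate_tasks(se_cmd, cdb);
}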
diff --git a/include/target/target_core_fabric_ops.h b/include/target/target_core_fabric_ops.h
index 747e1404dca..126c675f4f1 100644
--- a/include/target/target_core_fabric_ops.h
+++ b/include/target/target_core_fabric_ops.h
@@ -27,6 +27,12 @@ struct target_core_fabric_ops {
	int (*tpg_check_demo_mode_cache)(struct se_portal_group *);
	int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *);
	int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *);
+	/*
+	 * Optionally used by fabrics to allow demo-mode login, but not
+	 * expose any TPG LUNs, and return 'not connected' in standard
+	 * inquiry response
+	 */
+	int (*tpg_check_demo_mode_login_only)(struct se_portal_group *);
	struct se_node_acl *(*tpg_alloc_fabric_acl)(
			struct se_portal_group *);
	void (*tpg_release_fabric_acl)(struct se_portal_group *,
@@ -39,17 +45,11 @@ struct target_core_fabric_ops {
	 */
	int (*new_cmd_map)(struct se_cmd *);
	/*
-	 * Optional function pointer for TCM fabric modules that use
-	 * Linux/NET sockets to allocate struct iovec array to struct se_cmd
-	 */
-	int (*alloc_cmd_iovecs)(struct se_cmd *);
-	/*
	 * Optional to release struct se_cmd and fabric dependent allocated
	 * I/O descriptor in transport_cmd_check_stop()
	 */
	void (*check_stop_free)(struct se_cmd *);
-	void (*release_cmd_to_pool)(struct se_cmd *);
-	void (*release_cmd_direct)(struct se_cmd *);
+	void (*release_cmd)(struct se_cmd *);
	/*
	 * Called with spin_lock_bh(struct se_portal_group->session_lock held.
	 */
@@ -70,7 +70,6 @@ struct target_core_fabric_ops {
	void (*set_default_node_attributes)(struct se_node_acl *);
	u32 (*get_task_tag)(struct se_cmd *);
	int (*get_cmd_state)(struct se_cmd *);
-	void (*new_cmd_failure)(struct se_cmd *);
	int (*queue_data_in)(struct se_cmd *);
	int (*queue_status)(struct se_cmd *);
	int (*queue_tm_rsp)(struct se_cmd *);
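Note: the new optional tpg_check_demo_mode_login_only() callback lets a fabric accept demo-mode logins without exposing any TPG LUNs. A minimal sketch of a fabric opting in (the fabric name and ops table are placeholders, mandatory callbacks are elided, and the nonzero-means-enabled return convention is assumed from the other tpg_check_* hooks):

/* Sketch only: hypothetical fabric module wiring up the new callback. */
static int example_check_demo_mode_login_only(struct se_portal_group *se_tpg)
{
	return 1;	/* assumption: nonzero requests the login-only behaviour */
}

static struct target_core_fabric_ops example_fabric_ops = {
	/* ...mandatory callbacks elided... */
	.tpg_check_demo_mode_login_only = example_check_demo_mode_login_only,
	/* release_cmd_to_pool()/release_cmd_direct() collapse into a single
	 * release_cmd(), and alloc_cmd_iovecs()/new_cmd_failure() are gone.
	 */
};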
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
index 24a1c6cb83c..46aae4f94ed 100644
--- a/include/target/target_core_transport.h
+++ b/include/target/target_core_transport.h
@@ -101,6 +101,10 @@
 #define DA_ENFORCE_PR_ISIDS 1
 #define DA_STATUS_MAX_SECTORS_MIN 16
 #define DA_STATUS_MAX_SECTORS_MAX 8192
+/* By default don't report non-rotating (solid state) medium */
+#define DA_IS_NONROT 0
+/* Queue Algorithm Modifier default for restricted reordering in control mode page */
+#define DA_EMULATE_REST_REORD 0
 
 #define SE_MODE_PAGE_BUF 512
 
@@ -111,9 +115,8 @@ struct se_subsystem_api;
 
 extern struct kmem_cache *se_mem_cache;
 
-extern int init_se_global(void);
-extern void release_se_global(void);
-extern void init_scsi_index_table(void);
+extern int init_se_kmem_caches(void);
+extern void release_se_kmem_caches(void);
 extern u32 scsi_get_new_index(scsi_index_t);
 extern void transport_init_queue_obj(struct se_queue_obj *);
 extern int transport_subsystem_check_init(void);
@@ -160,36 +163,38 @@ extern struct se_device *transport_add_device_to_core_hba(struct se_hba *,
				struct se_subsystem_dev *, u32,
				void *, struct se_dev_limits *,
				const char *, const char *);
-extern void transport_device_setup_cmd(struct se_cmd *);
 extern void transport_init_se_cmd(struct se_cmd *,
				struct target_core_fabric_ops *,
				struct se_session *, u32, int, int,
				unsigned char *);
+void *transport_kmap_first_data_page(struct se_cmd *cmd);
+void transport_kunmap_first_data_page(struct se_cmd *cmd);
 extern void transport_free_se_cmd(struct se_cmd *);
 extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
 extern int transport_generic_handle_cdb(struct se_cmd *);
+extern int transport_handle_cdb_direct(struct se_cmd *);
 extern int transport_generic_handle_cdb_map(struct se_cmd *);
 extern int transport_generic_handle_data(struct se_cmd *);
 extern void transport_new_cmd_failure(struct se_cmd *);
 extern int transport_generic_handle_tmr(struct se_cmd *);
 extern void transport_generic_free_cmd_intr(struct se_cmd *);
 extern void __transport_stop_task_timer(struct se_task *, unsigned long *);
-extern unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]);
 extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
				struct scatterlist *, u32);
 extern int transport_clear_lun_from_sessions(struct se_lun *);
 extern int transport_check_aborted_status(struct se_cmd *, int);
 extern int transport_send_check_condition_and_sense(struct se_cmd *, u8, int);
 extern void transport_send_task_abort(struct se_cmd *);
-extern void transport_release_cmd_to_pool(struct se_cmd *);
-extern void transport_generic_free_cmd(struct se_cmd *, int, int, int);
+extern void transport_release_cmd(struct se_cmd *);
+extern void transport_generic_free_cmd(struct se_cmd *, int, int);
 extern void transport_generic_wait_for_cmds(struct se_cmd *, int);
-extern u32 transport_calc_sg_num(struct se_task *, struct se_mem *, u32);
+extern int transport_init_task_sg(struct se_task *, struct se_mem *, u32);
 extern int transport_map_mem_to_sg(struct se_task *, struct list_head *,
-				void *, struct se_mem *,
+				struct scatterlist *, struct se_mem *,
				struct se_mem **, u32 *, u32 *);
 extern void transport_do_task_sg_chain(struct se_cmd *);
 extern void transport_generic_process_write(struct se_cmd *);
+extern int transport_generic_new_cmd(struct se_cmd *);
 extern int transport_generic_do_tmr(struct se_cmd *);
 /* From target_core_alua.c */
 extern int core_alua_check_nonop_delay(struct se_cmd *);
@@ -235,13 +240,13 @@ struct se_subsystem_api {
	 */
	int (*cdb_none)(struct se_task *);
	/*
-	 * For SCF_SCSI_CONTROL_NONSG_IO_CDB
+	 * For SCF_SCSI_DATA_SG_IO_CDB
	 */
-	int (*map_task_non_SG)(struct se_task *);
+	int (*map_data_SG)(struct se_task *);
	/*
-	 * For SCF_SCSI_DATA_SG_IO_CDB and SCF_SCSI_CONTROL_SG_IO_CDB
+	 * For SCF_SCSI_CONTROL_SG_IO_CDB
	 */
-	int (*map_task_SG)(struct se_task *);
+	int (*map_control_SG)(struct se_task *);
	/*
	 * attach_hba():
	 */
@@ -292,7 +297,7 @@ struct se_subsystem_api {
	 * drivers. Provided out of convenience.
	 */
	int (*transport_complete)(struct se_task *task);
-	struct se_task *(*alloc_task)(struct se_cmd *);
+	struct se_task *(*alloc_task)(unsigned char *cdb);
	/*
	 * do_task():
	 */
@@ -342,19 +347,9 @@ struct se_subsystem_api {
	 */
	sector_t (*get_blocks)(struct se_device *);
	/*
-	 * do_se_mem_map():
-	 */
-	int (*do_se_mem_map)(struct se_task *, struct list_head *, void *,
-			struct se_mem *, struct se_mem **, u32 *, u32 *);
-	/*
	 * get_sense_buffer():
	 */
	unsigned char *(*get_sense_buffer)(struct se_task *);
 } ____cacheline_aligned;
 
-#define TRANSPORT(dev) ((dev)->transport)
-#define HBA_TRANSPORT(hba) ((hba)->transport)
-
-extern struct se_global *se_global;
-
 #endif /* TARGET_CORE_TRANSPORT_H */
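Note: on the backend side, map_task_SG()/map_task_non_SG() become map_data_SG()/map_control_SG(), alloc_task() now takes the CDB rather than the se_cmd, and do_se_mem_map() disappears from the ops table. A hedged sketch of the corresponding ops-table shape for a hypothetical subsystem plugin (bodies are placeholders, <linux/slab.h> is assumed, and real backends typically embed struct se_task in a larger per-task structure):

/* Sketch only: illustrates the renamed se_subsystem_api hooks. */
static struct se_task *example_alloc_task(unsigned char *cdb)
{
	/* old signature took struct se_cmd *; the CDB is now passed directly */
	return kzalloc(sizeof(struct se_task), GFP_KERNEL);
}

static int example_map_data_SG(struct se_task *task)
{
	/* map task->task_sg for SCF_SCSI_DATA_SG_IO_CDB commands */
	return 0;
}

static int example_map_control_SG(struct se_task *task)
{
	/* map task->task_sg for SCF_SCSI_CONTROL_SG_IO_CDB commands */
	return 0;
}

static struct se_subsystem_api example_backend = {
	/* ...name, attach_hba() and the other mandatory hooks elided... */
	.alloc_task	= example_alloc_task,
	.map_data_SG	= example_map_data_SG,
	.map_control_SG	= example_map_control_SG,
	/* do_se_mem_map() no longer exists in this ops table */
};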