Diffstat (limited to 'drivers/target')
-rw-r--r--  drivers/target/loopback/tcm_loop.c      |   5
-rw-r--r--  drivers/target/target_core_iblock.c     |   2
-rw-r--r--  drivers/target/target_core_pscsi.c      |   2
-rw-r--r--  drivers/target/target_core_transport.c  | 995
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c         |  25
-rw-r--r--  drivers/target/tcm_fc/tfc_io.c          |  58
6 files changed, 307 insertions, 780 deletions
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index fe11a336b598..99603bc45786 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -175,10 +175,7 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
 		sgl_bidi_count = sdb->table.nents;
 	}
 
-	/*
-	 * Map the SG memory into struct se_mem->page linked list using the same
-	 * physical memory at sg->page_link.
-	 */
+	/* Tell the core about our preallocated memory */
 	ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
 			scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
 	if (ret < 0)
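With this change the fabric hands its preallocated scatterlist straight to the core, which stores the pointers in cmd->t_data_sg / cmd->t_bidi_data_sg instead of converting them to a se_mem list. Since the payload is now a plain struct scatterlist array, the standard lib/scatterlist.c iterators apply to it directly. A minimal sketch of walking such a payload — the helper name and its checksum purpose are illustrative, not part of this patch:

#include <linux/scatterlist.h>
#include <linux/highmem.h>

/* Hypothetical helper: byte-wise checksum of an SG payload. */
static u32 sgl_checksum(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg;
	u32 sum = 0;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		unsigned char *vaddr = kmap(sg_page(sg));
		unsigned int j;

		/* Payload bytes start at sg->offset within the page. */
		for (j = 0; j < sg->length; j++)
			sum += vaddr[sg->offset + j];
		kunmap(sg_page(sg));
	}
	return sum;
}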
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 392e75fb1087..164b72106b88 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -634,7 +634,7 @@ static int iblock_map_task_SG(struct se_task *task)
 	hbio = tbio = bio;
 	/*
 	 * Use fs/bio.c:bio_add_pages() to setup the bio_vec maplist
-	 * from TCM struct se_mem -> task->task_sg -> struct scatterlist memory.
+	 * from task->task_sg -> struct scatterlist memory.
 	 */
 	for_each_sg(task->task_sg, sg, task->task_sg_num, i) {
 		DEBUG_IBLOCK("task: %p bio: %p Calling bio_add_page(): page:"
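The loop this hunk touches feeds each task->task_sg entry to bio_add_page(). A minimal sketch of that mapping pattern — names are illustrative, and the real iblock code additionally chains a fresh bio when one fills up rather than failing:

#include <linux/bio.h>
#include <linux/scatterlist.h>

/* Sketch: add one task's SG entries to an already-allocated bio. */
static int map_sg_to_bio(struct bio *bio, struct scatterlist *sgl,
			 unsigned int nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		/* bio_add_page() returns the number of bytes accepted. */
		if (bio_add_page(bio, sg_page(sg), sg->length,
				 sg->offset) != sg->length)
			return -ENOMEM;	/* bio full; caller must chain a new one */
	}
	return 0;
}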
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index d9569242e3dc..318ef14fe37d 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -1097,7 +1097,7 @@ static int __pscsi_map_task_SG(
 		return 0;
 	/*
 	 * For SCF_SCSI_DATA_SG_IO_CDB, Use fs/bio.c:bio_add_page() to setup
-	 * the bio_vec maplist from TC< struct se_mem -> task->task_sg ->
+	 * the bio_vec maplist from task->task_sg ->
 	 * struct scatterlist memory.  The struct se_task->task_sg[] currently needs
 	 * to be attached to struct bios for submission to Linux/SCSI using
 	 * struct request to struct scsi_device->request_queue.
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index b499d14f4637..c743d94baf77 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -190,7 +190,6 @@ static struct kmem_cache *se_cmd_cache;
 static struct kmem_cache *se_sess_cache;
 struct kmem_cache *se_tmr_req_cache;
 struct kmem_cache *se_ua_cache;
-struct kmem_cache *se_mem_cache;
 struct kmem_cache *t10_pr_reg_cache;
 struct kmem_cache *t10_alua_lu_gp_cache;
 struct kmem_cache *t10_alua_lu_gp_mem_cache;
@@ -210,17 +209,12 @@ static void transport_handle_queue_full(struct se_cmd *cmd,
 static void transport_direct_request_timeout(struct se_cmd *cmd);
 static void transport_free_dev_tasks(struct se_cmd *cmd);
 static u32 transport_allocate_tasks(struct se_cmd *cmd,
-		unsigned long long starting_lba, u32 sectors,
+		unsigned long long starting_lba,
 		enum dma_data_direction data_direction,
-		struct list_head *mem_list, int set_counts);
+		struct scatterlist *sgl, unsigned int nents);
 static int transport_generic_get_mem(struct se_cmd *cmd);
 static int transport_generic_remove(struct se_cmd *cmd,
 		int session_reinstatement);
-static int transport_cmd_get_valid_sectors(struct se_cmd *cmd);
-static int transport_map_sg_to_mem(struct se_cmd *cmd,
-		struct list_head *se_mem_list, struct scatterlist *sgl);
-static void transport_memcpy_se_mem_read_contig(unsigned char *dst,
-		struct list_head *se_mem_list, u32 len);
 static void transport_release_fe_cmd(struct se_cmd *cmd);
 static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
 		struct se_queue_obj *qobj);
@@ -258,12 +252,6 @@ int init_se_kmem_caches(void)
 		printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n");
 		goto out;
 	}
-	se_mem_cache = kmem_cache_create("se_mem_cache",
-			sizeof(struct se_mem), __alignof__(struct se_mem), 0, NULL);
-	if (!(se_mem_cache)) {
-		printk(KERN_ERR "kmem_cache_create() for struct se_mem failed\n");
-		goto out;
-	}
 	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
 			sizeof(struct t10_pr_registration),
 			__alignof__(struct t10_pr_registration), 0, NULL);
@@ -317,8 +305,6 @@ out:
 		kmem_cache_destroy(se_sess_cache);
 	if (se_ua_cache)
 		kmem_cache_destroy(se_ua_cache);
-	if (se_mem_cache)
-		kmem_cache_destroy(se_mem_cache);
 	if (t10_pr_reg_cache)
 		kmem_cache_destroy(t10_pr_reg_cache);
 	if (t10_alua_lu_gp_cache)
@@ -338,7 +324,6 @@ void release_se_kmem_caches(void)
 	kmem_cache_destroy(se_tmr_req_cache);
 	kmem_cache_destroy(se_sess_cache);
 	kmem_cache_destroy(se_ua_cache);
-	kmem_cache_destroy(se_mem_cache);
 	kmem_cache_destroy(t10_pr_reg_cache);
 	kmem_cache_destroy(t10_alua_lu_gp_cache);
 	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
@@ -1702,7 +1687,6 @@ transport_generic_get_task(struct se_cmd *cmd,
 {
 	struct se_task *task;
 	struct se_device *dev = cmd->se_dev;
-	unsigned long flags;
 
 	task = dev->transport->alloc_task(cmd);
 	if (!task) {
@@ -1718,10 +1702,6 @@ transport_generic_get_task(struct se_cmd *cmd,
 	task->se_dev = dev;
 	task->task_data_direction = data_direction;
 
-	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	list_add_tail(&task->t_list, &cmd->t_task_list);
-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
 	return task;
 }
 
@@ -1745,8 +1725,6 @@ void transport_init_se_cmd(
 	INIT_LIST_HEAD(&cmd->se_ordered_node);
 	INIT_LIST_HEAD(&cmd->se_qf_node);
 
-	INIT_LIST_HEAD(&cmd->t_mem_list);
-	INIT_LIST_HEAD(&cmd->t_mem_bidi_list);
 	INIT_LIST_HEAD(&cmd->t_task_list);
 	init_completion(&cmd->transport_lun_fe_stop_comp);
 	init_completion(&cmd->transport_lun_stop_comp);
@@ -2838,9 +2816,10 @@ EXPORT_SYMBOL(transport_asciihex_to_binaryhex);
 static void transport_xor_callback(struct se_cmd *cmd)
 {
 	unsigned char *buf, *addr;
-	struct se_mem *se_mem;
+	struct scatterlist *sg;
 	unsigned int offset;
 	int i;
+	int count;
 	/*
 	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
 	 *
@@ -2858,28 +2837,32 @@ static void transport_xor_callback(struct se_cmd *cmd)
 		return;
 	}
 	/*
-	 * Copy the scatterlist WRITE buffer located at cmd->t_mem_list
+	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
 	 * into the locally allocated *buf
 	 */
-	transport_memcpy_se_mem_read_contig(buf, &cmd->t_mem_list,
-			cmd->data_length);
+	sg_copy_to_buffer(cmd->t_data_sg,
+			  cmd->t_data_nents,
+			  buf,
+			  cmd->data_length);
+
 	/*
 	 * Now perform the XOR against the BIDI read memory located at
 	 * cmd->t_mem_bidi_list
 	 */
 
 	offset = 0;
-	list_for_each_entry(se_mem, &cmd->t_mem_bidi_list, se_list) {
-		addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0);
-		if (!(addr))
+	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
+		addr = kmap_atomic(sg_page(sg), KM_USER0);
+		if (!addr)
 			goto out;
 
-		for (i = 0; i < se_mem->se_len; i++)
-			*(addr + se_mem->se_off + i) ^= *(buf + offset + i);
+		for (i = 0; i < sg->length; i++)
+			*(addr + sg->offset + i) ^= *(buf + offset + i);
 
-		offset += se_mem->se_len;
+		offset += sg->length;
 		kunmap_atomic(addr, KM_USER0);
 	}
+
 out:
 	kfree(buf);
 }
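sg_copy_to_buffer() (lib/scatterlist.c) replaces the driver-private contiguous-copy helper that this patch removes further down; it returns the number of bytes actually copied, which the hunk above ignores. A minimal sketch of the same linearize-then-transform pattern, with an illustrative helper name and with the return value checked:

#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Sketch: copy an SG payload into one flat buffer before transforming it. */
static int linearize_payload(struct scatterlist *sgl, unsigned int nents,
			     unsigned int len)
{
	unsigned char *buf = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	if (sg_copy_to_buffer(sgl, nents, buf, len) != len) {
		kfree(buf);
		return -EINVAL;
	}
	/* ... XOR or otherwise transform buf[0..len-1] here ... */
	kfree(buf);
	return 0;
}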
@@ -2971,6 +2954,35 @@ transport_handle_reservation_conflict(struct se_cmd *cmd)
 	return -EINVAL;
 }
 
+static inline long long transport_dev_end_lba(struct se_device *dev)
+{
+	return dev->transport->get_blocks(dev) + 1;
+}
+
+static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	u32 sectors;
+
+	if (dev->transport->get_device_type(dev) != TYPE_DISK)
+		return 0;
+
+	sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
+
+	if ((cmd->t_task_lba + sectors) >
+	     transport_dev_end_lba(dev)) {
+		printk(KERN_ERR "LBA: %llu Sectors: %u exceeds"
+			" transport_dev_end_lba(): %llu\n",
+			cmd->t_task_lba, sectors,
+			transport_dev_end_lba(dev));
+		printk(KERN_ERR " We should return CHECK_CONDITION"
+		       " but we don't yet\n");
+		return 0;
+	}
+
+	return sectors;
+}
+
 /* transport_generic_cmd_sequencer():
  *
  * Generic Command Sequencer that should work for most DAS transport
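A quick worked example of the arithmetic in the relocated helper above (values are illustrative): with block_size = 512 and data_length = 8192, sectors = 16; on a device whose get_blocks() returns 2047 (i.e. 2048 addressable blocks), transport_dev_end_lba() yields 2048, so a command at t_task_lba = 2040 trips the range check (2040 + 16 > 2048) and the function returns 0 sectors.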
@@ -3580,28 +3592,6 @@ out_invalid_cdb_field:
 	return -EINVAL;
 }
 
-static inline void transport_release_tasks(struct se_cmd *);
-
-static void transport_memcpy_se_mem_read_contig(
-	unsigned char *dst,
-	struct list_head *se_mem_list,
-	u32 tot_len)
-{
-	struct se_mem *se_mem;
-	void *src;
-	u32 length;
-
-	list_for_each_entry(se_mem, se_mem_list, se_list) {
-		length = min_t(u32, se_mem->se_len, tot_len);
-		src = page_address(se_mem->se_page) + se_mem->se_off;
-		memcpy(dst, src, length);
-		tot_len -= length;
-		if (!tot_len)
-			break;
-		dst += length;
-	}
-}
-
 /*
  * Called from transport_generic_complete_ok() and
  * transport_generic_request_failure() to determine which dormant/delayed
@@ -3684,7 +3674,7 @@ static int transport_complete_qf(struct se_cmd *cmd)
 		ret = cmd->se_tfo->queue_data_in(cmd);
 		break;
 	case DMA_TO_DEVICE:
-		if (!list_empty(&cmd->t_mem_bidi_list)) {
+		if (cmd->t_bidi_data_sg) {
 			ret = cmd->se_tfo->queue_data_in(cmd);
 			if (ret < 0)
 				return ret;
@@ -3794,7 +3784,7 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)
 	/*
 	 * Check if we need to send READ payload for BIDI-COMMAND
 	 */
-	if (!list_empty(&cmd->t_mem_bidi_list)) {
+	if (cmd->t_bidi_data_sg) {
 		spin_lock(&cmd->se_lun->lun_sep_lock);
 		if (cmd->se_lun->lun_sep) {
 			cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
@@ -3856,41 +3846,42 @@ static void transport_free_dev_tasks(struct se_cmd *cmd)
 
 static inline void transport_free_pages(struct se_cmd *cmd)
 {
-	struct se_mem *se_mem, *se_mem_tmp;
+	struct scatterlist *sg;
 	int free_page = 1;
+	int count;
 
 	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
 		free_page = 0;
 	if (cmd->se_dev->transport->do_se_mem_map)
 		free_page = 0;
 
-	list_for_each_entry_safe(se_mem, se_mem_tmp,
-			&cmd->t_mem_list, se_list) {
+	for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, count) {
 		/*
-		 * We only release call __free_page(struct se_mem->se_page) when
+		 * Only called if
 		 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
 		 */
 		if (free_page)
-			__free_page(se_mem->se_page);
+			__free_page(sg_page(sg));
 
-		list_del(&se_mem->se_list);
-		kmem_cache_free(se_mem_cache, se_mem);
 	}
-	cmd->t_tasks_se_num = 0;
+	if (free_page)
+		kfree(cmd->t_data_sg);
+	cmd->t_data_sg = NULL;
+	cmd->t_data_nents = 0;
 
-	list_for_each_entry_safe(se_mem, se_mem_tmp,
-			&cmd->t_mem_bidi_list, se_list) {
+	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
 		/*
-		 * We only release call __free_page(struct se_mem->se_page) when
+		 * Only called if
 		 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
 		 */
 		if (free_page)
-			__free_page(se_mem->se_page);
+			__free_page(sg_page(sg));
 
-		list_del(&se_mem->se_list);
-		kmem_cache_free(se_mem_cache, se_mem);
 	}
-	cmd->t_tasks_se_bidi_num = 0;
+	if (free_page)
+		kfree(cmd->t_bidi_data_sg);
+	cmd->t_bidi_data_sg = NULL;
+	cmd->t_bidi_data_nents = 0;
 }
 
 static inline void transport_release_tasks(struct se_cmd *cmd)
@@ -3979,7 +3970,8 @@ free_pages:
 }
 
 /*
- * transport_generic_map_mem_to_cmd - Perform SGL -> struct se_mem map
+ * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of
+ * allocating in the core.
  * @cmd: Associated se_cmd descriptor
  * @mem: SGL style memory for TCM WRITE / READ
  * @sg_mem_num: Number of SGL elements
@@ -3996,35 +3988,18 @@ int transport_generic_map_mem_to_cmd(
 	struct scatterlist *sgl_bidi,
 	u32 sgl_bidi_count)
 {
-	int ret;
-
 	if (!sgl || !sgl_count)
 		return 0;
 
-	/*
-	 * Convert sgls (sgl, sgl_bidi) to list of se_mems
-	 */
 	if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
 	    (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
-		/*
-		 * For CDB using TCM struct se_mem linked list scatterlist memory
-		 * processed into a TCM struct se_subsystem_dev, we do the mapping
-		 * from the passed physical memory to struct se_mem->se_page here.
-		 */
-		ret = transport_map_sg_to_mem(cmd, &cmd->t_mem_list, sgl);
-		if (ret < 0)
-			return -ENOMEM;
 
-		cmd->t_tasks_se_num = ret;
-		/*
-		 * Setup BIDI READ list of struct se_mem elements
-		 */
-		if (sgl_bidi && sgl_bidi_count) {
-			ret = transport_map_sg_to_mem(cmd, &cmd->t_mem_bidi_list, sgl_bidi);
-			if (ret < 0)
-				return -ENOMEM;
+		cmd->t_data_sg = sgl;
+		cmd->t_data_nents = sgl_count;
 
-			cmd->t_tasks_se_bidi_num = ret;
+		if (sgl_bidi && sgl_bidi_count) {
+			cmd->t_bidi_data_sg = sgl_bidi;
+			cmd->t_bidi_data_nents = sgl_bidi_count;
 		}
 		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
 	}
@@ -4033,91 +4008,58 @@ int transport_generic_map_mem_to_cmd(
 }
 EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
 
-
-static inline long long transport_dev_end_lba(struct se_device *dev)
-{
-	return dev->transport->get_blocks(dev) + 1;
-}
-
-static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
-{
-	struct se_device *dev = cmd->se_dev;
-	u32 sectors;
-
-	if (dev->transport->get_device_type(dev) != TYPE_DISK)
-		return 0;
-
-	sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
-
-	if ((cmd->t_task_lba + sectors) >
-	     transport_dev_end_lba(dev)) {
-		printk(KERN_ERR "LBA: %llu Sectors: %u exceeds"
-			" transport_dev_end_lba(): %llu\n",
-			cmd->t_task_lba, sectors,
-			transport_dev_end_lba(dev));
-		return 0;
-	}
-
-	return sectors;
-}
-
 static int transport_new_cmd_obj(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
 	u32 task_cdbs;
 	u32 rc;
+	int set_counts = 1;
 
-	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
-		task_cdbs = 1;
-		cmd->t_task_list_num = 1;
-	} else {
-		int set_counts = 1;
-
-		/*
-		 * Setup any BIDI READ tasks and memory from
-		 * cmd->t_mem_bidi_list so the READ struct se_tasks
-		 * are queued first for the non pSCSI passthrough case.
-		 */
-		if (!list_empty(&cmd->t_mem_bidi_list) &&
-		    (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
-			rc = transport_allocate_tasks(cmd,
-				cmd->t_task_lba,
-				transport_cmd_get_valid_sectors(cmd),
-				DMA_FROM_DEVICE, &cmd->t_mem_bidi_list,
-				set_counts);
-			if (!(rc)) {
-				cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-				cmd->scsi_sense_reason =
-					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-				return PYX_TRANSPORT_LU_COMM_FAILURE;
-			}
-			set_counts = 0;
-		}
-		/*
-		 * Setup the tasks and memory from cmd->t_mem_list
-		 * Note for BIDI transfers this will contain the WRITE payload
-		 */
-		task_cdbs = transport_allocate_tasks(cmd,
-			cmd->t_task_lba,
-			transport_cmd_get_valid_sectors(cmd),
-			cmd->data_direction, &cmd->t_mem_list,
-			set_counts);
-		if (!(task_cdbs)) {
+	/*
+	 * Setup any BIDI READ tasks and memory from
+	 * cmd->t_mem_bidi_list so the READ struct se_tasks
+	 * are queued first for the non pSCSI passthrough case.
+	 */
+	if (cmd->t_bidi_data_sg &&
+	    (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
+		rc = transport_allocate_tasks(cmd,
+			cmd->t_task_lba,
+			DMA_FROM_DEVICE,
+			cmd->t_bidi_data_sg,
+			cmd->t_bidi_data_nents);
+		if (!rc) {
 			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 			cmd->scsi_sense_reason =
 				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 			return PYX_TRANSPORT_LU_COMM_FAILURE;
 		}
-		cmd->t_task_list_num = task_cdbs;
+		atomic_inc(&cmd->t_fe_count);
+		atomic_inc(&cmd->t_se_count);
+		set_counts = 0;
+	}
+	/*
+	 * Setup the tasks and memory from cmd->t_mem_list
+	 * Note for BIDI transfers this will contain the WRITE payload
+	 */
+	task_cdbs = transport_allocate_tasks(cmd,
+		cmd->t_task_lba,
+		cmd->data_direction,
+		cmd->t_data_sg,
+		cmd->t_data_nents);
+	if (!task_cdbs) {
+		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		cmd->scsi_sense_reason =
+			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
 
-#if 0
-	printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:"
-		" %u, t_task_cdbs: %u\n", obj_ptr, cmd->data_length,
-		cmd->t_task_lba, cmd->t_tasks_sectors,
-		cmd->t_task_cdbs);
-#endif
+	if (set_counts) {
+		atomic_inc(&cmd->t_fe_count);
+		atomic_inc(&cmd->t_se_count);
 	}
 
+	cmd->t_task_list_num = task_cdbs;
+
 	atomic_set(&cmd->t_task_cdbs_left, task_cdbs);
 	atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs);
 	atomic_set(&cmd->t_task_cdbs_timeout_left, task_cdbs);
@@ -4126,39 +4068,31 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
 
 void *transport_kmap_first_data_page(struct se_cmd *cmd)
 {
-	struct se_mem *se_mem;
-
-	BUG_ON(list_empty(&cmd->t_mem_list));
-
-	se_mem = list_first_entry(&cmd->t_mem_list, struct se_mem, se_list);
+	struct scatterlist *sg = cmd->t_data_sg;
 
+	BUG_ON(!sg);
 	/*
-	 * 1st se_mem should point to a page, and we shouldn't need more than
-	 * that for this cmd
+	 * We need to take into account a possible offset here for fabrics like
+	 * tcm_loop who may be using a contig buffer from the SCSI midlayer for
+	 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
 	 */
-	BUG_ON(cmd->data_length > PAGE_SIZE);
-
-	return kmap(se_mem->se_page);
+	return kmap(sg_page(sg)) + sg->offset;
 }
 EXPORT_SYMBOL(transport_kmap_first_data_page);
 
 void transport_kunmap_first_data_page(struct se_cmd *cmd)
 {
-	struct se_mem *se_mem;
-
-	BUG_ON(list_empty(&cmd->t_mem_list));
-
-	se_mem = list_first_entry(&cmd->t_mem_list, struct se_mem, se_list);
-
-	kunmap(se_mem->se_page);
+	kunmap(sg_page(cmd->t_data_sg));
 }
 EXPORT_SYMBOL(transport_kunmap_first_data_page);
 
 static int
 transport_generic_get_mem(struct se_cmd *cmd)
 {
-	struct se_mem *se_mem;
-	int length = cmd->data_length;
+	u32 length = cmd->data_length;
+	unsigned int nents;
+	struct page *page;
+	int i = 0;
 
 	/*
 	 * If the device uses memory mapping this is enough.
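The offset-aware kmap above matters when a fabric maps a midlayer buffer that does not start on a page boundary. The intended call pattern in CDB emulation code looks roughly like this — the INQUIRY byte written is illustrative only:

	unsigned char *buf;

	buf = transport_kmap_first_data_page(cmd);
	buf[0] = dev->transport->get_device_type(dev);	/* e.g. INQUIRY byte 0 */
	transport_kunmap_first_data_page(cmd);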
@@ -4166,161 +4100,34 @@ transport_generic_get_mem(struct se_cmd *cmd)
 	if (cmd->se_dev->transport->do_se_mem_map)
 		return 0;
 
-	/* Even cmds with length 0 will get here, btw */
-	while (length) {
-		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
-		if (!(se_mem)) {
-			printk(KERN_ERR "Unable to allocate struct se_mem\n");
-			goto out;
-		}
-
-/* #warning FIXME Allocate contigous pages for struct se_mem elements */
-		se_mem->se_page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
-		if (!(se_mem->se_page)) {
-			printk(KERN_ERR "alloc_pages() failed\n");
-			goto out;
-		}
-
-		INIT_LIST_HEAD(&se_mem->se_list);
-		se_mem->se_len = min_t(u32, length, PAGE_SIZE);
-		list_add_tail(&se_mem->se_list, &cmd->t_mem_list);
-		cmd->t_tasks_se_num++;
-
-		DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)"
-			" Offset(%u)\n", se_mem->se_page, se_mem->se_len,
-			se_mem->se_off);
-
-		length -= se_mem->se_len;
-	}
-
-	DEBUG_MEM("Allocated total struct se_mem elements(%u)\n",
-		cmd->t_tasks_se_num);
-
-	return 0;
-out:
-	if (se_mem)
-		__free_pages(se_mem->se_page, 0);
-	kmem_cache_free(se_mem_cache, se_mem);
-	return -ENOMEM;
-}
-
-int transport_init_task_sg(
-	struct se_task *task,
-	struct se_mem *in_se_mem,
-	u32 task_offset)
-{
-	struct se_cmd *se_cmd = task->task_se_cmd;
-	struct se_device *se_dev = se_cmd->se_dev;
-	struct se_mem *se_mem = in_se_mem;
-	struct target_core_fabric_ops *tfo = se_cmd->se_tfo;
-	u32 sg_length, task_size = task->task_size, task_sg_num_padded;
-
-	while (task_size != 0) {
-		DEBUG_SC("se_mem->se_page(%p) se_mem->se_len(%u)"
-			" se_mem->se_off(%u) task_offset(%u)\n",
-			se_mem->se_page, se_mem->se_len,
-			se_mem->se_off, task_offset);
-
-		if (task_offset == 0) {
-			if (task_size >= se_mem->se_len) {
-				sg_length = se_mem->se_len;
-
-				if (!(list_is_last(&se_mem->se_list,
-						&se_cmd->t_mem_list)))
-					se_mem = list_entry(se_mem->se_list.next,
-							struct se_mem, se_list);
-			} else {
-				sg_length = task_size;
-				task_size -= sg_length;
-				goto next;
-			}
-
-			DEBUG_SC("sg_length(%u) task_size(%u)\n",
-				sg_length, task_size);
-		} else {
-			if ((se_mem->se_len - task_offset) > task_size) {
-				sg_length = task_size;
-				task_size -= sg_length;
-				goto next;
-			} else {
-				sg_length = (se_mem->se_len - task_offset);
-
-				if (!(list_is_last(&se_mem->se_list,
-						&se_cmd->t_mem_list)))
-					se_mem = list_entry(se_mem->se_list.next,
-							struct se_mem, se_list);
-			}
-
-			DEBUG_SC("sg_length(%u) task_size(%u)\n",
-				sg_length, task_size);
-
-			task_offset = 0;
-		}
-		task_size -= sg_length;
-next:
-		DEBUG_SC("task[%u] - Reducing task_size to(%u)\n",
-			task->task_no, task_size);
-
-		task->task_sg_num++;
-	}
-	/*
-	 * Check if the fabric module driver is requesting that all
-	 * struct se_task->task_sg[] be chained together..  If so,
-	 * then allocate an extra padding SG entry for linking and
-	 * marking the end of the chained SGL.
-	 */
-	if (tfo->task_sg_chaining) {
-		task_sg_num_padded = (task->task_sg_num + 1);
-		task->task_padded_sg = 1;
-	} else
-		task_sg_num_padded = task->task_sg_num;
-
-	task->task_sg = kzalloc(task_sg_num_padded *
-			sizeof(struct scatterlist), GFP_KERNEL);
-	if (!(task->task_sg)) {
-		printk(KERN_ERR "Unable to allocate memory for"
-				" task->task_sg\n");
-		return -ENOMEM;
-	}
-	sg_init_table(&task->task_sg[0], task_sg_num_padded);
-	/*
-	 * Setup task->task_sg_bidi for SCSI READ payload for
-	 * TCM/pSCSI passthrough if present for BIDI-COMMAND
-	 */
-	if (!list_empty(&se_cmd->t_mem_bidi_list) &&
-	    (se_dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) {
-		task->task_sg_bidi = kzalloc(task_sg_num_padded *
-				sizeof(struct scatterlist), GFP_KERNEL);
-		if (!(task->task_sg_bidi)) {
-			kfree(task->task_sg);
-			task->task_sg = NULL;
-			printk(KERN_ERR "Unable to allocate memory for"
-				" task->task_sg_bidi\n");
-			return -ENOMEM;
-		}
-		sg_init_table(&task->task_sg_bidi[0], task_sg_num_padded);
-	}
-	/*
-	 * For the chaining case, setup the proper end of SGL for the
-	 * initial submission struct task into struct se_subsystem_api.
-	 * This will be cleared later by transport_do_task_sg_chain()
-	 */
-	if (task->task_padded_sg) {
-		sg_mark_end(&task->task_sg[task->task_sg_num - 1]);
-		/*
-		 * Added the 'if' check before marking end of bi-directional
-		 * scatterlist (which gets created only in case of request
-		 * (RD + WR).
-		 */
-		if (task->task_sg_bidi)
-			sg_mark_end(&task->task_sg_bidi[task->task_sg_num - 1]);
-	}
-
-	DEBUG_SC("Successfully allocated task->task_sg_num(%u),"
-		" task_sg_num_padded(%u)\n", task->task_sg_num,
-		task_sg_num_padded);
-
-	return task->task_sg_num;
-}
+	nents = DIV_ROUND_UP(length, PAGE_SIZE);
+	cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
+	if (!cmd->t_data_sg)
+		return -ENOMEM;
+
+	cmd->t_data_nents = nents;
+	sg_init_table(cmd->t_data_sg, nents);
+
+	while (length) {
+		u32 page_len = min_t(u32, length, PAGE_SIZE);
+		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+		if (!page)
+			goto out;
+
+		sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
+		length -= page_len;
+		i++;
+	}
+	return 0;
+
+out:
+	while (i >= 0) {
+		__free_page(sg_page(&cmd->t_data_sg[i]));
+		i--;
+	}
+	kfree(cmd->t_data_sg);
+	cmd->t_data_sg = NULL;
+	return -ENOMEM;
+}
 
 /* Reduce sectors if they are too long for the device */
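The rewritten transport_generic_get_mem() above uses the stock sg_init_table()/sg_set_page() idiom in place of the se_mem cache. A self-contained sketch of the same allocate-and-populate pattern, with hypothetical names and an unwind loop that only frees pages that were actually set:

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/gfp.h>

/* Sketch: build an SG table backed by freshly allocated zeroed pages. */
static struct scatterlist *alloc_sgl(u32 length, unsigned int *out_nents)
{
	unsigned int nents = DIV_ROUND_UP(length, PAGE_SIZE);
	struct scatterlist *sgl;
	unsigned int i;

	sgl = kmalloc(sizeof(*sgl) * nents, GFP_KERNEL);
	if (!sgl)
		return NULL;
	sg_init_table(sgl, nents);	/* zero entries, mark the last one */

	for (i = 0; i < nents; i++) {
		u32 page_len = min_t(u32, length, PAGE_SIZE);
		struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

		if (!page) {
			while (i--)
				__free_page(sg_page(&sgl[i]));
			kfree(sgl);
			return NULL;
		}
		sg_set_page(&sgl[i], page, page_len, 0);
		length -= page_len;
	}
	*out_nents = nents;
	return sgl;
}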
@@ -4338,165 +4145,6 @@ static inline sector_t transport_limit_task_sectors(
 	return sectors;
 }
 
-/*
- * Convert a sgl into a linked list of se_mems.
- */
-static int transport_map_sg_to_mem(
-	struct se_cmd *cmd,
-	struct list_head *se_mem_list,
-	struct scatterlist *sg)
-{
-	struct se_mem *se_mem;
-	u32 cmd_size = cmd->data_length;
-	int sg_count = 0;
-
-	WARN_ON(!sg);
-
-	while (cmd_size) {
-		/*
-		 * NOTE: it is safe to return -ENOMEM at any time in creating this
-		 * list because transport_free_pages() will eventually be called, and is
-		 * smart enough to deallocate all list items for sg and sg_bidi lists.
-		 */
-		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
-		if (!(se_mem)) {
-			printk(KERN_ERR "Unable to allocate struct se_mem\n");
-			return -ENOMEM;
-		}
-		INIT_LIST_HEAD(&se_mem->se_list);
-		DEBUG_MEM("sg_to_mem: Starting loop with cmd_size: %u"
-			" sg_page: %p offset: %d length: %d\n", cmd_size,
-			sg_page(sg), sg->offset, sg->length);
-
-		se_mem->se_page = sg_page(sg);
-		se_mem->se_off = sg->offset;
-
-		if (cmd_size > sg->length) {
-			se_mem->se_len = sg->length;
-			sg = sg_next(sg);
-		} else
-			se_mem->se_len = cmd_size;
-
-		cmd_size -= se_mem->se_len;
-		sg_count++;
-
-		DEBUG_MEM("sg_to_mem: sg_count: %u cmd_size: %u\n",
-			sg_count, cmd_size);
-		DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n",
-			se_mem->se_page, se_mem->se_off, se_mem->se_len);
-
-		list_add_tail(&se_mem->se_list, se_mem_list);
-	}
-
-	DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments\n", sg_count);
-
-	return sg_count;
-}
-
-/*	transport_map_mem_to_sg():
- *
- *
- */
-int transport_map_mem_to_sg(
-	struct se_task *task,
-	struct list_head *se_mem_list,
-	struct scatterlist *sg,
-	struct se_mem *in_se_mem,
-	struct se_mem **out_se_mem,
-	u32 *se_mem_cnt,
-	u32 *task_offset)
-{
-	struct se_cmd *se_cmd = task->task_se_cmd;
-	struct se_mem *se_mem = in_se_mem;
-	u32 task_size = task->task_size, sg_no = 0;
-
-	if (!sg) {
-		printk(KERN_ERR "Unable to locate valid struct"
-			" scatterlist pointer\n");
-		return -EINVAL;
-	}
-
-	while (task_size != 0) {
-		/*
-		 * Setup the contiguous array of scatterlists for
-		 * this struct se_task.
-		 */
-		sg_assign_page(sg, se_mem->se_page);
-
-		if (*task_offset == 0) {
-			sg->offset = se_mem->se_off;
-
-			if (task_size >= se_mem->se_len) {
-				sg->length = se_mem->se_len;
-
-				if (!(list_is_last(&se_mem->se_list,
-						&se_cmd->t_mem_list))) {
-					se_mem = list_entry(se_mem->se_list.next,
-							struct se_mem, se_list);
-					(*se_mem_cnt)++;
-				}
-			} else {
-				sg->length = task_size;
-				/*
-				 * Determine if we need to calculate an offset
-				 * into the struct se_mem on the next go around..
-				 */
-				task_size -= sg->length;
-				if (!(task_size))
-					*task_offset = sg->length;
-
-				goto next;
-			}
-
-		} else {
-			sg->offset = (*task_offset + se_mem->se_off);
-
-			if ((se_mem->se_len - *task_offset) > task_size) {
-				sg->length = task_size;
-				/*
-				 * Determine if we need to calculate an offset
-				 * into the struct se_mem on the next go around..
-				 */
-				task_size -= sg->length;
-				if (!(task_size))
-					*task_offset += sg->length;
-
-				goto next;
-			} else {
-				sg->length = (se_mem->se_len - *task_offset);
-
-				if (!(list_is_last(&se_mem->se_list,
-						&se_cmd->t_mem_list))) {
-					se_mem = list_entry(se_mem->se_list.next,
-							struct se_mem, se_list);
-					(*se_mem_cnt)++;
-				}
-			}
-
-			*task_offset = 0;
-		}
-		task_size -= sg->length;
-next:
-		DEBUG_MEM("task[%u] mem_to_sg - sg[%u](%p)(%u)(%u) - Reducing"
-			" task_size to(%u), task_offset: %u\n", task->task_no, sg_no,
-			sg_page(sg), sg->length, sg->offset, task_size, *task_offset);
-
-		sg_no++;
-		if (!(task_size))
-			break;
-
-		sg = sg_next(sg);
-
-		if (task_size > se_cmd->data_length)
-			BUG();
-	}
-	*out_se_mem = se_mem;
-
-	DEBUG_MEM("task[%u] - Mapped(%u) struct se_mem segments to total(%u)"
-		" SGs\n", task->task_no, *se_mem_cnt, sg_no);
-
-	return 0;
-}
 
 /*
  * This function can be used by HW target mode drivers to create a linked
@@ -4506,81 +4154,43 @@ next:
  */
 void transport_do_task_sg_chain(struct se_cmd *cmd)
 {
-	struct scatterlist *sg_head = NULL, *sg_link = NULL, *sg_first = NULL;
-	struct scatterlist *sg_head_cur = NULL, *sg_link_cur = NULL;
-	struct scatterlist *sg, *sg_end = NULL, *sg_end_cur = NULL;
+	struct scatterlist *sg_first = NULL;
+	struct scatterlist *sg_prev = NULL;
+	int sg_prev_nents = 0;
+	struct scatterlist *sg;
 	struct se_task *task;
-	struct target_core_fabric_ops *tfo = cmd->se_tfo;
-	u32 task_sg_num = 0, sg_count = 0;
+	u32 chained_nents = 0;
 	int i;
 
-	if (tfo->task_sg_chaining == 0) {
-		printk(KERN_ERR "task_sg_chaining is diabled for fabric module:"
-				" %s\n", tfo->get_fabric_name());
-		dump_stack();
-		return;
-	}
+	BUG_ON(!cmd->se_tfo->task_sg_chaining);
+
 	/*
 	 * Walk the struct se_task list and setup scatterlist chains
 	 * for each contiguously allocated struct se_task->task_sg[].
 	 */
 	list_for_each_entry(task, &cmd->t_task_list, t_list) {
-		if (!(task->task_sg) || !(task->task_padded_sg))
+		if (!task->task_sg)
 			continue;
 
-		if (sg_head && sg_link) {
-			sg_head_cur = &task->task_sg[0];
-			sg_link_cur = &task->task_sg[task->task_sg_num];
-			/*
-			 * Either add chain or mark end of scatterlist
-			 */
-			if (!(list_is_last(&task->t_list,
-					&cmd->t_task_list))) {
-				/*
-				 * Clear existing SGL termination bit set in
-				 * transport_init_task_sg(), see sg_mark_end()
-				 */
-				sg_end_cur = &task->task_sg[task->task_sg_num - 1];
-				sg_end_cur->page_link &= ~0x02;
-
-				sg_chain(sg_head, task_sg_num, sg_head_cur);
-				sg_count += task->task_sg_num;
-				task_sg_num = (task->task_sg_num + 1);
-			} else {
-				sg_chain(sg_head, task_sg_num, sg_head_cur);
-				sg_count += task->task_sg_num;
-				task_sg_num = task->task_sg_num;
-			}
+		BUG_ON(!task->task_padded_sg);
 
-			sg_head = sg_head_cur;
-			sg_link = sg_link_cur;
-			continue;
-		}
-		sg_head = sg_first = &task->task_sg[0];
-		sg_link = &task->task_sg[task->task_sg_num];
-		/*
-		 * Check for single task..
-		 */
-		if (!(list_is_last(&task->t_list, &cmd->t_task_list))) {
-			/*
-			 * Clear existing SGL termination bit set in
-			 * transport_init_task_sg(), see sg_mark_end()
-			 */
-			sg_end = &task->task_sg[task->task_sg_num - 1];
-			sg_end->page_link &= ~0x02;
-			sg_count += task->task_sg_num;
-			task_sg_num = (task->task_sg_num + 1);
+		if (!sg_first) {
+			sg_first = task->task_sg;
+			chained_nents = task->task_sg_num;
 		} else {
-			sg_count += task->task_sg_num;
-			task_sg_num = task->task_sg_num;
+			sg_chain(sg_prev, sg_prev_nents, task->task_sg);
+			chained_nents += task->task_sg_num;
 		}
+
+		sg_prev = task->task_sg;
+		sg_prev_nents = task->task_sg_num;
 	}
 	/*
 	 * Setup the starting pointer and total t_tasks_sg_linked_no including
	 * padding SGs for linking and to mark the end.
 	 */
 	cmd->t_tasks_sg_chained = sg_first;
-	cmd->t_tasks_sg_chained_no = sg_count;
+	cmd->t_tasks_sg_chained_no = chained_nents;
 
 	DEBUG_CMD_M("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
 		" t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
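sg_chain() consumes the extra padding entry that each chained task over-allocates: the last slot of the previous table becomes a link to the next table, after which for_each_sg()/sg_next() walk the chained lists transparently. A minimal sketch (names illustrative):

#include <linux/scatterlist.h>

/*
 * Sketch: chain table a into table b.  a_nents must count the spare
 * padding entry, i.e. a holds a_nents - 1 data entries plus one slot
 * that sg_chain() converts into the chain link.
 */
static void chain_two_tables(struct scatterlist *a, unsigned int a_nents,
			     struct scatterlist *b)
{
	sg_chain(a, a_nents, b);
}

This is exactly why sg_prev_nents above is the padded task_sg_num: the chain entry replaces the padding slot rather than a data entry.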
@@ -4599,129 +4209,46 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
 }
 EXPORT_SYMBOL(transport_do_task_sg_chain);
 
-static int transport_do_se_mem_map(
-	struct se_device *dev,
-	struct se_task *task,
-	struct list_head *se_mem_list,
-	void *in_mem,
-	struct se_mem *in_se_mem,
-	struct se_mem **out_se_mem,
-	u32 *se_mem_cnt,
-	u32 *task_offset_in)
-{
-	u32 task_offset = *task_offset_in;
-	int ret = 0;
-	/*
-	 * se_subsystem_api_t->do_se_mem_map is used when internal allocation
-	 * has been done by the transport plugin.
-	 */
-	if (dev->transport->do_se_mem_map) {
-		ret = dev->transport->do_se_mem_map(task, se_mem_list,
-				in_mem, in_se_mem, out_se_mem, se_mem_cnt,
-				task_offset_in);
-		if (ret == 0)
-			task->task_se_cmd->t_tasks_se_num += *se_mem_cnt;
-
-		return ret;
-	}
-
-	BUG_ON(list_empty(se_mem_list));
-	/*
-	 * This is the normal path for all normal non BIDI and BIDI-COMMAND
-	 * WRITE payloads..  If we need to do BIDI READ passthrough for
-	 * TCM/pSCSI the first call to transport_do_se_mem_map ->
-	 * transport_init_task_sg() -> transport_map_mem_to_sg() will do the
-	 * allocation for task->task_sg_bidi, and the subsequent call to
-	 * transport_do_se_mem_map() from transport_generic_get_cdb_count()
-	 */
-	if (!(task->task_sg_bidi)) {
-		/*
-		 * Assume default that transport plugin speaks preallocated
-		 * scatterlists.
-		 */
-		ret = transport_init_task_sg(task, in_se_mem, task_offset);
-		if (ret <= 0)
-			return ret;
-		/*
-		 * struct se_task->task_sg now contains the struct scatterlist array.
-		 */
-		return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
-					in_se_mem, out_se_mem, se_mem_cnt,
-					task_offset_in);
-	}
-	/*
-	 * Handle the se_mem_list -> struct task->task_sg_bidi
-	 * memory map for the extra BIDI READ payload
-	 */
-	return transport_map_mem_to_sg(task, se_mem_list, task->task_sg_bidi,
-				in_se_mem, out_se_mem, se_mem_cnt,
-				task_offset_in);
-}
-
 /*
  * Break up cmd into chunks transport can handle
  */
-static u32 transport_allocate_tasks(
+static int transport_allocate_data_tasks(
 	struct se_cmd *cmd,
 	unsigned long long lba,
-	u32 sectors,
 	enum dma_data_direction data_direction,
-	struct list_head *mem_list,
-	int set_counts)
+	struct scatterlist *sgl,
+	unsigned int sgl_nents)
 {
 	unsigned char *cdb = NULL;
 	struct se_task *task;
-	struct se_mem *se_mem = NULL;
-	struct se_mem *se_mem_lout = NULL;
-	struct se_mem *se_mem_bidi = NULL;
-	struct se_mem *se_mem_bidi_lout = NULL;
 	struct se_device *dev = cmd->se_dev;
-	int ret;
-	u32 task_offset_in = 0;
-	u32 se_mem_cnt = 0;
-	u32 se_mem_bidi_cnt = 0;
-	u32 task_cdbs = 0;
-
-	BUG_ON(!mem_list);
-	/*
-	 * While using RAMDISK_DR backstores is the only case where
-	 * mem_list will ever be empty at this point.
-	 */
-	if (!(list_empty(mem_list)))
-		se_mem = list_first_entry(mem_list, struct se_mem, se_list);
-	/*
-	 * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to
-	 * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation
-	 */
-	if (!list_empty(&cmd->t_mem_bidi_list) &&
-	    (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV))
-		se_mem_bidi = list_first_entry(&cmd->t_mem_bidi_list,
-					struct se_mem, se_list);
+	unsigned long flags;
+	sector_t sectors;
+	int task_count;
+	int i;
+	sector_t dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
+	u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
+	struct scatterlist *sg;
+	struct scatterlist *cmd_sg;
 
-	while (sectors) {
-		sector_t limited_sectors;
+	WARN_ON(cmd->data_length % sector_size);
+	sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
+	task_count = DIV_ROUND_UP(sectors, dev_max_sectors);
 
-		DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n",
-			cmd->se_tfo->get_task_tag(cmd), lba, sectors,
-			transport_dev_end_lba(dev));
-
-		limited_sectors = transport_limit_task_sectors(dev, lba, sectors);
-		if (!limited_sectors)
-			break;
+	cmd_sg = sgl;
+	for (i = 0; i < task_count; i++) {
+		unsigned int task_size;
+		int count;
 
 		task = transport_generic_get_task(cmd, data_direction);
 		if (!task)
-			goto out;
+			return -ENOMEM;
 
 		task->task_lba = lba;
-		task->task_sectors = limited_sectors;
-		lba += task->task_sectors;
-		sectors -= task->task_sectors;
-		task->task_size = (task->task_sectors *
-				dev->se_sub_dev->se_dev_attrib.block_size);
+		task->task_sectors = min(sectors, dev_max_sectors);
+		task->task_size = task->task_sectors * sector_size;
 
 		cdb = dev->transport->get_cdb(task);
-		/* Should be part of task, can't fail */
 		BUG_ON(!cdb);
 
 		memcpy(cdb, cmd->t_task_cdb,
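The new splitting arithmetic deserves a worked example: with data_length = 1 MiB, block_size = 512 and max_sectors = 1024, sectors = 2048 and task_count = DIV_ROUND_UP(2048, 1024) = 2, so two se_tasks of 512 KiB each are built. A minimal sketch of the same accounting, with hypothetical names:

#include <linux/kernel.h>	/* DIV_ROUND_UP, min */
#include <linux/types.h>	/* sector_t */

/* Sketch: per-task sector accounting used when splitting a command. */
static void plan_tasks(sector_t total_sectors, sector_t max_sectors)
{
	unsigned int task_count = DIV_ROUND_UP(total_sectors, max_sectors);
	unsigned int i;

	for (i = 0; i < task_count; i++) {
		sector_t this_sectors = min(total_sectors, max_sectors);

		/* ... build one se_task covering this_sectors here ... */
		total_sectors -= this_sectors;
	}
}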
@@ -4731,94 +4258,86 @@ static u32 transport_allocate_tasks(
 		cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb);
 
 		/*
-		 * Perform the SE OBJ plugin and/or Transport plugin specific
-		 * mapping for cmd->t_mem_list. And setup the
-		 * task->task_sg and if necessary task->task_sg_bidi
+		 * Check if the fabric module driver is requesting that all
+		 * struct se_task->task_sg[] be chained together..  If so,
+		 * then allocate an extra padding SG entry for linking and
+		 * marking the end of the chained SGL.
+		 * Possibly over-allocate task sgl size by using cmd sgl size.
+		 * It's so much easier and only a waste when task_count > 1.
+		 * That is extremely rare.
 		 */
-		ret = transport_do_se_mem_map(dev, task, mem_list,
-				NULL, se_mem, &se_mem_lout, &se_mem_cnt,
-				&task_offset_in);
-		if (ret < 0)
-			goto out;
+		task->task_sg_num = sgl_nents;
+		if (cmd->se_tfo->task_sg_chaining) {
+			task->task_sg_num++;
+			task->task_padded_sg = 1;
+		}
 
-		se_mem = se_mem_lout;
-		/*
-		 * Setup the cmd->t_mem_bidi_list -> task->task_sg_bidi
-		 * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI
-		 *
-		 * Note that the first call to transport_do_se_mem_map() above will
-		 * allocate struct se_task->task_sg_bidi in transport_do_se_mem_map()
-		 * -> transport_init_task_sg(), and the second here will do the
-		 * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI.
-		 */
-		if (task->task_sg_bidi != NULL) {
-			ret = transport_do_se_mem_map(dev, task,
-				&cmd->t_mem_bidi_list, NULL,
-				se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt,
-				&task_offset_in);
-			if (ret < 0)
-				goto out;
+		task->task_sg = kmalloc(sizeof(struct scatterlist) * \
+					task->task_sg_num, GFP_KERNEL);
+		if (!task->task_sg) {
+			cmd->se_dev->transport->free_task(task);
+			return -ENOMEM;
+		}
 
-			se_mem_bidi = se_mem_bidi_lout;
+		sg_init_table(task->task_sg, task->task_sg_num);
+
+		task_size = task->task_size;
+
+		/* Build new sgl, only up to task_size */
+		for_each_sg(task->task_sg, sg, task->task_sg_num, count) {
+			if (cmd_sg->length > task_size)
+				break;
+
+			*sg = *cmd_sg;
+			task_size -= cmd_sg->length;
+			cmd_sg = sg_next(cmd_sg);
 		}
-		task_cdbs++;
 
-		DEBUG_VOL("Incremented task_cdbs(%u) task->task_sg_num(%u)\n",
-			task_cdbs, task->task_sg_num);
-	}
-
-	if (set_counts) {
-		atomic_inc(&cmd->t_fe_count);
-		atomic_inc(&cmd->t_se_count);
-	}
-
-	DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n",
-		cmd->se_tfo->get_task_tag(cmd), (data_direction == DMA_TO_DEVICE)
-		? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE", task_cdbs);
-
-	return task_cdbs;
-out:
-	return 0;
+		lba += task->task_sectors;
+		sectors -= task->task_sectors;
+
+		spin_lock_irqsave(&cmd->t_state_lock, flags);
+		list_add_tail(&task->t_list, &cmd->t_task_list);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+	}
+
+	return task_count;
 }
 
 static int
-transport_map_control_cmd_to_task(struct se_cmd *cmd)
+transport_allocate_control_task(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
 	unsigned char *cdb;
 	struct se_task *task;
-	int ret;
+	unsigned long flags;
 
 	task = transport_generic_get_task(cmd, cmd->data_direction);
 	if (!task)
-		return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+		return -ENOMEM;
 
 	cdb = dev->transport->get_cdb(task);
 	BUG_ON(!cdb);
 	memcpy(cdb, cmd->t_task_cdb,
 			scsi_command_size(cmd->t_task_cdb));
 
+	task->task_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
+				GFP_KERNEL);
+	if (!task->task_sg) {
+		cmd->se_dev->transport->free_task(task);
+		return -ENOMEM;
+	}
+
+	memcpy(task->task_sg, cmd->t_data_sg,
+	       sizeof(struct scatterlist) * cmd->t_data_nents);
 	task->task_size = cmd->data_length;
-	task->task_sg_num =
-		(cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 1 : 0;
+	task->task_sg_num = cmd->t_data_nents;
 
-	atomic_inc(&cmd->t_fe_count);
-	atomic_inc(&cmd->t_se_count);
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	list_add_tail(&task->t_list, &cmd->t_task_list);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 	if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
-		struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
-		u32 se_mem_cnt = 0, task_offset = 0;
-
-		if (!list_empty(&cmd->t_mem_list))
-			se_mem = list_first_entry(&cmd->t_mem_list,
-					struct se_mem, se_list);
-
-		ret = transport_do_se_mem_map(dev, task,
-			&cmd->t_mem_list, NULL, se_mem,
-			&se_mem_lout, &se_mem_cnt, &task_offset);
-		if (ret < 0)
-			return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
-
 		if (dev->transport->map_task_SG)
 			return dev->transport->map_task_SG(task);
 		return 0;
@@ -4828,10 +4347,32 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd) | |||
4828 | return 0; | 4347 | return 0; |
4829 | } else { | 4348 | } else { |
4830 | BUG(); | 4349 | BUG(); |
4831 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; | 4350 | return -ENOMEM; |
4832 | } | 4351 | } |
4833 | } | 4352 | } |
4834 | 4353 | ||
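
A point worth making explicit: for control CDBs the task does not get its own data buffer. The memcpy() duplicates only the scatterlist descriptors, so task->task_sg and cmd->t_data_sg describe the same pages. A small userspace sketch of that shallow-copy behavior (same simplified segment struct as in the previous sketch, with a plain buffer pointer standing in for the page):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sg_ent {
	void *buf;		/* stands in for sg_page() + offset */
	unsigned int length;
};

int main(void)
{
	static char payload[512];
	struct sg_ent cmd_sg[1] = { { payload, sizeof(payload) } };
	unsigned int nents = 1;

	/* task->task_sg = kmalloc(...); memcpy(task->task_sg, cmd->t_data_sg, ...) */
	struct sg_ent *task_sg = malloc(sizeof(*task_sg) * nents);
	if (!task_sg)
		return 1;
	memcpy(task_sg, cmd_sg, sizeof(*task_sg) * nents);

	/* The descriptors are distinct; the backing memory is shared. */
	printf("same backing buffer: %s\n",
	       task_sg[0].buf == cmd_sg[0].buf ? "yes" : "no");
	free(task_sg);
	return 0;
}
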
4354 | static u32 transport_allocate_tasks( | ||
4355 | struct se_cmd *cmd, | ||
4356 | unsigned long long lba, | ||
4357 | enum dma_data_direction data_direction, | ||
4358 | struct scatterlist *sgl, | ||
4359 | unsigned int sgl_nents) | ||
4360 | { | ||
4361 | int ret; | ||
4362 | |||
4363 | if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { | ||
4364 | return transport_allocate_data_tasks(cmd, lba, data_direction, | ||
4365 | sgl, sgl_nents); | ||
4366 | } else { | ||
4367 | ret = transport_allocate_control_task(cmd); | ||
4368 | if (ret < 0) | ||
4369 | return ret; | ||
4370 | else | ||
4371 | return 1; | ||
4372 | } | ||
4373 | } | ||
4374 | |||
4375 | |||
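
transport_allocate_tasks() is now the single fan-out point: data CDBs may be split into several tasks, while a control CDB always yields exactly one. A hedged sketch of that dispatch convention (alloc_data_tasks() and alloc_control_task() are hypothetical stand-ins for the two TCM paths):

#include <stdio.h>

enum cdb_type { CDB_DATA_SG_IO, CDB_CONTROL };

static int alloc_data_tasks(void)   { return 2; }	/* task count */
static int alloc_control_task(void) { return 0; }	/* 0 on success */

/*
 * Data CDBs return however many tasks the LBA range was split into;
 * control CDBs report success as a count of one.
 */
static int allocate_tasks(enum cdb_type type)
{
	int ret;

	if (type == CDB_DATA_SG_IO)
		return alloc_data_tasks();
	ret = alloc_control_task();
	return ret < 0 ? ret : 1;
}

int main(void)
{
	printf("data: %d, control: %d\n",
	       allocate_tasks(CDB_DATA_SG_IO), allocate_tasks(CDB_CONTROL));
	return 0;
}

(One wrinkle the sketch papers over: the kernel function is declared to return u32, so a negative error from the control path would be implicitly converted; an int return, as used here, avoids that.)
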
4835 | /* transport_generic_new_cmd(): Called from transport_processing_thread() | 4376 | /* transport_generic_new_cmd(): Called from transport_processing_thread() |
4836 | * | 4377 | * |
4837 | * Allocate storage transport resources from a set of values predefined | 4378 | * Allocate storage transport resources from a set of values predefined |
@@ -4850,10 +4391,10 @@ int transport_generic_new_cmd(struct se_cmd *cmd) | |||
4850 | /* | 4391 | /*
4851 | * Determine if the TCM fabric module has already allocated physical | 4392 | * Determine if the TCM fabric module has already allocated physical
4852 | * memory, and is directly calling transport_generic_map_mem_to_cmd() | 4393 | * memory, and is directly calling transport_generic_map_mem_to_cmd()
4853 | * to setup beforehand the linked list of physical memory at | 4394 | * beforehand.
4854 | * cmd->t_mem_list of struct se_mem->se_page | ||
4855 | */ | 4395 | */
4856 | if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) { | 4396 | if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && |
4397 | cmd->data_length) { | ||
4857 | ret = transport_generic_get_mem(cmd); | 4398 | ret = transport_generic_get_mem(cmd); |
4858 | if (ret < 0) | 4399 | if (ret < 0) |
4859 | return ret; | 4400 | return ret; |
@@ -4863,19 +4404,13 @@ int transport_generic_new_cmd(struct se_cmd *cmd) | |||
4863 | if (ret < 0) | 4404 | if (ret < 0) |
4864 | return ret; | 4405 | return ret; |
4865 | 4406 | ||
4866 | if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { | 4407 | list_for_each_entry(task, &cmd->t_task_list, t_list) { |
4867 | list_for_each_entry(task, &cmd->t_task_list, t_list) { | 4408 | if (atomic_read(&task->task_sent)) |
4868 | if (atomic_read(&task->task_sent)) | 4409 | continue; |
4869 | continue; | 4410 | if (!dev->transport->map_task_SG) |
4870 | if (!dev->transport->map_task_SG) | 4411 | continue; |
4871 | continue; | ||
4872 | 4412 | ||
4873 | ret = dev->transport->map_task_SG(task); | 4413 | ret = dev->transport->map_task_SG(task); |
4874 | if (ret < 0) | ||
4875 | return ret; | ||
4876 | } | ||
4877 | } else { | ||
4878 | ret = transport_map_control_cmd_to_task(cmd); | ||
4879 | if (ret < 0) | 4414 | if (ret < 0) |
4880 | return ret; | 4415 | return ret; |
4881 | } | 4416 | } |
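
With the control/data split gone, transport_generic_new_cmd() can run one uniform loop: every task not yet sent is handed to the backend's map_task_SG hook, and the first failure aborts the walk. A userspace sketch of that pattern (the function pointer models dev->transport->map_task_SG; the names here are illustrative only):

#include <stdio.h>

struct task {
	int sent;		/* models atomic_read(&task->task_sent) */
};

static int map_task_sg(struct task *t)
{
	printf("mapping task %p\n", (void *)t);
	return 0;
}

static int map_new_tasks(struct task *tasks, int n,
			 int (*map_hook)(struct task *))
{
	int i, ret;

	for (i = 0; i < n; i++) {
		if (tasks[i].sent)
			continue;		/* already on the wire */
		if (!map_hook)
			continue;		/* backend has no mapping step */
		ret = map_hook(&tasks[i]);
		if (ret < 0)
			return ret;		/* first error stops the walk */
	}
	return 0;
}

int main(void)
{
	struct task tasks[3] = { {0}, {1}, {0} };	/* middle one already sent */

	return map_new_tasks(tasks, 3, map_task_sg) < 0;
}
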
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 1017f56bbbcc..9365e53947ad 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c | |||
@@ -59,7 +59,8 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) | |||
59 | struct fc_exch *ep; | 59 | struct fc_exch *ep; |
60 | struct fc_seq *sp; | 60 | struct fc_seq *sp; |
61 | struct se_cmd *se_cmd; | 61 | struct se_cmd *se_cmd; |
62 | struct se_mem *mem; | 62 | struct scatterlist *sg; |
63 | int count; | ||
63 | 64 | ||
64 | if (!(ft_debug_logging & FT_DEBUG_IO)) | 65 | if (!(ft_debug_logging & FT_DEBUG_IO)) |
65 | return; | 66 | return; |
@@ -71,15 +72,16 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) | |||
71 | caller, cmd, cmd->cdb); | 72 | caller, cmd, cmd->cdb); |
72 | printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun); | 73 | printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun); |
73 | 74 | ||
74 | printk(KERN_INFO "%s: cmd %p se_num %u len %u se_cmd_flags <0x%x>\n", | 75 | printk(KERN_INFO "%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n", |
75 | caller, cmd, se_cmd->t_tasks_se_num, | 76 | caller, cmd, se_cmd->t_data_nents, |
76 | se_cmd->data_length, se_cmd->se_cmd_flags); | 77 | se_cmd->data_length, se_cmd->se_cmd_flags); |
77 | 78 | ||
78 | list_for_each_entry(mem, &se_cmd->t_mem_list, se_list) | 79 | for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count) |
79 | printk(KERN_INFO "%s: cmd %p mem %p page %p " | 80 | printk(KERN_INFO "%s: cmd %p sg %p page %p " |
80 | "len 0x%x off 0x%x\n", | 81 | "len 0x%x off 0x%x\n", |
81 | caller, cmd, mem, | 82 | caller, cmd, sg, |
82 | mem->se_page, mem->se_len, mem->se_off); | 83 | sg_page(sg), sg->length, sg->offset); |
84 | |||
83 | sp = cmd->seq; | 85 | sp = cmd->seq; |
84 | if (sp) { | 86 | if (sp) { |
85 | ep = fc_seq_exch(sp); | 87 | ep = fc_seq_exch(sp); |
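
The dump loop above is the canonical for_each_sg() idiom: iterate nents entries, printing page, length, and offset for each. A userspace equivalent (plain array iteration standing in for for_each_sg(), a void pointer for sg_page()):

#include <stdio.h>

struct sg_ent {
	void *page;		/* stands in for sg_page(sg) */
	unsigned int length;
	unsigned int offset;
};

int main(void)
{
	static char a[4096], b[4096];
	struct sg_ent sgl[2] = { { a, 4096, 0 }, { b, 512, 128 } };
	unsigned int nents = 2, count;

	for (count = 0; count < nents; count++)
		printf("sg %p page %p len 0x%x off 0x%x\n",
		       (void *)&sgl[count], sgl[count].page,
		       sgl[count].length, sgl[count].offset);
	return 0;
}
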
@@ -256,10 +258,9 @@ int ft_write_pending(struct se_cmd *se_cmd) | |||
256 | (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) { | 258 | (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) { |
257 | if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { | 259 | if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { |
258 | /* | 260 | /* |
259 | * Map se_mem list to scatterlist, so that | 261 | * cmd may have been broken up into multiple |
260 | * DDP can be setup. DDP setup function require | 262 | * tasks. Link their sgs together so we can |
261 | * scatterlist. se_mem_list is internal to | 263 | * operate on them all at once. |
262 | * TCM/LIO target | ||
263 | */ | 264 | */ |
264 | transport_do_task_sg_chain(se_cmd); | 265 | transport_do_task_sg_chain(se_cmd); |
265 | cmd->sg = se_cmd->t_tasks_sg_chained; | 266 | cmd->sg = se_cmd->t_tasks_sg_chained; |
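
The chaining call above is what the per-task padding entry was reserved for: the spare slot at the end of each task's table becomes a link, so the chained whole can be walked as one list for DDP setup. A simplified model of the mechanism (the real kernel encodes the link in the low bits of page_link; an explicit chain pointer is used here instead):

#include <stdio.h>

struct sg_ent {
	unsigned int length;
	struct sg_ent *chain;	/* non-NULL: link entry into the next table */
};

/* Walk across tables the way sg_next() follows a chain entry. */
static struct sg_ent *sg_next_model(struct sg_ent *sg)
{
	sg++;
	if (sg->chain)
		sg = sg->chain;
	return sg;
}

int main(void)
{
	struct sg_ent t2[2] = { { 512, NULL }, { 0, NULL } };	/* end marker */
	struct sg_ent t1[3] = { { 4096, NULL }, { 4096, NULL },
				{ 0, t2 } };			/* padding -> link */
	struct sg_ent *sg;
	unsigned int total = 0;

	for (sg = t1; sg->length; sg = sg_next_model(sg))
		total += sg->length;
	printf("chained total: %u\n", total);	/* 4096 + 4096 + 512 */
	return 0;
}
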
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index 837660728563..3563a9029c4a 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c | |||
@@ -68,17 +68,17 @@ int ft_queue_data_in(struct se_cmd *se_cmd) | |||
68 | struct fc_frame *fp = NULL; | 68 | struct fc_frame *fp = NULL; |
69 | struct fc_exch *ep; | 69 | struct fc_exch *ep; |
70 | struct fc_lport *lport; | 70 | struct fc_lport *lport; |
71 | struct se_mem *mem; | 71 | struct scatterlist *sg = NULL; |
72 | size_t remaining; | 72 | size_t remaining; |
73 | u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF; | 73 | u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF; |
74 | u32 mem_off; | 74 | u32 mem_off = 0; |
75 | u32 fh_off = 0; | 75 | u32 fh_off = 0; |
76 | u32 frame_off = 0; | 76 | u32 frame_off = 0; |
77 | size_t frame_len = 0; | 77 | size_t frame_len = 0; |
78 | size_t mem_len; | 78 | size_t mem_len = 0; |
79 | size_t tlen; | 79 | size_t tlen; |
80 | size_t off_in_page; | 80 | size_t off_in_page; |
81 | struct page *page; | 81 | struct page *page = NULL; |
82 | int use_sg; | 82 | int use_sg; |
83 | int error; | 83 | int error; |
84 | void *page_addr; | 84 | void *page_addr; |
@@ -94,13 +94,12 @@ int ft_queue_data_in(struct se_cmd *se_cmd) | |||
94 | /* | 94 | /* |
95 | * Setup to use first mem list entry, unless no data. | 95 | * Setup to use first mem list entry, unless no data. |
96 | */ | 96 | */ |
97 | BUG_ON(remaining && list_empty(&se_cmd->t_mem_list)); | 97 | BUG_ON(remaining && !se_cmd->t_data_sg); |
98 | if (remaining) { | 98 | if (remaining) { |
99 | mem = list_first_entry(&se_cmd->t_mem_list, | 99 | sg = se_cmd->t_data_sg; |
100 | struct se_mem, se_list); | 100 | mem_len = sg->length; |
101 | mem_len = mem->se_len; | 101 | mem_off = sg->offset; |
102 | mem_off = mem->se_off; | 102 | page = sg_page(sg); |
103 | page = mem->se_page; | ||
104 | } | 103 | } |
105 | 104 | ||
106 | /* no scatter/gather in skb for odd word length due to fc_seq_send() */ | 105 | /* no scatter/gather in skb for odd word length due to fc_seq_send() */ |
@@ -108,12 +107,10 @@ int ft_queue_data_in(struct se_cmd *se_cmd) | |||
108 | 107 | ||
109 | while (remaining) { | 108 | while (remaining) { |
110 | if (!mem_len) { | 109 | if (!mem_len) { |
111 | BUG_ON(!mem); | 110 | sg = sg_next(sg); |
112 | mem = list_entry(mem->se_list.next, | 111 | mem_len = min((size_t)sg->length, remaining); |
113 | struct se_mem, se_list); | 112 | mem_off = sg->offset; |
114 | mem_len = min((size_t)mem->se_len, remaining); | 113 | page = sg_page(sg); |
115 | mem_off = mem->se_off; | ||
116 | page = mem->se_page; | ||
117 | } | 114 | } |
118 | if (!frame_len) { | 115 | if (!frame_len) { |
119 | /* | 116 | /* |
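
The transmit loop's bookkeeping is worth isolating: mem_len/mem_off track the unconsumed tail of the current segment, refilled from the next sg entry whenever they hit zero, while frame-sized chunks are carved off the front. A userspace skeleton of that walk (frame size fixed at 2048 bytes; the FC framing, DDP, and page mapping are all elided):

#include <stdio.h>

struct sg_ent {
	unsigned int length;
	unsigned int offset;
};

int main(void)
{
	struct sg_ent sgl[3] = { { 4096, 0 }, { 1024, 256 }, { 512, 0 } };
	unsigned int i = 0;
	size_t remaining = 4096 + 1024 + 512;
	size_t mem_len = sgl[0].length;
	size_t mem_off = sgl[0].offset;
	const size_t frame_max = 2048;

	while (remaining) {
		size_t tlen;

		if (!mem_len) {		/* current segment used up: advance */
			i++;
			mem_len = sgl[i].length < remaining ?
				  sgl[i].length : remaining;
			mem_off = sgl[i].offset;
		}
		tlen = mem_len < frame_max ? mem_len : frame_max;
		printf("chunk: seg %u off %zu len %zu\n", i, mem_off, tlen);
		mem_len -= tlen;
		mem_off += tlen;
		remaining -= tlen;
	}
	return 0;
}
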
@@ -200,13 +197,13 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) | |||
200 | struct fc_exch *ep; | 197 | struct fc_exch *ep; |
201 | struct fc_lport *lport; | 198 | struct fc_lport *lport; |
202 | struct fc_frame_header *fh; | 199 | struct fc_frame_header *fh; |
203 | struct se_mem *mem; | 200 | struct scatterlist *sg = NULL; |
204 | u32 mem_off; | 201 | u32 mem_off = 0; |
205 | u32 rel_off; | 202 | u32 rel_off; |
206 | size_t frame_len; | 203 | size_t frame_len; |
207 | size_t mem_len; | 204 | size_t mem_len = 0; |
208 | size_t tlen; | 205 | size_t tlen; |
209 | struct page *page; | 206 | struct page *page = NULL; |
210 | void *page_addr; | 207 | void *page_addr; |
211 | void *from; | 208 | void *from; |
212 | void *to; | 209 | void *to; |
@@ -288,23 +285,20 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) | |||
288 | /* | 285 | /* |
289 | * Setup to use first mem list entry, unless no data. | 286 | * Setup to use first mem list entry, unless no data. |
290 | */ | 287 | */ |
291 | BUG_ON(frame_len && list_empty(&se_cmd->t_mem_list)); | 288 | BUG_ON(frame_len && !se_cmd->t_data_sg); |
292 | if (frame_len) { | 289 | if (frame_len) { |
293 | mem = list_first_entry(&se_cmd->t_mem_list, | 290 | sg = se_cmd->t_data_sg; |
294 | struct se_mem, se_list); | 291 | mem_len = sg->length; |
295 | mem_len = mem->se_len; | 292 | mem_off = sg->offset; |
296 | mem_off = mem->se_off; | 293 | page = sg_page(sg); |
297 | page = mem->se_page; | ||
298 | } | 294 | } |
299 | 295 | ||
300 | while (frame_len) { | 296 | while (frame_len) { |
301 | if (!mem_len) { | 297 | if (!mem_len) { |
302 | BUG_ON(!mem); | 298 | sg = sg_next(sg); |
303 | mem = list_entry(mem->se_list.next, | 299 | mem_len = sg->length; |
304 | struct se_mem, se_list); | 300 | mem_off = sg->offset; |
305 | mem_len = mem->se_len; | 301 | page = sg_page(sg); |
306 | mem_off = mem->se_off; | ||
307 | page = mem->se_page; | ||
308 | } | 302 | } |
309 | if (rel_off >= mem_len) { | 303 | if (rel_off >= mem_len) { |
310 | rel_off -= mem_len; | 304 | rel_off -= mem_len; |
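
The receive side adds one twist to the same walk: a frame carries a relative offset into the command buffer, so before copying, whole segments are skipped while rel_off still exceeds them. A sketch of just that seek step (segment sizes are arbitrary examples):

#include <stdio.h>

struct sg_ent {
	unsigned int length;
	unsigned int offset;
};

int main(void)
{
	struct sg_ent sgl[3] = { { 4096, 0 }, { 4096, 0 }, { 4096, 0 } };
	unsigned int i = 0;
	size_t rel_off = 5000;		/* frame offset into the buffer */
	size_t mem_len = sgl[0].length;
	size_t mem_off = sgl[0].offset;

	while (rel_off >= mem_len) {	/* skip whole segments */
		rel_off -= mem_len;
		i++;
		mem_len = sgl[i].length;
		mem_off = sgl[i].offset;
	}
	mem_off += rel_off;		/* land mid-segment */
	mem_len -= rel_off;
	printf("write lands in seg %u at off %zu, %zu bytes left\n",
	       i, mem_off, mem_len);	/* seg 1, off 904, 3192 left */
	return 0;
}
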