author		Nicholas Bellinger <nab@linux-iscsi.org>	2014-01-07 17:40:27 -0500
committer	Nicholas Bellinger <nab@linux-iscsi.org>	2014-01-18 21:22:05 -0500
commit		4442dc8a92b8f9ad8ee9e7f8438f4c04c03a22dc (patch)
tree		b671256303a49d3e4a6d104f611b6f9ce766377a /drivers/target
parent		42201b557471f2fef2e9e028b50a773d99ffc401 (diff)
target/rd: Refactor rd_build_device_space + rd_release_device_space
This patch refactors rd_build_device_space() + rd_release_device_space()
into rd_allocate_sgl_table() + rd_release_sgl_table(), so that they may
be used separately for setup + release of protection information
scatterlists.

Also add an explicit memset of pages within rd_allocate_sgl_table(),
based upon the passed 'init_payload' value.

v2 changes:
  - Drop unused sg_table from rd_release_device_space (Wei)

Cc: Martin K. Petersen <martin.petersen@oracle.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Sagi Grimberg <sagig@mellanox.com>
Cc: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Diffstat (limited to 'drivers/target')
-rw-r--r--	drivers/target/target_core_rd.c	113
1 file changed, 68 insertions(+), 45 deletions(-)
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 4ffe5f2ec0e9..e9fa879ac27f 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -78,23 +78,14 @@ static void rd_detach_hba(struct se_hba *hba)
 	hba->hba_ptr = NULL;
 }
 
-/*	rd_release_device_space():
- *
- *
- */
-static void rd_release_device_space(struct rd_dev *rd_dev)
+static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
+				 u32 sg_table_count)
 {
-	u32 i, j, page_count = 0, sg_per_table;
-	struct rd_dev_sg_table *sg_table;
 	struct page *pg;
 	struct scatterlist *sg;
+	u32 i, j, page_count = 0, sg_per_table;
 
-	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
-		return;
-
-	sg_table = rd_dev->sg_table_array;
-
-	for (i = 0; i < rd_dev->sg_table_count; i++) {
+	for (i = 0; i < sg_table_count; i++) {
 		sg = sg_table[i].sg_table;
 		sg_per_table = sg_table[i].rd_sg_count;
 
@@ -105,16 +96,28 @@ static void rd_release_device_space(struct rd_dev *rd_dev)
 				page_count++;
 			}
 		}
-
 		kfree(sg);
 	}
 
+	kfree(sg_table);
+	return page_count;
+}
+
+static void rd_release_device_space(struct rd_dev *rd_dev)
+{
+	u32 page_count;
+
+	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
+		return;
+
+	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
+					  rd_dev->sg_table_count);
+
 	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
 		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
 		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
 		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
 
-	kfree(sg_table);
 	rd_dev->sg_table_array = NULL;
 	rd_dev->sg_table_count = 0;
 }
@@ -124,38 +127,15 @@ static void rd_release_device_space(struct rd_dev *rd_dev)
  *
  *
  */
-static int rd_build_device_space(struct rd_dev *rd_dev)
+static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
+				 u32 total_sg_needed, unsigned char init_payload)
 {
-	u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
+	u32 i = 0, j, page_offset = 0, sg_per_table;
 	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
 				sizeof(struct scatterlist));
-	struct rd_dev_sg_table *sg_table;
 	struct page *pg;
 	struct scatterlist *sg;
-
-	if (rd_dev->rd_page_count <= 0) {
-		pr_err("Illegal page count: %u for Ramdisk device\n",
-			rd_dev->rd_page_count);
-		return -EINVAL;
-	}
-
-	/* Don't need backing pages for NULLIO */
-	if (rd_dev->rd_flags & RDF_NULLIO)
-		return 0;
-
-	total_sg_needed = rd_dev->rd_page_count;
-
-	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
-
-	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
-	if (!sg_table) {
-		pr_err("Unable to allocate memory for Ramdisk"
-			" scatterlist tables\n");
-		return -ENOMEM;
-	}
-
-	rd_dev->sg_table_array = sg_table;
-	rd_dev->sg_table_count = sg_tables;
+	unsigned char *p;
 
 	while (total_sg_needed) {
 		sg_per_table = (total_sg_needed > max_sg_per_table) ?
@@ -186,16 +166,59 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
 			}
 			sg_assign_page(&sg[j], pg);
 			sg[j].length = PAGE_SIZE;
+
+			p = kmap(pg);
+			memset(p, init_payload, PAGE_SIZE);
+			kunmap(pg);
 		}
 
 		page_offset += sg_per_table;
 		total_sg_needed -= sg_per_table;
 	}
 
+	return 0;
+}
+
+static int rd_build_device_space(struct rd_dev *rd_dev)
+{
+	struct rd_dev_sg_table *sg_table;
+	u32 sg_tables, total_sg_needed;
+	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+				sizeof(struct scatterlist));
+	int rc;
+
+	if (rd_dev->rd_page_count <= 0) {
+		pr_err("Illegal page count: %u for Ramdisk device\n",
+			rd_dev->rd_page_count);
+		return -EINVAL;
+	}
+
+	/* Don't need backing pages for NULLIO */
+	if (rd_dev->rd_flags & RDF_NULLIO)
+		return 0;
+
+	total_sg_needed = rd_dev->rd_page_count;
+
+	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
+
+	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
+	if (!sg_table) {
+		pr_err("Unable to allocate memory for Ramdisk"
+			" scatterlist tables\n");
+		return -ENOMEM;
+	}
+
+	rd_dev->sg_table_array = sg_table;
+	rd_dev->sg_table_count = sg_tables;
+
+	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
+	if (rc)
+		return rc;
+
 	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
 		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
 		rd_dev->rd_dev_id, rd_dev->rd_page_count,
 		rd_dev->sg_table_count);
 
 	return 0;
 }
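
Note: the commit message says the helpers are being split out so they can be
reused for protection information scatterlists. A minimal sketch of what such
a caller could look like follows; it is illustrative only and not part of this
patch. The wrapper name rd_build_prot_space(), the sg_prot_array/sg_prot_count
fields, and the 0xff initialization value are assumptions made for the sketch.

/*
 * Illustrative sketch only (not part of this patch): a hypothetical caller
 * that reuses rd_allocate_sgl_table() to back a separate protection-
 * information SGL array, initializing the pages to 0xff instead of 0x00.
 */
static int rd_build_prot_space(struct rd_dev *rd_dev, u32 prot_page_count)
{
	struct rd_dev_sg_table *sg_table;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	u32 sg_tables;
	int rc;

	if (!prot_page_count)
		return 0;

	sg_tables = (prot_page_count / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table)
		return -ENOMEM;

	/* Assumed fields holding the protection SGL array (not in this patch) */
	rd_dev->sg_prot_array = sg_table;
	rd_dev->sg_prot_count = sg_tables;

	/* Same allocator as the data path, but pages are memset to 0xff */
	rc = rd_allocate_sgl_table(rd_dev, sg_table, prot_page_count, 0xff);
	if (rc)
		return rc;

	return 0;
}

Teardown would then pair with rd_release_sgl_table() in the same way that
rd_release_device_space() does for the data SGLs above.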