author     Christoph Hellwig <hch@infradead.org>      2012-03-26 04:56:41 -0400
committer  Nicholas Bellinger <nab@linux-iscsi.org>   2012-04-14 20:40:30 -0400
commit     8feb58d04b23e65c3d302f063544f3a1ae65e887
tree       2d949f1a92bf277c8ccac669c187ecd4f527a74b
parent     b0d7994660af1601cc26ef7ab748569fdb9c253b
target: misc ramdisk backend cleanups
Remove various leftovers of the old direct/indirect split, as well as the
unused rd_request structure and a couple unused defines and fields.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
-rw-r--r--  drivers/target/target_core_rd.c | 147
-rw-r--r--  drivers/target/target_core_rd.h |  19
2 files changed, 41 insertions, 125 deletions
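Most of the churn below comes from folding rd_MEMCPY_do_task() into a single rd_do_task(), which now derives the starting ramdisk page and intra-page offset from task->task_lba inline instead of stashing them in the removed struct rd_request. A minimal userspace sketch of that arithmetic follows; the 512-byte block size, 4 KiB page size and example LBA are illustrative assumptions, not values taken from the driver, and where the sketch divides, the kernel uses do_div(), which divides its first argument in place and returns the remainder.

/* Standalone illustration of the LBA -> (page, offset) mapping that
 * rd_do_task() now computes inline.  Block size and page size are
 * assumed example values; the driver reads them from
 * se_dev_attrib.block_size and PAGE_SIZE.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t block_size = 512;   /* assumed logical block size */
	const uint64_t page_size  = 4096;  /* assumed PAGE_SIZE */
	uint64_t lba = 1027;               /* example starting LBA */

	uint64_t byte_off  = lba * block_size;      /* absolute byte offset   */
	uint64_t rd_page   = byte_off / page_size;  /* quotient: rd_page      */
	uint32_t rd_offset = byte_off % page_size;  /* remainder: rd_offset   */

	printf("LBA %llu -> page %llu, offset %u\n",
	       (unsigned long long)lba,
	       (unsigned long long)rd_page,
	       (unsigned)rd_offset);
	return 0;
}

With that mapping computed locally in rd_do_task(), the per-request rd_offset/rd_page/rd_size fields have nothing left to carry, which is why struct rd_request can go away.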
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 8b68f7b82631..2a89187d262c 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -199,10 +199,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
 	return 0;
 }
 
-static void *rd_allocate_virtdevice(
-	struct se_hba *hba,
-	const char *name,
-	int rd_direct)
+static void *rd_allocate_virtdevice(struct se_hba *hba, const char *name)
 {
 	struct rd_dev *rd_dev;
 	struct rd_host *rd_host = hba->hba_ptr;
@@ -214,25 +211,12 @@ static void *rd_allocate_virtdevice(
 	}
 
 	rd_dev->rd_host = rd_host;
-	rd_dev->rd_direct = rd_direct;
 
 	return rd_dev;
 }
 
-static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
-{
-	return rd_allocate_virtdevice(hba, name, 0);
-}
-
-/* rd_create_virtdevice():
- *
- *
- */
-static struct se_device *rd_create_virtdevice(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev,
-	void *p,
-	int rd_direct)
+static struct se_device *rd_create_virtdevice(struct se_hba *hba,
+		struct se_subsystem_dev *se_dev, void *p)
 {
 	struct se_device *dev;
 	struct se_dev_limits dev_limits;
@@ -247,9 +231,8 @@ static struct se_device *rd_create_virtdevice(
 	if (ret < 0)
 		goto fail;
 
-	snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP");
-	snprintf(rev, 4, "%s", (rd_dev->rd_direct) ? RD_DR_VERSION :
-						RD_MCP_VERSION);
+	snprintf(prod, 16, "RAMDISK-MCP");
+	snprintf(rev, 4, "%s", RD_MCP_VERSION);
 
 	dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
 	dev_limits.limits.max_hw_sectors = RD_MAX_SECTORS;
@@ -264,12 +247,10 @@ static struct se_device *rd_create_virtdevice(
 		goto fail;
 
 	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
-	rd_dev->rd_queue_depth = dev->queue_depth;
 
-	pr_debug("CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
+	pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
 		" %u pages in %u tables, %lu total bytes\n",
-		rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
-		"DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
+		rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
 		rd_dev->sg_table_count,
 		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
 
@@ -280,18 +261,6 @@ fail:
 	return ERR_PTR(ret);
 }
 
-static struct se_device *rd_MEMCPY_create_virtdevice(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev,
-	void *p)
-{
-	return rd_create_virtdevice(hba, se_dev, p, 0);
-}
-
-/* rd_free_device(): (Part of se_subsystem_api_t template)
- *
- *
- */
 static void rd_free_device(void *p)
 {
 	struct rd_dev *rd_dev = p;
@@ -300,29 +269,12 @@ static void rd_free_device(void *p)
 	kfree(rd_dev);
 }
 
-static inline struct rd_request *RD_REQ(struct se_task *task)
-{
-	return container_of(task, struct rd_request, rd_task);
-}
-
 static struct se_task *
 rd_alloc_task(unsigned char *cdb)
 {
-	struct rd_request *rd_req;
-
-	rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
-	if (!rd_req) {
-		pr_err("Unable to allocate struct rd_request\n");
-		return NULL;
-	}
-
-	return &rd_req->rd_task;
+	return kzalloc(sizeof(struct se_task), GFP_KERNEL);
 }
 
-/* rd_get_sg_table():
- *
- *
- */
 static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
 {
 	u32 i;
@@ -341,31 +293,41 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
 	return NULL;
 }
 
-static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
+static int rd_do_task(struct se_task *task)
 {
-	struct se_task *task = &req->rd_task;
-	struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
+	struct se_device *se_dev = task->task_se_cmd->se_dev;
+	struct rd_dev *dev = se_dev->dev_ptr;
 	struct rd_dev_sg_table *table;
 	struct scatterlist *rd_sg;
 	struct sg_mapping_iter m;
-	u32 rd_offset = req->rd_offset;
+	u32 rd_offset;
+	u32 rd_size;
+	u32 rd_page;
 	u32 src_len;
+	u64 tmp;
 
-	table = rd_get_sg_table(dev, req->rd_page);
+	tmp = task->task_lba * se_dev->se_sub_dev->se_dev_attrib.block_size;
+	rd_offset = do_div(tmp, PAGE_SIZE);
+	rd_page = tmp;
+	rd_size = task->task_size;
+
+	table = rd_get_sg_table(dev, rd_page);
 	if (!table)
 		return -EINVAL;
 
-	rd_sg = &table->sg_table[req->rd_page - table->page_start_offset];
+	rd_sg = &table->sg_table[rd_page - table->page_start_offset];
 
 	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
-			dev->rd_dev_id, read_rd ? "Read" : "Write",
-			task->task_lba, req->rd_size, req->rd_page,
-			rd_offset);
+			dev->rd_dev_id,
+			task->task_data_direction == DMA_FROM_DEVICE ?
+				"Read" : "Write",
+			task->task_lba, rd_size, rd_page, rd_offset);
 
 	src_len = PAGE_SIZE - rd_offset;
 	sg_miter_start(&m, task->task_sg, task->task_sg_nents,
-			read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG);
-	while (req->rd_size) {
+			task->task_data_direction == DMA_FROM_DEVICE ?
+			SG_MITER_TO_SG : SG_MITER_FROM_SG);
+	while (rd_size) {
 		u32 len;
 		void *rd_addr;
 
@@ -375,13 +337,13 @@ static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
 
 		rd_addr = sg_virt(rd_sg) + rd_offset;
 
-		if (read_rd)
+		if (task->task_data_direction == DMA_FROM_DEVICE)
 			memcpy(m.addr, rd_addr, len);
 		else
 			memcpy(rd_addr, m.addr, len);
 
-		req->rd_size -= len;
-		if (!req->rd_size)
+		rd_size -= len;
+		if (!rd_size)
 			continue;
 
 		src_len -= len;
@@ -391,15 +353,15 @@ static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
 		}
 
 		/* rd page completed, next one please */
-		req->rd_page++;
+		rd_page++;
 		rd_offset = 0;
 		src_len = PAGE_SIZE;
-		if (req->rd_page <= table->page_end_offset) {
+		if (rd_page <= table->page_end_offset) {
 			rd_sg++;
 			continue;
 		}
 
-		table = rd_get_sg_table(dev, req->rd_page);
+		table = rd_get_sg_table(dev, rd_page);
 		if (!table) {
 			sg_miter_stop(&m);
 			return -EINVAL;
@@ -409,41 +371,15 @@ static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
 		rd_sg = table->sg_table;
 	}
 	sg_miter_stop(&m);
-	return 0;
-}
-
-/* rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static int rd_MEMCPY_do_task(struct se_task *task)
-{
-	struct se_device *dev = task->task_se_cmd->se_dev;
-	struct rd_request *req = RD_REQ(task);
-	u64 tmp;
-	int ret;
-
-	tmp = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
-	req->rd_offset = do_div(tmp, PAGE_SIZE);
-	req->rd_page = tmp;
-	req->rd_size = task->task_size;
-
-	ret = rd_MEMCPY(req, task->task_data_direction == DMA_FROM_DEVICE);
-	if (ret != 0)
-		return ret;
 
 	task->task_scsi_status = GOOD;
 	transport_complete_task(task, 1);
 	return 0;
 }
 
-/* rd_free_task(): (Part of se_subsystem_api_t template)
- *
- *
- */
 static void rd_free_task(struct se_task *task)
 {
-	kfree(RD_REQ(task));
+	kfree(task);
 }
 
 enum {
@@ -512,9 +448,8 @@ static ssize_t rd_show_configfs_dev_params(
 	char *b)
 {
 	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
-	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: %s\n",
-			rd_dev->rd_dev_id, (rd_dev->rd_direct) ?
-			"rd_direct" : "rd_mcp");
+	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
+			rd_dev->rd_dev_id);
 	bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
 			" SG_table_count: %u\n", rd_dev->rd_page_count,
 			PAGE_SIZE, rd_dev->sg_table_count);
@@ -545,11 +480,11 @@ static struct se_subsystem_api rd_mcp_template = {
 	.transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
 	.attach_hba = rd_attach_hba,
 	.detach_hba = rd_detach_hba,
-	.allocate_virtdevice = rd_MEMCPY_allocate_virtdevice,
-	.create_virtdevice = rd_MEMCPY_create_virtdevice,
+	.allocate_virtdevice = rd_allocate_virtdevice,
+	.create_virtdevice = rd_create_virtdevice,
 	.free_device = rd_free_device,
 	.alloc_task = rd_alloc_task,
-	.do_task = rd_MEMCPY_do_task,
+	.do_task = rd_do_task,
 	.free_task = rd_free_task,
 	.check_configfs_dev_params = rd_check_configfs_dev_params,
 	.set_configfs_dev_params = rd_set_configfs_dev_params,
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
index 784e56a04100..94acec9e872f 100644
--- a/drivers/target/target_core_rd.h
+++ b/drivers/target/target_core_rd.h
@@ -2,7 +2,6 @@
 #define TARGET_CORE_RD_H
 
 #define RD_HBA_VERSION		"v4.0"
-#define RD_DR_VERSION		"4.0"
 #define RD_MCP_VERSION		"4.0"
 
 /* Largest piece of memory kmalloc can allocate */
@@ -16,22 +15,6 @@
 int __init rd_module_init(void);
 void rd_module_exit(void);
 
-#define RRF_EMULATE_CDB		0x01
-#define RRF_GOT_LBA		0x02
-
-struct rd_request {
-	struct se_task	rd_task;
-
-	/* Offset from start of page */
-	u32		rd_offset;
-	/* Starting page in Ramdisk for request */
-	u32		rd_page;
-	/* Total number of pages needed for request */
-	u32		rd_page_count;
-	/* Scatterlist count */
-	u32		rd_size;
-} ____cacheline_aligned;
-
 struct rd_dev_sg_table {
 	u32		page_start_offset;
 	u32		page_end_offset;
@@ -42,7 +25,6 @@ struct rd_dev_sg_table {
 #define RDF_HAS_PAGE_COUNT	0x01
 
 struct rd_dev {
-	int		rd_direct;
 	u32		rd_flags;
 	/* Unique Ramdisk Device ID in Ramdisk HBA */
 	u32		rd_dev_id;
@@ -50,7 +32,6 @@ struct rd_dev {
 	u32		rd_page_count;
 	/* Number of SG tables in sg_table_array */
 	u32		sg_table_count;
-	u32		rd_queue_depth;
 	/* Array of rd_dev_sg_table_t containing scatterlists */
 	struct rd_dev_sg_table *sg_table_array;
 	/* Ramdisk HBA device is connected to */
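Why rd_alloc_task()/rd_free_task() can shrink to a bare kzalloc()/kfree(): the deleted struct rd_request embedded the core's struct se_task, and the backend used container_of() (via the removed RD_REQ() helper) to step from the task back to its wrapper. A standalone sketch of that embed-and-recover pattern follows, using stand-in types rather than the real kernel definitions.

/* Userspace illustration of the pattern struct rd_request relied on.
 * struct se_task and struct rd_request here are simplified stand-ins,
 * not the kernel structures.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct se_task {                /* stand-in for the core's task */
	unsigned long lba;
};

struct rd_request {             /* the wrapper this commit deletes */
	struct se_task rd_task; /* embedded task handed to the core */
	unsigned int rd_page;
	unsigned int rd_offset;
};

int main(void)
{
	struct rd_request *req = calloc(1, sizeof(*req));
	struct se_task *task = &req->rd_task;  /* what alloc_task returned */

	/* do_task recovered the wrapper the way RD_REQ() did: */
	struct rd_request *back =
		container_of(task, struct rd_request, rd_task);

	printf("wrapper recovered: %s\n", back == req ? "yes" : "no");
	free(req);
	return 0;
}

Once the wrapper carries no ramdisk-private state, the indirection buys nothing, so the backend now hands out plain se_task objects and frees them directly.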