author    Bill Pemberton <wfp5p@virginia.edu>    2009-07-27 16:47:24 -0400
committer Greg Kroah-Hartman <gregkh@suse.de>   2009-09-15 15:01:50 -0400
commit    454f18a963cf6519bf317e74e6b9781ffef8d253 (patch)
tree      67b206b51dfa5590f3fbdc3db094df1ba5194c6d /drivers/staging/hv/blkvsc_drv.c
parent    226408a4998041d7832123ab9ccd743e878197ed (diff)
Staging: hv: Remove C99 comments
Replace C99 // comments with traditional /* */ comments.

Signed-off-by: Bill Pemberton <wfp5p@virginia.edu>
Cc: Hank Janssen <hjanssen@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
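The conversion pattern, shown here for illustration with lines taken from the patch below, is: a single-line // comment becomes a /* */ comment in place, while a run of // lines is rewrapped into one block comment with a leading asterisk on each line (the form preferred by the kernel's Documentation/CodingStyle):

    /* before */
    struct list_head blkvsc_req_list;	// list of blkvsc_requests

    // This request ties the struct request and struct blkvsc_request/STORVSC_REQUEST together
    // A struct request may be represented by 1 or more struct blkvsc_request

    /* after */
    struct list_head blkvsc_req_list;	/* list of blkvsc_requests */

    /*
     * This request ties the struct request and struct
     * blkvsc_request/STORVSC_REQUEST together A struct request may be
     * represented by 1 or more struct blkvsc_request
     */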
Diffstat (limited to 'drivers/staging/hv/blkvsc_drv.c')
-rw-r--r--  drivers/staging/hv/blkvsc_drv.c  |  249
1 file changed, 133 insertions(+), 116 deletions(-)
diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
index d90265370ab..9c4bfb6dea9 100644
--- a/drivers/staging/hv/blkvsc_drv.c
+++ b/drivers/staging/hv/blkvsc_drv.c
@@ -38,38 +38,41 @@
 
 #include "include/StorVscApi.h"
 
-//
-// #defines
-//
+
+/* #defines */
+
 #define BLKVSC_MINORS	64
 
-//
-// Data types
-//
+
+/* Data types */
+
 enum blkvsc_device_type {
 	UNKNOWN_DEV_TYPE,
 	HARDDISK_TYPE,
 	DVD_TYPE,
 };
 
-// This request ties the struct request and struct blkvsc_request/STORVSC_REQUEST together
-// A struct request may be represented by 1 or more struct blkvsc_request
+/*
+ * This request ties the struct request and struct
+ * blkvsc_request/STORVSC_REQUEST together A struct request may be
+ * represented by 1 or more struct blkvsc_request
+ */
 struct blkvsc_request_group {
 	int outstanding;
 	int status;
 
-	struct list_head blkvsc_req_list;	// list of blkvsc_requests
+	struct list_head blkvsc_req_list;	/* list of blkvsc_requests */
 };
 
 
 struct blkvsc_request {
-	struct list_head req_entry;	// blkvsc_request_group.blkvsc_req_list
+	struct list_head req_entry;	/* blkvsc_request_group.blkvsc_req_list */
 
-	struct list_head pend_entry;	// block_device_context.pending_list
+	struct list_head pend_entry;	/* block_device_context.pending_list */
 
-	struct request *req;	// This may be null if we generate a request internally
+	struct request *req;	/* This may be null if we generate a request internally */
 	struct block_device_context *dev;
-	struct blkvsc_request_group *group;	// The group this request is part of. Maybe null
+	struct blkvsc_request_group *group;	/* The group this request is part of. Maybe null */
 
 	wait_queue_head_t wevent;
 	int cond;
@@ -83,13 +86,13 @@ struct blkvsc_request {
 	unsigned char cmnd[MAX_COMMAND_SIZE];
 
 	STORVSC_REQUEST request;
-	// !!!DO NOT ADD ANYTHING BELOW HERE!!! Otherwise, memory can overlap, because -
-	// The extension buffer falls right here and is pointed to by request.Extension;
+	/* !!!DO NOT ADD ANYTHING BELOW HERE!!! Otherwise, memory can overlap, because - */
+	/* The extension buffer falls right here and is pointed to by request.Extension; */
 };
 
-// Per device structure
+/* Per device structure */
 struct block_device_context {
-	struct device_context *device_ctx;	// point back to our device context
+	struct device_context *device_ctx;	/* point back to our device context */
 	struct kmem_cache *request_pool;
 	spinlock_t lock;
 	struct gendisk *gd;
@@ -109,14 +112,14 @@ struct block_device_context {
 	int users;
 };
 
-// Per driver
+/* Per driver */
 struct blkvsc_driver_context {
-	// !! These must be the first 2 fields !!
+	/* !! These must be the first 2 fields !! */
 	struct driver_context drv_ctx;
 	STORVSC_DRIVER_OBJECT drv_obj;
 };
 
-// Static decl
+/* Static decl */
 static int blkvsc_probe(struct device *dev);
 static int blkvsc_remove(struct device *device);
 static void blkvsc_shutdown(struct device *device);
@@ -144,7 +147,7 @@ static int blkvsc_do_pending_reqs(struct block_device_context *blkdev);
 
 static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
 
-// The one and only one
+/* The one and only one */
 static struct blkvsc_driver_context g_blkvsc_drv;
 
 
@@ -178,7 +181,7 @@ int blkvsc_drv_init(PFN_DRIVERINITIALIZE pfn_drv_init)
 
 	storvsc_drv_obj->RingBufferSize = blkvsc_ringbuffer_size;
 
-	// Callback to client driver to complete the initialization
+	/* Callback to client driver to complete the initialization */
 	pfn_drv_init(&storvsc_drv_obj->Base);
 
 	drv_ctx->driver.name = storvsc_drv_obj->Base.name;
@@ -188,7 +191,7 @@ int blkvsc_drv_init(PFN_DRIVERINITIALIZE pfn_drv_init)
 	drv_ctx->remove = blkvsc_remove;
 	drv_ctx->shutdown = blkvsc_shutdown;
 
-	// The driver belongs to vmbus
+	/* The driver belongs to vmbus */
 	vmbus_child_driver_register(drv_ctx);
 
 	DPRINT_EXIT(BLKVSC_DRV);
@@ -201,7 +204,7 @@ static int blkvsc_drv_exit_cb(struct device *dev, void *data)
 {
 	struct device **curr = (struct device **)data;
 	*curr = dev;
-	return 1;	// stop iterating
+	return 1;	/* stop iterating */
 }
 
 /*++
@@ -224,13 +227,13 @@ void blkvsc_drv_exit(void)
 	{
 		current_dev = NULL;
 
-		// Get the device
+		/* Get the device */
 		driver_for_each_device(&drv_ctx->driver, NULL, (void*)&current_dev, blkvsc_drv_exit_cb);
 
 		if (current_dev == NULL)
 			break;
 
-		// Initiate removal from the top-down
+		/* Initiate removal from the top-down */
 		device_unregister(current_dev);
 	}
 
@@ -291,14 +294,14 @@ static int blkvsc_probe(struct device *device)
 
 	INIT_LIST_HEAD(&blkdev->pending_list);
 
-	// Initialize what we can here
+	/* Initialize what we can here */
 	spin_lock_init(&blkdev->lock);
 
 	ASSERT(sizeof(struct blkvsc_request_group) <= sizeof(struct blkvsc_request));
 
 	blkdev->request_pool = kmem_cache_create(dev_name(&device_ctx->device),
 		sizeof(struct blkvsc_request) + storvsc_drv_obj->RequestExtSize, 0,
 		SLAB_HWCACHE_ALIGN, NULL);
 	if (!blkdev->request_pool)
 	{
 		ret = -ENOMEM;
@@ -306,7 +309,7 @@ static int blkvsc_probe(struct device *device)
 	}
 
 
-	// Call to the vsc driver to add the device
+	/* Call to the vsc driver to add the device */
 	ret = storvsc_drv_obj->Base.OnDeviceAdd(device_obj, &device_info);
 	if (ret != 0)
 	{
@@ -315,16 +318,16 @@ static int blkvsc_probe(struct device *device)
 	}
 
 	blkdev->device_ctx = device_ctx;
-	blkdev->target = device_info.TargetId;	// this identified the device 0 or 1
-	blkdev->path = device_info.PathId;	// this identified the ide ctrl 0 or 1
+	blkdev->target = device_info.TargetId;	/* this identified the device 0 or 1 */
+	blkdev->path = device_info.PathId;	/* this identified the ide ctrl 0 or 1 */
 
 	dev_set_drvdata(device, blkdev);
 
-	// Calculate the major and device num
+	/* Calculate the major and device num */
 	if (blkdev->path == 0)
 	{
 		major = IDE0_MAJOR;
-		devnum = blkdev->path + blkdev->target;	// 0 or 1
+		devnum = blkdev->path + blkdev->target;	/* 0 or 1 */
 
 		if (!ide0_registered)
 		{
@@ -341,7 +344,7 @@ static int blkvsc_probe(struct device *device)
 	else if (blkdev->path == 1)
 	{
 		major = IDE1_MAJOR;
-		devnum = blkdev->path + blkdev->target + 1;	// 2 or 3
+		devnum = blkdev->path + blkdev->target + 1;	/* 2 or 3 */
 
 		if (!ide1_registered)
 		{
@@ -405,7 +408,7 @@ static int blkvsc_probe(struct device *device)
 
 	set_capacity(blkdev->gd, blkdev->capacity * (blkdev->sector_size/512));
 	blk_queue_logical_block_size(blkdev->gd->queue, blkdev->sector_size);
-	// go!
+	/* go! */
 	add_disk(blkdev->gd);
 
 	DPRINT_INFO(BLKVSC_DRV, "%s added!! capacity %llu sector_size %d", blkdev->gd->disk_name, blkdev->capacity, blkdev->sector_size);
@@ -494,7 +497,7 @@ static int blkvsc_do_flush(struct block_device_context *blkdev)
 	blkvsc_req->cmnd[0] = SYNCHRONIZE_CACHE;
 	blkvsc_req->cmd_len = 10;
 
-	// Set this here since the completion routine may be invoked and completed before we return
+	/* Set this here since the completion routine may be invoked and completed before we return */
 	blkvsc_req->cond =0;
 	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);
 
@@ -505,7 +508,7 @@ static int blkvsc_do_flush(struct block_device_context *blkdev)
 	return 0;
 }
 
-// Do a scsi INQUIRY cmd here to get the device type (ie disk or dvd)
+/* Do a scsi INQUIRY cmd here to get the device type (ie disk or dvd) */
 static int blkvsc_do_inquiry(struct block_device_context *blkdev)
 {
 	struct blkvsc_request *blkvsc_req=NULL;
@@ -539,12 +542,12 @@ static int blkvsc_do_inquiry(struct block_device_context *blkdev)
 	blkvsc_req->request.DataBuffer.Length = 64;
 
 	blkvsc_req->cmnd[0] = INQUIRY;
-	blkvsc_req->cmnd[1] = 0x1;	// Get product data
-	blkvsc_req->cmnd[2] = 0x83;	// mode page 83
+	blkvsc_req->cmnd[1] = 0x1;	/* Get product data */
+	blkvsc_req->cmnd[2] = 0x83;	/* mode page 83 */
 	blkvsc_req->cmnd[4] = 64;
 	blkvsc_req->cmd_len = 6;
 
-	// Set this here since the completion routine may be invoked and completed before we return
+	/* Set this here since the completion routine may be invoked and completed before we return */
 	blkvsc_req->cond =0;
 
 	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);
@@ -556,7 +559,7 @@ static int blkvsc_do_inquiry(struct block_device_context *blkdev)
 	buf = kmap(page_buf);
 
 	/* print_hex_dump_bytes("", DUMP_PREFIX_NONE, buf, 64); */
-	// be to le
+	/* be to le */
 	device_type = buf[0] & 0x1F;
 
 	if (device_type == 0x0)
@@ -569,7 +572,7 @@ static int blkvsc_do_inquiry(struct block_device_context *blkdev)
 	}
 	else
 	{
-		// TODO: this is currently unsupported device type
+		/* TODO: this is currently unsupported device type */
 		blkdev->device_type = UNKNOWN_DEV_TYPE;
 	}
 
@@ -581,7 +584,7 @@ static int blkvsc_do_inquiry(struct block_device_context *blkdev)
 
 	memcpy(blkdev->device_id, &buf[8], blkdev->device_id_len);
 	/* printk_hex_dump_bytes("", DUMP_PREFIX_NONE, blkdev->device_id,
 	 * blkdev->device_id_len); */
 
 	kunmap(page_buf);
 
@@ -592,7 +595,7 @@ static int blkvsc_do_inquiry(struct block_device_context *blkdev)
 	return 0;
 }
 
-// Do a scsi READ_CAPACITY cmd here to get the size of the disk
+/* Do a scsi READ_CAPACITY cmd here to get the size of the disk */
 static int blkvsc_do_read_capacity(struct block_device_context *blkdev)
 {
 	struct blkvsc_request *blkvsc_req=NULL;
@@ -604,7 +607,7 @@ static int blkvsc_do_read_capacity(struct block_device_context *blkdev)
 
 	blkdev->sector_size = 0;
 	blkdev->capacity = 0;
-	blkdev->media_not_present = 0;	// assume a disk is present
+	blkdev->media_not_present = 0;	/* assume a disk is present */
 
 	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
 	if (!blkvsc_req)
@@ -632,7 +635,10 @@ static int blkvsc_do_read_capacity(struct block_device_context *blkdev)
 	blkvsc_req->cmnd[0] = READ_CAPACITY;
 	blkvsc_req->cmd_len = 16;
 
-	// Set this here since the completion routine may be invoked and completed before we return
+	/*
+	 * Set this here since the completion routine may be invoked
+	 * and completed before we return
+	 */
 	blkvsc_req->cond =0;
 
 	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);
@@ -641,12 +647,12 @@ static int blkvsc_do_read_capacity(struct block_device_context *blkdev)
 
 	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);
 
-	// check error
+	/* check error */
 	if (blkvsc_req->request.Status)
 	{
 		scsi_normalize_sense(blkvsc_req->sense_buffer, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
 
-		if (sense_hdr.asc == 0x3A)	// Medium not present
+		if (sense_hdr.asc == 0x3A)	/* Medium not present */
 		{
 			blkdev->media_not_present = 1;
 		}
@@ -655,7 +661,7 @@ static int blkvsc_do_read_capacity(struct block_device_context *blkdev)
 	}
 	buf = kmap(page_buf);
 
-	// be to le
+	/* be to le */
 	blkdev->capacity = ((buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]) + 1;
 	blkdev->sector_size = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
 
@@ -680,7 +686,7 @@ static int blkvsc_do_read_capacity16(struct block_device_context *blkdev)
 
 	blkdev->sector_size = 0;
 	blkdev->capacity = 0;
-	blkdev->media_not_present = 0;	// assume a disk is present
+	blkdev->media_not_present = 0;	/* assume a disk is present */
 
 	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
 	if (!blkvsc_req)
@@ -705,10 +711,13 @@ static int blkvsc_do_read_capacity16(struct block_device_context *blkdev)
 	blkvsc_req->request.DataBuffer.Offset = 0;
 	blkvsc_req->request.DataBuffer.Length = 12;
 
-	blkvsc_req->cmnd[0] = 0x9E; //READ_CAPACITY16;
+	blkvsc_req->cmnd[0] = 0x9E; /* READ_CAPACITY16; */
 	blkvsc_req->cmd_len = 16;
 
-	// Set this here since the completion routine may be invoked and completed before we return
+	/*
+	 * Set this here since the completion routine may be invoked
+	 * and completed before we return
+	 */
 	blkvsc_req->cond =0;
 
 	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);
@@ -717,12 +726,12 @@ static int blkvsc_do_read_capacity16(struct block_device_context *blkdev)
 
 	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);
 
-	// check error
+	/* check error */
 	if (blkvsc_req->request.Status)
 	{
 		scsi_normalize_sense(blkvsc_req->sense_buffer, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
 
-		if (sense_hdr.asc == 0x3A)	// Medium not present
+		if (sense_hdr.asc == 0x3A)	/* Medium not present */
 		{
 			blkdev->media_not_present = 1;
 		}
@@ -731,12 +740,12 @@ static int blkvsc_do_read_capacity16(struct block_device_context *blkdev)
 	}
 	buf = kmap(page_buf);
 
-	// be to le
+	/* be to le */
 	blkdev->capacity = be64_to_cpu(*(unsigned long long*) &buf[0]) + 1;
 	blkdev->sector_size = be32_to_cpu(*(unsigned int*)&buf[8]);
 
-	//blkdev->capacity = ((buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]) + 1;
-	//blkdev->sector_size = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
+	/* blkdev->capacity = ((buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]) + 1; */
+	/* blkdev->sector_size = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7]; */
 
 	kunmap(page_buf);
 
@@ -777,15 +786,15 @@ static int blkvsc_remove(struct device *device)
 		return -1;
 	}
 
-	// Call to the vsc driver to let it know that the device is being removed
+	/* Call to the vsc driver to let it know that the device is being removed */
 	ret = storvsc_drv_obj->Base.OnDeviceRemove(device_obj);
 	if (ret != 0)
 	{
-		// TODO:
+		/* TODO: */
 		DPRINT_ERR(BLKVSC_DRV, "unable to remove blkvsc device (ret %d)", ret);
 	}
 
-	// Get to a known state
+	/* Get to a known state */
 	spin_lock_irqsave(&blkdev->lock, flags);
 
 	blkdev->shutting_down = 1;
@@ -922,7 +931,7 @@ static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req, void (*reque
 	storvsc_req->Host = blkdev->port;
 	storvsc_req->Bus = blkdev->path;
 	storvsc_req->TargetId = blkdev->target;
-	storvsc_req->LunId = 0;	// this is not really used at all
+	storvsc_req->LunId = 0;	/* this is not really used at all */
 
 	storvsc_req->CdbLen = blkvsc_req->cmd_len;
 	storvsc_req->Cdb = blkvsc_req->cmnd;
@@ -939,11 +948,13 @@ static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req, void (*reque
 	return ret;
 }
 
-//
-// We break the request into 1 or more blkvsc_requests and submit them.
-// If we cant submit them all, we put them on the pending_list. The
-// blkvsc_request() will work on the pending_list.
-//
+
+/*
+ * We break the request into 1 or more blkvsc_requests and submit
+ * them. If we cant submit them all, we put them on the
+ * pending_list. The blkvsc_request() will work on the pending_list.
+ */
+
 static int blkvsc_do_request(struct block_device_context *blkdev, struct request *req)
 {
 	struct bio *bio=NULL;
@@ -963,7 +974,7 @@ static int blkvsc_do_request(struct block_device_context *blkdev, struct request
 
 	DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p sect %llu \n", blkdev, req, blk_rq_pos(req));
 
-	// Create a group to tie req to list of blkvsc_reqs
+	/* Create a group to tie req to list of blkvsc_reqs */
 	group = (struct blkvsc_request_group*)kmem_cache_alloc(blkdev->request_pool, GFP_ATOMIC);
 	if (!group)
 	{
@@ -975,23 +986,23 @@ static int blkvsc_do_request(struct block_device_context *blkdev, struct request
 
 	start_sector = blk_rq_pos(req);
 
-	// foreach bio in the request
+	/* foreach bio in the request */
 	if (req->bio)
 	for (bio = req->bio; bio; bio = bio->bi_next)
 	{
-		// Map this bio into an existing or new storvsc request
+		/* Map this bio into an existing or new storvsc request */
 		bio_for_each_segment (bvec, bio, seg_idx)
 		{
 			DPRINT_DBG(BLKVSC_DRV, "bio_for_each_segment() - req %p bio %p bvec %p seg_idx %d databuf_idx %d\n",
 				req, bio, bvec, seg_idx, databuf_idx);
 
-			// Get a new storvsc request
-			if ( (!blkvsc_req) ||	// 1st-time
+			/* Get a new storvsc request */
+			if ( (!blkvsc_req) ||	/* 1st-time */
 				(databuf_idx >= MAX_MULTIPAGE_BUFFER_COUNT) ||
-				(bvec->bv_offset != 0) ||	// hole at the begin of page
-				(prev_bvec && (prev_bvec->bv_len != PAGE_SIZE)) )	// hold at the end of page
+				(bvec->bv_offset != 0) ||	/* hole at the begin of page */
+				(prev_bvec && (prev_bvec->bv_len != PAGE_SIZE)) )	/* hold at the end of page */
 			{
-				// submit the prev one
+				/* submit the prev one */
 				if (blkvsc_req)
 				{
 					blkvsc_req->sector_start = start_sector;
@@ -1002,11 +1013,11 @@ static int blkvsc_do_request(struct block_device_context *blkdev, struct request
 					blkvsc_init_rw(blkvsc_req);
 				}
 
-				// Create new blkvsc_req to represent the current bvec
+				/* Create new blkvsc_req to represent the current bvec */
 				blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_ATOMIC);
 				if (!blkvsc_req)
 				{
-					// free up everything
+					/* free up everything */
 					list_for_each_entry_safe(blkvsc_req, tmp, &group->blkvsc_req_list, req_entry)
 					{
 						list_del(&blkvsc_req->req_entry);
@@ -1024,7 +1035,7 @@ static int blkvsc_do_request(struct block_device_context *blkdev, struct request
 				blkvsc_req->request.DataBuffer.Offset = bvec->bv_offset;
 				blkvsc_req->request.DataBuffer.Length = 0;
 
-				// Add to the group
+				/* Add to the group */
 				blkvsc_req->group = group;
 				blkvsc_req->group->outstanding++;
 				list_add_tail(&blkvsc_req->req_entry, &blkvsc_req->group->blkvsc_req_list);
@@ -1034,7 +1045,7 @@ static int blkvsc_do_request(struct block_device_context *blkdev, struct request
 				databuf_idx = 0;
 			}
 
-			// Add the curr bvec/segment to the curr blkvsc_req
+			/* Add the curr bvec/segment to the curr blkvsc_req */
 			blkvsc_req->request.DataBuffer.PfnArray[databuf_idx] = page_to_pfn(bvec->bv_page);
 			blkvsc_req->request.DataBuffer.Length += bvec->bv_len;
 
@@ -1043,11 +1054,11 @@ static int blkvsc_do_request(struct block_device_context *blkdev, struct request
 			databuf_idx++;
 			num_sectors += bvec->bv_len >> 9;
 
-		} // bio_for_each_segment
+		} /* bio_for_each_segment */
 
-	} // rq_for_each_bio
+	} /* rq_for_each_bio */
 
-	// Handle the last one
+	/* Handle the last one */
 	if (blkvsc_req)
 	{
 		DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p group %p count %d\n", blkdev, req, blkvsc_req->group, blkvsc_req->group->outstanding);
@@ -1134,8 +1145,11 @@ static void blkvsc_request_completion(STORVSC_REQUEST* request)
 	blkdev->num_outstanding_reqs--;
 	blkvsc_req->group->outstanding--;
 
-	// Only start processing when all the blkvsc_reqs are completed. This guarantees no out-of-order
-	// blkvsc_req completion when calling end_that_request_first()
+	/*
+	 * Only start processing when all the blkvsc_reqs are
+	 * completed. This guarantees no out-of-order blkvsc_req
+	 * completion when calling end_that_request_first()
+	 */
 	if (blkvsc_req->group->outstanding == 0)
 	{
 		list_for_each_entry_safe(comp_req, tmp, &blkvsc_req->group->blkvsc_req_list, req_entry)
@@ -1152,7 +1166,7 @@ static void blkvsc_request_completion(STORVSC_REQUEST* request)
 			(!comp_req->request.Status ? 0: -EIO),
 			comp_req->sector_count * blkdev->sector_size))
 		{
-			//All the sectors have been xferred ie the request is done
+			/* All the sectors have been xferred ie the request is done */
 			DPRINT_DBG(BLKVSC_DRV, "req %p COMPLETED\n", comp_req->req);
 			kmem_cache_free(blkdev->request_pool, comp_req->group);
 		}
@@ -1180,11 +1194,14 @@ static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev)
 
 	DPRINT_DBG(BLKVSC_DRV, "blkvsc_cancel_pending_reqs()");
 
-	// Flush the pending list first
+	/* Flush the pending list first */
 	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list, pend_entry)
 	{
-		// The pend_req could be part of a partially completed request. If so, complete those req first
-		// until we hit the pend_req
+		/*
+		 * The pend_req could be part of a partially completed
+		 * request. If so, complete those req first until we
+		 * hit the pend_req
+		 */
 		list_for_each_entry_safe(comp_req, tmp2, &pend_req->group->blkvsc_req_list, req_entry)
 		{
 			DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p sect_start %llu sect_count %ld \n",
@@ -1222,7 +1239,7 @@ static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev)
 			-EIO,
 			pend_req->sector_count * blkdev->sector_size))
 		{
-			//All the sectors have been xferred ie the request is done
+			/* All the sectors have been xferred ie the request is done */
 			DPRINT_DBG(BLKVSC_DRV, "blkvsc_cancel_pending_reqs() - req %p COMPLETED\n", pend_req->req);
 			kmem_cache_free(blkdev->request_pool, pend_req->group);
 		}
@@ -1239,7 +1256,7 @@ static int blkvsc_do_pending_reqs(struct block_device_context *blkdev)
 	struct blkvsc_request *pend_req, *tmp;
 	int ret=0;
 
-	// Flush the pending list first
+	/* Flush the pending list first */
 	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list, pend_entry)
 	{
 		DPRINT_DBG(BLKVSC_DRV, "working off pending_list - %p\n", pend_req);
@@ -1378,51 +1395,51 @@ int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg)
 	int rem=0;
 
 	if (total_sectors > (65535 * 16 * 255)) {
 		total_sectors = (65535 * 16 * 255);
 	}
 
 	if (total_sectors >= (65535 * 16 * 63)) {
 		sectors_per_track = 255;
 		heads = 16;
 
 		cylinder_times_heads = total_sectors;
-		rem = sector_div(cylinder_times_heads, sectors_per_track);	// sector_div stores the quotient in cylinder_times_heads
+		rem = sector_div(cylinder_times_heads, sectors_per_track);	/* sector_div stores the quotient in cylinder_times_heads */
 	}
 	else
 	{
 		sectors_per_track = 17;
 
 		cylinder_times_heads = total_sectors;
-		rem = sector_div(cylinder_times_heads, sectors_per_track);	// sector_div stores the quotient in cylinder_times_heads
+		rem = sector_div(cylinder_times_heads, sectors_per_track);	/* sector_div stores the quotient in cylinder_times_heads */
 
 		temp = cylinder_times_heads + 1023;
-		rem = sector_div(temp, 1024);	// sector_div stores the quotient in temp
+		rem = sector_div(temp, 1024);	/* sector_div stores the quotient in temp */
 
 		heads = temp;
 
 		if (heads < 4) {
 			heads = 4;
 		}
 
 		if (cylinder_times_heads >= (heads * 1024) || (heads > 16)) {
 			sectors_per_track = 31;
 			heads = 16;
 
 			cylinder_times_heads = total_sectors;
-			rem = sector_div(cylinder_times_heads, sectors_per_track);	// sector_div stores the quotient in cylinder_times_heads
+			rem = sector_div(cylinder_times_heads, sectors_per_track);	/* sector_div stores the quotient in cylinder_times_heads */
 		}
 
 		if (cylinder_times_heads >= (heads * 1024)) {
 			sectors_per_track = 63;
 			heads = 16;
 
 			cylinder_times_heads = total_sectors;
-			rem = sector_div(cylinder_times_heads, sectors_per_track);	// sector_div stores the quotient in cylinder_times_heads
+			rem = sector_div(cylinder_times_heads, sectors_per_track);	/* sector_div stores the quotient in cylinder_times_heads */
 		}
 	}
 
 	temp = cylinder_times_heads;
-	rem = sector_div(temp, heads);	// sector_div stores the quotient in temp
+	rem = sector_div(temp, heads);	/* sector_div stores the quotient in temp */
 	cylinders = temp;
 
 	hg->heads = heads;
@@ -1442,8 +1459,8 @@ static int blkvsc_ioctl(struct inode *inode, struct file *filep, unsigned cmd, u
 
 	switch (cmd)
 	{
-	// TODO: I think there is certain format for HDIO_GET_IDENTITY rather than just
-	// a GUID. Commented it out for now.
+	/* TODO: I think there is certain format for HDIO_GET_IDENTITY rather than just */
+	/* a GUID. Commented it out for now. */
 	/*case HDIO_GET_IDENTITY:
 		DPRINT_INFO(BLKVSC_DRV, "HDIO_GET_IDENTITY\n");
 
@@ -1468,7 +1485,7 @@ static int __init blkvsc_init(void)
 {
 	int ret;
 
-	ASSERT(sizeof(sector_t) == 8); // Make sure CONFIG_LBD is set
+	ASSERT(sizeof(sector_t) == 8); /* Make sure CONFIG_LBD is set */
 
 	DPRINT_ENTER(BLKVSC_DRV);
 
@@ -1495,4 +1512,4 @@ module_param(blkvsc_ringbuffer_size, int, S_IRUGO);
 module_init(blkvsc_init);
 module_exit(blkvsc_exit);
 
-// eof
+/* eof */