aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/block/sunvdc.c
diff options
context:
space:
mode:
authorDwight Engen <dwight.engen@oracle.com>2014-09-19 09:43:02 -0400
committerDavid S. Miller <davem@davemloft.net>2014-09-30 17:37:35 -0400
commitd0aedcd4f14a22e23b313f42b7e6e6ebfc0fbc31 (patch)
treefda0cda8ed2b6e255146ef35e19f8b205d2a6b11 /drivers/block/sunvdc.c
parent5eed69ffd248c9f68f56c710caf07db134aef28b (diff)
vio: fix reuse of vio_dring slot
vio_dring_avail() will allow use of every dring entry, but when the last entry is allocated then dr->prod == dr->cons which is indistinguishable from the ring empty condition. This causes the next allocation to reuse an entry. When this happens in sunvdc, the server side vds driver begins nack'ing the messages and ends up resetting the ldc channel. This problem does not affect sunvnet since it checks for < 2. The fix here is to just never allocate the very last dring slot so that full and empty are not the same condition. The request start path was changed to check for the ring being full a bit earlier, and to stop the blk_queue if there is no space left. The blk_queue will be restarted once the ring is only half full again. The number of ring entries was increased to 512 which matches the sunvnet and Solaris vdc drivers, and greatly reduces the frequency of hitting the ring full condition and the associated blk_queue stop/starting. The checks in sunvnet were adjusted to account for vio_dring_avail() returning 1 less. Orabug: 19441666 OraBZ: 14983 Signed-off-by: Dwight Engen <dwight.engen@oracle.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/block/sunvdc.c')
-rw-r--r--drivers/block/sunvdc.c39
1 files changed, 23 insertions, 16 deletions
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 1a9360da1f54..756b8ec00f16 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -33,7 +33,7 @@ MODULE_DESCRIPTION("Sun LDOM virtual disk client driver");
33MODULE_LICENSE("GPL"); 33MODULE_LICENSE("GPL");
34MODULE_VERSION(DRV_MODULE_VERSION); 34MODULE_VERSION(DRV_MODULE_VERSION);
35 35
36#define VDC_TX_RING_SIZE 256 36#define VDC_TX_RING_SIZE 512
37 37
38#define WAITING_FOR_LINK_UP 0x01 38#define WAITING_FOR_LINK_UP 0x01
39#define WAITING_FOR_TX_SPACE 0x02 39#define WAITING_FOR_TX_SPACE 0x02
@@ -283,7 +283,9 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
283 283
284 __blk_end_request(req, (desc->status ? -EIO : 0), desc->size); 284 __blk_end_request(req, (desc->status ? -EIO : 0), desc->size);
285 285
286 if (blk_queue_stopped(port->disk->queue)) 286 /* restart blk queue when ring is half emptied */
287 if (blk_queue_stopped(port->disk->queue) &&
288 vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50)
287 blk_start_queue(port->disk->queue); 289 blk_start_queue(port->disk->queue);
288} 290}
289 291
@@ -435,12 +437,6 @@ static int __send_request(struct request *req)
435 for (i = 0; i < nsg; i++) 437 for (i = 0; i < nsg; i++)
436 len += sg[i].length; 438 len += sg[i].length;
437 439
438 if (unlikely(vdc_tx_dring_avail(dr) < 1)) {
439 blk_stop_queue(port->disk->queue);
440 err = -ENOMEM;
441 goto out;
442 }
443
444 desc = vio_dring_cur(dr); 440 desc = vio_dring_cur(dr);
445 441
446 err = ldc_map_sg(port->vio.lp, sg, nsg, 442 err = ldc_map_sg(port->vio.lp, sg, nsg,
@@ -480,21 +476,32 @@ static int __send_request(struct request *req)
480 port->req_id++; 476 port->req_id++;
481 dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1); 477 dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
482 } 478 }
483out:
484 479
485 return err; 480 return err;
486} 481}
487 482
488static void do_vdc_request(struct request_queue *q) 483static void do_vdc_request(struct request_queue *rq)
489{ 484{
490 while (1) { 485 struct request *req;
491 struct request *req = blk_fetch_request(q);
492 486
493 if (!req) 487 while ((req = blk_peek_request(rq)) != NULL) {
494 break; 488 struct vdc_port *port;
489 struct vio_dring_state *dr;
495 490
496 if (__send_request(req) < 0) 491 port = req->rq_disk->private_data;
497 __blk_end_request_all(req, -EIO); 492 dr = &port->vio.drings[VIO_DRIVER_TX_RING];
493 if (unlikely(vdc_tx_dring_avail(dr) < 1))
494 goto wait;
495
496 blk_start_request(req);
497
498 if (__send_request(req) < 0) {
499 blk_requeue_request(rq, req);
500wait:
501 /* Avoid pointless unplugs. */
502 blk_stop_queue(rq);
503 break;
504 }
498 } 505 }
499} 506}
500 507