path: root/drivers/block/sunvdc.c
author		Dwight Engen <dwight.engen@oracle.com>		2014-12-11 12:26:17 -0500
committer	David S. Miller <davem@davemloft.net>		2014-12-11 21:52:45 -0500
commit		76e74bbe0a38c6720217425ed64dbb448c643b9d
tree		e66d6c9c301acf4462e46c713fe072e8e167f907 /drivers/block/sunvdc.c
parent		1678c2bd131313b58c7492487c00dca982be7f28
sunvdc: reconnect ldc after vds service domain restarts
This change enables the sunvdc driver to reconnect and recover if a vds
service domain is disconnected or bounced. By default, the driver waits
indefinitely for the service domain to become available again, but it
honors a non-zero vdc-timeout md property if one is set. If the timeout
is reached, any in-progress I/Os are completed with -EIO.

Signed-off-by: Dwight Engen <dwight.engen@oracle.com>
Reviewed-by: Chris Hyser <chris.hyser@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
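The recovery policy described above reduces to a simple rule: keep retrying
while the LDC link is down, and only complete outstanding I/O with -EIO once
a non-zero vdc-timeout has elapsed without a completed handshake. The snippet
below is a minimal userspace model of that rule, not code from the patch;
link_is_up(), wait_for_reconnect() and the one-second poll are hypothetical
stand-ins for the driver's LDC handshake, reset work and reset timer.

/* Userspace sketch of the vdc-timeout policy (illustration only).
 * A timeout of 0 means "wait indefinitely", matching the driver default.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static bool link_is_up(void)
{
	return false;	/* stand-in: pretend the service domain never returns */
}

static int wait_for_reconnect(unsigned long vdc_timeout)
{
	time_t deadline = time(NULL) + vdc_timeout;

	for (;;) {
		if (link_is_up())
			return 0;		/* handshake done, restart the block queue */
		if (vdc_timeout && time(NULL) >= deadline)
			return -EIO;		/* drain the queue, fail in-flight requests */
		sleep(1);			/* poll; a 0 timeout loops forever */
	}
}

int main(void)
{
	printf("wait_for_reconnect(5) -> %d\n", wait_for_reconnect(5));
	return 0;
}

In the patch itself the equivalent decision is made by vdc_ldc_reset_timer(),
which drains the request queue only if the handshake has not completed within
ldc_timeout seconds.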
Diffstat (limited to 'drivers/block/sunvdc.c')
-rw-r--r--	drivers/block/sunvdc.c	205
1 file changed, 183 insertions(+), 22 deletions(-)
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 65cec156cfb4..4b911ed96ea3 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -23,8 +23,8 @@
 
 #define DRV_MODULE_NAME "sunvdc"
 #define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.1"
-#define DRV_MODULE_RELDATE "February 13, 2013"
+#define DRV_MODULE_VERSION "1.2"
+#define DRV_MODULE_RELDATE "November 24, 2014"
 
 static char version[] =
 	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -40,6 +40,8 @@ MODULE_VERSION(DRV_MODULE_VERSION);
 #define WAITING_FOR_GEN_CMD 0x04
 #define WAITING_FOR_ANY -1
 
+static struct workqueue_struct *sunvdc_wq;
+
 struct vdc_req_entry {
 	struct request *req;
 };
@@ -60,6 +62,10 @@ struct vdc_port {
 	u64 max_xfer_size;
 	u32 vdisk_block_size;
 
+	u64 ldc_timeout;
+	struct timer_list ldc_reset_timer;
+	struct work_struct ldc_reset_work;
+
 	/* The server fills these in for us in the disk attribute
 	 * ACK packet.
 	 */
@@ -71,6 +77,10 @@ struct vdc_port {
 	char disk_name[32];
 };
 
+static void vdc_ldc_reset(struct vdc_port *port);
+static void vdc_ldc_reset_work(struct work_struct *work);
+static void vdc_ldc_reset_timer(unsigned long _arg);
+
 static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
 {
 	return container_of(vio, struct vdc_port, vio);
@@ -150,6 +160,21 @@ static const struct block_device_operations vdc_fops = {
 	.ioctl = vdc_ioctl,
 };
 
+static void vdc_blk_queue_start(struct vdc_port *port)
+{
+	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+
+	/* restart blk queue when ring is half emptied. also called after
+	 * handshake completes, so check for initial handshake before we've
+	 * allocated a disk.
+	 */
+	if (port->disk && blk_queue_stopped(port->disk->queue) &&
+	    vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50) {
+		blk_start_queue(port->disk->queue);
+	}
+
+}
+
 static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
 {
 	if (vio->cmp &&
@@ -163,7 +188,11 @@ static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
 
 static void vdc_handshake_complete(struct vio_driver_state *vio)
 {
+	struct vdc_port *port = to_vdc_port(vio);
+
+	del_timer(&port->ldc_reset_timer);
 	vdc_finish(vio, 0, WAITING_FOR_LINK_UP);
+	vdc_blk_queue_start(port);
 }
 
 static int vdc_handle_unknown(struct vdc_port *port, void *arg)
@@ -281,10 +310,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
 
 	__blk_end_request(req, (desc->status ? -EIO : 0), desc->size);
 
-	/* restart blk queue when ring is half emptied */
-	if (blk_queue_stopped(port->disk->queue) &&
-	    vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50)
-		blk_start_queue(port->disk->queue);
+	vdc_blk_queue_start(port);
 }
 
 static int vdc_ack(struct vdc_port *port, void *msgbuf)
@@ -317,17 +343,20 @@ static void vdc_event(void *arg, int event)
 
 	spin_lock_irqsave(&vio->lock, flags);
 
-	if (unlikely(event == LDC_EVENT_RESET ||
-		     event == LDC_EVENT_UP)) {
+	if (unlikely(event == LDC_EVENT_RESET)) {
 		vio_link_state_change(vio, event);
-		spin_unlock_irqrestore(&vio->lock, flags);
-		return;
+		queue_work(sunvdc_wq, &port->ldc_reset_work);
+		goto out;
+	}
+
+	if (unlikely(event == LDC_EVENT_UP)) {
+		vio_link_state_change(vio, event);
+		goto out;
 	}
 
 	if (unlikely(event != LDC_EVENT_DATA_READY)) {
-		printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
-		spin_unlock_irqrestore(&vio->lock, flags);
-		return;
+		pr_warn(PFX "Unexpected LDC event %d\n", event);
+		goto out;
 	}
 
 	err = 0;
@@ -371,6 +400,7 @@ static void vdc_event(void *arg, int event)
 	}
 	if (err < 0)
 		vdc_finish(&port->vio, err, WAITING_FOR_ANY);
+out:
 	spin_unlock_irqrestore(&vio->lock, flags);
 }
 
@@ -403,6 +433,8 @@ static int __vdc_tx_trigger(struct vdc_port *port)
 		delay = 128;
 	} while (err == -EAGAIN);
 
+	if (err == -ENOTCONN)
+		vdc_ldc_reset(port);
 	return err;
 }
 
@@ -690,12 +722,9 @@ static void vdc_free_tx_ring(struct vdc_port *port)
 	}
 }
 
-static int probe_disk(struct vdc_port *port)
+static int vdc_port_up(struct vdc_port *port)
 {
 	struct vio_completion comp;
-	struct request_queue *q;
-	struct gendisk *g;
-	int err;
 
 	init_completion(&comp.com);
 	comp.err = 0;
@@ -703,10 +732,27 @@ static int probe_disk(struct vdc_port *port)
 	port->vio.cmp = &comp;
 
 	vio_port_up(&port->vio);
-
 	wait_for_completion(&comp.com);
-	if (comp.err)
-		return comp.err;
+	return comp.err;
+}
+
+static void vdc_port_down(struct vdc_port *port)
+{
+	ldc_disconnect(port->vio.lp);
+	ldc_unbind(port->vio.lp);
+	vdc_free_tx_ring(port);
+	vio_ldc_free(&port->vio);
+}
+
+static int probe_disk(struct vdc_port *port)
+{
+	struct request_queue *q;
+	struct gendisk *g;
+	int err;
+
+	err = vdc_port_up(port);
+	if (err)
+		return err;
 
 	if (vdc_version_supported(port, 1, 1)) {
 		/* vdisk_size should be set during the handshake, if it wasn't
@@ -819,6 +865,7 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	struct mdesc_handle *hp;
 	struct vdc_port *port;
 	int err;
+	const u64 *ldc_timeout;
 
 	print_version();
 
@@ -848,6 +895,16 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 		 VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26));
 	port->vdisk_size = -1;
 
+	/* Actual wall time may be double due to do_generic_file_read() doing
+	 * a readahead I/O first, and once that fails it will try to read a
+	 * single page.
+	 */
+	ldc_timeout = mdesc_get_property(hp, vdev->mp, "vdc-timeout", NULL);
+	port->ldc_timeout = ldc_timeout ? *ldc_timeout : 0;
+	setup_timer(&port->ldc_reset_timer, vdc_ldc_reset_timer,
+		    (unsigned long)port);
+	INIT_WORK(&port->ldc_reset_work, vdc_ldc_reset_work);
+
 	err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
 			      vdc_versions, ARRAY_SIZE(vdc_versions),
 			      &vdc_vio_ops, port->disk_name);
@@ -902,6 +959,8 @@ static int vdc_port_remove(struct vio_dev *vdev)
 		blk_stop_queue(port->disk->queue);
 	spin_unlock_irqrestore(&port->vio.lock, flags);
 
+	flush_work(&port->ldc_reset_work);
+	del_timer_sync(&port->ldc_reset_timer);
 	del_timer_sync(&port->vio.timer);
 
 	del_gendisk(port->disk);
@@ -919,6 +978,102 @@ static int vdc_port_remove(struct vio_dev *vdev)
 	return 0;
 }
 
+static void vdc_requeue_inflight(struct vdc_port *port)
+{
+	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+	u32 idx;
+
+	for (idx = dr->cons; idx != dr->prod; idx = vio_dring_next(dr, idx)) {
+		struct vio_disk_desc *desc = vio_dring_entry(dr, idx);
+		struct vdc_req_entry *rqe = &port->rq_arr[idx];
+		struct request *req;
+
+		ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
+		desc->hdr.state = VIO_DESC_FREE;
+		dr->cons = vio_dring_next(dr, idx);
+
+		req = rqe->req;
+		if (req == NULL) {
+			vdc_end_special(port, desc);
+			continue;
+		}
+
+		rqe->req = NULL;
+		blk_requeue_request(port->disk->queue, req);
+	}
+}
+
+static void vdc_queue_drain(struct vdc_port *port)
+{
+	struct request *req;
+
+	while ((req = blk_fetch_request(port->disk->queue)) != NULL)
+		__blk_end_request_all(req, -EIO);
+}
+
+static void vdc_ldc_reset_timer(unsigned long _arg)
+{
+	struct vdc_port *port = (struct vdc_port *) _arg;
+	struct vio_driver_state *vio = &port->vio;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vio->lock, flags);
+	if (!(port->vio.hs_state & VIO_HS_COMPLETE)) {
+		pr_warn(PFX "%s ldc down %llu seconds, draining queue\n",
+			port->disk_name, port->ldc_timeout);
+		vdc_queue_drain(port);
+		vdc_blk_queue_start(port);
+	}
+	spin_unlock_irqrestore(&vio->lock, flags);
+}
+
+static void vdc_ldc_reset_work(struct work_struct *work)
+{
+	struct vdc_port *port;
+	struct vio_driver_state *vio;
+	unsigned long flags;
+
+	port = container_of(work, struct vdc_port, ldc_reset_work);
+	vio = &port->vio;
+
+	spin_lock_irqsave(&vio->lock, flags);
+	vdc_ldc_reset(port);
+	spin_unlock_irqrestore(&vio->lock, flags);
+}
+
+static void vdc_ldc_reset(struct vdc_port *port)
+{
+	int err;
+
+	assert_spin_locked(&port->vio.lock);
+
+	pr_warn(PFX "%s ldc link reset\n", port->disk_name);
+	blk_stop_queue(port->disk->queue);
+	vdc_requeue_inflight(port);
+	vdc_port_down(port);
+
+	err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
+	if (err) {
+		pr_err(PFX "%s vio_ldc_alloc:%d\n", port->disk_name, err);
+		return;
+	}
+
+	err = vdc_alloc_tx_ring(port);
+	if (err) {
+		pr_err(PFX "%s vio_alloc_tx_ring:%d\n", port->disk_name, err);
+		goto err_free_ldc;
+	}
+
+	if (port->ldc_timeout)
+		mod_timer(&port->ldc_reset_timer,
+			  round_jiffies(jiffies + HZ * port->ldc_timeout));
+	mod_timer(&port->vio.timer, round_jiffies(jiffies + HZ));
+	return;
+
+err_free_ldc:
+	vio_ldc_free(&port->vio);
+}
+
 static const struct vio_device_id vdc_port_match[] = {
 	{
 		.type = "vdc-port",
@@ -938,9 +1093,13 @@ static int __init vdc_init(void)
 {
 	int err;
 
+	sunvdc_wq = alloc_workqueue("sunvdc", 0, 0);
+	if (!sunvdc_wq)
+		return -ENOMEM;
+
 	err = register_blkdev(0, VDCBLK_NAME);
 	if (err < 0)
-		goto out_err;
+		goto out_free_wq;
 
 	vdc_major = err;
 
@@ -954,7 +1113,8 @@ out_unregister_blkdev:
 	unregister_blkdev(vdc_major, VDCBLK_NAME);
 	vdc_major = 0;
 
-out_err:
+out_free_wq:
+	destroy_workqueue(sunvdc_wq);
 	return err;
 }
 
@@ -962,6 +1122,7 @@ static void __exit vdc_exit(void)
 {
 	vio_unregister_driver(&vdc_port_driver);
 	unregister_blkdev(vdc_major, VDCBLK_NAME);
+	destroy_workqueue(sunvdc_wq);
 }
 
 module_init(vdc_init);