author     Linus Torvalds <torvalds@linux-foundation.org>  2014-12-12 18:36:40 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-12-12 18:36:40 -0500
commit     a7e8ddd813c20e3e10c9012f1625a5a02f870b15 (patch)
tree       8060fc23bc406d0035145cf34620b208ca6ab625
parent     ad8f723afbfe242ad2bc5067e06ca438b6a5c8a9 (diff)
parent     76e74bbe0a38c6720217425ed64dbb448c643b9d (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc
Pull sparc update from David Miller:
 "Not a lot of stuff this time around, mostly bug fixing:

   - Fix alignment of 32-bit crosscall datastructure on Leon, from
     Andreas Larsson.

   - Several fixes to the virtual disk driver on sparc64 by Dwight
     Engen, including handling resets of the service domain properly"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
  sunvdc: reconnect ldc after vds service domain restarts
  sparc/ldc: create separate ldc_unbind from ldc_free
  vio: create routines for inc,dec vio dring indexes
  sunvdc: fix module unload/reload
  sparc32, leon: Align ccall_info to prevent unaligned traps on crosscall
-rw-r--r--  arch/sparc/include/asm/ldc.h        |   1
-rw-r--r--  arch/sparc/include/asm/vio.h        |  15
-rw-r--r--  arch/sparc/kernel/ldc.c             |  12
-rw-r--r--  arch/sparc/kernel/leon_smp.c        |   2
-rw-r--r--  drivers/block/sunvdc.c              | 222
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c  |  30
6 files changed, 230 insertions(+), 52 deletions(-)
diff --git a/arch/sparc/include/asm/ldc.h b/arch/sparc/include/asm/ldc.h
index 58ab64de25d2..6e9004aa6f25 100644
--- a/arch/sparc/include/asm/ldc.h
+++ b/arch/sparc/include/asm/ldc.h
@@ -61,6 +61,7 @@ void ldc_free(struct ldc_channel *lp);
 
 /* Register TX and RX queues of the link with the hypervisor. */
 int ldc_bind(struct ldc_channel *lp);
+void ldc_unbind(struct ldc_channel *lp);
 
 /* For non-RAW protocols we need to complete a handshake before
  * communication can proceed. ldc_connect() does that, if the
diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
index fb124feb363b..8174f6cdbbbb 100644
--- a/arch/sparc/include/asm/vio.h
+++ b/arch/sparc/include/asm/vio.h
@@ -300,6 +300,21 @@ static inline u32 vio_dring_avail(struct vio_dring_state *dr,
 		((dr->prod - dr->cons) & (ring_size - 1)) - 1);
 }
 
+static inline u32 vio_dring_next(struct vio_dring_state *dr, u32 index)
+{
+	if (++index == dr->num_entries)
+		index = 0;
+	return index;
+}
+
+static inline u32 vio_dring_prev(struct vio_dring_state *dr, u32 index)
+{
+	if (index == 0)
+		return dr->num_entries - 1;
+	else
+		return index - 1;
+}
+
 #define VIO_MAX_TYPE_LEN	32
 #define VIO_MAX_COMPAT_LEN	64
 
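
The two inline helpers above are plain modular increment/decrement over dr->num_entries, so unlike the open-coded "(idx + 1) & (RING_SIZE - 1)" pattern they replace further down, they do not require the ring size to be a power of two. As an illustrative sketch (not part of this patch) of how a consumer walks a ring with them; process_desc() is a made-up placeholder, vio_dring_entry() is the existing accessor:

	/* Illustrative consumer walk over a VIO descriptor ring. */
	static void walk_ring(struct vio_dring_state *dr)
	{
		u32 idx;

		for (idx = dr->cons; idx != dr->prod; idx = vio_dring_next(dr, idx)) {
			process_desc(vio_dring_entry(dr, idx));	/* placeholder */
			dr->cons = vio_dring_next(dr, idx);	/* retire the entry */
		}
	}
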
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index 4310332872d4..274a9f59d95c 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -1222,11 +1222,12 @@ out_err:
 }
 EXPORT_SYMBOL(ldc_alloc);
 
-void ldc_free(struct ldc_channel *lp)
+void ldc_unbind(struct ldc_channel *lp)
 {
 	if (lp->flags & LDC_FLAG_REGISTERED_IRQS) {
 		free_irq(lp->cfg.rx_irq, lp);
 		free_irq(lp->cfg.tx_irq, lp);
+		lp->flags &= ~LDC_FLAG_REGISTERED_IRQS;
 	}
 
 	if (lp->flags & LDC_FLAG_REGISTERED_QUEUES) {
@@ -1240,10 +1241,15 @@ void ldc_free(struct ldc_channel *lp)
 		lp->flags &= ~LDC_FLAG_ALLOCED_QUEUES;
 	}
 
-	hlist_del(&lp->list);
+	ldc_set_state(lp, LDC_STATE_INIT);
+}
+EXPORT_SYMBOL(ldc_unbind);
 
+void ldc_free(struct ldc_channel *lp)
+{
+	ldc_unbind(lp);
+	hlist_del(&lp->list);
 	kfree(lp->mssbuf);
-
 	ldc_iommu_release(lp);
 
 	kfree(lp);
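
The split above lets a driver release the channel's IRQs and hypervisor queues without destroying the ldc_channel itself, which is what the sunvdc reset path below relies on. A minimal sketch of the intended ordering (mirroring vdc_port_down() later in this series; error handling omitted):

	/* Sketch only: lp is a bound struct ldc_channel. */
	ldc_disconnect(lp);	/* tear down the link */
	ldc_unbind(lp);		/* free IRQs/queues, state back to LDC_STATE_INIT */
	/* ...the channel can later be bound again with ldc_bind(), or
	 * released for good with ldc_free(), which now calls ldc_unbind()
	 * itself.
	 */
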
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index ea2bad306f93..71e16f2241c2 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -368,7 +368,7 @@ static struct smp_funcall {
 	unsigned long arg5;
 	unsigned long processors_in[NR_CPUS];  /* Set when ipi entered. */
 	unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */
-} ccall_info;
+} ccall_info __attribute__((aligned(8)));
 
 static DEFINE_SPINLOCK(cross_call_lock);
 
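
For context: the commit subject says the unaligned traps happen on the crosscall path, presumably because ccall_info is accessed there with double-word (ldd/std) instructions, which on SPARC require 8-byte alignment; the attribute guarantees that alignment regardless of where the linker places the object. The same idiom in isolation (hypothetical struct, not from the tree):

	/* Hypothetical example: any object accessed with 64-bit loads/stores
	 * on sparc32 must be 8-byte aligned, or the access traps.
	 */
	struct crosscall_args {
		unsigned long func;
		unsigned long arg1;
	} __attribute__((aligned(8)));
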
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 0ebadf93b6c5..4b911ed96ea3 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -23,8 +23,8 @@
 
 #define DRV_MODULE_NAME		"sunvdc"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"1.1"
-#define DRV_MODULE_RELDATE	"February 13, 2013"
+#define DRV_MODULE_VERSION	"1.2"
+#define DRV_MODULE_RELDATE	"November 24, 2014"
 
 static char version[] =
 	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -40,6 +40,8 @@ MODULE_VERSION(DRV_MODULE_VERSION);
 #define WAITING_FOR_GEN_CMD	0x04
 #define WAITING_FOR_ANY		-1
 
+static struct workqueue_struct *sunvdc_wq;
+
 struct vdc_req_entry {
 	struct request *req;
 };
@@ -60,6 +62,10 @@ struct vdc_port {
 	u64			max_xfer_size;
 	u32			vdisk_block_size;
 
+	u64			ldc_timeout;
+	struct timer_list	ldc_reset_timer;
+	struct work_struct	ldc_reset_work;
+
 	/* The server fills these in for us in the disk attribute
 	 * ACK packet.
 	 */
@@ -71,6 +77,10 @@ struct vdc_port {
 	char			disk_name[32];
 };
 
+static void vdc_ldc_reset(struct vdc_port *port);
+static void vdc_ldc_reset_work(struct work_struct *work);
+static void vdc_ldc_reset_timer(unsigned long _arg);
+
 static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
 {
 	return container_of(vio, struct vdc_port, vio);
@@ -150,6 +160,21 @@ static const struct block_device_operations vdc_fops = {
 	.ioctl			= vdc_ioctl,
 };
 
+static void vdc_blk_queue_start(struct vdc_port *port)
+{
+	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+
+	/* restart blk queue when ring is half emptied. also called after
+	 * handshake completes, so check for initial handshake before we've
+	 * allocated a disk.
+	 */
+	if (port->disk && blk_queue_stopped(port->disk->queue) &&
+	    vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50) {
+		blk_start_queue(port->disk->queue);
+	}
+
+}
+
 static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
 {
 	if (vio->cmp &&
@@ -163,7 +188,11 @@ static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
 
 static void vdc_handshake_complete(struct vio_driver_state *vio)
 {
+	struct vdc_port *port = to_vdc_port(vio);
+
+	del_timer(&port->ldc_reset_timer);
 	vdc_finish(vio, 0, WAITING_FOR_LINK_UP);
+	vdc_blk_queue_start(port);
 }
 
 static int vdc_handle_unknown(struct vdc_port *port, void *arg)
@@ -269,7 +298,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
 
 	ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
 	desc->hdr.state = VIO_DESC_FREE;
-	dr->cons = (index + 1) & (VDC_TX_RING_SIZE - 1);
+	dr->cons = vio_dring_next(dr, index);
 
 	req = rqe->req;
 	if (req == NULL) {
@@ -281,10 +310,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
 
 	__blk_end_request(req, (desc->status ? -EIO : 0), desc->size);
 
-	/* restart blk queue when ring is half emptied */
-	if (blk_queue_stopped(port->disk->queue) &&
-	    vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50)
-		blk_start_queue(port->disk->queue);
+	vdc_blk_queue_start(port);
 }
 
 static int vdc_ack(struct vdc_port *port, void *msgbuf)
@@ -317,17 +343,20 @@ static void vdc_event(void *arg, int event)
 
 	spin_lock_irqsave(&vio->lock, flags);
 
-	if (unlikely(event == LDC_EVENT_RESET ||
-		     event == LDC_EVENT_UP)) {
+	if (unlikely(event == LDC_EVENT_RESET)) {
 		vio_link_state_change(vio, event);
-		spin_unlock_irqrestore(&vio->lock, flags);
-		return;
+		queue_work(sunvdc_wq, &port->ldc_reset_work);
+		goto out;
+	}
+
+	if (unlikely(event == LDC_EVENT_UP)) {
+		vio_link_state_change(vio, event);
+		goto out;
 	}
 
 	if (unlikely(event != LDC_EVENT_DATA_READY)) {
-		printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
-		spin_unlock_irqrestore(&vio->lock, flags);
-		return;
+		pr_warn(PFX "Unexpected LDC event %d\n", event);
+		goto out;
 	}
 
 	err = 0;
@@ -371,6 +400,7 @@ static void vdc_event(void *arg, int event)
 	}
 	if (err < 0)
 		vdc_finish(&port->vio, err, WAITING_FOR_ANY);
+out:
 	spin_unlock_irqrestore(&vio->lock, flags);
 }
 
@@ -403,6 +433,8 @@ static int __vdc_tx_trigger(struct vdc_port *port)
 			delay = 128;
 	} while (err == -EAGAIN);
 
+	if (err == -ENOTCONN)
+		vdc_ldc_reset(port);
 	return err;
 }
 
@@ -472,7 +504,7 @@ static int __send_request(struct request *req)
 		printk(KERN_ERR PFX "vdc_tx_trigger() failure, err=%d\n", err);
 	} else {
 		port->req_id++;
-		dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
+		dr->prod = vio_dring_next(dr, dr->prod);
 	}
 
 	return err;
@@ -626,7 +658,7 @@ static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
 	err = __vdc_tx_trigger(port);
 	if (err >= 0) {
 		port->req_id++;
-		dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
+		dr->prod = vio_dring_next(dr, dr->prod);
 		spin_unlock_irqrestore(&port->vio.lock, flags);
 
 		wait_for_completion(&comp.com);
@@ -690,12 +722,9 @@ static void vdc_free_tx_ring(struct vdc_port *port)
 	}
 }
 
-static int probe_disk(struct vdc_port *port)
+static int vdc_port_up(struct vdc_port *port)
 {
 	struct vio_completion comp;
-	struct request_queue *q;
-	struct gendisk *g;
-	int err;
 
 	init_completion(&comp.com);
 	comp.err = 0;
@@ -703,10 +732,27 @@ static int probe_disk(struct vdc_port *port)
 	port->vio.cmp = &comp;
 
 	vio_port_up(&port->vio);
-
 	wait_for_completion(&comp.com);
-	if (comp.err)
-		return comp.err;
+	return comp.err;
+}
+
+static void vdc_port_down(struct vdc_port *port)
+{
+	ldc_disconnect(port->vio.lp);
+	ldc_unbind(port->vio.lp);
+	vdc_free_tx_ring(port);
+	vio_ldc_free(&port->vio);
+}
+
+static int probe_disk(struct vdc_port *port)
+{
+	struct request_queue *q;
+	struct gendisk *g;
+	int err;
+
+	err = vdc_port_up(port);
+	if (err)
+		return err;
 
 	if (vdc_version_supported(port, 1, 1)) {
 		/* vdisk_size should be set during the handshake, if it wasn't
@@ -819,6 +865,7 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	struct mdesc_handle *hp;
 	struct vdc_port *port;
 	int err;
+	const u64 *ldc_timeout;
 
 	print_version();
 
@@ -848,6 +895,16 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 		 VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26));
 	port->vdisk_size = -1;
 
+	/* Actual wall time may be double due to do_generic_file_read() doing
+	 * a readahead I/O first, and once that fails it will try to read a
+	 * single page.
+	 */
+	ldc_timeout = mdesc_get_property(hp, vdev->mp, "vdc-timeout", NULL);
+	port->ldc_timeout = ldc_timeout ? *ldc_timeout : 0;
+	setup_timer(&port->ldc_reset_timer, vdc_ldc_reset_timer,
+		    (unsigned long)port);
+	INIT_WORK(&port->ldc_reset_work, vdc_ldc_reset_work);
+
 	err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
 			      vdc_versions, ARRAY_SIZE(vdc_versions),
 			      &vdc_vio_ops, port->disk_name);
@@ -896,8 +953,21 @@ static int vdc_port_remove(struct vio_dev *vdev)
 	struct vdc_port *port = dev_get_drvdata(&vdev->dev);
 
 	if (port) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&port->vio.lock, flags);
+		blk_stop_queue(port->disk->queue);
+		spin_unlock_irqrestore(&port->vio.lock, flags);
+
+		flush_work(&port->ldc_reset_work);
+		del_timer_sync(&port->ldc_reset_timer);
 		del_timer_sync(&port->vio.timer);
 
+		del_gendisk(port->disk);
+		blk_cleanup_queue(port->disk->queue);
+		put_disk(port->disk);
+		port->disk = NULL;
+
 		vdc_free_tx_ring(port);
 		vio_ldc_free(&port->vio);
 
@@ -908,6 +978,102 @@ static int vdc_port_remove(struct vio_dev *vdev)
 	return 0;
 }
 
+static void vdc_requeue_inflight(struct vdc_port *port)
+{
+	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+	u32 idx;
+
+	for (idx = dr->cons; idx != dr->prod; idx = vio_dring_next(dr, idx)) {
+		struct vio_disk_desc *desc = vio_dring_entry(dr, idx);
+		struct vdc_req_entry *rqe = &port->rq_arr[idx];
+		struct request *req;
+
+		ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
+		desc->hdr.state = VIO_DESC_FREE;
+		dr->cons = vio_dring_next(dr, idx);
+
+		req = rqe->req;
+		if (req == NULL) {
+			vdc_end_special(port, desc);
+			continue;
+		}
+
+		rqe->req = NULL;
+		blk_requeue_request(port->disk->queue, req);
+	}
+}
+
+static void vdc_queue_drain(struct vdc_port *port)
+{
+	struct request *req;
+
+	while ((req = blk_fetch_request(port->disk->queue)) != NULL)
+		__blk_end_request_all(req, -EIO);
+}
+
+static void vdc_ldc_reset_timer(unsigned long _arg)
+{
+	struct vdc_port *port = (struct vdc_port *) _arg;
+	struct vio_driver_state *vio = &port->vio;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vio->lock, flags);
+	if (!(port->vio.hs_state & VIO_HS_COMPLETE)) {
+		pr_warn(PFX "%s ldc down %llu seconds, draining queue\n",
+			port->disk_name, port->ldc_timeout);
+		vdc_queue_drain(port);
+		vdc_blk_queue_start(port);
+	}
+	spin_unlock_irqrestore(&vio->lock, flags);
+}
+
+static void vdc_ldc_reset_work(struct work_struct *work)
+{
+	struct vdc_port *port;
+	struct vio_driver_state *vio;
+	unsigned long flags;
+
+	port = container_of(work, struct vdc_port, ldc_reset_work);
+	vio = &port->vio;
+
+	spin_lock_irqsave(&vio->lock, flags);
+	vdc_ldc_reset(port);
+	spin_unlock_irqrestore(&vio->lock, flags);
+}
+
+static void vdc_ldc_reset(struct vdc_port *port)
+{
+	int err;
+
+	assert_spin_locked(&port->vio.lock);
+
+	pr_warn(PFX "%s ldc link reset\n", port->disk_name);
+	blk_stop_queue(port->disk->queue);
+	vdc_requeue_inflight(port);
+	vdc_port_down(port);
+
+	err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
+	if (err) {
+		pr_err(PFX "%s vio_ldc_alloc:%d\n", port->disk_name, err);
+		return;
+	}
+
+	err = vdc_alloc_tx_ring(port);
+	if (err) {
+		pr_err(PFX "%s vio_alloc_tx_ring:%d\n", port->disk_name, err);
+		goto err_free_ldc;
+	}
+
+	if (port->ldc_timeout)
+		mod_timer(&port->ldc_reset_timer,
+			  round_jiffies(jiffies + HZ * port->ldc_timeout));
+	mod_timer(&port->vio.timer, round_jiffies(jiffies + HZ));
+	return;
+
+err_free_ldc:
+	vio_ldc_free(&port->vio);
+}
+
 static const struct vio_device_id vdc_port_match[] = {
 	{
 		.type = "vdc-port",
@@ -927,9 +1093,13 @@ static int __init vdc_init(void)
 {
 	int err;
 
+	sunvdc_wq = alloc_workqueue("sunvdc", 0, 0);
+	if (!sunvdc_wq)
+		return -ENOMEM;
+
 	err = register_blkdev(0, VDCBLK_NAME);
 	if (err < 0)
-		goto out_err;
+		goto out_free_wq;
 
 	vdc_major = err;
 
@@ -943,7 +1113,8 @@ out_unregister_blkdev:
 	unregister_blkdev(vdc_major, VDCBLK_NAME);
 	vdc_major = 0;
 
-out_err:
+out_free_wq:
+	destroy_workqueue(sunvdc_wq);
 	return err;
 }
 
@@ -951,6 +1122,7 @@ static void __exit vdc_exit(void)
 {
 	vio_unregister_driver(&vdc_port_driver);
 	unregister_blkdev(vdc_major, VDCBLK_NAME);
+	destroy_workqueue(sunvdc_wq);
 }
 
 module_init(vdc_init);
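
The reset handling added above follows a common deferral pattern: vdc_event() runs with vio->lock held, so on LDC_EVENT_RESET it only queues work on the dedicated sunvdc workqueue, vdc_ldc_reset_work() performs the actual channel rebuild, and ldc_reset_timer bounds how long requests may sit queued (the per-port "vdc-timeout" machine-description property) before the queue is drained with -EIO. A generic sketch of that event-handler shape, with hypothetical names (recovery_wq, recovery_work, recovery_timer, RECOVERY_TIMEOUT_SECS are not from the driver):

	#include <linux/workqueue.h>
	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static struct workqueue_struct *recovery_wq;	/* hypothetical */
	static struct work_struct recovery_work;	/* hypothetical */
	static struct timer_list recovery_timer;	/* hypothetical */
	#define RECOVERY_TIMEOUT_SECS	10		/* hypothetical */

	/* Called from an event/IRQ path with a spinlock held: defer the
	 * heavy rebuild to process context and arm a timeout that gives
	 * up if recovery stalls.
	 */
	static void handle_reset_event(void)
	{
		queue_work(recovery_wq, &recovery_work);
		mod_timer(&recovery_timer,
			  round_jiffies(jiffies + RECOVERY_TIMEOUT_SECS * HZ));
	}
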
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 90c86cd3be14..45c408ef67d0 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -466,23 +466,6 @@ static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
 	return err;
 }
 
-static u32 next_idx(u32 idx, struct vio_dring_state *dr)
-{
-	if (++idx == dr->num_entries)
-		idx = 0;
-	return idx;
-}
-
-static u32 prev_idx(u32 idx, struct vio_dring_state *dr)
-{
-	if (idx == 0)
-		idx = dr->num_entries - 1;
-	else
-		idx--;
-
-	return idx;
-}
-
 static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
 					struct vio_dring_state *dr,
 					u32 index)
@@ -556,7 +539,8 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
 	int ack_start = -1, ack_end = -1;
 	bool send_ack = true;
 
-	end = (end == (u32) -1) ? prev_idx(start, dr) : next_idx(end, dr);
+	end = (end == (u32) -1) ? vio_dring_prev(dr, start)
+				: vio_dring_next(dr, end);
 
 	viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);
 
@@ -570,7 +554,7 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
 		if (ack_start == -1)
 			ack_start = start;
 		ack_end = start;
-		start = next_idx(start, dr);
+		start = vio_dring_next(dr, start);
 		if (ack && start != end) {
 			err = vnet_send_ack(port, dr, ack_start, ack_end,
 					    VIO_DRING_ACTIVE);
@@ -584,7 +568,7 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
 		}
 	}
 	if (unlikely(ack_start == -1))
-		ack_start = ack_end = prev_idx(start, dr);
+		ack_start = ack_end = vio_dring_prev(dr, start);
 	if (send_ack) {
 		port->napi_resume = false;
 		return vnet_send_ack(port, dr, ack_start, ack_end,
@@ -633,7 +617,7 @@ static int idx_is_pending(struct vio_dring_state *dr, u32 end)
 			found = 1;
 			break;
 		}
-		idx = next_idx(idx, dr);
+		idx = vio_dring_next(dr, idx);
 	}
 	return found;
 }
@@ -663,7 +647,7 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf)
 	/* sync for race conditions with vnet_start_xmit() and tell xmit it
 	 * is time to send a trigger.
 	 */
-	dr->cons = next_idx(end, dr);
+	dr->cons = vio_dring_next(dr, end);
 	desc = vio_dring_entry(dr, dr->cons);
 	if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) {
 		/* vnet_start_xmit() just populated this dring but missed
@@ -784,7 +768,7 @@ ldc_ctrl:
 		pkt->tag.stype = VIO_SUBTYPE_INFO;
 		pkt->tag.stype_env = VIO_DRING_DATA;
 		pkt->seq = dr->rcv_nxt;
-		pkt->start_idx = next_idx(port->napi_stop_idx, dr);
+		pkt->start_idx = vio_dring_next(dr, port->napi_stop_idx);
 		pkt->end_idx = -1;
 		goto napi_resume;
 	}