summaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorBryan O'Donoghue <bryan.odonoghue@linaro.org>2015-12-06 20:59:06 -0500
committerGreg Kroah-Hartman <gregkh@google.com>2015-12-07 14:32:20 -0500
commit12927835d21127d7e528b9ed56fc334ac96db985 (patch)
treeab51bc9aa71514ed1be3edd06b82d3a36fb70ad6 /drivers
parent2e238d71edadf03bed470cf58514ee10795a806b (diff)
greybus: loopback: Add asynchronous bi-directional support
A particular ask from the firmware people for some time now has been the ability to drive multiple outstanding bi-directional operations from loopback to loopback Interfaces. This patch implements that change. The approach taken is to make a call to gb_operation_send() and have loopback capture the completion callback itself, with a parallel timer to timeout completion callbacks that take too long. The calling thread will issue each gb_operation_send() as fast as it can within the constraints of thread-safety. In order to support this addition the following new sysfs entries are created on a per-connection basis. - async Zero indicates loopback should use the traditional synchronous model i.e. gb_operation_request_send_sync(). Non-zero indicates loopback should use the new asynchronous model i.e. gb_operation_send() - requests_completed This value indicates the number of requests successfully completed. - requests_timedout This value indicates the number of requests which timed out. - timeout The number of microseconds to give an individual asynchronous request before timing that request out. - timeout_min Read-only attribute informs user-space of the minimum allowed timeout. - timeout_max Read-only attribute informs user-space of the maximum allowed timeout. Note requests_completed + requests_timedout should always equal iteration_max, once iteration_count == iteration_max. Also, at this time we support either synchronous or asynchronous operations in one set of transactions. Signed-off-by: Bryan O'Donoghue <bryan.odonoghue@linaro.org> Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/staging/greybus/loopback.c388
1 files changed, 369 insertions, 19 deletions
diff --git a/drivers/staging/greybus/loopback.c b/drivers/staging/greybus/loopback.c
index b65e3e591105..392f9854ff56 100644
--- a/drivers/staging/greybus/loopback.c
+++ b/drivers/staging/greybus/loopback.c
@@ -20,6 +20,7 @@
20#include <linux/debugfs.h> 20#include <linux/debugfs.h>
21#include <linux/list_sort.h> 21#include <linux/list_sort.h>
22#include <linux/spinlock.h> 22#include <linux/spinlock.h>
23#include <linux/workqueue.h>
23 24
24#include <asm/div64.h> 25#include <asm/div64.h>
25 26
@@ -43,10 +44,24 @@ struct gb_loopback_device {
43 /* We need to take a lock in atomic context */ 44 /* We need to take a lock in atomic context */
44 spinlock_t lock; 45 spinlock_t lock;
45 struct list_head list; 46 struct list_head list;
47 struct list_head list_op_async;
48 wait_queue_head_t wq;
46}; 49};
47 50
48static struct gb_loopback_device gb_dev; 51static struct gb_loopback_device gb_dev;
49 52
53struct gb_loopback_async_operation {
54 struct gb_loopback *gb;
55 struct gb_operation *operation;
56 struct timeval ts;
57 struct timer_list timer;
58 struct list_head entry;
59 struct work_struct work;
60 struct kref kref;
61 bool pending;
62 int (*completion)(struct gb_loopback_async_operation *op_async);
63};
64
50struct gb_loopback { 65struct gb_loopback {
51 struct gb_connection *connection; 66 struct gb_connection *connection;
52 67
@@ -66,18 +81,29 @@ struct gb_loopback {
66 struct gb_loopback_stats gpbridge_firmware_latency; 81 struct gb_loopback_stats gpbridge_firmware_latency;
67 82
68 int type; 83 int type;
84 int async;
69 u32 mask; 85 u32 mask;
70 u32 size; 86 u32 size;
71 u32 iteration_max; 87 u32 iteration_max;
72 u32 iteration_count; 88 u32 iteration_count;
73 int ms_wait; 89 int ms_wait;
74 u32 error; 90 u32 error;
91 u32 requests_completed;
92 u32 requests_timedout;
93 u32 timeout;
94 u32 jiffy_timeout;
95 u32 timeout_min;
96 u32 timeout_max;
75 u32 lbid; 97 u32 lbid;
76 u64 elapsed_nsecs; 98 u64 elapsed_nsecs;
77 u32 apbridge_latency_ts; 99 u32 apbridge_latency_ts;
78 u32 gpbridge_latency_ts; 100 u32 gpbridge_latency_ts;
79}; 101};
80 102
103/* Min/max values in jiffies */
104#define GB_LOOPBACK_TIMEOUT_MIN 1
105#define GB_LOOPBACK_TIMEOUT_MAX 10000
106
81#define GB_LOOPBACK_FIFO_DEFAULT 8192 107#define GB_LOOPBACK_FIFO_DEFAULT 8192
82 108
83static unsigned kfifo_depth = GB_LOOPBACK_FIFO_DEFAULT; 109static unsigned kfifo_depth = GB_LOOPBACK_FIFO_DEFAULT;
@@ -215,6 +241,8 @@ static void gb_loopback_check_attr(struct gb_loopback *gb,
215 gb->ms_wait = GB_LOOPBACK_MS_WAIT_MAX; 241 gb->ms_wait = GB_LOOPBACK_MS_WAIT_MAX;
216 if (gb->size > gb_dev.size_max) 242 if (gb->size > gb_dev.size_max)
217 gb->size = gb_dev.size_max; 243 gb->size = gb_dev.size_max;
244 gb->requests_timedout = 0;
245 gb->requests_completed = 0;
218 gb->iteration_count = 0; 246 gb->iteration_count = 0;
219 gb->error = 0; 247 gb->error = 0;
220 248
@@ -230,6 +258,11 @@ static void gb_loopback_check_attr(struct gb_loopback *gb,
230 case GB_LOOPBACK_TYPE_PING: 258 case GB_LOOPBACK_TYPE_PING:
231 case GB_LOOPBACK_TYPE_TRANSFER: 259 case GB_LOOPBACK_TYPE_TRANSFER:
232 case GB_LOOPBACK_TYPE_SINK: 260 case GB_LOOPBACK_TYPE_SINK:
261 gb->jiffy_timeout = usecs_to_jiffies(gb->timeout);
262 if (!gb->jiffy_timeout)
263 gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MIN;
264 else if (gb->jiffy_timeout > GB_LOOPBACK_TIMEOUT_MAX)
265 gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MAX;
233 gb_loopback_reset_stats(gb); 266 gb_loopback_reset_stats(gb);
234 wake_up(&gb->wq); 267 wake_up(&gb->wq);
235 break; 268 break;
@@ -252,6 +285,14 @@ gb_loopback_stats_attrs(gpbridge_firmware_latency);
252 285
253/* Number of errors encountered during loop */ 286/* Number of errors encountered during loop */
254gb_loopback_ro_attr(error); 287gb_loopback_ro_attr(error);
288/* Number of requests successfully completed async */
289gb_loopback_ro_attr(requests_completed);
290/* Number of requests timed out async */
291gb_loopback_ro_attr(requests_timedout);
292/* Timeout minimum in useconds */
293gb_loopback_ro_attr(timeout_min);
294/* Timeout maximum in useconds */
295gb_loopback_ro_attr(timeout_max);
255 296
256/* 297/*
257 * Type of loopback message to send based on protocol type definitions 298 * Type of loopback message to send based on protocol type definitions
@@ -270,8 +311,12 @@ gb_dev_loopback_rw_attr(ms_wait, d);
270gb_dev_loopback_rw_attr(iteration_max, u); 311gb_dev_loopback_rw_attr(iteration_max, u);
271/* The current index of the for (i = 0; i < iteration_max; i++) loop */ 312/* The current index of the for (i = 0; i < iteration_max; i++) loop */
272gb_dev_loopback_ro_attr(iteration_count, false); 313gb_dev_loopback_ro_attr(iteration_count, false);
273/* A bit-mask of destination connecitons to include in the test run */ 314/* A bit-mask of destination connections to include in the test run */
274gb_dev_loopback_rw_attr(mask, u); 315gb_dev_loopback_rw_attr(mask, u);
316/* A flag to indicate synchronous or asynchronous operations */
317gb_dev_loopback_rw_attr(async, u);
318/* Timeout of an individual asynchronous request */
319gb_dev_loopback_rw_attr(timeout, u);
275 320
276static struct attribute *loopback_attrs[] = { 321static struct attribute *loopback_attrs[] = {
277 &dev_attr_latency_min.attr, 322 &dev_attr_latency_min.attr,
@@ -295,11 +340,19 @@ static struct attribute *loopback_attrs[] = {
295 &dev_attr_iteration_count.attr, 340 &dev_attr_iteration_count.attr,
296 &dev_attr_iteration_max.attr, 341 &dev_attr_iteration_max.attr,
297 &dev_attr_mask.attr, 342 &dev_attr_mask.attr,
343 &dev_attr_async.attr,
298 &dev_attr_error.attr, 344 &dev_attr_error.attr,
345 &dev_attr_requests_completed.attr,
346 &dev_attr_requests_timedout.attr,
347 &dev_attr_timeout.attr,
348 &dev_attr_timeout_min.attr,
349 &dev_attr_timeout_max.attr,
299 NULL, 350 NULL,
300}; 351};
301ATTRIBUTE_GROUPS(loopback); 352ATTRIBUTE_GROUPS(loopback);
302 353
354static void gb_loopback_calculate_stats(struct gb_loopback *gb);
355
303static u32 gb_loopback_nsec_to_usec_latency(u64 elapsed_nsecs) 356static u32 gb_loopback_nsec_to_usec_latency(u64 elapsed_nsecs)
304{ 357{
305 u32 lat; 358 u32 lat;
@@ -381,7 +434,200 @@ error:
381 return ret; 434 return ret;
382} 435}
383 436
384static int gb_loopback_sink(struct gb_loopback *gb, u32 len) 437static void __gb_loopback_async_operation_destroy(struct kref *kref)
438{
439 struct gb_loopback_async_operation *op_async;
440
441 op_async = container_of(kref, struct gb_loopback_async_operation, kref);
442
443 list_del(&op_async->entry);
444 if (op_async->operation)
445 gb_operation_put(op_async->operation);
446 kfree(op_async);
447}
448
449static void gb_loopback_async_operation_get(struct gb_loopback_async_operation
450 *op_async)
451{
452 kref_get(&op_async->kref);
453}
454
455static void gb_loopback_async_operation_put(struct gb_loopback_async_operation
456 *op_async)
457{
458 unsigned long flags;
459
460 spin_lock_irqsave(&gb_dev.lock, flags);
461 kref_put(&op_async->kref, __gb_loopback_async_operation_destroy);
462 spin_unlock_irqrestore(&gb_dev.lock, flags);
463}
464
465static struct gb_loopback_async_operation *
466 gb_loopback_operation_find(u16 id)
467{
468 struct gb_loopback_async_operation *op_async;
469 bool found = false;
470 unsigned long flags;
471
472 spin_lock_irqsave(&gb_dev.lock, flags);
473 list_for_each_entry(op_async, &gb_dev.list_op_async, entry) {
474 if (op_async->operation->id == id) {
475 gb_loopback_async_operation_get(op_async);
476 found = true;
477 break;
478 }
479 }
480 spin_unlock_irqrestore(&gb_dev.lock, flags);
481
482 return found ? op_async : NULL;
483}
484
485static void gb_loopback_async_operation_callback(struct gb_operation *operation)
486{
487 struct gb_loopback_async_operation *op_async;
488 struct gb_loopback *gb;
489 struct timeval te;
490 bool err = false;
491
492 do_gettimeofday(&te);
493 op_async = gb_loopback_operation_find(operation->id);
494 if (!op_async)
495 return;
496
497 gb = op_async->gb;
498 mutex_lock(&gb->mutex);
499
500 if (!op_async->pending || gb_operation_result(operation)) {
501 err = true;
502 } else {
503 if (op_async->completion)
504 if (op_async->completion(op_async))
505 err = true;
506 }
507
508 if (err) {
509 gb->error++;
510 } else {
511 gb->requests_completed++;
512 gb_loopback_push_latency_ts(gb, &op_async->ts, &te);
513 gb->elapsed_nsecs = gb_loopback_calc_latency(&op_async->ts,
514 &te);
515 gb_loopback_calculate_stats(gb);
516 }
517
518 if (op_async->pending) {
519 gb->iteration_count++;
520 op_async->pending = false;
521 del_timer_sync(&op_async->timer);
522 gb_loopback_async_operation_put(op_async);
523 }
524 mutex_unlock(&gb->mutex);
525
526 dev_dbg(&gb->connection->bundle->dev, "complete operation %d\n",
527 operation->id);
528
529 gb_loopback_async_operation_put(op_async);
530}
531
532static void gb_loopback_async_operation_work(struct work_struct *work)
533{
534 struct gb_loopback *gb;
535 struct gb_operation *operation;
536 struct gb_loopback_async_operation *op_async;
537
538 op_async = container_of(work, struct gb_loopback_async_operation, work);
539 if (!op_async)
540 return;
541
542 gb = op_async->gb;
543 operation = op_async->operation;
544
545 mutex_lock(&gb->mutex);
546 if (op_async->pending) {
547 gb->requests_timedout++;
548 gb->error++;
549 gb->iteration_count++;
550 op_async->pending = false;
551 gb_loopback_async_operation_put(op_async);
552 }
553 mutex_unlock(&gb->mutex);
554
555 dev_dbg(&gb->connection->bundle->dev, "timeout operation %d\n",
556 operation->id);
557
558 gb_operation_cancel(operation, -ETIMEDOUT);
559 gb_loopback_async_operation_put(op_async);
560}
561
562static void gb_loopback_async_operation_timeout(unsigned long data)
563{
564 struct gb_loopback_async_operation *op_async;
565 u16 id = data;
566
567 op_async = gb_loopback_operation_find(id);
568 if (!op_async) {
569 pr_err("operation %d not found - time out ?\n", id);
570 return;
571 }
572 schedule_work(&op_async->work);
573}
574
575static int gb_loopback_async_operation(struct gb_loopback *gb, int type,
576 void *request, int request_size,
577 int response_size,
578 void *completion)
579{
580 struct gb_loopback_async_operation *op_async;
581 struct gb_operation *operation;
582 int ret;
583 unsigned long flags;
584
585 op_async = kzalloc(sizeof(*op_async), GFP_KERNEL);
586 if (!op_async)
587 return -ENOMEM;
588
589 INIT_WORK(&op_async->work, gb_loopback_async_operation_work);
590 init_timer(&op_async->timer);
591 kref_init(&op_async->kref);
592
593 operation = gb_operation_create(gb->connection, type, request_size,
594 response_size, GFP_KERNEL);
595 if (!operation) {
596 ret = -ENOMEM;
597 goto error;
598 }
599
600 if (request_size)
601 memcpy(operation->request->payload, request, request_size);
602
603 op_async->gb = gb;
604 op_async->operation = operation;
605 op_async->completion = completion;
606
607 spin_lock_irqsave(&gb_dev.lock, flags);
608 list_add_tail(&op_async->entry, &gb_dev.list_op_async);
609 spin_unlock_irqrestore(&gb_dev.lock, flags);
610
611 do_gettimeofday(&op_async->ts);
612 op_async->pending = true;
613 ret = gb_operation_request_send(operation,
614 gb_loopback_async_operation_callback,
615 GFP_KERNEL);
616 if (ret)
617 goto error;
618
619 op_async->timer.function = gb_loopback_async_operation_timeout;
620 op_async->timer.expires = jiffies + gb->jiffy_timeout;
621 op_async->timer.data = (unsigned long)operation->id;
622 add_timer(&op_async->timer);
623
624 return ret;
625error:
626 gb_loopback_async_operation_put(op_async);
627 return ret;
628}
629
630static int gb_loopback_sync_sink(struct gb_loopback *gb, u32 len)
385{ 631{
386 struct gb_loopback_transfer_request *request; 632 struct gb_loopback_transfer_request *request;
387 int retval; 633 int retval;
@@ -398,7 +644,7 @@ static int gb_loopback_sink(struct gb_loopback *gb, u32 len)
398 return retval; 644 return retval;
399} 645}
400 646
401static int gb_loopback_transfer(struct gb_loopback *gb, u32 len) 647static int gb_loopback_sync_transfer(struct gb_loopback *gb, u32 len)
402{ 648{
403 struct gb_loopback_transfer_request *request; 649 struct gb_loopback_transfer_request *request;
404 struct gb_loopback_transfer_response *response; 650 struct gb_loopback_transfer_response *response;
@@ -440,12 +686,91 @@ gb_error:
440 return retval; 686 return retval;
441} 687}
442 688
443static int gb_loopback_ping(struct gb_loopback *gb) 689static int gb_loopback_sync_ping(struct gb_loopback *gb)
444{ 690{
445 return gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_PING, 691 return gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_PING,
446 NULL, 0, NULL, 0); 692 NULL, 0, NULL, 0);
447} 693}
448 694
695static int gb_loopback_async_sink(struct gb_loopback *gb, u32 len)
696{
697 struct gb_loopback_transfer_request *request;
698 int retval;
699
700 request = kmalloc(len + sizeof(*request), GFP_KERNEL);
701 if (!request)
702 return -ENOMEM;
703
704 request->len = cpu_to_le32(len);
705 retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_SINK,
706 request, len + sizeof(*request),
707 0, NULL);
708 kfree(request);
709 return retval;
710}
711
712static int gb_loopback_async_transfer_complete(
713 struct gb_loopback_async_operation *op_async)
714{
715 struct gb_loopback *gb;
716 struct gb_operation *operation;
717 struct gb_loopback_transfer_request *request;
718 struct gb_loopback_transfer_response *response;
719 size_t len;
720 int retval = 0;
721
722 gb = op_async->gb;
723 operation = op_async->operation;
724 request = operation->request->payload;
725 response = operation->response->payload;
726 len = le32_to_cpu(request->len);
727
728 if (memcmp(request->data, response->data, len)) {
729 dev_err(&gb->connection->bundle->dev,
730 "Loopback Data doesn't match operation id %d\n",
731 operation->id);
732 retval = -EREMOTEIO;
733 } else {
734 gb->apbridge_latency_ts =
735 (u32)__le32_to_cpu(response->reserved0);
736 gb->gpbridge_latency_ts =
737 (u32)__le32_to_cpu(response->reserved1);
738 }
739
740 return retval;
741}
742
743static int gb_loopback_async_transfer(struct gb_loopback *gb, u32 len)
744{
745 struct gb_loopback_transfer_request *request;
746 int retval, response_len;
747
748 request = kmalloc(len + sizeof(*request), GFP_KERNEL);
749 if (!request)
750 return -ENOMEM;
751
752 memset(request->data, 0x5A, len);
753
754 request->len = cpu_to_le32(len);
755 response_len = sizeof(struct gb_loopback_transfer_response);
756 retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_TRANSFER,
757 request, len + sizeof(*request),
758 len + response_len,
759 gb_loopback_async_transfer_complete);
760 if (retval)
761 goto gb_error;
762
763gb_error:
764 kfree(request);
765 return retval;
766}
767
768static int gb_loopback_async_ping(struct gb_loopback *gb)
769{
770 return gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_PING,
771 NULL, 0, 0, NULL);
772}
773
449static int gb_loopback_request_recv(u8 type, struct gb_operation *operation) 774static int gb_loopback_request_recv(u8 type, struct gb_operation *operation)
450{ 775{
451 struct gb_connection *connection = operation->connection; 776 struct gb_connection *connection = operation->connection;
@@ -512,6 +837,10 @@ static void gb_loopback_reset_stats(struct gb_loopback *gb)
512 memcpy(&gb->gpbridge_firmware_latency, &reset, 837 memcpy(&gb->gpbridge_firmware_latency, &reset,
513 sizeof(struct gb_loopback_stats)); 838 sizeof(struct gb_loopback_stats));
514 839
840 /* Set values to report min/max timeout to user-space */
841 gb->timeout_min = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MIN);
842 gb->timeout_max = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MAX);
843
515 /* Reset aggregate stats */ 844 /* Reset aggregate stats */
516 memcpy(&gb->latency, &reset, sizeof(struct gb_loopback_stats)); 845 memcpy(&gb->latency, &reset, sizeof(struct gb_loopback_stats));
517 memcpy(&gb->throughput, &reset, sizeof(struct gb_loopback_stats)); 846 memcpy(&gb->throughput, &reset, sizeof(struct gb_loopback_stats));
@@ -599,23 +928,25 @@ static int gb_loopback_fn(void *data)
599 int ms_wait = 0; 928 int ms_wait = 0;
600 int type; 929 int type;
601 u32 size; 930 u32 size;
931 u32 send_count = 0;
602 struct gb_loopback *gb = data; 932 struct gb_loopback *gb = data;
603 933
604 while (1) { 934 while (1) {
605 if (!gb->type) 935 if (!gb->type)
606 wait_event_interruptible(gb->wq, gb->type || 936 wait_event_interruptible(gb->wq, gb->type ||
607 kthread_should_stop()); 937 kthread_should_stop());
938
608 if (kthread_should_stop()) 939 if (kthread_should_stop())
609 break; 940 break;
610 941
611 mutex_lock(&gb->mutex); 942 mutex_lock(&gb->mutex);
612
613 sysfs_notify(&gb->connection->bundle->dev.kobj, 943 sysfs_notify(&gb->connection->bundle->dev.kobj,
614 NULL, "iteration_count"); 944 NULL, "iteration_count");
615 945
616 /* Optionally terminate */ 946 /* Optionally terminate */
617 if (gb->iteration_count == gb->iteration_max) { 947 if (send_count == gb->iteration_max) {
618 gb->type = 0; 948 gb->type = 0;
949 send_count = 0;
619 mutex_unlock(&gb->mutex); 950 mutex_unlock(&gb->mutex);
620 continue; 951 continue;
621 } 952 }
@@ -625,19 +956,33 @@ static int gb_loopback_fn(void *data)
625 mutex_unlock(&gb->mutex); 956 mutex_unlock(&gb->mutex);
626 957
627 /* Else operations to perform */ 958 /* Else operations to perform */
628 if (type == GB_LOOPBACK_TYPE_PING) 959 if (gb->async) {
629 error = gb_loopback_ping(gb); 960 if (type == GB_LOOPBACK_TYPE_PING) {
630 else if (type == GB_LOOPBACK_TYPE_TRANSFER) 961 error = gb_loopback_async_ping(gb);
631 error = gb_loopback_transfer(gb, size); 962 gb_loopback_calculate_stats(gb);
632 else if (type == GB_LOOPBACK_TYPE_SINK) 963 } else if (type == GB_LOOPBACK_TYPE_TRANSFER) {
633 error = gb_loopback_sink(gb, size); 964 error = gb_loopback_async_transfer(gb, size);
634 965 } else if (type == GB_LOOPBACK_TYPE_SINK) {
635 if (error) 966 error = gb_loopback_async_sink(gb, size);
636 gb->error++; 967 }
637 968
638 gb_loopback_calculate_stats(gb); 969 if (error)
639 gb->iteration_count++; 970 gb->error++;
640 971 } else {
972 /* We are effectively single threaded here */
973 if (type == GB_LOOPBACK_TYPE_PING)
974 error = gb_loopback_sync_ping(gb);
975 else if (type == GB_LOOPBACK_TYPE_TRANSFER)
976 error = gb_loopback_sync_transfer(gb, size);
977 else if (type == GB_LOOPBACK_TYPE_SINK)
978 error = gb_loopback_sync_sink(gb, size);
979
980 if (error)
981 gb->error++;
982 gb->iteration_count++;
983 gb_loopback_calculate_stats(gb);
984 }
985 send_count++;
641 if (ms_wait) 986 if (ms_wait)
642 msleep(ms_wait); 987 msleep(ms_wait);
643 } 988 }
@@ -742,6 +1087,10 @@ static int gb_loopback_connection_init(struct gb_connection *connection)
742 init_waitqueue_head(&gb->wq); 1087 init_waitqueue_head(&gb->wq);
743 gb_loopback_reset_stats(gb); 1088 gb_loopback_reset_stats(gb);
744 1089
1090 /* Reported values to user-space for min/max timeouts */
1091 gb->timeout_min = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MIN);
1092 gb->timeout_max = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MAX);
1093
745 if (!gb_dev.count) { 1094 if (!gb_dev.count) {
746 /* Calculate maximum payload */ 1095 /* Calculate maximum payload */
747 gb_dev.size_max = gb_operation_get_payload_size_max(connection); 1096 gb_dev.size_max = gb_operation_get_payload_size_max(connection);
@@ -847,6 +1196,7 @@ static int loopback_init(void)
847 int retval; 1196 int retval;
848 1197
849 INIT_LIST_HEAD(&gb_dev.list); 1198 INIT_LIST_HEAD(&gb_dev.list);
1199 INIT_LIST_HEAD(&gb_dev.list_op_async);
850 spin_lock_init(&gb_dev.lock); 1200 spin_lock_init(&gb_dev.lock);
851 gb_dev.root = debugfs_create_dir("gb_loopback", NULL); 1201 gb_dev.root = debugfs_create_dir("gb_loopback", NULL);
852 1202