author     Mika Westerberg <mika.westerberg@linux.intel.com>  2017-06-06 08:25:10 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2017-06-09 05:42:43 -0400
commit     d7f781bfdbf4eb7c5706c9974b8bf6d3c82e69c1 (patch)
tree       8c54daf319dfa19c3b07e382b1d027adc6b67ee6 /drivers/thunderbolt
parent     81a54b5e1986d02da33c59133556ce9fe2032049 (diff)
thunderbolt: Rework control channel to be more reliable
If a request times out, the response might arrive right after the request has failed. This response is pushed to the kfifo and the next request will read it instead. Since it most likely will not pass our validation checks in parse_header(), the next request will fail as well, and the response to that request will be pushed to the kfifo, ad infinitum. We end up in a situation where all requests fail and no devices can be added anymore until the driver is unloaded and reloaded again.

To overcome this, rework the control channel so that we have a queue of outstanding requests. Each request is handled in turn and the response is validated against what is expected. Unexpected packets (for example, responses to requests that have already timed out) are dropped. This model is copied from the Greybus implementation with small changes here and there to make it cope with Thunderbolt control packets.

In addition, the configuration packets support a sequence number which the switch is supposed to copy from the request to the response. We use this to drop responses that have already timed out. Taking advantage of the sequence number, we automatically retry the configuration read/write 4 times before giving up.

Also, a timeout is not a programming error, so there is no need to trigger a scary backtrace (WARN); instead we just log a warning. After all, Thunderbolt devices are hot-pluggable by definition, which means the user can unplug a device at any time and that is totally acceptable.

With this change there is no need to take the global domain lock when sending configuration packets anymore. This is useful when we add support for cross-domain (XDomain) communication later on.

Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
Reviewed-by: Yehezkel Bernat <yehezkel.bernat@intel.com>
Reviewed-by: Michael Jamet <michael.jamet@intel.com>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Andreas Noever <andreas.noever@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
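To see the new model in caller terms, here is a minimal sketch of the synchronous request flow. It is essentially a condensed restatement of the tb_cfg_read_raw() path added below; example_read() and its parameters are hypothetical names used only for illustration, and the sketch assumes the declarations introduced in ctl.c/ctl.h (struct tb_cfg_request, tb_cfg_match(), tb_cfg_copy(), TB_CTL_RETRIES).

static struct tb_cfg_result example_read(struct tb_ctl *ctl,
					 struct cfg_read_pkg *request,
					 struct cfg_write_pkg *reply,
					 size_t response_size,
					 int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	int retries = 0;

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}

		/*
		 * The switch copies addr.seq into its reply, so stale
		 * responses from earlier (timed out) attempts are dropped.
		 */
		request->addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = request;
		req->request_size = sizeof(*request);
		req->request_type = TB_CFG_PKG_READ;
		req->response = reply;
		req->response_size = response_size;
		req->response_type = TB_CFG_PKG_READ;

		/* Queue the request, send it and wait; canceled on timeout */
		res = tb_cfg_request_sync(ctl, req, timeout_msec);
		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit before retrying with the next sequence number */
		usleep_range(10, 100);
	}

	return res;
}

Because addr.seq is bumped on every retry, a late reply to an earlier attempt no longer matches in tb_cfg_match() and is simply dropped by the RX path.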
Diffstat (limited to 'drivers/thunderbolt')
-rw-r--r--  drivers/thunderbolt/ctl.c | 477
-rw-r--r--  drivers/thunderbolt/ctl.h |  65
-rw-r--r--  drivers/thunderbolt/tb.h  |   2
3 files changed, 473 insertions, 71 deletions
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 5417ed244edc..27c30ff79a84 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -5,22 +5,17 @@
5 */ 5 */
6 6
7#include <linux/crc32.h> 7#include <linux/crc32.h>
8#include <linux/delay.h>
8#include <linux/slab.h> 9#include <linux/slab.h>
9#include <linux/pci.h> 10#include <linux/pci.h>
10#include <linux/dmapool.h> 11#include <linux/dmapool.h>
11#include <linux/workqueue.h> 12#include <linux/workqueue.h>
12#include <linux/kfifo.h>
13 13
14#include "ctl.h" 14#include "ctl.h"
15 15
16 16
17struct ctl_pkg { 17#define TB_CTL_RX_PKG_COUNT 10
18 struct tb_ctl *ctl; 18#define TB_CTL_RETRIES 4
19 void *buffer;
20 struct ring_frame frame;
21};
22
23#define TB_CTL_RX_PKG_COUNT 10
24 19
25/** 20/**
26 * struct tb_cfg - thunderbolt control channel 21 * struct tb_cfg - thunderbolt control channel
@@ -32,8 +27,9 @@ struct tb_ctl {
32 27
33 struct dma_pool *frame_pool; 28 struct dma_pool *frame_pool;
34 struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT]; 29 struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
35 DECLARE_KFIFO(response_fifo, struct ctl_pkg*, 16); 30 struct mutex request_queue_lock;
36 struct completion response_ready; 31 struct list_head request_queue;
32 bool running;
37 33
38 event_cb callback; 34 event_cb callback;
39 void *callback_data; 35 void *callback_data;
@@ -55,10 +51,121 @@ struct tb_ctl {
55#define tb_ctl_dbg(ctl, format, arg...) \ 51#define tb_ctl_dbg(ctl, format, arg...) \
56 dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg) 52 dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)
57 53
54static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
55/* Serializes access to request kref_get/put */
56static DEFINE_MUTEX(tb_cfg_request_lock);
57
58/**
59 * tb_cfg_request_alloc() - Allocates a new config request
60 *
61 * This is a refcounted object so when you are done with it, call
62 * tb_cfg_request_put() to release it.
63 */
64struct tb_cfg_request *tb_cfg_request_alloc(void)
65{
66 struct tb_cfg_request *req;
67
68 req = kzalloc(sizeof(*req), GFP_KERNEL);
69 if (!req)
70 return NULL;
71
72 kref_init(&req->kref);
73
74 return req;
75}
76
77/**
78 * tb_cfg_request_get() - Increase refcount of a request
79 * @req: Request whose refcount is increased
80 */
81void tb_cfg_request_get(struct tb_cfg_request *req)
82{
83 mutex_lock(&tb_cfg_request_lock);
84 kref_get(&req->kref);
85 mutex_unlock(&tb_cfg_request_lock);
86}
87
88static void tb_cfg_request_destroy(struct kref *kref)
89{
90 struct tb_cfg_request *req = container_of(kref, typeof(*req), kref);
91
92 kfree(req);
93}
94
95/**
96 * tb_cfg_request_put() - Decrease refcount and possibly release the request
97 * @req: Request whose refcount is decreased
98 *
99 * Call this function when you are done with the request. When refcount
100 * goes to %0 the object is released.
101 */
102void tb_cfg_request_put(struct tb_cfg_request *req)
103{
104 mutex_lock(&tb_cfg_request_lock);
105 kref_put(&req->kref, tb_cfg_request_destroy);
106 mutex_unlock(&tb_cfg_request_lock);
107}
108
109static int tb_cfg_request_enqueue(struct tb_ctl *ctl,
110 struct tb_cfg_request *req)
111{
112 WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags));
113 WARN_ON(req->ctl);
114
115 mutex_lock(&ctl->request_queue_lock);
116 if (!ctl->running) {
117 mutex_unlock(&ctl->request_queue_lock);
118 return -ENOTCONN;
119 }
120 req->ctl = ctl;
121 list_add_tail(&req->list, &ctl->request_queue);
122 set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
123 mutex_unlock(&ctl->request_queue_lock);
124 return 0;
125}
126
127static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
128{
129 struct tb_ctl *ctl = req->ctl;
130
131 mutex_lock(&ctl->request_queue_lock);
132 list_del(&req->list);
133 clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
134 if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
135 wake_up(&tb_cfg_request_cancel_queue);
136 mutex_unlock(&ctl->request_queue_lock);
137}
138
139static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
140{
141 return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
142}
143
144static struct tb_cfg_request *
145tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
146{
147 struct tb_cfg_request *req;
148 bool found = false;
149
150 mutex_lock(&pkg->ctl->request_queue_lock);
151 list_for_each_entry(req, &pkg->ctl->request_queue, list) {
152 tb_cfg_request_get(req);
153 if (req->match(req, pkg)) {
154 found = true;
155 break;
156 }
157 tb_cfg_request_put(req);
158 }
159 mutex_unlock(&pkg->ctl->request_queue_lock);
160
161 return found ? req : NULL;
162}
163
58/* utility functions */ 164/* utility functions */
59 165
60static int check_header(struct ctl_pkg *pkg, u32 len, enum tb_cfg_pkg_type type, 166
61 u64 route) 167static int check_header(const struct ctl_pkg *pkg, u32 len,
168 enum tb_cfg_pkg_type type, u64 route)
62{ 169{
63 struct tb_cfg_header *header = pkg->buffer; 170 struct tb_cfg_header *header = pkg->buffer;
64 171
@@ -100,8 +207,6 @@ static int check_config_address(struct tb_cfg_address addr,
100 if (WARN(length != addr.length, "wrong space (expected %x, got %x\n)", 207 if (WARN(length != addr.length, "wrong space (expected %x, got %x\n)",
101 length, addr.length)) 208 length, addr.length))
102 return -EIO; 209 return -EIO;
103 if (WARN(addr.seq, "addr.seq is %#x\n", addr.seq))
104 return -EIO;
105 /* 210 /*
106 * We cannot check addr->port as it is set to the upstream port of the 211 * We cannot check addr->port as it is set to the upstream port of the
107 * sender. 212 * sender.
@@ -109,7 +214,7 @@ static int check_config_address(struct tb_cfg_address addr,
109 return 0; 214 return 0;
110} 215}
111 216
112static struct tb_cfg_result decode_error(struct ctl_pkg *response) 217static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
113{ 218{
114 struct cfg_error_pkg *pkg = response->buffer; 219 struct cfg_error_pkg *pkg = response->buffer;
115 struct tb_cfg_result res = { 0 }; 220 struct tb_cfg_result res = { 0 };
@@ -130,7 +235,7 @@ static struct tb_cfg_result decode_error(struct ctl_pkg *response)
130 235
131} 236}
132 237
133static struct tb_cfg_result parse_header(struct ctl_pkg *pkg, u32 len, 238static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
134 enum tb_cfg_pkg_type type, u64 route) 239 enum tb_cfg_pkg_type type, u64 route)
135{ 240{
136 struct tb_cfg_header *header = pkg->buffer; 241 struct tb_cfg_header *header = pkg->buffer;
@@ -198,7 +303,7 @@ static void be32_to_cpu_array(u32 *dst, __be32 *src, size_t len)
198 dst[i] = be32_to_cpu(src[i]); 303 dst[i] = be32_to_cpu(src[i]);
199} 304}
200 305
201static __be32 tb_crc(void *data, size_t len) 306static __be32 tb_crc(const void *data, size_t len)
202{ 307{
203 return cpu_to_be32(~__crc32c_le(~0, data, len)); 308 return cpu_to_be32(~__crc32c_le(~0, data, len));
204} 309}
@@ -315,6 +420,7 @@ static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
315 bool canceled) 420 bool canceled)
316{ 421{
317 struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame); 422 struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
423 struct tb_cfg_request *req;
318 __be32 crc32; 424 __be32 crc32;
319 425
320 if (canceled) 426 if (canceled)
@@ -361,48 +467,135 @@ static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
361 goto rx; 467 goto rx;
362 468
363 default: 469 default:
364 tb_ctl_dbg(pkg->ctl, "RX: unknown package %#x, dropping\n", 470 break;
365 frame->eof);
366 goto rx;
367 } 471 }
368 472
369 if (!kfifo_put(&pkg->ctl->response_fifo, pkg)) { 473 /*
370 tb_ctl_err(pkg->ctl, "RX: fifo is full\n"); 474 * The received packet will be processed only if there is an
371 goto rx; 475 * active request and that the packet is what is expected. This
476 * prevents packets such as replies coming after timeout has
477 * triggered from messing with the active requests.
478 */
479 req = tb_cfg_request_find(pkg->ctl, pkg);
480 if (req) {
481 if (req->copy(req, pkg))
482 schedule_work(&req->work);
483 tb_cfg_request_put(req);
372 } 484 }
373 complete(&pkg->ctl->response_ready); 485
374 return;
375rx: 486rx:
376 tb_ctl_rx_submit(pkg); 487 tb_ctl_rx_submit(pkg);
377} 488}
378 489
490static void tb_cfg_request_work(struct work_struct *work)
491{
492 struct tb_cfg_request *req = container_of(work, typeof(*req), work);
493
494 if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
495 req->callback(req->callback_data);
496
497 tb_cfg_request_dequeue(req);
498 tb_cfg_request_put(req);
499}
500
379/** 501/**
380 * tb_ctl_rx() - receive a packet from the control channel 502 * tb_cfg_request() - Start a control request without waiting for it to complete
503 * @ctl: Control channel to use
504 * @req: Request to start
505 * @callback: Callback called when the request is completed
506 * @callback_data: Data to be passed to @callback
507 *
508 * This queues @req on the given control channel without waiting for it
509 * to complete. When the request completes @callback is called.
381 */ 510 */
382static struct tb_cfg_result tb_ctl_rx(struct tb_ctl *ctl, void *buffer, 511int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
383 size_t length, int timeout_msec, 512 void (*callback)(void *), void *callback_data)
384 u64 route, enum tb_cfg_pkg_type type)
385{ 513{
386 struct tb_cfg_result res; 514 int ret;
387 struct ctl_pkg *pkg;
388 515
389 if (!wait_for_completion_timeout(&ctl->response_ready, 516 req->flags = 0;
390 msecs_to_jiffies(timeout_msec))) { 517 req->callback = callback;
391 tb_ctl_WARN(ctl, "RX: timeout\n"); 518 req->callback_data = callback_data;
392 return (struct tb_cfg_result) { .err = -ETIMEDOUT }; 519 INIT_WORK(&req->work, tb_cfg_request_work);
393 } 520 INIT_LIST_HEAD(&req->list);
394 if (!kfifo_get(&ctl->response_fifo, &pkg)) {
395 tb_ctl_WARN(ctl, "empty kfifo\n");
396 return (struct tb_cfg_result) { .err = -EIO };
397 }
398 521
399 res = parse_header(pkg, length, type, route); 522 tb_cfg_request_get(req);
400 if (!res.err) 523 ret = tb_cfg_request_enqueue(ctl, req);
401 memcpy(buffer, pkg->buffer, length); 524 if (ret)
402 tb_ctl_rx_submit(pkg); 525 goto err_put;
403 return res; 526
527 ret = tb_ctl_tx(ctl, req->request, req->request_size,
528 req->request_type);
529 if (ret)
530 goto err_dequeue;
531
532 if (!req->response)
533 schedule_work(&req->work);
534
535 return 0;
536
537err_dequeue:
538 tb_cfg_request_dequeue(req);
539err_put:
540 tb_cfg_request_put(req);
541
542 return ret;
543}
544
545/**
546 * tb_cfg_request_cancel() - Cancel a control request
547 * @req: Request to cancel
548 * @err: Error to assign to the request
549 *
550 * This function can be used to cancel an ongoing request. It will wait
551 * until the request is not active anymore.
552 */
553void tb_cfg_request_cancel(struct tb_cfg_request *req, int err)
554{
555 set_bit(TB_CFG_REQUEST_CANCELED, &req->flags);
556 schedule_work(&req->work);
557 wait_event(tb_cfg_request_cancel_queue, !tb_cfg_request_is_active(req));
558 req->result.err = err;
404} 559}
405 560
561static void tb_cfg_request_complete(void *data)
562{
563 complete(data);
564}
565
566/**
567 * tb_cfg_request_sync() - Start control request and wait until it completes
568 * @ctl: Control channel to use
569 * @req: Request to start
570 * @timeout_msec: Timeout how long to wait @req to complete
571 *
572 * Starts a control request and waits until it completes. If timeout
573 * triggers the request is canceled before function returns. Note the
574 * caller needs to make sure only one message for given switch is active
575 * at a time.
576 */
577struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
578 struct tb_cfg_request *req,
579 int timeout_msec)
580{
581 unsigned long timeout = msecs_to_jiffies(timeout_msec);
582 struct tb_cfg_result res = { 0 };
583 DECLARE_COMPLETION_ONSTACK(done);
584 int ret;
585
586 ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
587 if (ret) {
588 res.err = ret;
589 return res;
590 }
591
592 if (!wait_for_completion_timeout(&done, timeout))
593 tb_cfg_request_cancel(req, -ETIMEDOUT);
594
595 flush_work(&req->work);
596
597 return req->result;
598}
406 599
407/* public interface, alloc/start/stop/free */ 600/* public interface, alloc/start/stop/free */
408 601
@@ -423,8 +616,8 @@ struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data)
423 ctl->callback = cb; 616 ctl->callback = cb;
424 ctl->callback_data = cb_data; 617 ctl->callback_data = cb_data;
425 618
426 init_completion(&ctl->response_ready); 619 mutex_init(&ctl->request_queue_lock);
427 INIT_KFIFO(ctl->response_fifo); 620 INIT_LIST_HEAD(&ctl->request_queue);
428 ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev, 621 ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
429 TB_FRAME_SIZE, 4, 0); 622 TB_FRAME_SIZE, 4, 0);
430 if (!ctl->frame_pool) 623 if (!ctl->frame_pool)
@@ -492,6 +685,8 @@ void tb_ctl_start(struct tb_ctl *ctl)
492 ring_start(ctl->rx); 685 ring_start(ctl->rx);
493 for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) 686 for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
494 tb_ctl_rx_submit(ctl->rx_packets[i]); 687 tb_ctl_rx_submit(ctl->rx_packets[i]);
688
689 ctl->running = true;
495} 690}
496 691
497/** 692/**
@@ -504,12 +699,16 @@ void tb_ctl_start(struct tb_ctl *ctl)
504 */ 699 */
505void tb_ctl_stop(struct tb_ctl *ctl) 700void tb_ctl_stop(struct tb_ctl *ctl)
506{ 701{
702 mutex_lock(&ctl->request_queue_lock);
703 ctl->running = false;
704 mutex_unlock(&ctl->request_queue_lock);
705
507 ring_stop(ctl->rx); 706 ring_stop(ctl->rx);
508 ring_stop(ctl->tx); 707 ring_stop(ctl->tx);
509 708
510 if (!kfifo_is_empty(&ctl->response_fifo)) 709 if (!list_empty(&ctl->request_queue))
511 tb_ctl_WARN(ctl, "dangling response in response_fifo\n"); 710 tb_ctl_WARN(ctl, "dangling request in request_queue\n");
512 kfifo_reset(&ctl->response_fifo); 711 INIT_LIST_HEAD(&ctl->request_queue);
513 tb_ctl_info(ctl, "control channel stopped\n"); 712 tb_ctl_info(ctl, "control channel stopped\n");
514} 713}
515 714
@@ -532,6 +731,49 @@ int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
532 return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR); 731 return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
533} 732}
534 733
734static bool tb_cfg_match(const struct tb_cfg_request *req,
735 const struct ctl_pkg *pkg)
736{
737 u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);
738
739 if (pkg->frame.eof == TB_CFG_PKG_ERROR)
740 return true;
741
742 if (pkg->frame.eof != req->response_type)
743 return false;
744 if (route != tb_cfg_get_route(req->request))
745 return false;
746 if (pkg->frame.size != req->response_size)
747 return false;
748
749 if (pkg->frame.eof == TB_CFG_PKG_READ ||
750 pkg->frame.eof == TB_CFG_PKG_WRITE) {
751 const struct cfg_read_pkg *req_hdr = req->request;
752 const struct cfg_read_pkg *res_hdr = pkg->buffer;
753
754 if (req_hdr->addr.seq != res_hdr->addr.seq)
755 return false;
756 }
757
758 return true;
759}
760
761static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
762{
763 struct tb_cfg_result res;
764
765 /* Now make sure it is in expected format */
766 res = parse_header(pkg, req->response_size, req->response_type,
767 tb_cfg_get_route(req->request));
768 if (!res.err)
769 memcpy(req->response, pkg->buffer, req->response_size);
770
771 req->result = res;
772
773 /* Always complete when first response is received */
774 return true;
775}
776
535/** 777/**
536 * tb_cfg_reset() - send a reset packet and wait for a response 778 * tb_cfg_reset() - send a reset packet and wait for a response
537 * 779 *
@@ -542,16 +784,31 @@ int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
542struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route, 784struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
543 int timeout_msec) 785 int timeout_msec)
544{ 786{
545 int err;
546 struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) }; 787 struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) };
788 struct tb_cfg_result res = { 0 };
547 struct tb_cfg_header reply; 789 struct tb_cfg_header reply;
790 struct tb_cfg_request *req;
791
792 req = tb_cfg_request_alloc();
793 if (!req) {
794 res.err = -ENOMEM;
795 return res;
796 }
797
798 req->match = tb_cfg_match;
799 req->copy = tb_cfg_copy;
800 req->request = &request;
801 req->request_size = sizeof(request);
802 req->request_type = TB_CFG_PKG_RESET;
803 req->response = &reply;
804 req->response_size = sizeof(reply);
805 req->response_type = TB_CFG_PKG_RESET;
806
807 res = tb_cfg_request_sync(ctl, req, timeout_msec);
548 808
549 err = tb_ctl_tx(ctl, &request, sizeof(request), TB_CFG_PKG_RESET); 809 tb_cfg_request_put(req);
550 if (err)
551 return (struct tb_cfg_result) { .err = err };
552 810
553 return tb_ctl_rx(ctl, &reply, sizeof(reply), timeout_msec, route, 811 return res;
554 TB_CFG_PKG_RESET);
555} 812}
556 813
557/** 814/**
@@ -574,13 +831,39 @@ struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
574 }, 831 },
575 }; 832 };
576 struct cfg_write_pkg reply; 833 struct cfg_write_pkg reply;
834 int retries = 0;
577 835
578 res.err = tb_ctl_tx(ctl, &request, sizeof(request), TB_CFG_PKG_READ); 836 while (retries < TB_CTL_RETRIES) {
579 if (res.err) 837 struct tb_cfg_request *req;
580 return res; 838
839 req = tb_cfg_request_alloc();
840 if (!req) {
841 res.err = -ENOMEM;
842 return res;
843 }
844
845 request.addr.seq = retries++;
846
847 req->match = tb_cfg_match;
848 req->copy = tb_cfg_copy;
849 req->request = &request;
850 req->request_size = sizeof(request);
851 req->request_type = TB_CFG_PKG_READ;
852 req->response = &reply;
853 req->response_size = 12 + 4 * length;
854 req->response_type = TB_CFG_PKG_READ;
855
856 res = tb_cfg_request_sync(ctl, req, timeout_msec);
857
858 tb_cfg_request_put(req);
859
860 if (res.err != -ETIMEDOUT)
861 break;
862
863 /* Wait a bit (arbitrary time) before sending a retry */
864 usleep_range(10, 100);
865 }
581 866
582 res = tb_ctl_rx(ctl, &reply, 12 + 4 * length, timeout_msec, route,
583 TB_CFG_PKG_READ);
584 if (res.err) 867 if (res.err)
585 return res; 868 return res;
586 869
@@ -611,15 +894,41 @@ struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
611 }, 894 },
612 }; 895 };
613 struct cfg_read_pkg reply; 896 struct cfg_read_pkg reply;
897 int retries = 0;
614 898
615 memcpy(&request.data, buffer, length * 4); 899 memcpy(&request.data, buffer, length * 4);
616 900
617 res.err = tb_ctl_tx(ctl, &request, 12 + 4 * length, TB_CFG_PKG_WRITE); 901 while (retries < TB_CTL_RETRIES) {
618 if (res.err) 902 struct tb_cfg_request *req;
619 return res; 903
904 req = tb_cfg_request_alloc();
905 if (!req) {
906 res.err = -ENOMEM;
907 return res;
908 }
909
910 request.addr.seq = retries++;
911
912 req->match = tb_cfg_match;
913 req->copy = tb_cfg_copy;
914 req->request = &request;
915 req->request_size = 12 + 4 * length;
916 req->request_type = TB_CFG_PKG_WRITE;
917 req->response = &reply;
918 req->response_size = sizeof(reply);
919 req->response_type = TB_CFG_PKG_WRITE;
920
921 res = tb_cfg_request_sync(ctl, req, timeout_msec);
922
923 tb_cfg_request_put(req);
924
925 if (res.err != -ETIMEDOUT)
926 break;
927
928 /* Wait a bit (arbitrary time) before sending a retry */
929 usleep_range(10, 100);
930 }
620 931
621 res = tb_ctl_rx(ctl, &reply, sizeof(reply), timeout_msec, route,
622 TB_CFG_PKG_WRITE);
623 if (res.err) 932 if (res.err)
624 return res; 933 return res;
625 934
@@ -633,11 +942,25 @@ int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
633{ 942{
634 struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port, 943 struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
635 space, offset, length, TB_CFG_DEFAULT_TIMEOUT); 944 space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
636 if (res.err == 1) { 945 switch (res.err) {
946 case 0:
947 /* Success */
948 break;
949
950 case 1:
951 /* Thunderbolt error, tb_error holds the actual number */
637 tb_cfg_print_error(ctl, &res); 952 tb_cfg_print_error(ctl, &res);
638 return -EIO; 953 return -EIO;
954
955 case -ETIMEDOUT:
956 tb_ctl_warn(ctl, "timeout reading config space %u from %#x\n",
957 space, offset);
958 break;
959
960 default:
961 WARN(1, "tb_cfg_read: %d\n", res.err);
962 break;
639 } 963 }
640 WARN(res.err, "tb_cfg_read: %d\n", res.err);
641 return res.err; 964 return res.err;
642} 965}
643 966
@@ -646,11 +969,25 @@ int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
646{ 969{
647 struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port, 970 struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
648 space, offset, length, TB_CFG_DEFAULT_TIMEOUT); 971 space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
649 if (res.err == 1) { 972 switch (res.err) {
973 case 0:
974 /* Success */
975 break;
976
977 case 1:
978 /* Thunderbolt error, tb_error holds the actual number */
650 tb_cfg_print_error(ctl, &res); 979 tb_cfg_print_error(ctl, &res);
651 return -EIO; 980 return -EIO;
981
982 case -ETIMEDOUT:
983 tb_ctl_warn(ctl, "timeout writing config space %u to %#x\n",
984 space, offset);
985 break;
986
987 default:
988 WARN(1, "tb_cfg_write: %d\n", res.err);
989 break;
652 } 990 }
653 WARN(res.err, "tb_cfg_write: %d\n", res.err);
654 return res.err; 991 return res.err;
655} 992}
656 993
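The same machinery can also be driven asynchronously with tb_cfg_request() and a completion callback, which the changelog notes as groundwork for the later XDomain support. A hedged sketch follows; the xd_* names are hypothetical, only the tb_cfg_request*() calls come from this patch, and cancellation handling is omitted for brevity.

/*
 * Completion handler: runs from the request work item once a matching
 * response has been copied (or right away if no response is expected).
 */
static void xd_request_done(void *data)
{
	struct tb_cfg_request *req = data;

	if (req->result.err)
		pr_warn("request failed: %d\n", req->result.err);

	/* Drop the reference the submitter handed to us */
	tb_cfg_request_put(req);
}

/* Submit @req without blocking; consumes the caller's reference to @req */
static int xd_submit(struct tb_ctl *ctl, struct tb_cfg_request *req)
{
	int ret;

	ret = tb_cfg_request(ctl, req, xd_request_done, req);
	if (ret)
		/* Never queued, so the callback will not run */
		tb_cfg_request_put(req);

	return ret;
}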
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
index 2b23e030a85b..36fd28b1c1c5 100644
--- a/drivers/thunderbolt/ctl.h
+++ b/drivers/thunderbolt/ctl.h
@@ -7,6 +7,8 @@
7#ifndef _TB_CFG 7#ifndef _TB_CFG
8#define _TB_CFG 8#define _TB_CFG
9 9
10#include <linux/kref.h>
11
10#include "nhi.h" 12#include "nhi.h"
11#include "tb_msgs.h" 13#include "tb_msgs.h"
12 14
@@ -39,6 +41,69 @@ struct tb_cfg_result {
39 enum tb_cfg_error tb_error; /* valid if err == 1 */ 41 enum tb_cfg_error tb_error; /* valid if err == 1 */
40}; 42};
41 43
44struct ctl_pkg {
45 struct tb_ctl *ctl;
46 void *buffer;
47 struct ring_frame frame;
48};
49
50/**
51 * struct tb_cfg_request - Control channel request
52 * @kref: Reference count
53 * @ctl: Pointer to the control channel structure. Only set when the
54 * request is queued.
55 * @request_size: Size of the request packet (in bytes)
56 * @request_type: Type of the request packet
57 * @response: Response is stored here
58 * @response_size: Maximum size of one response packet
59 * @response_type: Expected type of the response packet
60 * @npackets: Number of packets expected to be returned with this request
61 * @match: Function used to match the incoming packet
62 * @copy: Function used to copy the incoming packet to @response
63 * @callback: Callback called when the request is finished successfully
64 * @callback_data: Data to be passed to @callback
65 * @flags: Flags for the request
66 * @work: Work item used to complete the request
67 * @result: Result after the request has been completed
68 * @list: Requests are queued using this field
69 *
70 * An arbitrary request over the Thunderbolt control channel. For standard
71 * control channel messages, one should use tb_cfg_read/write() and
72 * friends if possible.
73 */
74struct tb_cfg_request {
75 struct kref kref;
76 struct tb_ctl *ctl;
77 const void *request;
78 size_t request_size;
79 enum tb_cfg_pkg_type request_type;
80 void *response;
81 size_t response_size;
82 enum tb_cfg_pkg_type response_type;
83 size_t npackets;
84 bool (*match)(const struct tb_cfg_request *req,
85 const struct ctl_pkg *pkg);
86 bool (*copy)(struct tb_cfg_request *req, const struct ctl_pkg *pkg);
87 void (*callback)(void *callback_data);
88 void *callback_data;
89 unsigned long flags;
90 struct work_struct work;
91 struct tb_cfg_result result;
92 struct list_head list;
93};
94
95#define TB_CFG_REQUEST_ACTIVE 0
96#define TB_CFG_REQUEST_CANCELED 1
97
98struct tb_cfg_request *tb_cfg_request_alloc(void);
99void tb_cfg_request_get(struct tb_cfg_request *req);
100void tb_cfg_request_put(struct tb_cfg_request *req);
101int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
102 void (*callback)(void *), void *callback_data);
103void tb_cfg_request_cancel(struct tb_cfg_request *req, int err);
104struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
105 struct tb_cfg_request *req, int timeout_msec);
106
42static inline u64 tb_cfg_get_route(const struct tb_cfg_header *header) 107static inline u64 tb_cfg_get_route(const struct tb_cfg_header *header)
43{ 108{
44 return (u64) header->route_hi << 32 | header->route_lo; 109 return (u64) header->route_hi << 32 | header->route_lo;
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 5bb9a5d60d2c..98a405384596 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -132,7 +132,7 @@ struct tb_cm_ops {
132/** 132/**
133 * struct tb - main thunderbolt bus structure 133 * struct tb - main thunderbolt bus structure
134 * @dev: Domain device 134 * @dev: Domain device
135 * @lock: Big lock. Must be held when accessing cfg or any struct 135 * @lock: Big lock. Must be held when accessing any struct
136 * tb_switch / struct tb_port. 136 * tb_switch / struct tb_port.
137 * @nhi: Pointer to the NHI structure 137 * @nhi: Pointer to the NHI structure
138 * @ctl: Control channel for this domain 138 * @ctl: Control channel for this domain