summary refs log tree commit diff stats
path: root/drivers/thunderbolt
diff options
context:
space:
mode:
authorMika Westerberg <mika.westerberg@linux.intel.com>2017-10-02 06:38:36 -0400
committerDavid S. Miller <davem@davemloft.net>2017-10-02 14:24:41 -0400
commit9fb1e654dcf781e71a0ea7c5bdfea3ba85d1d06d (patch)
tree3ea6a9dfd32a077b15c2a9accd974e8116dc25a4 /drivers/thunderbolt
parent8c6bba10fb9262d7b3a11e86a40621d5b37810a6 (diff)
thunderbolt: Add support for frame mode
When high-speed DMA paths are used to transfer arbitrary data over a Thunderbolt link, DMA rings should be in frame mode instead of raw mode. The latter is used by the control channel (ring 0). In frame mode each data frame can hold up to 4kB payload. This patch modifies the DMA ring code to allow configuring a ring to be in frame mode by passing a new flag (RING_FLAG_FRAME) to the ring when it is allocated. In addition there might be a need to enable the end-to-end (E2E) workaround for the ring to prevent losing Rx frames in certain situations. We add another flag (RING_FLAG_E2E) that can be used for this purpose. This code is based on the work done by Amir Levy and Michael Jamet. Signed-off-by: Michael Jamet <michael.jamet@intel.com> Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com> Reviewed-by: Yehezkel Bernat <yehezkel.bernat@intel.com> Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/thunderbolt')
-rw-r--r--drivers/thunderbolt/ctl.c3
-rw-r--r--drivers/thunderbolt/nhi.c76
-rw-r--r--drivers/thunderbolt/nhi.h10
-rw-r--r--drivers/thunderbolt/nhi_regs.h2
4 files changed, 61 insertions, 30 deletions
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 46e393c5fd1d..05400b77dcd7 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -618,7 +618,8 @@ struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data)
618 if (!ctl->tx) 618 if (!ctl->tx)
619 goto err; 619 goto err;
620 620
621 ctl->rx = ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND); 621 ctl->rx = ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0xffff,
622 0xffff);
622 if (!ctl->rx) 623 if (!ctl->rx)
623 goto err; 624 goto err;
624 625
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 8a7a3d0133f9..bebcad3d2c1f 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -22,6 +22,12 @@
22#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring") 22#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
23 23
24/* 24/*
25 * Used to enable end-to-end workaround for missing RX packets. Do not
26 * use this ring for anything else.
27 */
28#define RING_E2E_UNUSED_HOPID 2
29
30/*
25 * Minimal number of vectors when we use MSI-X. Two for control channel 31 * Minimal number of vectors when we use MSI-X. Two for control channel
26 * Rx/Tx and the rest four are for cross domain DMA paths. 32 * Rx/Tx and the rest four are for cross domain DMA paths.
27 */ 33 */
@@ -229,23 +235,6 @@ static void ring_work(struct work_struct *work)
229 frame->eof = ring->descriptors[ring->tail].eof; 235 frame->eof = ring->descriptors[ring->tail].eof;
230 frame->sof = ring->descriptors[ring->tail].sof; 236 frame->sof = ring->descriptors[ring->tail].sof;
231 frame->flags = ring->descriptors[ring->tail].flags; 237 frame->flags = ring->descriptors[ring->tail].flags;
232 if (frame->sof != 0)
233 dev_WARN(&ring->nhi->pdev->dev,
234 "%s %d got unexpected SOF: %#x\n",
235 RING_TYPE(ring), ring->hop,
236 frame->sof);
237 /*
238 * known flags:
239 * raw not enabled, interupt not set: 0x2=0010
240 * raw enabled: 0xa=1010
241 * raw not enabled: 0xb=1011
242 * partial frame (>MAX_FRAME_SIZE): 0xe=1110
243 */
244 if (frame->flags != 0xa)
245 dev_WARN(&ring->nhi->pdev->dev,
246 "%s %d got unexpected flags: %#x\n",
247 RING_TYPE(ring), ring->hop,
248 frame->flags);
249 } 238 }
250 ring->tail = (ring->tail + 1) % ring->size; 239 ring->tail = (ring->tail + 1) % ring->size;
251 } 240 }
@@ -321,12 +310,17 @@ static void ring_release_msix(struct tb_ring *ring)
321} 310}
322 311
323static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size, 312static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
324 bool transmit, unsigned int flags) 313 bool transmit, unsigned int flags,
314 u16 sof_mask, u16 eof_mask)
325{ 315{
326 struct tb_ring *ring = NULL; 316 struct tb_ring *ring = NULL;
327 dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n", 317 dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
328 transmit ? "TX" : "RX", hop, size); 318 transmit ? "TX" : "RX", hop, size);
329 319
320 /* Tx Ring 2 is reserved for E2E workaround */
321 if (transmit && hop == RING_E2E_UNUSED_HOPID)
322 return NULL;
323
330 mutex_lock(&nhi->lock); 324 mutex_lock(&nhi->lock);
331 if (hop >= nhi->hop_count) { 325 if (hop >= nhi->hop_count) {
332 dev_WARN(&nhi->pdev->dev, "invalid hop: %d\n", hop); 326 dev_WARN(&nhi->pdev->dev, "invalid hop: %d\n", hop);
@@ -353,6 +347,8 @@ static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
353 ring->is_tx = transmit; 347 ring->is_tx = transmit;
354 ring->size = size; 348 ring->size = size;
355 ring->flags = flags; 349 ring->flags = flags;
350 ring->sof_mask = sof_mask;
351 ring->eof_mask = eof_mask;
356 ring->head = 0; 352 ring->head = 0;
357 ring->tail = 0; 353 ring->tail = 0;
358 ring->running = false; 354 ring->running = false;
@@ -384,13 +380,13 @@ err:
384struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size, 380struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
385 unsigned int flags) 381 unsigned int flags)
386{ 382{
387 return ring_alloc(nhi, hop, size, true, flags); 383 return ring_alloc(nhi, hop, size, true, flags, 0, 0);
388} 384}
389 385
390struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size, 386struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
391 unsigned int flags) 387 unsigned int flags, u16 sof_mask, u16 eof_mask)
392{ 388{
393 return ring_alloc(nhi, hop, size, false, flags); 389 return ring_alloc(nhi, hop, size, false, flags, sof_mask, eof_mask);
394} 390}
395 391
396/** 392/**
@@ -400,6 +396,9 @@ struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
400 */ 396 */
401void ring_start(struct tb_ring *ring) 397void ring_start(struct tb_ring *ring)
402{ 398{
399 u16 frame_size;
400 u32 flags;
401
403 mutex_lock(&ring->nhi->lock); 402 mutex_lock(&ring->nhi->lock);
404 mutex_lock(&ring->lock); 403 mutex_lock(&ring->lock);
405 if (ring->nhi->going_away) 404 if (ring->nhi->going_away)
@@ -411,18 +410,39 @@ void ring_start(struct tb_ring *ring)
411 dev_info(&ring->nhi->pdev->dev, "starting %s %d\n", 410 dev_info(&ring->nhi->pdev->dev, "starting %s %d\n",
412 RING_TYPE(ring), ring->hop); 411 RING_TYPE(ring), ring->hop);
413 412
413 if (ring->flags & RING_FLAG_FRAME) {
414 /* Means 4096 */
415 frame_size = 0;
416 flags = RING_FLAG_ENABLE;
417 } else {
418 frame_size = TB_FRAME_SIZE;
419 flags = RING_FLAG_ENABLE | RING_FLAG_RAW;
420 }
421
422 if (ring->flags & RING_FLAG_E2E && !ring->is_tx) {
423 u32 hop;
424
425 /*
426 * In order not to lose Rx packets we enable end-to-end
427 * workaround which transfers Rx credits to an unused Tx
428 * HopID.
429 */
430 hop = RING_E2E_UNUSED_HOPID << REG_RX_OPTIONS_E2E_HOP_SHIFT;
431 hop &= REG_RX_OPTIONS_E2E_HOP_MASK;
432 flags |= hop | RING_FLAG_E2E_FLOW_CONTROL;
433 }
434
414 ring_iowrite64desc(ring, ring->descriptors_dma, 0); 435 ring_iowrite64desc(ring, ring->descriptors_dma, 0);
415 if (ring->is_tx) { 436 if (ring->is_tx) {
416 ring_iowrite32desc(ring, ring->size, 12); 437 ring_iowrite32desc(ring, ring->size, 12);
417 ring_iowrite32options(ring, 0, 4); /* time releated ? */ 438 ring_iowrite32options(ring, 0, 4); /* time releated ? */
418 ring_iowrite32options(ring, 439 ring_iowrite32options(ring, flags, 0);
419 RING_FLAG_ENABLE | RING_FLAG_RAW, 0);
420 } else { 440 } else {
421 ring_iowrite32desc(ring, 441 u32 sof_eof_mask = ring->sof_mask << 16 | ring->eof_mask;
422 (TB_FRAME_SIZE << 16) | ring->size, 12); 442
423 ring_iowrite32options(ring, 0xffffffff, 4); /* SOF EOF mask */ 443 ring_iowrite32desc(ring, (frame_size << 16) | ring->size, 12);
424 ring_iowrite32options(ring, 444 ring_iowrite32options(ring, sof_eof_mask, 4);
425 RING_FLAG_ENABLE | RING_FLAG_RAW, 0); 445 ring_iowrite32options(ring, flags, 0);
426 } 446 }
427 ring_interrupt_active(ring, true); 447 ring_interrupt_active(ring, true);
428 ring->running = true; 448 ring->running = true;
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index 0e05828983db..4503ddbeccb3 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -56,6 +56,8 @@ struct tb_nhi {
56 * @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise. 56 * @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise.
57 * @vector: MSI-X vector number the ring uses (only set if @irq is > 0) 57 * @vector: MSI-X vector number the ring uses (only set if @irq is > 0)
58 * @flags: Ring specific flags 58 * @flags: Ring specific flags
59 * @sof_mask: Bit mask used to detect start of frame PDF
60 * @eof_mask: Bit mask used to detect end of frame PDF
59 */ 61 */
60struct tb_ring { 62struct tb_ring {
61 struct mutex lock; 63 struct mutex lock;
@@ -74,10 +76,16 @@ struct tb_ring {
74 int irq; 76 int irq;
75 u8 vector; 77 u8 vector;
76 unsigned int flags; 78 unsigned int flags;
79 u16 sof_mask;
80 u16 eof_mask;
77}; 81};
78 82
79/* Leave ring interrupt enabled on suspend */ 83/* Leave ring interrupt enabled on suspend */
80#define RING_FLAG_NO_SUSPEND BIT(0) 84#define RING_FLAG_NO_SUSPEND BIT(0)
85/* Configure the ring to be in frame mode */
86#define RING_FLAG_FRAME BIT(1)
87/* Enable end-to-end flow control */
88#define RING_FLAG_E2E BIT(2)
81 89
82struct ring_frame; 90struct ring_frame;
83typedef void (*ring_cb)(struct tb_ring*, struct ring_frame*, bool canceled); 91typedef void (*ring_cb)(struct tb_ring*, struct ring_frame*, bool canceled);
@@ -100,7 +108,7 @@ struct ring_frame {
100struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size, 108struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
101 unsigned int flags); 109 unsigned int flags);
102struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size, 110struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
103 unsigned int flags); 111 unsigned int flags, u16 sof_mask, u16 eof_mask);
104void ring_start(struct tb_ring *ring); 112void ring_start(struct tb_ring *ring);
105void ring_stop(struct tb_ring *ring); 113void ring_stop(struct tb_ring *ring);
106void ring_free(struct tb_ring *ring); 114void ring_free(struct tb_ring *ring);
diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h
index 46eff69b19ad..491a4c0c18fc 100644
--- a/drivers/thunderbolt/nhi_regs.h
+++ b/drivers/thunderbolt/nhi_regs.h
@@ -77,6 +77,8 @@ struct ring_desc {
77 * ..: unknown 77 * ..: unknown
78 */ 78 */
79#define REG_RX_OPTIONS_BASE 0x29800 79#define REG_RX_OPTIONS_BASE 0x29800
80#define REG_RX_OPTIONS_E2E_HOP_MASK GENMASK(22, 12)
81#define REG_RX_OPTIONS_E2E_HOP_SHIFT 12
80 82
81/* 83/*
82 * three bitfields: tx, rx, rx overflow 84 * three bitfields: tx, rx, rx overflow