author	Andreas Noever <andreas.noever@gmail.com>	2014-06-03 16:03:58 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2014-06-19 17:04:52 -0400
commit	16603153666d22df544ae9f9b3764fd18da28eeb (patch)
tree	d28b3a560c98c6010196f856520f983efa34e4a8 /drivers
parent	7171511eaec5bf23fb06078f59784a3a0626b38f (diff)
thunderbolt: Add initial cactus ridge NHI support
Thunderbolt hotplug is supposed to be handled by the firmware. But Apple decided to implement thunderbolt at the operating system level. The firmware only initializes thunderbolt devices that are present at boot time. This driver enables hotplug of non-chained thunderbolt devices on Apple systems with a Cactus Ridge controller.

This first patch adds the Kconfig file as well as the parts of the driver which talk directly to the hardware (that is, PCI device setup, interrupt handling and RX/TX ring management).

Signed-off-by: Andreas Noever <andreas.noever@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/Kconfig			|   2
-rw-r--r--	drivers/Makefile		|   1
-rw-r--r--	drivers/thunderbolt/Kconfig	|  12
-rw-r--r--	drivers/thunderbolt/Makefile	|   3
-rw-r--r--	drivers/thunderbolt/nhi.c	| 630
-rw-r--r--	drivers/thunderbolt/nhi.h	| 114
-rw-r--r--	drivers/thunderbolt/nhi_regs.h	| 101
7 files changed, 863 insertions(+), 0 deletions(-)
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 0e87a34b6472..9b2dcc2ea663 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -176,4 +176,6 @@ source "drivers/powercap/Kconfig"
 
 source "drivers/mcb/Kconfig"
 
+source "drivers/thunderbolt/Kconfig"
+
 endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index f98b50d8251d..37b9ed4cd2d6 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -158,3 +158,4 @@ obj-$(CONFIG_NTB) += ntb/
 obj-$(CONFIG_FMC)		+= fmc/
 obj-$(CONFIG_POWERCAP)		+= powercap/
 obj-$(CONFIG_MCB)		+= mcb/
+obj-$(CONFIG_THUNDERBOLT)	+= thunderbolt/
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
new file mode 100644
index 000000000000..3a2552962d06
--- /dev/null
+++ b/drivers/thunderbolt/Kconfig
@@ -0,0 +1,12 @@
menuconfig THUNDERBOLT
	tristate "Thunderbolt support for Apple devices"
	default n
	help
	  Cactus Ridge Thunderbolt Controller driver.
	  This driver is required if you want to hotplug Thunderbolt devices on
	  Apple hardware.

	  Device chaining is currently not supported.

	  To compile this driver as a module, choose M here. The module will be
	  called thunderbolt.
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
new file mode 100644
index 000000000000..d473ab93d127
--- /dev/null
+++ b/drivers/thunderbolt/Makefile
@@ -0,0 +1,3 @@
obj-$(CONFIG_THUNDERBOLT) := thunderbolt.o
thunderbolt-objs := nhi.o

diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
new file mode 100644
index 000000000000..11070ff2cec7
--- /dev/null
+++ b/drivers/thunderbolt/nhi.c
@@ -0,0 +1,630 @@
/*
 * Thunderbolt Cactus Ridge driver - NHI driver
 *
 * The NHI (native host interface) is the pci device that allows us to send and
 * receive frames from the thunderbolt bus.
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dmi.h>

#include "nhi.h"
#include "nhi_regs.h"

#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")

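/*
 * The interrupt enable bits at REG_RING_INTERRUPT_BASE are laid out with one
 * bit per TX ring first (bits 0..hop_count-1), followed by one bit per RX
 * ring (bits hop_count..2*hop_count-1).
 */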
static int ring_interrupt_index(struct tb_ring *ring)
{
	int bit = ring->hop;
	if (!ring->is_tx)
		bit += ring->nhi->hop_count;
	return bit;
}

/**
 * ring_interrupt_active() - activate/deactivate interrupts for a single ring
 *
 * ring->nhi->lock must be held.
 */
static void ring_interrupt_active(struct tb_ring *ring, bool active)
{
	/* each 32-bit interrupt register covers 32 ring bits */
	int reg = REG_RING_INTERRUPT_BASE + ring_interrupt_index(ring) / 32 * 4;
	int bit = ring_interrupt_index(ring) & 31;
	int mask = 1 << bit;
	u32 old, new;
	old = ioread32(ring->nhi->iobase + reg);
	if (active)
		new = old | mask;
	else
		new = old & ~mask;

	dev_info(&ring->nhi->pdev->dev,
		 "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
		 active ? "enabling" : "disabling", reg, bit, old, new);

	if (new == old)
		dev_WARN(&ring->nhi->pdev->dev,
			 "interrupt for %s %d is already %s\n",
			 RING_TYPE(ring), ring->hop,
			 active ? "enabled" : "disabled");
	iowrite32(new, ring->nhi->iobase + reg);
}

/**
 * nhi_disable_interrupts() - disable interrupts for all rings
 *
 * Use only during init and shutdown.
 */
static void nhi_disable_interrupts(struct tb_nhi *nhi)
{
	int i = 0;
	/* disable interrupts */
	for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
		iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);

	/* clear interrupt status bits */
	for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
		ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
}

/* ring helper methods */

static void __iomem *ring_desc_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
	io += ring->hop * 16;
	return io;
}

static void __iomem *ring_options_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
	io += ring->hop * 32;
	return io;
}

static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite16(value, ring_desc_base(ring) + offset);
}

static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
}

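/* 64-bit registers are written as two 32-bit writes, low dword first */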
static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
	iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
}

static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_options_base(ring) + offset);
}

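/*
 * One descriptor slot is always left unused, so that head == tail means
 * empty and (head + 1) % size == tail means full.
 */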
static bool ring_full(struct tb_ring *ring)
{
	return ((ring->head + 1) % ring->size) == ring->tail;
}

static bool ring_empty(struct tb_ring *ring)
{
	return ring->head == ring->tail;
}

/**
 * ring_write_descriptors() - post frames from ring->queue to the controller
 *
 * ring->lock is held.
 */
static void ring_write_descriptors(struct tb_ring *ring)
{
	struct ring_frame *frame, *n;
	struct ring_desc *descriptor;
	list_for_each_entry_safe(frame, n, &ring->queue, list) {
		if (ring_full(ring))
			break;
		list_move_tail(&frame->list, &ring->in_flight);
		descriptor = &ring->descriptors[ring->head];
		descriptor->phys = frame->buffer_phy;
		descriptor->time = 0;
		descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
		if (ring->is_tx) {
			descriptor->length = frame->size;
			descriptor->eof = frame->eof;
			descriptor->sof = frame->sof;
		}
		ring->head = (ring->head + 1) % ring->size;
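		/* publish the new producer index in the hardware ring head field */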
		ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8);
	}
}

/**
 * ring_work() - progress completed frames
 *
 * If the ring is shutting down then all frames are marked as canceled and
 * their callbacks are invoked.
 *
 * Otherwise we collect all completed frames from the ring buffer, write new
 * frames to the ring buffer and invoke the callbacks for the completed frames.
 */
static void ring_work(struct work_struct *work)
{
	struct tb_ring *ring = container_of(work, typeof(*ring), work);
	struct ring_frame *frame;
	bool canceled = false;
	LIST_HEAD(done);
	mutex_lock(&ring->lock);

	if (!ring->running) {
		/* Move all frames to done and mark them as canceled. */
		list_splice_tail_init(&ring->in_flight, &done);
		list_splice_tail_init(&ring->queue, &done);
		canceled = true;
		goto invoke_callback;
	}

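	/* Reap descriptors in order; stop at the first one the NHI has not completed. */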
	while (!ring_empty(ring)) {
		if (!(ring->descriptors[ring->tail].flags
				& RING_DESC_COMPLETED))
			break;
		frame = list_first_entry(&ring->in_flight, typeof(*frame),
					 list);
		list_move_tail(&frame->list, &done);
		if (!ring->is_tx) {
			frame->size = ring->descriptors[ring->tail].length;
			frame->eof = ring->descriptors[ring->tail].eof;
			frame->sof = ring->descriptors[ring->tail].sof;
			frame->flags = ring->descriptors[ring->tail].flags;
			if (frame->sof != 0)
				dev_WARN(&ring->nhi->pdev->dev,
					 "%s %d got unexpected SOF: %#x\n",
					 RING_TYPE(ring), ring->hop,
					 frame->sof);
			/*
			 * known flags:
			 * raw not enabled, interrupt not set: 0x2=0010
			 * raw enabled: 0xa=1010
			 * raw not enabled: 0xb=1011
			 * partial frame (>MAX_FRAME_SIZE): 0xe=1110
			 */
			if (frame->flags != 0xa)
				dev_WARN(&ring->nhi->pdev->dev,
					 "%s %d got unexpected flags: %#x\n",
					 RING_TYPE(ring), ring->hop,
					 frame->flags);
		}
		ring->tail = (ring->tail + 1) % ring->size;
	}
	ring_write_descriptors(ring);

invoke_callback:
	mutex_unlock(&ring->lock); /* allow callbacks to schedule new work */
	while (!list_empty(&done)) {
		frame = list_first_entry(&done, typeof(*frame), list);
		/*
		 * The callback may re-enqueue or delete the frame.
		 * Do not hold on to it.
		 */
		list_del_init(&frame->list);
		frame->callback(ring, frame, canceled);
	}
}

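/* Common backend for ring_rx() and ring_tx() in nhi.h. */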
int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
{
	int ret = 0;
	mutex_lock(&ring->lock);
	if (ring->running) {
		list_add_tail(&frame->list, &ring->queue);
		ring_write_descriptors(ring);
	} else {
		ret = -ESHUTDOWN;
	}
	mutex_unlock(&ring->lock);
	return ret;
}

static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
				  bool transmit)
{
	struct tb_ring *ring = NULL;
	dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
		 transmit ? "TX" : "RX", hop, size);

	mutex_lock(&nhi->lock);
	if (hop >= nhi->hop_count) {
		dev_WARN(&nhi->pdev->dev, "invalid hop: %d\n", hop);
		goto err;
	}
	if (transmit && nhi->tx_rings[hop]) {
		dev_WARN(&nhi->pdev->dev, "TX hop %d already allocated\n", hop);
		goto err;
	} else if (!transmit && nhi->rx_rings[hop]) {
		dev_WARN(&nhi->pdev->dev, "RX hop %d already allocated\n", hop);
		goto err;
	}
	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto err;

	mutex_init(&ring->lock);
	INIT_LIST_HEAD(&ring->queue);
	INIT_LIST_HEAD(&ring->in_flight);
	INIT_WORK(&ring->work, ring_work);

	ring->nhi = nhi;
	ring->hop = hop;
	ring->is_tx = transmit;
	ring->size = size;
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;
	ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
			size * sizeof(*ring->descriptors),
			&ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
	if (!ring->descriptors)
		goto err;

	if (transmit)
		nhi->tx_rings[hop] = ring;
	else
		nhi->rx_rings[hop] = ring;
	mutex_unlock(&nhi->lock);
	return ring;

err:
	if (ring)
		mutex_destroy(&ring->lock);
	kfree(ring);
	mutex_unlock(&nhi->lock);
	return NULL;
}

struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size)
{
	return ring_alloc(nhi, hop, size, true);
}

struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size)
{
	return ring_alloc(nhi, hop, size, false);
}

/**
 * ring_start() - enable a ring
 *
 * Must not be invoked in parallel with ring_stop().
 */
void ring_start(struct tb_ring *ring)
{
	mutex_lock(&ring->nhi->lock);
	mutex_lock(&ring->lock);
	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
		goto err;
	}
	dev_info(&ring->nhi->pdev->dev, "starting %s %d\n",
		 RING_TYPE(ring), ring->hop);

	ring_iowrite64desc(ring, ring->descriptors_dma, 0);
	if (ring->is_tx) {
		ring_iowrite32desc(ring, ring->size, 12);
		ring_iowrite32options(ring, 0, 4); /* time related? */
		ring_iowrite32options(ring,
				      RING_FLAG_ENABLE | RING_FLAG_RAW, 0);
	} else {
		ring_iowrite32desc(ring,
				   (TB_FRAME_SIZE << 16) | ring->size, 12);
		ring_iowrite32options(ring, 0xffffffff, 4); /* SOF EOF mask */
		ring_iowrite32options(ring,
				      RING_FLAG_ENABLE | RING_FLAG_RAW, 0);
	}
	ring_interrupt_active(ring, true);
	ring->running = true;
err:
	mutex_unlock(&ring->lock);
	mutex_unlock(&ring->nhi->lock);
}

/**
 * ring_stop() - shutdown a ring
 *
 * Must not be invoked from a callback.
 *
 * This method will disable the ring. Further calls to ring_tx/ring_rx will
 * return -ESHUTDOWN until the ring is restarted with ring_start.
 *
 * All enqueued frames will be canceled and their callbacks will be executed
 * with canceled set to true (on the callback thread). This method returns
 * only after all callback invocations have finished.
 */
void ring_stop(struct tb_ring *ring)
{
	mutex_lock(&ring->nhi->lock);
	mutex_lock(&ring->lock);
	dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
		 RING_TYPE(ring), ring->hop);
	if (!ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
			 RING_TYPE(ring), ring->hop);
		goto err;
	}
	ring_interrupt_active(ring, false);

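	/* Reset hardware state: ring options, descriptor base, head and size. */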
	ring_iowrite32options(ring, 0, 0);
	ring_iowrite64desc(ring, 0, 0);
	ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8);
	ring_iowrite32desc(ring, 0, 12);
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;

err:
	mutex_unlock(&ring->lock);
	mutex_unlock(&ring->nhi->lock);

	/* schedule ring->work to invoke callbacks on all remaining frames */
	schedule_work(&ring->work);
	flush_work(&ring->work);
}

/**
 * ring_free() - free ring
 *
 * When this method returns all invocations of ring->callback will have
 * finished.
 *
 * Ring must be stopped.
 *
 * Must NOT be called from ring_frame->callback!
 */
void ring_free(struct tb_ring *ring)
{
	mutex_lock(&ring->nhi->lock);
	/*
	 * Dissociate the ring from the NHI. This also ensures that
	 * nhi_interrupt_work cannot reschedule ring->work.
	 */
	if (ring->is_tx)
		ring->nhi->tx_rings[ring->hop] = NULL;
	else
		ring->nhi->rx_rings[ring->hop] = NULL;

	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
			 RING_TYPE(ring), ring->hop);
	}

	dma_free_coherent(&ring->nhi->pdev->dev,
			  ring->size * sizeof(*ring->descriptors),
			  ring->descriptors, ring->descriptors_dma);

	ring->descriptors = NULL;
	ring->descriptors_dma = 0;

	dev_info(&ring->nhi->pdev->dev, "freeing %s %d\n",
		 RING_TYPE(ring), ring->hop);

	mutex_unlock(&ring->nhi->lock);
	/*
	 * ring->work can no longer be scheduled (it is scheduled only by
	 * nhi_interrupt_work and ring_stop). Wait for it to finish before
	 * freeing the ring.
	 */
	flush_work(&ring->work);
	mutex_destroy(&ring->lock);
	kfree(ring);
}

static void nhi_interrupt_work(struct work_struct *work)
{
	struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
	int value = 0; /* Suppress uninitialized usage warning. */
	int bit;
	int hop = -1;
	int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
	struct tb_ring *ring;

	mutex_lock(&nhi->lock);

	/*
	 * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
	 * (TX, RX, RX overflow). We iterate over the bits and read new
	 * dwords as required. The registers are cleared on read.
	 */
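	/* bit == type * hop_count + hop, with type 0: TX, 1: RX, 2: RX overflow */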
	for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
		if (bit % 32 == 0)
			value = ioread32(nhi->iobase
					 + REG_RING_NOTIFY_BASE
					 + 4 * (bit / 32));
		if (++hop == nhi->hop_count) {
			hop = 0;
			type++;
		}
		if ((value & (1 << (bit % 32))) == 0)
			continue;
		if (type == 2) {
			dev_warn(&nhi->pdev->dev,
				 "RX overflow for ring %d\n", hop);
			continue;
		}
		if (type == 0)
			ring = nhi->tx_rings[hop];
		else
			ring = nhi->rx_rings[hop];
		if (ring == NULL) {
			dev_warn(&nhi->pdev->dev,
				 "got interrupt for inactive %s ring %d\n",
				 type ? "RX" : "TX", hop);
			continue;
		}
		/* we do not check ring->running, this is done in ring->work */
		schedule_work(&ring->work);
	}
	mutex_unlock(&nhi->lock);
}

static irqreturn_t nhi_msi(int irq, void *data)
{
	struct tb_nhi *nhi = data;
	schedule_work(&nhi->interrupt_work);
	return IRQ_HANDLED;
}

static void nhi_shutdown(struct tb_nhi *nhi)
{
	int i;
	dev_info(&nhi->pdev->dev, "shutdown\n");

	for (i = 0; i < nhi->hop_count; i++) {
		if (nhi->tx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "TX ring %d is still active\n", i);
		if (nhi->rx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "RX ring %d is still active\n", i);
	}
	nhi_disable_interrupts(nhi);
	/*
	 * We have to release the irq before calling flush_work. Otherwise an
	 * already executing IRQ handler could call schedule_work again.
	 */
	devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
	flush_work(&nhi->interrupt_work);
	mutex_destroy(&nhi->lock);
}

static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct tb_nhi *nhi;
	int res;

	res = pcim_enable_device(pdev);
	if (res) {
		dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
		return res;
	}

	res = pci_enable_msi(pdev);
	if (res) {
		dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
		return res;
	}

	res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
	if (res) {
		dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
		return res;
	}

	nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
	if (!nhi)
		return -ENOMEM;

	nhi->pdev = pdev;
	/* cannot fail - table is allocated by pcim_iomap_regions */
	nhi->iobase = pcim_iomap_table(pdev)[0];
	nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
	if (nhi->hop_count != 12)
		dev_warn(&pdev->dev, "unexpected hop count: %d\n",
			 nhi->hop_count);
	INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);

	nhi->tx_rings = devm_kzalloc(&pdev->dev,
				     nhi->hop_count * sizeof(*nhi->tx_rings),
				     GFP_KERNEL);
	nhi->rx_rings = devm_kzalloc(&pdev->dev,
				     nhi->hop_count * sizeof(*nhi->rx_rings),
				     GFP_KERNEL);
	if (!nhi->tx_rings || !nhi->rx_rings)
		return -ENOMEM;

	nhi_disable_interrupts(nhi); /* In case someone left them on. */
	res = devm_request_irq(&pdev->dev, pdev->irq, nhi_msi,
			       IRQF_NO_SUSPEND, /* must work during _noirq */
			       "thunderbolt", nhi);
	if (res) {
		dev_err(&pdev->dev, "request_irq failed, aborting\n");
		return res;
	}

	mutex_init(&nhi->lock);

	pci_set_master(pdev);

	/* magic value - clock related? */
	iowrite32(3906250 / 10000, nhi->iobase + 0x38c00);

	pci_set_drvdata(pdev, nhi);

	return 0;
}

static void nhi_remove(struct pci_dev *pdev)
{
	struct tb_nhi *nhi = pci_get_drvdata(pdev);
	nhi_shutdown(nhi);
}

static struct pci_device_id nhi_ids[] = {
	/*
	 * We have to specify class, as the TB bridges use the same vendor and
	 * device (sub)ids.
	 */
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL, .device = 0x1547,
		.subvendor = 0x2222, .subdevice = 0x1111,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL, .device = 0x156c,
		.subvendor = 0x2222, .subdevice = 0x1111,
	},
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, nhi_ids);
MODULE_LICENSE("GPL");

static struct pci_driver nhi_driver = {
	.name = "thunderbolt",
	.id_table = nhi_ids,
	.probe = nhi_probe,
	.remove = nhi_remove,
};

static int __init nhi_init(void)
{
	if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
		return -ENOSYS;
	return pci_register_driver(&nhi_driver);
}

static void __exit nhi_unload(void)
{
	pci_unregister_driver(&nhi_driver);
}

module_init(nhi_init);
module_exit(nhi_unload);
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
new file mode 100644
index 000000000000..317242939b31
--- /dev/null
+++ b/drivers/thunderbolt/nhi.h
@@ -0,0 +1,114 @@
/*
 * Thunderbolt Cactus Ridge driver - NHI driver
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#ifndef DSL3510_H_
#define DSL3510_H_

#include <linux/mutex.h>
#include <linux/workqueue.h>

/**
 * struct tb_nhi - thunderbolt native host interface
 */
struct tb_nhi {
	struct mutex lock; /*
			    * Must be held during ring creation/destruction.
			    * Is acquired by interrupt_work when dispatching
			    * interrupts to individual rings.
			    */
	struct pci_dev *pdev;
	void __iomem *iobase;
	struct tb_ring **tx_rings;
	struct tb_ring **rx_rings;
	struct work_struct interrupt_work;
	u32 hop_count; /* Number of rings (end point hops) supported by NHI. */
};

/**
 * struct tb_ring - thunderbolt TX or RX ring associated with a NHI
 */
struct tb_ring {
	struct mutex lock; /* must be acquired after nhi->lock */
	struct tb_nhi *nhi;
	int size;
	int hop;
	int head; /* write next descriptor here */
	int tail; /* complete next descriptor here */
	struct ring_desc *descriptors;
	dma_addr_t descriptors_dma;
	struct list_head queue;
	struct list_head in_flight;
	struct work_struct work;
	bool is_tx:1; /* rx otherwise */
	bool running:1;
};

struct ring_frame;
typedef void (*ring_cb)(struct tb_ring*, struct ring_frame*, bool canceled);

/**
 * struct ring_frame - for use with ring_rx/ring_tx
 */
struct ring_frame {
	dma_addr_t buffer_phy;
	ring_cb callback;
	struct list_head list;
	u32 size:12;	/* TX: in, RX: out */
	u32 flags:12;	/* RX: out */
	u32 eof:4;	/* TX: in, RX: out */
	u32 sof:4;	/* TX: in, RX: out */
};

#define TB_FRAME_SIZE 0x100 /* minimum size for ring_rx */

struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size);
struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size);
void ring_start(struct tb_ring *ring);
void ring_stop(struct tb_ring *ring);
void ring_free(struct tb_ring *ring);

int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);

/**
 * ring_rx() - enqueue a frame on an RX ring
 *
 * frame->buffer_phy and frame->callback have to be set. The referenced
 * buffer must contain at least TB_FRAME_SIZE bytes.
 *
 * frame->callback will be invoked with frame->size, frame->flags, frame->eof
 * and frame->sof set once the frame has been received.
 *
 * If ring_stop is called after the packet has been enqueued frame->callback
 * will be called with canceled set to true.
 *
 * Return: Returns -ESHUTDOWN if ring_stop has been called. Zero otherwise.
 */
static inline int ring_rx(struct tb_ring *ring, struct ring_frame *frame)
{
	WARN_ON(ring->is_tx);
	return __ring_enqueue(ring, frame);
}

/**
 * ring_tx() - enqueue a frame on a TX ring
 *
 * frame->buffer_phy, frame->callback, frame->size, frame->eof and frame->sof
 * have to be set.
 *
 * frame->callback will be invoked once the frame has been transmitted.
 *
 * If ring_stop is called after the packet has been enqueued frame->callback
 * will be called with canceled set to true.
 *
 * Return: Returns -ESHUTDOWN if ring_stop has been called. Zero otherwise.
 */
static inline int ring_tx(struct tb_ring *ring, struct ring_frame *frame)
{
	WARN_ON(!ring->is_tx);
	return __ring_enqueue(ring, frame);
}
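
/*
 * Illustrative usage sketch (not part of this patch; the first in-tree
 * consumer of this API arrives later). Names prefixed with my_ are
 * hypothetical:
 *
 *	static void my_callback(struct tb_ring *ring,
 *				struct ring_frame *frame, bool canceled)
 *	{
 *		... frame was received/transmitted, or canceled by ring_stop ...
 *	}
 *
 *	struct tb_ring *ring = ring_alloc_rx(nhi, 0, 16);
 *	ring_start(ring);
 *	frame->buffer_phy = my_dma_addr;   DMA address of a buffer of at
 *					   least TB_FRAME_SIZE bytes
 *	frame->callback = my_callback;
 *	ring_rx(ring, frame);              my_callback fires on reception
 *	...
 *	ring_stop(ring);                   cancels all pending frames
 *	ring_free(ring);
 */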

#endif
diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h
new file mode 100644
index 000000000000..86b996c702a0
--- /dev/null
+++ b/drivers/thunderbolt/nhi_regs.h
@@ -0,0 +1,101 @@
/*
 * Thunderbolt Cactus Ridge driver - NHI registers
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#ifndef DSL3510_REGS_H_
#define DSL3510_REGS_H_

#include <linux/types.h>

enum ring_flags {
	RING_FLAG_ISOCH_ENABLE = 1 << 27, /* TX only? */
	RING_FLAG_E2E_FLOW_CONTROL = 1 << 28,
	RING_FLAG_PCI_NO_SNOOP = 1 << 29,
	RING_FLAG_RAW = 1 << 30, /* ignore EOF/SOF mask, include checksum */
	RING_FLAG_ENABLE = 1 << 31,
};

enum ring_desc_flags {
	RING_DESC_ISOCH = 0x1, /* TX only? */
	RING_DESC_COMPLETED = 0x2, /* set by NHI */
	RING_DESC_POSTED = 0x4, /* always set this */
	RING_DESC_INTERRUPT = 0x8, /* request an interrupt on completion */
};

/**
 * struct ring_desc - TX/RX ring entry
 *
 * For TX set length/eof/sof.
 * For RX length/eof/sof are set by the NHI.
 */
struct ring_desc {
	u64 phys;
	u32 length:12;
	u32 eof:4;
	u32 sof:4;
	enum ring_desc_flags flags:12;
	u32 time; /* write zero */
} __packed;

/* NHI registers in bar 0 */

/*
 * 16 bytes per entry, one entry for every hop (REG_HOP_COUNT)
 * 00: physical pointer to an array of struct ring_desc
 * 08: ring tail (set by NHI)
 * 10: ring head (index of first non-posted descriptor)
 * 12: descriptor count
 */
#define REG_TX_RING_BASE	0x00000

/*
 * 16 bytes per entry, one entry for every hop (REG_HOP_COUNT)
 * 00: physical pointer to an array of struct ring_desc
 * 08: ring head (index of first non-posted descriptor)
 * 10: ring tail (set by NHI)
 * 12: descriptor count
 * 14: max frame sizes (anything larger than 0x100 has no effect)
 */
#define REG_RX_RING_BASE	0x08000

/*
 * 32 bytes per entry, one entry for every hop (REG_HOP_COUNT)
 * 00: enum ring_flags
 * 04: isoch time stamp ?? (write 0)
 * ..: unknown
 */
#define REG_TX_OPTIONS_BASE	0x19800

/*
 * 32 bytes per entry, one entry for every hop (REG_HOP_COUNT)
 * 00: enum ring_flags
 *     If RING_FLAG_E2E_FLOW_CONTROL is set then bits 13-23 must be set to
 *     the corresponding TX hop id.
 * 04: EOF/SOF mask (ignored for RING_FLAG_RAW rings)
 * ..: unknown
 */
#define REG_RX_OPTIONS_BASE	0x29800

/*
 * three bitfields: tx, rx, rx overflow
 * Every bitfield contains one bit for every hop (REG_HOP_COUNT). Registers are
 * cleared on read. New interrupts are fired only after ALL registers have been
 * read (even those containing only disabled rings).
 */
#define REG_RING_NOTIFY_BASE	0x37800
#define RING_NOTIFY_REG_COUNT(nhi) ((31 + 3 * (nhi)->hop_count) / 32)

/*
 * two bitfields: tx, rx
 * Both bitfields contain one bit for every hop (REG_HOP_COUNT). To
 * enable/disable interrupts set/clear the corresponding bits.
 */
#define REG_RING_INTERRUPT_BASE	0x38200
#define RING_INTERRUPT_REG_COUNT(nhi) ((31 + 2 * (nhi)->hop_count) / 32)

/* The last 11 bits contain the number of hops supported by the NHI port. */
#define REG_HOP_COUNT		0x39640

#endif
101#endif