author		Linus Torvalds <torvalds@linux-foundation.org>	2012-07-23 22:10:54 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-07-23 22:10:54 -0400
commit		f0a08fcb5972167e55faa330c4a24fbaa3328b1f (patch)
tree		e24c42230888bd0e6422b2f81d7991da4373bb5d /arch/tile
parent		474183b188b3c5af45831c71151f819fc70479b8 (diff)
parent		f6d2ce00da145ae31ec22d21daca6ca5e22b3c84 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile
Pull arch/tile updates from Chris Metcalf:
"These changes provide support for PCIe root complex and USB host mode
for tilegx's on-chip I/Os.
In addition, this pull provides the required underpinning for the
on-chip networking support that was pulled into 3.5. The changes have
all been through LKML (with several rounds for PCIe RC) and on
linux-next."
* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile:
tile: updates to pci root complex from community feedback
bounce: allow use of bounce pool via config option
usb: add host support for the tilegx architecture
arch/tile: provide kernel support for the tilegx USB shim
tile pci: enable IOMMU to support DMA for legacy devices
arch/tile: enable ZONE_DMA for tilegx
tilegx pci: support I/O to arbitrarily-cached pages
tile: remove unused header
arch/tile: tilegx PCI root complex support
arch/tile: provide kernel support for the tilegx TRIO shim
arch/tile: break out the "csum a long" function to <asm/checksum.h>
arch/tile: provide kernel support for the tilegx mPIPE shim
arch/tile: common DMA code for the GXIO IORPC subsystem
arch/tile: support MMIO-based readb/writeb etc.
arch/tile: introduce GXIO IORPC framework for tilegx
Diffstat (limited to 'arch/tile')
65 files changed, 10771 insertions, 400 deletions
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index fe128816c448..932e4430f7f3 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -3,6 +3,8 @@ | |||
3 | 3 | ||
4 | config TILE | 4 | config TILE |
5 | def_bool y | 5 | def_bool y |
6 | select HAVE_DMA_ATTRS | ||
7 | select HAVE_DMA_API_DEBUG | ||
6 | select HAVE_KVM if !TILEGX | 8 | select HAVE_KVM if !TILEGX |
7 | select GENERIC_FIND_FIRST_BIT | 9 | select GENERIC_FIND_FIRST_BIT |
8 | select USE_GENERIC_SMP_HELPERS | 10 | select USE_GENERIC_SMP_HELPERS |
@@ -79,6 +81,9 @@ config ARCH_DMA_ADDR_T_64BIT | |||
79 | config NEED_DMA_MAP_STATE | 81 | config NEED_DMA_MAP_STATE |
80 | def_bool y | 82 | def_bool y |
81 | 83 | ||
84 | config ARCH_HAS_DMA_SET_COHERENT_MASK | ||
85 | bool | ||
86 | |||
82 | config LOCKDEP_SUPPORT | 87 | config LOCKDEP_SUPPORT |
83 | def_bool y | 88 | def_bool y |
84 | 89 | ||
@@ -212,6 +217,22 @@ config HIGHMEM | |||
212 | 217 | ||
213 | If unsure, say "true". | 218 | If unsure, say "true". |
214 | 219 | ||
220 | config ZONE_DMA | ||
221 | def_bool y | ||
222 | |||
223 | config IOMMU_HELPER | ||
224 | bool | ||
225 | |||
226 | config NEED_SG_DMA_LENGTH | ||
227 | bool | ||
228 | |||
229 | config SWIOTLB | ||
230 | bool | ||
231 | default TILEGX | ||
232 | select IOMMU_HELPER | ||
233 | select NEED_SG_DMA_LENGTH | ||
234 | select ARCH_HAS_DMA_SET_COHERENT_MASK | ||
235 | |||
215 | # We do not currently support disabling NUMA. | 236 | # We do not currently support disabling NUMA. |
216 | config NUMA | 237 | config NUMA |
217 | bool # "NUMA Memory Allocation and Scheduler Support" | 238 | bool # "NUMA Memory Allocation and Scheduler Support" |
@@ -345,6 +366,8 @@ config KERNEL_PL | |||
345 | kernel will be built to run at. Generally you should use | 366 | kernel will be built to run at. Generally you should use |
346 | the default value here. | 367 | the default value here. |
347 | 368 | ||
369 | source "arch/tile/gxio/Kconfig" | ||
370 | |||
348 | endmenu # Tilera-specific configuration | 371 | endmenu # Tilera-specific configuration |
349 | 372 | ||
350 | menu "Bus options" | 373 | menu "Bus options" |
@@ -354,6 +377,9 @@ config PCI | |||
354 | default y | 377 | default y |
355 | select PCI_DOMAINS | 378 | select PCI_DOMAINS |
356 | select GENERIC_PCI_IOMAP | 379 | select GENERIC_PCI_IOMAP |
380 | select TILE_GXIO_TRIO if TILEGX | ||
381 | select ARCH_SUPPORTS_MSI if TILEGX | ||
382 | select PCI_MSI if TILEGX | ||
357 | ---help--- | 383 | ---help--- |
358 | Enable PCI root complex support, so PCIe endpoint devices can | 384 | Enable PCI root complex support, so PCIe endpoint devices can |
359 | be attached to the Tile chip. Many, but not all, PCI devices | 385 | be attached to the Tile chip. Many, but not all, PCI devices |
@@ -370,6 +396,22 @@ config NO_IOPORT | |||
370 | 396 | ||
371 | source "drivers/pci/Kconfig" | 397 | source "drivers/pci/Kconfig" |
372 | 398 | ||
399 | config TILE_USB | ||
400 | tristate "Tilera USB host adapter support" | ||
401 | default y | ||
402 | depends on USB | ||
403 | depends on TILEGX | ||
404 | select TILE_GXIO_USB_HOST | ||
405 | ---help--- | ||
406 | Provides USB host adapter support for the built-in EHCI and OHCI | ||
407 | interfaces on TILE-Gx chips. | ||
408 | |||
409 | # USB OHCI needs the bounce pool since tilegx will often have more | ||
410 | # than 4GB of memory, but we don't currently use the IOTLB to present | ||
411 | # a 32-bit address to OHCI. So we need to use a bounce pool instead. | ||
412 | config NEED_BOUNCE_POOL | ||
413 | def_bool USB_OHCI_HCD | ||
414 | |||
373 | config HOTPLUG | 415 | config HOTPLUG |
374 | bool "Support for hot-pluggable devices" | 416 | bool "Support for hot-pluggable devices" |
375 | ---help--- | 417 | ---help--- |
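The NEED_BOUNCE_POOL comment above reduces to a simple constraint: OHCI descriptors carry 32-bit bus addresses, so a buffer whose physical address lies above 4GB cannot be handed to the controller directly and must first be copied into low (ZONE_DMA) memory. A minimal standalone sketch of that check; the helper name and types are illustrative, not from the patch:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Sketch only: can this buffer be handed to a 32-bit DMA engine such as
 * OHCI directly, or must it be bounced into low memory first? */
static bool ohci_can_dma_directly(uint64_t phys_addr, size_t len)
{
	/* OHCI descriptors hold 32-bit bus addresses. */
	return (phys_addr + len - 1) <= 0xffffffffULL;
}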
diff --git a/arch/tile/Makefile b/arch/tile/Makefile
index e20b0a0b64a1..55640cf92597 100644
--- a/arch/tile/Makefile
+++ b/arch/tile/Makefile
@@ -59,6 +59,8 @@ libs-y += $(LIBGCC_PATH) | |||
59 | # See arch/tile/Kbuild for content of core part of the kernel | 59 | # See arch/tile/Kbuild for content of core part of the kernel |
60 | core-y += arch/tile/ | 60 | core-y += arch/tile/ |
61 | 61 | ||
62 | core-$(CONFIG_TILE_GXIO) += arch/tile/gxio/ | ||
63 | |||
62 | ifdef TILERA_ROOT | 64 | ifdef TILERA_ROOT |
63 | INSTALL_PATH ?= $(TILERA_ROOT)/tile/boot | 65 | INSTALL_PATH ?= $(TILERA_ROOT)/tile/boot |
64 | endif | 66 | endif |
diff --git a/arch/tile/gxio/Kconfig b/arch/tile/gxio/Kconfig
new file mode 100644
index 000000000000..d221f8d6de8b
--- /dev/null
+++ b/arch/tile/gxio/Kconfig
@@ -0,0 +1,28 @@ | |||
1 | # Support direct access to TILE-Gx hardware from user space, via the | ||
2 | # gxio library, or from kernel space, via kernel IORPC support. | ||
3 | config TILE_GXIO | ||
4 | bool | ||
5 | depends on TILEGX | ||
6 | |||
7 | # Support direct access to the common I/O DMA facility within the | ||
8 | # TILE-Gx mPIPE and Trio hardware from kernel space. | ||
9 | config TILE_GXIO_DMA | ||
10 | bool | ||
11 | select TILE_GXIO | ||
12 | |||
13 | # Support direct access to the TILE-Gx mPIPE hardware from kernel space. | ||
14 | config TILE_GXIO_MPIPE | ||
15 | bool | ||
16 | select TILE_GXIO | ||
17 | select TILE_GXIO_DMA | ||
18 | |||
19 | # Support direct access to the TILE-Gx TRIO hardware from kernel space. | ||
20 | config TILE_GXIO_TRIO | ||
21 | bool | ||
22 | select TILE_GXIO | ||
23 | select TILE_GXIO_DMA | ||
24 | |||
25 | # Support direct access to the TILE-Gx USB hardware from kernel space. | ||
26 | config TILE_GXIO_USB_HOST | ||
27 | bool | ||
28 | select TILE_GXIO | ||
diff --git a/arch/tile/gxio/Makefile b/arch/tile/gxio/Makefile
new file mode 100644
index 000000000000..8684bcaa74ea
--- /dev/null
+++ b/arch/tile/gxio/Makefile
@@ -0,0 +1,9 @@ | |||
1 | # | ||
2 | # Makefile for the Tile-Gx device access support. | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_TILE_GXIO) += iorpc_globals.o kiorpc.o | ||
6 | obj-$(CONFIG_TILE_GXIO_DMA) += dma_queue.o | ||
7 | obj-$(CONFIG_TILE_GXIO_MPIPE) += mpipe.o iorpc_mpipe.o iorpc_mpipe_info.o | ||
8 | obj-$(CONFIG_TILE_GXIO_TRIO) += trio.o iorpc_trio.o | ||
9 | obj-$(CONFIG_TILE_GXIO_USB_HOST) += usb_host.o iorpc_usb_host.o | ||
diff --git a/arch/tile/gxio/dma_queue.c b/arch/tile/gxio/dma_queue.c
new file mode 100644
index 000000000000..baa60357f8ba
--- /dev/null
+++ b/arch/tile/gxio/dma_queue.c
@@ -0,0 +1,176 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/io.h> | ||
16 | #include <linux/atomic.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <gxio/dma_queue.h> | ||
19 | |||
20 | /* Wait for a memory read to complete. */ | ||
21 | #define wait_for_value(val) \ | ||
22 | __asm__ __volatile__("move %0, %0" :: "r"(val)) | ||
23 | |||
24 | /* The index is in the low 16. */ | ||
25 | #define DMA_QUEUE_INDEX_MASK ((1 << 16) - 1) | ||
26 | |||
27 | /* | ||
28 | * The hardware descriptor-ring type. | ||
29 | * This matches the types used by mpipe (MPIPE_EDMA_POST_REGION_VAL_t) | ||
30 | * and trio (TRIO_PUSH_DMA_REGION_VAL_t or TRIO_PULL_DMA_REGION_VAL_t). | ||
31 | * See those types for more documentation on the individual fields. | ||
32 | */ | ||
33 | typedef union { | ||
34 | struct { | ||
35 | #ifndef __BIG_ENDIAN__ | ||
36 | uint64_t ring_idx:16; | ||
37 | uint64_t count:16; | ||
38 | uint64_t gen:1; | ||
39 | uint64_t __reserved:31; | ||
40 | #else | ||
41 | uint64_t __reserved:31; | ||
42 | uint64_t gen:1; | ||
43 | uint64_t count:16; | ||
44 | uint64_t ring_idx:16; | ||
45 | #endif | ||
46 | }; | ||
47 | uint64_t word; | ||
48 | } __gxio_ring_t; | ||
49 | |||
50 | void __gxio_dma_queue_init(__gxio_dma_queue_t *dma_queue, | ||
51 | void *post_region_addr, unsigned int num_entries) | ||
52 | { | ||
53 | /* | ||
54 | * Limit 65536 entry rings to 65535 credits because we only have a | ||
55 | * 16 bit completion counter. | ||
56 | */ | ||
57 | int64_t credits = (num_entries < 65536) ? num_entries : 65535; | ||
58 | |||
59 | memset(dma_queue, 0, sizeof(*dma_queue)); | ||
60 | |||
61 | dma_queue->post_region_addr = post_region_addr; | ||
62 | dma_queue->hw_complete_count = 0; | ||
63 | dma_queue->credits_and_next_index = credits << DMA_QUEUE_CREDIT_SHIFT; | ||
64 | } | ||
65 | |||
66 | EXPORT_SYMBOL_GPL(__gxio_dma_queue_init); | ||
67 | |||
68 | void __gxio_dma_queue_update_credits(__gxio_dma_queue_t *dma_queue) | ||
69 | { | ||
70 | __gxio_ring_t val; | ||
71 | uint64_t count; | ||
72 | uint64_t delta; | ||
73 | uint64_t new_count; | ||
74 | |||
75 | /* | ||
76 | * Read the 64-bit completion count without touching the cache, so | ||
77 | * we later avoid having to evict any sharers of this cache line | ||
78 | * when we update it below. | ||
79 | */ | ||
80 | uint64_t orig_hw_complete_count = | ||
81 | cmpxchg(&dma_queue->hw_complete_count, | ||
82 | -1, -1); | ||
83 | |||
84 | /* Make sure the load completes before we access the hardware. */ | ||
85 | wait_for_value(orig_hw_complete_count); | ||
86 | |||
87 | /* Read the 16-bit count of how many packets it has completed. */ | ||
88 | val.word = __gxio_mmio_read(dma_queue->post_region_addr); | ||
89 | count = val.count; | ||
90 | |||
91 | /* | ||
92 | * Calculate the number of completions since we last updated the | ||
93 | * 64-bit counter. It's safe to ignore the high bits because the | ||
94 | * maximum credit value is 65535. | ||
95 | */ | ||
96 | delta = (count - orig_hw_complete_count) & 0xffff; | ||
97 | if (delta == 0) | ||
98 | return; | ||
99 | |||
100 | /* | ||
101 | * Try to write back the count, advanced by delta. If we race with | ||
102 | * another thread, this might fail, in which case we return | ||
103 | * immediately on the assumption that some credits are (or at least | ||
104 | * were) available. | ||
105 | */ | ||
106 | new_count = orig_hw_complete_count + delta; | ||
107 | if (cmpxchg(&dma_queue->hw_complete_count, | ||
108 | orig_hw_complete_count, | ||
109 | new_count) != orig_hw_complete_count) | ||
110 | return; | ||
111 | |||
112 | /* | ||
113 | * We succeeded in advancing the completion count; add back the | ||
114 | * corresponding number of egress credits. | ||
115 | */ | ||
116 | __insn_fetchadd(&dma_queue->credits_and_next_index, | ||
117 | (delta << DMA_QUEUE_CREDIT_SHIFT)); | ||
118 | } | ||
119 | |||
120 | EXPORT_SYMBOL_GPL(__gxio_dma_queue_update_credits); | ||
121 | |||
122 | /* | ||
123 | * A separate 'blocked' method for put() so that backtraces and | ||
124 | * profiles will clearly indicate that we're wasting time spinning on | ||
125 | * egress availability rather than actually posting commands. | ||
126 | */ | ||
127 | int64_t __gxio_dma_queue_wait_for_credits(__gxio_dma_queue_t *dma_queue, | ||
128 | int64_t modifier) | ||
129 | { | ||
130 | int backoff = 16; | ||
131 | int64_t old; | ||
132 | |||
133 | do { | ||
134 | int i; | ||
135 | /* Back off to avoid spamming memory networks. */ | ||
136 | for (i = backoff; i > 0; i--) | ||
137 | __insn_mfspr(SPR_PASS); | ||
138 | |||
139 | /* Check credits again. */ | ||
140 | __gxio_dma_queue_update_credits(dma_queue); | ||
141 | old = __insn_fetchaddgez(&dma_queue->credits_and_next_index, | ||
142 | modifier); | ||
143 | |||
144 | /* Calculate bounded exponential backoff for next iteration. */ | ||
145 | if (backoff < 256) | ||
146 | backoff *= 2; | ||
147 | } while (old + modifier < 0); | ||
148 | |||
149 | return old; | ||
150 | } | ||
151 | |||
152 | EXPORT_SYMBOL_GPL(__gxio_dma_queue_wait_for_credits); | ||
153 | |||
154 | int64_t __gxio_dma_queue_reserve_aux(__gxio_dma_queue_t *dma_queue, | ||
155 | unsigned int num, int wait) | ||
156 | { | ||
157 | return __gxio_dma_queue_reserve(dma_queue, num, wait != 0, true); | ||
158 | } | ||
159 | |||
160 | EXPORT_SYMBOL_GPL(__gxio_dma_queue_reserve_aux); | ||
161 | |||
162 | int __gxio_dma_queue_is_complete(__gxio_dma_queue_t *dma_queue, | ||
163 | int64_t completion_slot, int update) | ||
164 | { | ||
165 | if (update) { | ||
166 | if (ACCESS_ONCE(dma_queue->hw_complete_count) > | ||
167 | completion_slot) | ||
168 | return 1; | ||
169 | |||
170 | __gxio_dma_queue_update_credits(dma_queue); | ||
171 | } | ||
172 | |||
173 | return ACCESS_ONCE(dma_queue->hw_complete_count) > completion_slot; | ||
174 | } | ||
175 | |||
176 | EXPORT_SYMBOL_GPL(__gxio_dma_queue_is_complete); | ||
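The credit logic in dma_queue.c hinges on one piece of arithmetic: the shim exposes only a 16-bit completion count, so __gxio_dma_queue_update_credits() extends it to 64 bits by taking the difference modulo 2^16 and folding it into the software counter with cmpxchg(). A standalone userspace sketch of just that wraparound step, with illustrative counter values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sw_count = 0x1fffe;	/* 64-bit software completion count */
	uint64_t hw_count = 0x0003;	/* 16-bit count read from the shim */

	/* The delta is computed modulo 2^16, so a wrap from 0xfffe to
	 * 0x0003 still yields 5.  Ignoring the high bits is safe because
	 * at most 65535 commands (credits) can be outstanding at once. */
	uint64_t delta = (hw_count - sw_count) & 0xffff;
	uint64_t new_count = sw_count + delta;

	printf("delta=%llu new_count=%#llx\n",
	       (unsigned long long)delta, (unsigned long long)new_count);
	return 0;
}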
diff --git a/arch/tile/gxio/iorpc_globals.c b/arch/tile/gxio/iorpc_globals.c
new file mode 100644
index 000000000000..e178e90805a2
--- /dev/null
+++ b/arch/tile/gxio/iorpc_globals.c
@@ -0,0 +1,89 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* This file is machine-generated; DO NOT EDIT! */ | ||
16 | #include "gxio/iorpc_globals.h" | ||
17 | |||
18 | struct arm_pollfd_param { | ||
19 | union iorpc_pollfd pollfd; | ||
20 | }; | ||
21 | |||
22 | int __iorpc_arm_pollfd(int fd, int pollfd_cookie) | ||
23 | { | ||
24 | struct arm_pollfd_param temp; | ||
25 | struct arm_pollfd_param *params = &temp; | ||
26 | |||
27 | params->pollfd.kernel.cookie = pollfd_cookie; | ||
28 | |||
29 | return hv_dev_pwrite(fd, 0, (HV_VirtAddr) params, sizeof(*params), | ||
30 | IORPC_OP_ARM_POLLFD); | ||
31 | } | ||
32 | |||
33 | EXPORT_SYMBOL(__iorpc_arm_pollfd); | ||
34 | |||
35 | struct close_pollfd_param { | ||
36 | union iorpc_pollfd pollfd; | ||
37 | }; | ||
38 | |||
39 | int __iorpc_close_pollfd(int fd, int pollfd_cookie) | ||
40 | { | ||
41 | struct close_pollfd_param temp; | ||
42 | struct close_pollfd_param *params = &temp; | ||
43 | |||
44 | params->pollfd.kernel.cookie = pollfd_cookie; | ||
45 | |||
46 | return hv_dev_pwrite(fd, 0, (HV_VirtAddr) params, sizeof(*params), | ||
47 | IORPC_OP_CLOSE_POLLFD); | ||
48 | } | ||
49 | |||
50 | EXPORT_SYMBOL(__iorpc_close_pollfd); | ||
51 | |||
52 | struct get_mmio_base_param { | ||
53 | HV_PTE base; | ||
54 | }; | ||
55 | |||
56 | int __iorpc_get_mmio_base(int fd, HV_PTE *base) | ||
57 | { | ||
58 | int __result; | ||
59 | struct get_mmio_base_param temp; | ||
60 | struct get_mmio_base_param *params = &temp; | ||
61 | |||
62 | __result = | ||
63 | hv_dev_pread(fd, 0, (HV_VirtAddr) params, sizeof(*params), | ||
64 | IORPC_OP_GET_MMIO_BASE); | ||
65 | *base = params->base; | ||
66 | |||
67 | return __result; | ||
68 | } | ||
69 | |||
70 | EXPORT_SYMBOL(__iorpc_get_mmio_base); | ||
71 | |||
72 | struct check_mmio_offset_param { | ||
73 | unsigned long offset; | ||
74 | unsigned long size; | ||
75 | }; | ||
76 | |||
77 | int __iorpc_check_mmio_offset(int fd, unsigned long offset, unsigned long size) | ||
78 | { | ||
79 | struct check_mmio_offset_param temp; | ||
80 | struct check_mmio_offset_param *params = &temp; | ||
81 | |||
82 | params->offset = offset; | ||
83 | params->size = size; | ||
84 | |||
85 | return hv_dev_pwrite(fd, 0, (HV_VirtAddr) params, sizeof(*params), | ||
86 | IORPC_OP_CHECK_MMIO_OFFSET); | ||
87 | } | ||
88 | |||
89 | EXPORT_SYMBOL(__iorpc_check_mmio_offset); | ||
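Every wrapper in this file, and in the generated iorpc_*.c files that follow, uses the same marshalling pattern: copy the arguments into an on-stack parameter block, then hand the block to the hypervisor with hv_dev_pwrite() and an operation code, or fetch results with hv_dev_pread() and unpack them. A sketch of what a wrapper for a hypothetical operation would look like under the same headers; the struct layout and the GXIO_EXAMPLE_OP_SET_DEBUG opcode are invented for illustration:

struct set_debug_param {
	unsigned int level;
};

int __iorpc_example_set_debug(int fd, unsigned int level)
{
	struct set_debug_param temp;
	struct set_debug_param *params = &temp;

	/* Pack the arguments into the parameter block... */
	params->level = level;

	/* ...and hand the block to the hypervisor driver as a single
	 * write, with the operation encoded in the offset argument. */
	return hv_dev_pwrite(fd, 0, (HV_VirtAddr) params, sizeof(*params),
			     GXIO_EXAMPLE_OP_SET_DEBUG);
}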
diff --git a/arch/tile/gxio/iorpc_mpipe.c b/arch/tile/gxio/iorpc_mpipe.c
new file mode 100644
index 000000000000..31b87bf8c027
--- /dev/null
+++ b/arch/tile/gxio/iorpc_mpipe.c
@@ -0,0 +1,529 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* This file is machine-generated; DO NOT EDIT! */ | ||
16 | #include "gxio/iorpc_mpipe.h" | ||
17 | |||
18 | struct alloc_buffer_stacks_param { | ||
19 | unsigned int count; | ||
20 | unsigned int first; | ||
21 | unsigned int flags; | ||
22 | }; | ||
23 | |||
24 | int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t * context, | ||
25 | unsigned int count, unsigned int first, | ||
26 | unsigned int flags) | ||
27 | { | ||
28 | struct alloc_buffer_stacks_param temp; | ||
29 | struct alloc_buffer_stacks_param *params = &temp; | ||
30 | |||
31 | params->count = count; | ||
32 | params->first = first; | ||
33 | params->flags = flags; | ||
34 | |||
35 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
36 | sizeof(*params), | ||
37 | GXIO_MPIPE_OP_ALLOC_BUFFER_STACKS); | ||
38 | } | ||
39 | |||
40 | EXPORT_SYMBOL(gxio_mpipe_alloc_buffer_stacks); | ||
41 | |||
42 | struct init_buffer_stack_aux_param { | ||
43 | union iorpc_mem_buffer buffer; | ||
44 | unsigned int stack; | ||
45 | unsigned int buffer_size_enum; | ||
46 | }; | ||
47 | |||
48 | int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t * context, | ||
49 | void *mem_va, size_t mem_size, | ||
50 | unsigned int mem_flags, unsigned int stack, | ||
51 | unsigned int buffer_size_enum) | ||
52 | { | ||
53 | int __result; | ||
54 | unsigned long long __cpa; | ||
55 | pte_t __pte; | ||
56 | struct init_buffer_stack_aux_param temp; | ||
57 | struct init_buffer_stack_aux_param *params = &temp; | ||
58 | |||
59 | __result = va_to_cpa_and_pte(mem_va, &__cpa, &__pte); | ||
60 | if (__result != 0) | ||
61 | return __result; | ||
62 | params->buffer.kernel.cpa = __cpa; | ||
63 | params->buffer.kernel.size = mem_size; | ||
64 | params->buffer.kernel.pte = __pte; | ||
65 | params->buffer.kernel.flags = mem_flags; | ||
66 | params->stack = stack; | ||
67 | params->buffer_size_enum = buffer_size_enum; | ||
68 | |||
69 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
70 | sizeof(*params), | ||
71 | GXIO_MPIPE_OP_INIT_BUFFER_STACK_AUX); | ||
72 | } | ||
73 | |||
74 | EXPORT_SYMBOL(gxio_mpipe_init_buffer_stack_aux); | ||
75 | |||
76 | |||
77 | struct alloc_notif_rings_param { | ||
78 | unsigned int count; | ||
79 | unsigned int first; | ||
80 | unsigned int flags; | ||
81 | }; | ||
82 | |||
83 | int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t * context, | ||
84 | unsigned int count, unsigned int first, | ||
85 | unsigned int flags) | ||
86 | { | ||
87 | struct alloc_notif_rings_param temp; | ||
88 | struct alloc_notif_rings_param *params = &temp; | ||
89 | |||
90 | params->count = count; | ||
91 | params->first = first; | ||
92 | params->flags = flags; | ||
93 | |||
94 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
95 | sizeof(*params), GXIO_MPIPE_OP_ALLOC_NOTIF_RINGS); | ||
96 | } | ||
97 | |||
98 | EXPORT_SYMBOL(gxio_mpipe_alloc_notif_rings); | ||
99 | |||
100 | struct init_notif_ring_aux_param { | ||
101 | union iorpc_mem_buffer buffer; | ||
102 | unsigned int ring; | ||
103 | }; | ||
104 | |||
105 | int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t * context, void *mem_va, | ||
106 | size_t mem_size, unsigned int mem_flags, | ||
107 | unsigned int ring) | ||
108 | { | ||
109 | int __result; | ||
110 | unsigned long long __cpa; | ||
111 | pte_t __pte; | ||
112 | struct init_notif_ring_aux_param temp; | ||
113 | struct init_notif_ring_aux_param *params = &temp; | ||
114 | |||
115 | __result = va_to_cpa_and_pte(mem_va, &__cpa, &__pte); | ||
116 | if (__result != 0) | ||
117 | return __result; | ||
118 | params->buffer.kernel.cpa = __cpa; | ||
119 | params->buffer.kernel.size = mem_size; | ||
120 | params->buffer.kernel.pte = __pte; | ||
121 | params->buffer.kernel.flags = mem_flags; | ||
122 | params->ring = ring; | ||
123 | |||
124 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
125 | sizeof(*params), | ||
126 | GXIO_MPIPE_OP_INIT_NOTIF_RING_AUX); | ||
127 | } | ||
128 | |||
129 | EXPORT_SYMBOL(gxio_mpipe_init_notif_ring_aux); | ||
130 | |||
131 | struct request_notif_ring_interrupt_param { | ||
132 | union iorpc_interrupt interrupt; | ||
133 | unsigned int ring; | ||
134 | }; | ||
135 | |||
136 | int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t * context, | ||
137 | int inter_x, int inter_y, | ||
138 | int inter_ipi, int inter_event, | ||
139 | unsigned int ring) | ||
140 | { | ||
141 | struct request_notif_ring_interrupt_param temp; | ||
142 | struct request_notif_ring_interrupt_param *params = &temp; | ||
143 | |||
144 | params->interrupt.kernel.x = inter_x; | ||
145 | params->interrupt.kernel.y = inter_y; | ||
146 | params->interrupt.kernel.ipi = inter_ipi; | ||
147 | params->interrupt.kernel.event = inter_event; | ||
148 | params->ring = ring; | ||
149 | |||
150 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
151 | sizeof(*params), | ||
152 | GXIO_MPIPE_OP_REQUEST_NOTIF_RING_INTERRUPT); | ||
153 | } | ||
154 | |||
155 | EXPORT_SYMBOL(gxio_mpipe_request_notif_ring_interrupt); | ||
156 | |||
157 | struct enable_notif_ring_interrupt_param { | ||
158 | unsigned int ring; | ||
159 | }; | ||
160 | |||
161 | int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t * context, | ||
162 | unsigned int ring) | ||
163 | { | ||
164 | struct enable_notif_ring_interrupt_param temp; | ||
165 | struct enable_notif_ring_interrupt_param *params = &temp; | ||
166 | |||
167 | params->ring = ring; | ||
168 | |||
169 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
170 | sizeof(*params), | ||
171 | GXIO_MPIPE_OP_ENABLE_NOTIF_RING_INTERRUPT); | ||
172 | } | ||
173 | |||
174 | EXPORT_SYMBOL(gxio_mpipe_enable_notif_ring_interrupt); | ||
175 | |||
176 | struct alloc_notif_groups_param { | ||
177 | unsigned int count; | ||
178 | unsigned int first; | ||
179 | unsigned int flags; | ||
180 | }; | ||
181 | |||
182 | int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t * context, | ||
183 | unsigned int count, unsigned int first, | ||
184 | unsigned int flags) | ||
185 | { | ||
186 | struct alloc_notif_groups_param temp; | ||
187 | struct alloc_notif_groups_param *params = &temp; | ||
188 | |||
189 | params->count = count; | ||
190 | params->first = first; | ||
191 | params->flags = flags; | ||
192 | |||
193 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
194 | sizeof(*params), GXIO_MPIPE_OP_ALLOC_NOTIF_GROUPS); | ||
195 | } | ||
196 | |||
197 | EXPORT_SYMBOL(gxio_mpipe_alloc_notif_groups); | ||
198 | |||
199 | struct init_notif_group_param { | ||
200 | unsigned int group; | ||
201 | gxio_mpipe_notif_group_bits_t bits; | ||
202 | }; | ||
203 | |||
204 | int gxio_mpipe_init_notif_group(gxio_mpipe_context_t * context, | ||
205 | unsigned int group, | ||
206 | gxio_mpipe_notif_group_bits_t bits) | ||
207 | { | ||
208 | struct init_notif_group_param temp; | ||
209 | struct init_notif_group_param *params = &temp; | ||
210 | |||
211 | params->group = group; | ||
212 | params->bits = bits; | ||
213 | |||
214 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
215 | sizeof(*params), GXIO_MPIPE_OP_INIT_NOTIF_GROUP); | ||
216 | } | ||
217 | |||
218 | EXPORT_SYMBOL(gxio_mpipe_init_notif_group); | ||
219 | |||
220 | struct alloc_buckets_param { | ||
221 | unsigned int count; | ||
222 | unsigned int first; | ||
223 | unsigned int flags; | ||
224 | }; | ||
225 | |||
226 | int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t * context, unsigned int count, | ||
227 | unsigned int first, unsigned int flags) | ||
228 | { | ||
229 | struct alloc_buckets_param temp; | ||
230 | struct alloc_buckets_param *params = &temp; | ||
231 | |||
232 | params->count = count; | ||
233 | params->first = first; | ||
234 | params->flags = flags; | ||
235 | |||
236 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
237 | sizeof(*params), GXIO_MPIPE_OP_ALLOC_BUCKETS); | ||
238 | } | ||
239 | |||
240 | EXPORT_SYMBOL(gxio_mpipe_alloc_buckets); | ||
241 | |||
242 | struct init_bucket_param { | ||
243 | unsigned int bucket; | ||
244 | MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info; | ||
245 | }; | ||
246 | |||
247 | int gxio_mpipe_init_bucket(gxio_mpipe_context_t * context, unsigned int bucket, | ||
248 | MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info) | ||
249 | { | ||
250 | struct init_bucket_param temp; | ||
251 | struct init_bucket_param *params = &temp; | ||
252 | |||
253 | params->bucket = bucket; | ||
254 | params->bucket_info = bucket_info; | ||
255 | |||
256 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
257 | sizeof(*params), GXIO_MPIPE_OP_INIT_BUCKET); | ||
258 | } | ||
259 | |||
260 | EXPORT_SYMBOL(gxio_mpipe_init_bucket); | ||
261 | |||
262 | struct alloc_edma_rings_param { | ||
263 | unsigned int count; | ||
264 | unsigned int first; | ||
265 | unsigned int flags; | ||
266 | }; | ||
267 | |||
268 | int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t * context, | ||
269 | unsigned int count, unsigned int first, | ||
270 | unsigned int flags) | ||
271 | { | ||
272 | struct alloc_edma_rings_param temp; | ||
273 | struct alloc_edma_rings_param *params = &temp; | ||
274 | |||
275 | params->count = count; | ||
276 | params->first = first; | ||
277 | params->flags = flags; | ||
278 | |||
279 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
280 | sizeof(*params), GXIO_MPIPE_OP_ALLOC_EDMA_RINGS); | ||
281 | } | ||
282 | |||
283 | EXPORT_SYMBOL(gxio_mpipe_alloc_edma_rings); | ||
284 | |||
285 | struct init_edma_ring_aux_param { | ||
286 | union iorpc_mem_buffer buffer; | ||
287 | unsigned int ring; | ||
288 | unsigned int channel; | ||
289 | }; | ||
290 | |||
291 | int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t * context, void *mem_va, | ||
292 | size_t mem_size, unsigned int mem_flags, | ||
293 | unsigned int ring, unsigned int channel) | ||
294 | { | ||
295 | int __result; | ||
296 | unsigned long long __cpa; | ||
297 | pte_t __pte; | ||
298 | struct init_edma_ring_aux_param temp; | ||
299 | struct init_edma_ring_aux_param *params = &temp; | ||
300 | |||
301 | __result = va_to_cpa_and_pte(mem_va, &__cpa, &__pte); | ||
302 | if (__result != 0) | ||
303 | return __result; | ||
304 | params->buffer.kernel.cpa = __cpa; | ||
305 | params->buffer.kernel.size = mem_size; | ||
306 | params->buffer.kernel.pte = __pte; | ||
307 | params->buffer.kernel.flags = mem_flags; | ||
308 | params->ring = ring; | ||
309 | params->channel = channel; | ||
310 | |||
311 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
312 | sizeof(*params), GXIO_MPIPE_OP_INIT_EDMA_RING_AUX); | ||
313 | } | ||
314 | |||
315 | EXPORT_SYMBOL(gxio_mpipe_init_edma_ring_aux); | ||
316 | |||
317 | |||
318 | int gxio_mpipe_commit_rules(gxio_mpipe_context_t * context, const void *blob, | ||
319 | size_t blob_size) | ||
320 | { | ||
321 | const void *params = blob; | ||
322 | |||
323 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, blob_size, | ||
324 | GXIO_MPIPE_OP_COMMIT_RULES); | ||
325 | } | ||
326 | |||
327 | EXPORT_SYMBOL(gxio_mpipe_commit_rules); | ||
328 | |||
329 | struct register_client_memory_param { | ||
330 | unsigned int iotlb; | ||
331 | HV_PTE pte; | ||
332 | unsigned int flags; | ||
333 | }; | ||
334 | |||
335 | int gxio_mpipe_register_client_memory(gxio_mpipe_context_t * context, | ||
336 | unsigned int iotlb, HV_PTE pte, | ||
337 | unsigned int flags) | ||
338 | { | ||
339 | struct register_client_memory_param temp; | ||
340 | struct register_client_memory_param *params = &temp; | ||
341 | |||
342 | params->iotlb = iotlb; | ||
343 | params->pte = pte; | ||
344 | params->flags = flags; | ||
345 | |||
346 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
347 | sizeof(*params), | ||
348 | GXIO_MPIPE_OP_REGISTER_CLIENT_MEMORY); | ||
349 | } | ||
350 | |||
351 | EXPORT_SYMBOL(gxio_mpipe_register_client_memory); | ||
352 | |||
353 | struct link_open_aux_param { | ||
354 | _gxio_mpipe_link_name_t name; | ||
355 | unsigned int flags; | ||
356 | }; | ||
357 | |||
358 | int gxio_mpipe_link_open_aux(gxio_mpipe_context_t * context, | ||
359 | _gxio_mpipe_link_name_t name, unsigned int flags) | ||
360 | { | ||
361 | struct link_open_aux_param temp; | ||
362 | struct link_open_aux_param *params = &temp; | ||
363 | |||
364 | params->name = name; | ||
365 | params->flags = flags; | ||
366 | |||
367 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
368 | sizeof(*params), GXIO_MPIPE_OP_LINK_OPEN_AUX); | ||
369 | } | ||
370 | |||
371 | EXPORT_SYMBOL(gxio_mpipe_link_open_aux); | ||
372 | |||
373 | struct link_close_aux_param { | ||
374 | int mac; | ||
375 | }; | ||
376 | |||
377 | int gxio_mpipe_link_close_aux(gxio_mpipe_context_t * context, int mac) | ||
378 | { | ||
379 | struct link_close_aux_param temp; | ||
380 | struct link_close_aux_param *params = &temp; | ||
381 | |||
382 | params->mac = mac; | ||
383 | |||
384 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
385 | sizeof(*params), GXIO_MPIPE_OP_LINK_CLOSE_AUX); | ||
386 | } | ||
387 | |||
388 | EXPORT_SYMBOL(gxio_mpipe_link_close_aux); | ||
389 | |||
390 | |||
391 | struct get_timestamp_aux_param { | ||
392 | uint64_t sec; | ||
393 | uint64_t nsec; | ||
394 | uint64_t cycles; | ||
395 | }; | ||
396 | |||
397 | int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t * context, uint64_t * sec, | ||
398 | uint64_t * nsec, uint64_t * cycles) | ||
399 | { | ||
400 | int __result; | ||
401 | struct get_timestamp_aux_param temp; | ||
402 | struct get_timestamp_aux_param *params = &temp; | ||
403 | |||
404 | __result = | ||
405 | hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), | ||
406 | GXIO_MPIPE_OP_GET_TIMESTAMP_AUX); | ||
407 | *sec = params->sec; | ||
408 | *nsec = params->nsec; | ||
409 | *cycles = params->cycles; | ||
410 | |||
411 | return __result; | ||
412 | } | ||
413 | |||
414 | EXPORT_SYMBOL(gxio_mpipe_get_timestamp_aux); | ||
415 | |||
416 | struct set_timestamp_aux_param { | ||
417 | uint64_t sec; | ||
418 | uint64_t nsec; | ||
419 | uint64_t cycles; | ||
420 | }; | ||
421 | |||
422 | int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t * context, uint64_t sec, | ||
423 | uint64_t nsec, uint64_t cycles) | ||
424 | { | ||
425 | struct set_timestamp_aux_param temp; | ||
426 | struct set_timestamp_aux_param *params = &temp; | ||
427 | |||
428 | params->sec = sec; | ||
429 | params->nsec = nsec; | ||
430 | params->cycles = cycles; | ||
431 | |||
432 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
433 | sizeof(*params), GXIO_MPIPE_OP_SET_TIMESTAMP_AUX); | ||
434 | } | ||
435 | |||
436 | EXPORT_SYMBOL(gxio_mpipe_set_timestamp_aux); | ||
437 | |||
438 | struct adjust_timestamp_aux_param { | ||
439 | int64_t nsec; | ||
440 | }; | ||
441 | |||
442 | int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context, | ||
443 | int64_t nsec) | ||
444 | { | ||
445 | struct adjust_timestamp_aux_param temp; | ||
446 | struct adjust_timestamp_aux_param *params = &temp; | ||
447 | |||
448 | params->nsec = nsec; | ||
449 | |||
450 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
451 | sizeof(*params), | ||
452 | GXIO_MPIPE_OP_ADJUST_TIMESTAMP_AUX); | ||
453 | } | ||
454 | |||
455 | EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_aux); | ||
456 | |||
457 | struct arm_pollfd_param { | ||
458 | union iorpc_pollfd pollfd; | ||
459 | }; | ||
460 | |||
461 | int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie) | ||
462 | { | ||
463 | struct arm_pollfd_param temp; | ||
464 | struct arm_pollfd_param *params = &temp; | ||
465 | |||
466 | params->pollfd.kernel.cookie = pollfd_cookie; | ||
467 | |||
468 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
469 | sizeof(*params), GXIO_MPIPE_OP_ARM_POLLFD); | ||
470 | } | ||
471 | |||
472 | EXPORT_SYMBOL(gxio_mpipe_arm_pollfd); | ||
473 | |||
474 | struct close_pollfd_param { | ||
475 | union iorpc_pollfd pollfd; | ||
476 | }; | ||
477 | |||
478 | int gxio_mpipe_close_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie) | ||
479 | { | ||
480 | struct close_pollfd_param temp; | ||
481 | struct close_pollfd_param *params = &temp; | ||
482 | |||
483 | params->pollfd.kernel.cookie = pollfd_cookie; | ||
484 | |||
485 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
486 | sizeof(*params), GXIO_MPIPE_OP_CLOSE_POLLFD); | ||
487 | } | ||
488 | |||
489 | EXPORT_SYMBOL(gxio_mpipe_close_pollfd); | ||
490 | |||
491 | struct get_mmio_base_param { | ||
492 | HV_PTE base; | ||
493 | }; | ||
494 | |||
495 | int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t * context, HV_PTE *base) | ||
496 | { | ||
497 | int __result; | ||
498 | struct get_mmio_base_param temp; | ||
499 | struct get_mmio_base_param *params = &temp; | ||
500 | |||
501 | __result = | ||
502 | hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), | ||
503 | GXIO_MPIPE_OP_GET_MMIO_BASE); | ||
504 | *base = params->base; | ||
505 | |||
506 | return __result; | ||
507 | } | ||
508 | |||
509 | EXPORT_SYMBOL(gxio_mpipe_get_mmio_base); | ||
510 | |||
511 | struct check_mmio_offset_param { | ||
512 | unsigned long offset; | ||
513 | unsigned long size; | ||
514 | }; | ||
515 | |||
516 | int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t * context, | ||
517 | unsigned long offset, unsigned long size) | ||
518 | { | ||
519 | struct check_mmio_offset_param temp; | ||
520 | struct check_mmio_offset_param *params = &temp; | ||
521 | |||
522 | params->offset = offset; | ||
523 | params->size = size; | ||
524 | |||
525 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
526 | sizeof(*params), GXIO_MPIPE_OP_CHECK_MMIO_OFFSET); | ||
527 | } | ||
528 | |||
529 | EXPORT_SYMBOL(gxio_mpipe_check_mmio_offset); | ||
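The mPIPE wrappers come in alloc/init pairs: first reserve a range of resources, then bind pinned kernel memory to each one. A sketch of how a kernel client might plausibly chain them; the zero flag values and the assumption that the alloc calls return the first allocated index on success are illustrative, not spelled out in this file:

static int example_mpipe_setup(gxio_mpipe_context_t *context,
			       void *stack_mem, size_t stack_size,
			       unsigned int buffer_size_enum,
			       void *ring_mem, size_t ring_size)
{
	int stack, ring, rc;

	/* Reserve one buffer stack and one NotifRing, letting the
	 * hypervisor pick the indices. */
	stack = gxio_mpipe_alloc_buffer_stacks(context, 1, 0, 0);
	if (stack < 0)
		return stack;
	ring = gxio_mpipe_alloc_notif_rings(context, 1, 0, 0);
	if (ring < 0)
		return ring;

	/* Hand pinned kernel memory to the shim for each resource. */
	rc = gxio_mpipe_init_buffer_stack_aux(context, stack_mem, stack_size,
					      0, stack, buffer_size_enum);
	if (rc < 0)
		return rc;
	return gxio_mpipe_init_notif_ring_aux(context, ring_mem, ring_size,
					      0, ring);
}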
diff --git a/arch/tile/gxio/iorpc_mpipe_info.c b/arch/tile/gxio/iorpc_mpipe_info.c
new file mode 100644
index 000000000000..d0254aa60cba
--- /dev/null
+++ b/arch/tile/gxio/iorpc_mpipe_info.c
@@ -0,0 +1,85 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* This file is machine-generated; DO NOT EDIT! */ | ||
16 | #include "gxio/iorpc_mpipe_info.h" | ||
17 | |||
18 | |||
19 | struct enumerate_aux_param { | ||
20 | _gxio_mpipe_link_name_t name; | ||
21 | _gxio_mpipe_link_mac_t mac; | ||
22 | }; | ||
23 | |||
24 | int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t * context, | ||
25 | unsigned int idx, | ||
26 | _gxio_mpipe_link_name_t * name, | ||
27 | _gxio_mpipe_link_mac_t * mac) | ||
28 | { | ||
29 | int __result; | ||
30 | struct enumerate_aux_param temp; | ||
31 | struct enumerate_aux_param *params = &temp; | ||
32 | |||
33 | __result = | ||
34 | hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), | ||
35 | (((uint64_t) idx << 32) | | ||
36 | GXIO_MPIPE_INFO_OP_ENUMERATE_AUX)); | ||
37 | *name = params->name; | ||
38 | *mac = params->mac; | ||
39 | |||
40 | return __result; | ||
41 | } | ||
42 | |||
43 | EXPORT_SYMBOL(gxio_mpipe_info_enumerate_aux); | ||
44 | |||
45 | struct get_mmio_base_param { | ||
46 | HV_PTE base; | ||
47 | }; | ||
48 | |||
49 | int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t * context, | ||
50 | HV_PTE *base) | ||
51 | { | ||
52 | int __result; | ||
53 | struct get_mmio_base_param temp; | ||
54 | struct get_mmio_base_param *params = &temp; | ||
55 | |||
56 | __result = | ||
57 | hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), | ||
58 | GXIO_MPIPE_INFO_OP_GET_MMIO_BASE); | ||
59 | *base = params->base; | ||
60 | |||
61 | return __result; | ||
62 | } | ||
63 | |||
64 | EXPORT_SYMBOL(gxio_mpipe_info_get_mmio_base); | ||
65 | |||
66 | struct check_mmio_offset_param { | ||
67 | unsigned long offset; | ||
68 | unsigned long size; | ||
69 | }; | ||
70 | |||
71 | int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t * context, | ||
72 | unsigned long offset, unsigned long size) | ||
73 | { | ||
74 | struct check_mmio_offset_param temp; | ||
75 | struct check_mmio_offset_param *params = &temp; | ||
76 | |||
77 | params->offset = offset; | ||
78 | params->size = size; | ||
79 | |||
80 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
81 | sizeof(*params), | ||
82 | GXIO_MPIPE_INFO_OP_CHECK_MMIO_OFFSET); | ||
83 | } | ||
84 | |||
85 | EXPORT_SYMBOL(gxio_mpipe_info_check_mmio_offset); | ||
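gxio_mpipe_info_enumerate_aux() folds the index into the upper 32 bits of the offset word, so walking the links an mPIPE instance exposes is just a matter of calling it with increasing indices. A sketch of such a walk; the assumptions that a negative return ends the enumeration and that the name type wraps a NUL-terminated character array are illustrative:

static void example_list_links(gxio_mpipe_info_context_t *context)
{
	unsigned int idx;
	_gxio_mpipe_link_name_t name;
	_gxio_mpipe_link_mac_t mac;

	for (idx = 0; ; idx++) {
		/* Assumed contract: a negative value means "no more links". */
		if (gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac) < 0)
			break;
		pr_info("mpipe link %u: %s\n", idx, name.name);
	}
}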
diff --git a/arch/tile/gxio/iorpc_trio.c b/arch/tile/gxio/iorpc_trio.c
new file mode 100644
index 000000000000..cef4b2209cda
--- /dev/null
+++ b/arch/tile/gxio/iorpc_trio.c
@@ -0,0 +1,327 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* This file is machine-generated; DO NOT EDIT! */ | ||
16 | #include "gxio/iorpc_trio.h" | ||
17 | |||
18 | struct alloc_asids_param { | ||
19 | unsigned int count; | ||
20 | unsigned int first; | ||
21 | unsigned int flags; | ||
22 | }; | ||
23 | |||
24 | int gxio_trio_alloc_asids(gxio_trio_context_t * context, unsigned int count, | ||
25 | unsigned int first, unsigned int flags) | ||
26 | { | ||
27 | struct alloc_asids_param temp; | ||
28 | struct alloc_asids_param *params = &temp; | ||
29 | |||
30 | params->count = count; | ||
31 | params->first = first; | ||
32 | params->flags = flags; | ||
33 | |||
34 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
35 | sizeof(*params), GXIO_TRIO_OP_ALLOC_ASIDS); | ||
36 | } | ||
37 | |||
38 | EXPORT_SYMBOL(gxio_trio_alloc_asids); | ||
39 | |||
40 | |||
41 | struct alloc_memory_maps_param { | ||
42 | unsigned int count; | ||
43 | unsigned int first; | ||
44 | unsigned int flags; | ||
45 | }; | ||
46 | |||
47 | int gxio_trio_alloc_memory_maps(gxio_trio_context_t * context, | ||
48 | unsigned int count, unsigned int first, | ||
49 | unsigned int flags) | ||
50 | { | ||
51 | struct alloc_memory_maps_param temp; | ||
52 | struct alloc_memory_maps_param *params = &temp; | ||
53 | |||
54 | params->count = count; | ||
55 | params->first = first; | ||
56 | params->flags = flags; | ||
57 | |||
58 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
59 | sizeof(*params), GXIO_TRIO_OP_ALLOC_MEMORY_MAPS); | ||
60 | } | ||
61 | |||
62 | EXPORT_SYMBOL(gxio_trio_alloc_memory_maps); | ||
63 | |||
64 | |||
65 | struct alloc_pio_regions_param { | ||
66 | unsigned int count; | ||
67 | unsigned int first; | ||
68 | unsigned int flags; | ||
69 | }; | ||
70 | |||
71 | int gxio_trio_alloc_pio_regions(gxio_trio_context_t * context, | ||
72 | unsigned int count, unsigned int first, | ||
73 | unsigned int flags) | ||
74 | { | ||
75 | struct alloc_pio_regions_param temp; | ||
76 | struct alloc_pio_regions_param *params = &temp; | ||
77 | |||
78 | params->count = count; | ||
79 | params->first = first; | ||
80 | params->flags = flags; | ||
81 | |||
82 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
83 | sizeof(*params), GXIO_TRIO_OP_ALLOC_PIO_REGIONS); | ||
84 | } | ||
85 | |||
86 | EXPORT_SYMBOL(gxio_trio_alloc_pio_regions); | ||
87 | |||
88 | struct init_pio_region_aux_param { | ||
89 | unsigned int pio_region; | ||
90 | unsigned int mac; | ||
91 | uint32_t bus_address_hi; | ||
92 | unsigned int flags; | ||
93 | }; | ||
94 | |||
95 | int gxio_trio_init_pio_region_aux(gxio_trio_context_t * context, | ||
96 | unsigned int pio_region, unsigned int mac, | ||
97 | uint32_t bus_address_hi, unsigned int flags) | ||
98 | { | ||
99 | struct init_pio_region_aux_param temp; | ||
100 | struct init_pio_region_aux_param *params = &temp; | ||
101 | |||
102 | params->pio_region = pio_region; | ||
103 | params->mac = mac; | ||
104 | params->bus_address_hi = bus_address_hi; | ||
105 | params->flags = flags; | ||
106 | |||
107 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
108 | sizeof(*params), GXIO_TRIO_OP_INIT_PIO_REGION_AUX); | ||
109 | } | ||
110 | |||
111 | EXPORT_SYMBOL(gxio_trio_init_pio_region_aux); | ||
112 | |||
113 | |||
114 | struct init_memory_map_mmu_aux_param { | ||
115 | unsigned int map; | ||
116 | unsigned long va; | ||
117 | uint64_t size; | ||
118 | unsigned int asid; | ||
119 | unsigned int mac; | ||
120 | uint64_t bus_address; | ||
121 | unsigned int node; | ||
122 | unsigned int order_mode; | ||
123 | }; | ||
124 | |||
125 | int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t * context, | ||
126 | unsigned int map, unsigned long va, | ||
127 | uint64_t size, unsigned int asid, | ||
128 | unsigned int mac, uint64_t bus_address, | ||
129 | unsigned int node, | ||
130 | unsigned int order_mode) | ||
131 | { | ||
132 | struct init_memory_map_mmu_aux_param temp; | ||
133 | struct init_memory_map_mmu_aux_param *params = &temp; | ||
134 | |||
135 | params->map = map; | ||
136 | params->va = va; | ||
137 | params->size = size; | ||
138 | params->asid = asid; | ||
139 | params->mac = mac; | ||
140 | params->bus_address = bus_address; | ||
141 | params->node = node; | ||
142 | params->order_mode = order_mode; | ||
143 | |||
144 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
145 | sizeof(*params), | ||
146 | GXIO_TRIO_OP_INIT_MEMORY_MAP_MMU_AUX); | ||
147 | } | ||
148 | |||
149 | EXPORT_SYMBOL(gxio_trio_init_memory_map_mmu_aux); | ||
150 | |||
151 | struct get_port_property_param { | ||
152 | struct pcie_trio_ports_property trio_ports; | ||
153 | }; | ||
154 | |||
155 | int gxio_trio_get_port_property(gxio_trio_context_t * context, | ||
156 | struct pcie_trio_ports_property *trio_ports) | ||
157 | { | ||
158 | int __result; | ||
159 | struct get_port_property_param temp; | ||
160 | struct get_port_property_param *params = &temp; | ||
161 | |||
162 | __result = | ||
163 | hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), | ||
164 | GXIO_TRIO_OP_GET_PORT_PROPERTY); | ||
165 | *trio_ports = params->trio_ports; | ||
166 | |||
167 | return __result; | ||
168 | } | ||
169 | |||
170 | EXPORT_SYMBOL(gxio_trio_get_port_property); | ||
171 | |||
172 | struct config_legacy_intr_param { | ||
173 | union iorpc_interrupt interrupt; | ||
174 | unsigned int mac; | ||
175 | unsigned int intx; | ||
176 | }; | ||
177 | |||
178 | int gxio_trio_config_legacy_intr(gxio_trio_context_t * context, int inter_x, | ||
179 | int inter_y, int inter_ipi, int inter_event, | ||
180 | unsigned int mac, unsigned int intx) | ||
181 | { | ||
182 | struct config_legacy_intr_param temp; | ||
183 | struct config_legacy_intr_param *params = &temp; | ||
184 | |||
185 | params->interrupt.kernel.x = inter_x; | ||
186 | params->interrupt.kernel.y = inter_y; | ||
187 | params->interrupt.kernel.ipi = inter_ipi; | ||
188 | params->interrupt.kernel.event = inter_event; | ||
189 | params->mac = mac; | ||
190 | params->intx = intx; | ||
191 | |||
192 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
193 | sizeof(*params), GXIO_TRIO_OP_CONFIG_LEGACY_INTR); | ||
194 | } | ||
195 | |||
196 | EXPORT_SYMBOL(gxio_trio_config_legacy_intr); | ||
197 | |||
198 | struct config_msi_intr_param { | ||
199 | union iorpc_interrupt interrupt; | ||
200 | unsigned int mac; | ||
201 | unsigned int mem_map; | ||
202 | uint64_t mem_map_base; | ||
203 | uint64_t mem_map_limit; | ||
204 | unsigned int asid; | ||
205 | }; | ||
206 | |||
207 | int gxio_trio_config_msi_intr(gxio_trio_context_t * context, int inter_x, | ||
208 | int inter_y, int inter_ipi, int inter_event, | ||
209 | unsigned int mac, unsigned int mem_map, | ||
210 | uint64_t mem_map_base, uint64_t mem_map_limit, | ||
211 | unsigned int asid) | ||
212 | { | ||
213 | struct config_msi_intr_param temp; | ||
214 | struct config_msi_intr_param *params = &temp; | ||
215 | |||
216 | params->interrupt.kernel.x = inter_x; | ||
217 | params->interrupt.kernel.y = inter_y; | ||
218 | params->interrupt.kernel.ipi = inter_ipi; | ||
219 | params->interrupt.kernel.event = inter_event; | ||
220 | params->mac = mac; | ||
221 | params->mem_map = mem_map; | ||
222 | params->mem_map_base = mem_map_base; | ||
223 | params->mem_map_limit = mem_map_limit; | ||
224 | params->asid = asid; | ||
225 | |||
226 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
227 | sizeof(*params), GXIO_TRIO_OP_CONFIG_MSI_INTR); | ||
228 | } | ||
229 | |||
230 | EXPORT_SYMBOL(gxio_trio_config_msi_intr); | ||
231 | |||
232 | |||
233 | struct set_mps_mrs_param { | ||
234 | uint16_t mps; | ||
235 | uint16_t mrs; | ||
236 | unsigned int mac; | ||
237 | }; | ||
238 | |||
239 | int gxio_trio_set_mps_mrs(gxio_trio_context_t * context, uint16_t mps, | ||
240 | uint16_t mrs, unsigned int mac) | ||
241 | { | ||
242 | struct set_mps_mrs_param temp; | ||
243 | struct set_mps_mrs_param *params = &temp; | ||
244 | |||
245 | params->mps = mps; | ||
246 | params->mrs = mrs; | ||
247 | params->mac = mac; | ||
248 | |||
249 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
250 | sizeof(*params), GXIO_TRIO_OP_SET_MPS_MRS); | ||
251 | } | ||
252 | |||
253 | EXPORT_SYMBOL(gxio_trio_set_mps_mrs); | ||
254 | |||
255 | struct force_rc_link_up_param { | ||
256 | unsigned int mac; | ||
257 | }; | ||
258 | |||
259 | int gxio_trio_force_rc_link_up(gxio_trio_context_t * context, unsigned int mac) | ||
260 | { | ||
261 | struct force_rc_link_up_param temp; | ||
262 | struct force_rc_link_up_param *params = &temp; | ||
263 | |||
264 | params->mac = mac; | ||
265 | |||
266 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
267 | sizeof(*params), GXIO_TRIO_OP_FORCE_RC_LINK_UP); | ||
268 | } | ||
269 | |||
270 | EXPORT_SYMBOL(gxio_trio_force_rc_link_up); | ||
271 | |||
272 | struct force_ep_link_up_param { | ||
273 | unsigned int mac; | ||
274 | }; | ||
275 | |||
276 | int gxio_trio_force_ep_link_up(gxio_trio_context_t * context, unsigned int mac) | ||
277 | { | ||
278 | struct force_ep_link_up_param temp; | ||
279 | struct force_ep_link_up_param *params = &temp; | ||
280 | |||
281 | params->mac = mac; | ||
282 | |||
283 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
284 | sizeof(*params), GXIO_TRIO_OP_FORCE_EP_LINK_UP); | ||
285 | } | ||
286 | |||
287 | EXPORT_SYMBOL(gxio_trio_force_ep_link_up); | ||
288 | |||
289 | struct get_mmio_base_param { | ||
290 | HV_PTE base; | ||
291 | }; | ||
292 | |||
293 | int gxio_trio_get_mmio_base(gxio_trio_context_t * context, HV_PTE *base) | ||
294 | { | ||
295 | int __result; | ||
296 | struct get_mmio_base_param temp; | ||
297 | struct get_mmio_base_param *params = &temp; | ||
298 | |||
299 | __result = | ||
300 | hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), | ||
301 | GXIO_TRIO_OP_GET_MMIO_BASE); | ||
302 | *base = params->base; | ||
303 | |||
304 | return __result; | ||
305 | } | ||
306 | |||
307 | EXPORT_SYMBOL(gxio_trio_get_mmio_base); | ||
308 | |||
309 | struct check_mmio_offset_param { | ||
310 | unsigned long offset; | ||
311 | unsigned long size; | ||
312 | }; | ||
313 | |||
314 | int gxio_trio_check_mmio_offset(gxio_trio_context_t * context, | ||
315 | unsigned long offset, unsigned long size) | ||
316 | { | ||
317 | struct check_mmio_offset_param temp; | ||
318 | struct check_mmio_offset_param *params = &temp; | ||
319 | |||
320 | params->offset = offset; | ||
321 | params->size = size; | ||
322 | |||
323 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
324 | sizeof(*params), GXIO_TRIO_OP_CHECK_MMIO_OFFSET); | ||
325 | } | ||
326 | |||
327 | EXPORT_SYMBOL(gxio_trio_check_mmio_offset); | ||
diff --git a/arch/tile/gxio/iorpc_usb_host.c b/arch/tile/gxio/iorpc_usb_host.c
new file mode 100644
index 000000000000..cf3c3cc12204
--- /dev/null
+++ b/arch/tile/gxio/iorpc_usb_host.c
@@ -0,0 +1,99 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* This file is machine-generated; DO NOT EDIT! */ | ||
16 | #include "gxio/iorpc_usb_host.h" | ||
17 | |||
18 | struct cfg_interrupt_param { | ||
19 | union iorpc_interrupt interrupt; | ||
20 | }; | ||
21 | |||
22 | int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t * context, int inter_x, | ||
23 | int inter_y, int inter_ipi, int inter_event) | ||
24 | { | ||
25 | struct cfg_interrupt_param temp; | ||
26 | struct cfg_interrupt_param *params = &temp; | ||
27 | |||
28 | params->interrupt.kernel.x = inter_x; | ||
29 | params->interrupt.kernel.y = inter_y; | ||
30 | params->interrupt.kernel.ipi = inter_ipi; | ||
31 | params->interrupt.kernel.event = inter_event; | ||
32 | |||
33 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
34 | sizeof(*params), GXIO_USB_HOST_OP_CFG_INTERRUPT); | ||
35 | } | ||
36 | |||
37 | EXPORT_SYMBOL(gxio_usb_host_cfg_interrupt); | ||
38 | |||
39 | struct register_client_memory_param { | ||
40 | HV_PTE pte; | ||
41 | unsigned int flags; | ||
42 | }; | ||
43 | |||
44 | int gxio_usb_host_register_client_memory(gxio_usb_host_context_t * context, | ||
45 | HV_PTE pte, unsigned int flags) | ||
46 | { | ||
47 | struct register_client_memory_param temp; | ||
48 | struct register_client_memory_param *params = &temp; | ||
49 | |||
50 | params->pte = pte; | ||
51 | params->flags = flags; | ||
52 | |||
53 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
54 | sizeof(*params), | ||
55 | GXIO_USB_HOST_OP_REGISTER_CLIENT_MEMORY); | ||
56 | } | ||
57 | |||
58 | EXPORT_SYMBOL(gxio_usb_host_register_client_memory); | ||
59 | |||
60 | struct get_mmio_base_param { | ||
61 | HV_PTE base; | ||
62 | }; | ||
63 | |||
64 | int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t * context, HV_PTE *base) | ||
65 | { | ||
66 | int __result; | ||
67 | struct get_mmio_base_param temp; | ||
68 | struct get_mmio_base_param *params = &temp; | ||
69 | |||
70 | __result = | ||
71 | hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), | ||
72 | GXIO_USB_HOST_OP_GET_MMIO_BASE); | ||
73 | *base = params->base; | ||
74 | |||
75 | return __result; | ||
76 | } | ||
77 | |||
78 | EXPORT_SYMBOL(gxio_usb_host_get_mmio_base); | ||
79 | |||
80 | struct check_mmio_offset_param { | ||
81 | unsigned long offset; | ||
82 | unsigned long size; | ||
83 | }; | ||
84 | |||
85 | int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t * context, | ||
86 | unsigned long offset, unsigned long size) | ||
87 | { | ||
88 | struct check_mmio_offset_param temp; | ||
89 | struct check_mmio_offset_param *params = &temp; | ||
90 | |||
91 | params->offset = offset; | ||
92 | params->size = size; | ||
93 | |||
94 | return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, | ||
95 | sizeof(*params), | ||
96 | GXIO_USB_HOST_OP_CHECK_MMIO_OFFSET); | ||
97 | } | ||
98 | |||
99 | EXPORT_SYMBOL(gxio_usb_host_check_mmio_offset); | ||
diff --git a/arch/tile/gxio/kiorpc.c b/arch/tile/gxio/kiorpc.c
new file mode 100644
index 000000000000..c8096aa5a3fc
--- /dev/null
+++ b/arch/tile/gxio/kiorpc.c
@@ -0,0 +1,61 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * TILE-Gx IORPC support for kernel I/O drivers. | ||
15 | */ | ||
16 | |||
17 | #include <linux/mmzone.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/io.h> | ||
20 | #include <gxio/iorpc_globals.h> | ||
21 | #include <gxio/kiorpc.h> | ||
22 | |||
23 | #ifdef DEBUG_IORPC | ||
24 | #define TRACE(FMT, ...) pr_info(SIMPLE_MSG_LINE FMT, ## __VA_ARGS__) | ||
25 | #else | ||
26 | #define TRACE(...) | ||
27 | #endif | ||
28 | |||
29 | /* Create kernel-VA-space MMIO mapping for an on-chip IO device. */ | ||
30 | void __iomem *iorpc_ioremap(int hv_fd, resource_size_t offset, | ||
31 | unsigned long size) | ||
32 | { | ||
33 | pgprot_t mmio_base, prot = { 0 }; | ||
34 | unsigned long pfn; | ||
35 | int err; | ||
36 | |||
37 | /* Look up the shim's lotar and base PA. */ | ||
38 | err = __iorpc_get_mmio_base(hv_fd, &mmio_base); | ||
39 | if (err) { | ||
40 | TRACE("get_mmio_base() failure: %d\n", err); | ||
41 | return NULL; | ||
42 | } | ||
43 | |||
44 | /* Make sure the HV driver approves of our offset and size. */ | ||
45 | err = __iorpc_check_mmio_offset(hv_fd, offset, size); | ||
46 | if (err) { | ||
47 | TRACE("check_mmio_offset() failure: %d\n", err); | ||
48 | return NULL; | ||
49 | } | ||
50 | |||
51 | /* | ||
52 | * mmio_base contains a base pfn and homing coordinates. Turn | ||
53 | * it into an MMIO pgprot and offset pfn. | ||
54 | */ | ||
55 | prot = hv_pte_set_lotar(prot, hv_pte_get_lotar(mmio_base)); | ||
56 | pfn = pte_pfn(mmio_base) + PFN_DOWN(offset); | ||
57 | |||
58 | return ioremap_prot(PFN_PHYS(pfn), size, prot); | ||
59 | } | ||
60 | |||
61 | EXPORT_SYMBOL(iorpc_ioremap); | ||
diff --git a/arch/tile/gxio/mpipe.c b/arch/tile/gxio/mpipe.c new file mode 100644 index 000000000000..e71c63390acc --- /dev/null +++ b/arch/tile/gxio/mpipe.c | |||
@@ -0,0 +1,545 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* | ||
16 | * Implementation of mpipe gxio calls. | ||
17 | */ | ||
18 | |||
19 | #include <linux/errno.h> | ||
20 | #include <linux/io.h> | ||
21 | #include <linux/module.h> | ||
22 | |||
23 | #include <gxio/iorpc_globals.h> | ||
24 | #include <gxio/iorpc_mpipe.h> | ||
25 | #include <gxio/iorpc_mpipe_info.h> | ||
26 | #include <gxio/kiorpc.h> | ||
27 | #include <gxio/mpipe.h> | ||
28 | |||
29 | /* HACK: Avoid pointless "shadow" warnings. */ | ||
30 | #define link link_shadow | ||
31 | |||
32 | int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index) | ||
33 | { | ||
34 | char file[32]; | ||
35 | |||
36 | int fd; | ||
37 | int i; | ||
38 | |||
39 | snprintf(file, sizeof(file), "mpipe/%d/iorpc", mpipe_index); | ||
40 | fd = hv_dev_open((HV_VirtAddr) file, 0); | ||
41 | if (fd < 0) { | ||
42 | if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX) | ||
43 | return fd; | ||
44 | else | ||
45 | return -ENODEV; | ||
46 | } | ||
47 | |||
48 | context->fd = fd; | ||
49 | |||
50 | /* Map in the MMIO space. */ | ||
51 | context->mmio_cfg_base = (void __force *) | ||
52 | iorpc_ioremap(fd, HV_MPIPE_CONFIG_MMIO_OFFSET, | ||
53 | HV_MPIPE_CONFIG_MMIO_SIZE); | ||
54 | if (context->mmio_cfg_base == NULL) | ||
55 | goto cfg_failed; | ||
56 | |||
57 | context->mmio_fast_base = (void __force *) | ||
58 | iorpc_ioremap(fd, HV_MPIPE_FAST_MMIO_OFFSET, | ||
59 | HV_MPIPE_FAST_MMIO_SIZE); | ||
60 | if (context->mmio_fast_base == NULL) | ||
61 | goto fast_failed; | ||
62 | |||
63 | /* Initialize the stacks. */ | ||
64 | for (i = 0; i < 8; i++) | ||
65 | context->__stacks.stacks[i] = 255; | ||
66 | |||
67 | return 0; | ||
68 | |||
69 | fast_failed: | ||
70 | iounmap((void __force __iomem *)(context->mmio_cfg_base)); | ||
71 | cfg_failed: | ||
72 | hv_dev_close(context->fd); | ||
73 | return -ENODEV; | ||
74 | } | ||
75 | |||
76 | EXPORT_SYMBOL_GPL(gxio_mpipe_init); | ||
77 | |||
78 | int gxio_mpipe_destroy(gxio_mpipe_context_t *context) | ||
79 | { | ||
80 | iounmap((void __force __iomem *)(context->mmio_cfg_base)); | ||
81 | iounmap((void __force __iomem *)(context->mmio_fast_base)); | ||
82 | return hv_dev_close(context->fd); | ||
83 | } | ||
84 | |||
85 | EXPORT_SYMBOL_GPL(gxio_mpipe_destroy); | ||
86 | |||
87 | static int16_t gxio_mpipe_buffer_sizes[8] = | ||
88 | { 128, 256, 512, 1024, 1664, 4096, 10368, 16384 }; | ||
89 | |||
90 | gxio_mpipe_buffer_size_enum_t gxio_mpipe_buffer_size_to_buffer_size_enum(size_t | ||
91 | size) | ||
92 | { | ||
93 | int i; | ||
94 | for (i = 0; i < 7; i++) | ||
95 | if (size <= gxio_mpipe_buffer_sizes[i]) | ||
96 | break; | ||
97 | return i; | ||
98 | } | ||
99 | |||
100 | EXPORT_SYMBOL_GPL(gxio_mpipe_buffer_size_to_buffer_size_enum); | ||
101 | |||
102 | size_t gxio_mpipe_buffer_size_enum_to_buffer_size(gxio_mpipe_buffer_size_enum_t | ||
103 | buffer_size_enum) | ||
104 | { | ||
105 | if (buffer_size_enum > 7) | ||
106 | buffer_size_enum = 7; | ||
107 | |||
108 | return gxio_mpipe_buffer_sizes[buffer_size_enum]; | ||
109 | } | ||
110 | |||
111 | EXPORT_SYMBOL_GPL(gxio_mpipe_buffer_size_enum_to_buffer_size); | ||
112 | |||
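These two helpers map between byte counts and the eight fixed hardware buffer classes in gxio_mpipe_buffer_sizes[]: a request rounds up to the next supported size, and anything larger than 10368 bytes lands in the 16384-byte class. A minimal sketch of the round trip, using only the functions defined above (the pr_info() line just restates what the size table implies):

    /* Sketch: a 1500-byte MTU rounds up to the 1664-byte buffer class. */
    #include <linux/printk.h>
    #include <gxio/mpipe.h>

    static void example_buffer_size_round_trip(void)
    {
            gxio_mpipe_buffer_size_enum_t e =
                    gxio_mpipe_buffer_size_to_buffer_size_enum(1500);
            size_t bytes = gxio_mpipe_buffer_size_enum_to_buffer_size(e);

            pr_info("enum %d -> %zu bytes\n", e, bytes);  /* enum 4 -> 1664 bytes */
    }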
113 | size_t gxio_mpipe_calc_buffer_stack_bytes(unsigned long buffers) | ||
114 | { | ||
115 | const int BUFFERS_PER_LINE = 12; | ||
116 | |||
117 | /* Count the number of cachelines. */ | ||
118 | unsigned long lines = | ||
119 | (buffers + BUFFERS_PER_LINE - 1) / BUFFERS_PER_LINE; | ||
120 | |||
121 | /* Convert to bytes. */ | ||
122 | return lines * CHIP_L2_LINE_SIZE(); | ||
123 | } | ||
124 | |||
125 | EXPORT_SYMBOL_GPL(gxio_mpipe_calc_buffer_stack_bytes); | ||
126 | |||
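The stack memory only holds buffer pointers, packed BUFFERS_PER_LINE (12) to a cacheline, so the requirement scales as ceil(buffers / 12) cachelines. A quick worked example, assuming the 64-byte L2 line size that CHIP_L2_LINE_SIZE() reports on tilegx:

    /* Sketch: stack memory needed for 1000 buffers, assuming 64-byte L2 lines. */
    size_t bytes = gxio_mpipe_calc_buffer_stack_bytes(1000);
    /* lines = (1000 + 12 - 1) / 12 = 84, so bytes = 84 * 64 = 5376 */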
127 | int gxio_mpipe_init_buffer_stack(gxio_mpipe_context_t *context, | ||
128 | unsigned int stack, | ||
129 | gxio_mpipe_buffer_size_enum_t | ||
130 | buffer_size_enum, void *mem, size_t mem_size, | ||
131 | unsigned int mem_flags) | ||
132 | { | ||
133 | int result; | ||
134 | |||
135 | memset(mem, 0, mem_size); | ||
136 | |||
137 | result = gxio_mpipe_init_buffer_stack_aux(context, mem, mem_size, | ||
138 | mem_flags, stack, | ||
139 | buffer_size_enum); | ||
140 | if (result < 0) | ||
141 | return result; | ||
142 | |||
143 | /* Save the stack. */ | ||
144 | context->__stacks.stacks[buffer_size_enum] = stack; | ||
145 | |||
146 | return 0; | ||
147 | } | ||
148 | |||
149 | EXPORT_SYMBOL_GPL(gxio_mpipe_init_buffer_stack); | ||
150 | |||
151 | int gxio_mpipe_init_notif_ring(gxio_mpipe_context_t *context, | ||
152 | unsigned int ring, | ||
153 | void *mem, size_t mem_size, | ||
154 | unsigned int mem_flags) | ||
155 | { | ||
156 | return gxio_mpipe_init_notif_ring_aux(context, mem, mem_size, | ||
157 | mem_flags, ring); | ||
158 | } | ||
159 | |||
160 | EXPORT_SYMBOL_GPL(gxio_mpipe_init_notif_ring); | ||
161 | |||
162 | int gxio_mpipe_init_notif_group_and_buckets(gxio_mpipe_context_t *context, | ||
163 | unsigned int group, | ||
164 | unsigned int ring, | ||
165 | unsigned int num_rings, | ||
166 | unsigned int bucket, | ||
167 | unsigned int num_buckets, | ||
168 | gxio_mpipe_bucket_mode_t mode) | ||
169 | { | ||
170 | int i; | ||
171 | int result; | ||
172 | |||
173 | gxio_mpipe_bucket_info_t bucket_info = { { | ||
174 | .group = group, | ||
175 | .mode = mode, | ||
176 | } | ||
177 | }; | ||
178 | |||
179 | gxio_mpipe_notif_group_bits_t bits = { {0} }; | ||
180 | |||
181 | for (i = 0; i < num_rings; i++) | ||
182 | gxio_mpipe_notif_group_add_ring(&bits, ring + i); | ||
183 | |||
184 | result = gxio_mpipe_init_notif_group(context, group, bits); | ||
185 | if (result != 0) | ||
186 | return result; | ||
187 | |||
188 | for (i = 0; i < num_buckets; i++) { | ||
189 | bucket_info.notifring = ring + (i % num_rings); | ||
190 | |||
191 | result = gxio_mpipe_init_bucket(context, bucket + i, | ||
192 | bucket_info); | ||
193 | if (result != 0) | ||
194 | return result; | ||
195 | } | ||
196 | |||
197 | return 0; | ||
198 | } | ||
199 | |||
200 | EXPORT_SYMBOL_GPL(gxio_mpipe_init_notif_group_and_buckets); | ||
201 | |||
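This convenience routine wires one NotifGroup to a contiguous span of rings and buckets in a single shot, spreading the buckets round-robin across the rings. A usage sketch follows, assuming a gxio_mpipe_context_t named context already opened with gxio_mpipe_init(); the group/ring/bucket numbers are arbitrary placeholders, and the mode is passed via the raw MODE_VAL_STICKY constant from <arch/mpipe_def.h> since the friendlier enum names live outside this file:

    /* Sketch: group 0 fed by rings 0-3, with 256 buckets using sticky
     * (flow-affine) load balancing.  num_buckets is kept a power of two
     * so the later gxio_mpipe_rules_begin() bucket-mask check passes.
     */
    int err = gxio_mpipe_init_notif_group_and_buckets(&context, 0 /* group */,
                    0, 4,           /* first ring, number of rings */
                    0, 256,         /* first bucket, number of buckets */
                    (gxio_mpipe_bucket_mode_t)
                    MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_STICKY);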
202 | int gxio_mpipe_init_edma_ring(gxio_mpipe_context_t *context, | ||
203 | unsigned int ring, unsigned int channel, | ||
204 | void *mem, size_t mem_size, | ||
205 | unsigned int mem_flags) | ||
206 | { | ||
207 | memset(mem, 0, mem_size); | ||
208 | |||
209 | return gxio_mpipe_init_edma_ring_aux(context, mem, mem_size, mem_flags, | ||
210 | ring, channel); | ||
211 | } | ||
212 | |||
213 | EXPORT_SYMBOL_GPL(gxio_mpipe_init_edma_ring); | ||
214 | |||
215 | void gxio_mpipe_rules_init(gxio_mpipe_rules_t *rules, | ||
216 | gxio_mpipe_context_t *context) | ||
217 | { | ||
218 | rules->context = context; | ||
219 | memset(&rules->list, 0, sizeof(rules->list)); | ||
220 | } | ||
221 | |||
222 | EXPORT_SYMBOL_GPL(gxio_mpipe_rules_init); | ||
223 | |||
224 | int gxio_mpipe_rules_begin(gxio_mpipe_rules_t *rules, | ||
225 | unsigned int bucket, unsigned int num_buckets, | ||
226 | gxio_mpipe_rules_stacks_t *stacks) | ||
227 | { | ||
228 | int i; | ||
229 | int stack = 255; | ||
230 | |||
231 | gxio_mpipe_rules_list_t *list = &rules->list; | ||
232 | |||
233 | /* Current rule. */ | ||
234 | gxio_mpipe_rules_rule_t *rule = | ||
235 | (gxio_mpipe_rules_rule_t *) (list->rules + list->head); | ||
236 | |||
237 | unsigned int head = list->tail; | ||
238 | |||
239 | /* | ||
240 | * Align next rule properly. | ||
241 | * Note that "dmacs_and_vlans" will also be aligned. | ||
242 | */ | ||
243 | unsigned int pad = 0; | ||
244 | while (((head + pad) % __alignof__(gxio_mpipe_rules_rule_t)) != 0) | ||
245 | pad++; | ||
246 | |||
247 | /* | ||
248 | * Verify room. | ||
249 | * ISSUE: Mark rules as broken on error? | ||
250 | */ | ||
251 | if (head + pad + sizeof(*rule) >= sizeof(list->rules)) | ||
252 | return GXIO_MPIPE_ERR_RULES_FULL; | ||
253 | |||
254 | /* Verify num_buckets is a power of 2. */ | ||
255 | if (__builtin_popcount(num_buckets) != 1) | ||
256 | return GXIO_MPIPE_ERR_RULES_INVALID; | ||
257 | |||
258 | /* Add padding to previous rule. */ | ||
259 | rule->size += pad; | ||
260 | |||
261 | /* Start a new rule. */ | ||
262 | list->head = head + pad; | ||
263 | |||
264 | rule = (gxio_mpipe_rules_rule_t *) (list->rules + list->head); | ||
265 | |||
266 | /* Default some values. */ | ||
267 | rule->headroom = 2; | ||
268 | rule->tailroom = 0; | ||
269 | rule->capacity = 16384; | ||
270 | |||
271 | /* Save the bucket info. */ | ||
272 | rule->bucket_mask = num_buckets - 1; | ||
273 | rule->bucket_first = bucket; | ||
274 | |||
275 | for (i = 8 - 1; i >= 0; i--) { | ||
276 | int maybe = | ||
277 | stacks ? stacks->stacks[i] : rules->context->__stacks. | ||
278 | stacks[i]; | ||
279 | if (maybe != 255) | ||
280 | stack = maybe; | ||
281 | rule->stacks.stacks[i] = stack; | ||
282 | } | ||
283 | |||
284 | if (stack == 255) | ||
285 | return GXIO_MPIPE_ERR_RULES_INVALID; | ||
286 | |||
287 | /* NOTE: Only entries at the end of the array can be 255. */ | ||
288 | for (i = 8 - 1; i > 0; i--) { | ||
289 | if (rule->stacks.stacks[i] == 255) { | ||
290 | rule->stacks.stacks[i] = stack; | ||
291 | rule->capacity = | ||
292 | gxio_mpipe_buffer_size_enum_to_buffer_size(i - | ||
293 | 1); | ||
294 | } | ||
295 | } | ||
296 | |||
297 | rule->size = sizeof(*rule); | ||
298 | list->tail = list->head + rule->size; | ||
299 | |||
300 | return 0; | ||
301 | } | ||
302 | |||
303 | EXPORT_SYMBOL_GPL(gxio_mpipe_rules_begin); | ||
304 | |||
305 | int gxio_mpipe_rules_add_channel(gxio_mpipe_rules_t *rules, | ||
306 | unsigned int channel) | ||
307 | { | ||
308 | gxio_mpipe_rules_list_t *list = &rules->list; | ||
309 | |||
310 | gxio_mpipe_rules_rule_t *rule = | ||
311 | (gxio_mpipe_rules_rule_t *) (list->rules + list->head); | ||
312 | |||
313 | /* Verify channel. */ | ||
314 | if (channel >= 32) | ||
315 | return GXIO_MPIPE_ERR_RULES_INVALID; | ||
316 | |||
317 | /* Verify begun. */ | ||
318 | if (list->tail == 0) | ||
319 | return GXIO_MPIPE_ERR_RULES_EMPTY; | ||
320 | |||
321 | rule->channel_bits |= (1UL << channel); | ||
322 | |||
323 | return 0; | ||
324 | } | ||
325 | |||
326 | EXPORT_SYMBOL_GPL(gxio_mpipe_rules_add_channel); | ||
327 | |||
328 | int gxio_mpipe_rules_set_headroom(gxio_mpipe_rules_t *rules, uint8_t headroom) | ||
329 | { | ||
330 | gxio_mpipe_rules_list_t *list = &rules->list; | ||
331 | |||
332 | gxio_mpipe_rules_rule_t *rule = | ||
333 | (gxio_mpipe_rules_rule_t *) (list->rules + list->head); | ||
334 | |||
335 | /* Verify begun. */ | ||
336 | if (list->tail == 0) | ||
337 | return GXIO_MPIPE_ERR_RULES_EMPTY; | ||
338 | |||
339 | rule->headroom = headroom; | ||
340 | |||
341 | return 0; | ||
342 | } | ||
343 | |||
344 | EXPORT_SYMBOL_GPL(gxio_mpipe_rules_set_headroom); | ||
345 | |||
346 | int gxio_mpipe_rules_commit(gxio_mpipe_rules_t *rules) | ||
347 | { | ||
348 | gxio_mpipe_rules_list_t *list = &rules->list; | ||
349 | unsigned int size = | ||
350 | offsetof(gxio_mpipe_rules_list_t, rules) + list->tail; | ||
351 | return gxio_mpipe_commit_rules(rules->context, list, size); | ||
352 | } | ||
353 | |||
354 | EXPORT_SYMBOL_GPL(gxio_mpipe_rules_commit); | ||
355 | |||
356 | int gxio_mpipe_iqueue_init(gxio_mpipe_iqueue_t *iqueue, | ||
357 | gxio_mpipe_context_t *context, | ||
358 | unsigned int ring, | ||
359 | void *mem, size_t mem_size, unsigned int mem_flags) | ||
360 | { | ||
361 | /* The init call below will verify that "mem_size" is legal. */ | ||
362 | unsigned int num_entries = mem_size / sizeof(gxio_mpipe_idesc_t); | ||
363 | |||
364 | iqueue->context = context; | ||
365 | iqueue->idescs = (gxio_mpipe_idesc_t *)mem; | ||
366 | iqueue->ring = ring; | ||
367 | iqueue->num_entries = num_entries; | ||
368 | iqueue->mask_num_entries = num_entries - 1; | ||
369 | iqueue->log2_num_entries = __builtin_ctz(num_entries); | ||
370 | iqueue->head = 1; | ||
371 | #ifdef __BIG_ENDIAN__ | ||
372 | iqueue->swapped = 0; | ||
373 | #endif | ||
374 | |||
375 | /* Initialize the "tail". */ | ||
376 | __gxio_mmio_write(mem, iqueue->head); | ||
377 | |||
378 | return gxio_mpipe_init_notif_ring(context, ring, mem, mem_size, | ||
379 | mem_flags); | ||
380 | } | ||
381 | |||
382 | EXPORT_SYMBOL_GPL(gxio_mpipe_iqueue_init); | ||
383 | |||
384 | int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue, | ||
385 | gxio_mpipe_context_t *context, | ||
386 | unsigned int edma_ring_id, | ||
387 | unsigned int channel, | ||
388 | void *mem, unsigned int mem_size, | ||
389 | unsigned int mem_flags) | ||
390 | { | ||
391 | /* The init call below will verify that "mem_size" is legal. */ | ||
392 | unsigned int num_entries = mem_size / sizeof(gxio_mpipe_edesc_t); | ||
393 | |||
394 | /* Offset used to read number of completed commands. */ | ||
395 | MPIPE_EDMA_POST_REGION_ADDR_t offset; | ||
396 | |||
397 | int result = gxio_mpipe_init_edma_ring(context, edma_ring_id, channel, | ||
398 | mem, mem_size, mem_flags); | ||
399 | if (result < 0) | ||
400 | return result; | ||
401 | |||
402 | memset(equeue, 0, sizeof(*equeue)); | ||
403 | |||
404 | offset.word = 0; | ||
405 | offset.region = | ||
406 | MPIPE_MMIO_ADDR__REGION_VAL_EDMA - | ||
407 | MPIPE_MMIO_ADDR__REGION_VAL_IDMA; | ||
408 | offset.ring = edma_ring_id; | ||
409 | |||
410 | __gxio_dma_queue_init(&equeue->dma_queue, | ||
411 | context->mmio_fast_base + offset.word, | ||
412 | num_entries); | ||
413 | equeue->edescs = mem; | ||
414 | equeue->mask_num_entries = num_entries - 1; | ||
415 | equeue->log2_num_entries = __builtin_ctz(num_entries); | ||
416 | |||
417 | return 0; | ||
418 | } | ||
419 | |||
420 | EXPORT_SYMBOL_GPL(gxio_mpipe_equeue_init); | ||
421 | |||
422 | int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context, | ||
423 | const struct timespec *ts) | ||
424 | { | ||
425 | cycles_t cycles = get_cycles(); | ||
426 | return gxio_mpipe_set_timestamp_aux(context, (uint64_t)ts->tv_sec, | ||
427 | (uint64_t)ts->tv_nsec, | ||
428 | (uint64_t)cycles); | ||
429 | } | ||
430 | |||
431 | int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context, | ||
432 | struct timespec *ts) | ||
433 | { | ||
434 | int ret; | ||
435 | cycles_t cycles_prev, cycles_now, clock_rate; | ||
436 | cycles_prev = get_cycles(); | ||
437 | ret = gxio_mpipe_get_timestamp_aux(context, (uint64_t *)&ts->tv_sec, | ||
438 | (uint64_t *)&ts->tv_nsec, | ||
439 | (uint64_t *)&cycles_now); | ||
440 | if (ret < 0) { | ||
441 | return ret; | ||
442 | } | ||
443 | |||
444 | clock_rate = get_clock_rate(); | ||
445 | ts->tv_nsec -= (cycles_now - cycles_prev) * 1000000000LL / clock_rate; | ||
446 | if (ts->tv_nsec < 0) { | ||
447 | ts->tv_nsec += 1000000000LL; | ||
448 | ts->tv_sec -= 1; | ||
449 | } | ||
450 | return ret; | ||
451 | } | ||
452 | |||
453 | int gxio_mpipe_adjust_timestamp(gxio_mpipe_context_t *context, int64_t delta) | ||
454 | { | ||
455 | return gxio_mpipe_adjust_timestamp_aux(context, delta); | ||
456 | } | ||
457 | |||
458 | /* Get our internal context used for link name access. This context is | ||
459 | * special in that it is not associated with an mPIPE service domain. | ||
460 | */ | ||
461 | static gxio_mpipe_context_t *_gxio_get_link_context(void) | ||
462 | { | ||
463 | static gxio_mpipe_context_t context; | ||
464 | static gxio_mpipe_context_t *contextp; | ||
465 | static int tried_open = 0; | ||
466 | static DEFINE_MUTEX(mutex); | ||
467 | |||
468 | mutex_lock(&mutex); | ||
469 | |||
470 | if (!tried_open) { | ||
471 | int i = 0; | ||
472 | tried_open = 1; | ||
473 | |||
474 | /* | ||
475 | * "4" here is the maximum possible number of mPIPE shims; it's | ||
476 | * an exaggeration but we shouldn't ever go beyond 2 anyway. | ||
477 | */ | ||
478 | for (i = 0; i < 4; i++) { | ||
479 | char file[80]; | ||
480 | |||
481 | snprintf(file, sizeof(file), "mpipe/%d/iorpc_info", i); | ||
482 | context.fd = hv_dev_open((HV_VirtAddr) file, 0); | ||
483 | if (context.fd < 0) | ||
484 | continue; | ||
485 | |||
486 | contextp = &context; | ||
487 | break; | ||
488 | } | ||
489 | } | ||
490 | |||
491 | mutex_unlock(&mutex); | ||
492 | |||
493 | return contextp; | ||
494 | } | ||
495 | |||
496 | int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac) | ||
497 | { | ||
498 | int rv; | ||
499 | _gxio_mpipe_link_name_t name; | ||
500 | _gxio_mpipe_link_mac_t mac; | ||
501 | |||
502 | gxio_mpipe_context_t *context = _gxio_get_link_context(); | ||
503 | if (!context) | ||
504 | return GXIO_ERR_NO_DEVICE; | ||
505 | |||
506 | rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac); | ||
507 | if (rv >= 0) { | ||
508 | strncpy(link_name, name.name, sizeof(name.name)); | ||
509 | memcpy(link_mac, mac.mac, sizeof(mac.mac)); | ||
510 | } | ||
511 | |||
512 | return rv; | ||
513 | } | ||
514 | |||
515 | EXPORT_SYMBOL_GPL(gxio_mpipe_link_enumerate_mac); | ||
516 | |||
517 | int gxio_mpipe_link_open(gxio_mpipe_link_t *link, | ||
518 | gxio_mpipe_context_t *context, const char *link_name, | ||
519 | unsigned int flags) | ||
520 | { | ||
521 | _gxio_mpipe_link_name_t name; | ||
522 | int rv; | ||
523 | |||
524 | strncpy(name.name, link_name, sizeof(name.name)); | ||
525 | name.name[GXIO_MPIPE_LINK_NAME_LEN - 1] = '\0'; | ||
526 | |||
527 | rv = gxio_mpipe_link_open_aux(context, name, flags); | ||
528 | if (rv < 0) | ||
529 | return rv; | ||
530 | |||
531 | link->context = context; | ||
532 | link->channel = rv >> 8; | ||
533 | link->mac = rv & 0xFF; | ||
534 | |||
535 | return 0; | ||
536 | } | ||
537 | |||
538 | EXPORT_SYMBOL_GPL(gxio_mpipe_link_open); | ||
539 | |||
540 | int gxio_mpipe_link_close(gxio_mpipe_link_t *link) | ||
541 | { | ||
542 | return gxio_mpipe_link_close_aux(link->context, link->mac); | ||
543 | } | ||
544 | |||
545 | EXPORT_SYMBOL_GPL(gxio_mpipe_link_close); | ||
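Taken together, the routines in this file give a kernel client the usual ingress bring-up sequence: open a context, register buffer-stack and NotifRing memory, bind rings and buckets into a group, open a link, and install a classification rule for that link's channel. The sketch below condenses that flow; the link name "xgbe0", the ring/bucket/stack indices and the omitted memory allocation and alignment are placeholders, and real callers must check every return code even where handling is abbreviated here:

    /* Sketch: one buffer stack, one ingress ring, one rule for link "xgbe0". */
    #include <gxio/mpipe.h>

    static gxio_mpipe_context_t ctx;
    static gxio_mpipe_iqueue_t iqueue;

    static int example_mpipe_bringup(void *stack_mem, size_t stack_bytes,
                                     void *ring_mem, size_t ring_bytes)
    {
            gxio_mpipe_rules_t rules;
            gxio_mpipe_link_t link;
            int rc;

            rc = gxio_mpipe_init(&ctx, 0);          /* mPIPE shim 0 */
            if (rc)
                    return rc;

            /* Stack 0 holds 1664-byte buffers (the class a 1500 MTU needs). */
            rc = gxio_mpipe_init_buffer_stack(&ctx, 0,
                    gxio_mpipe_buffer_size_to_buffer_size_enum(1664),
                    stack_mem, stack_bytes, 0);
            if (rc)
                    return rc;

            /* NotifRing 0, exposed to software as an iqueue. */
            rc = gxio_mpipe_iqueue_init(&iqueue, &ctx, 0,
                                        ring_mem, ring_bytes, 0);
            if (rc)
                    return rc;

            /* Group 0: ring 0 only, one bucket, sticky load balancing. */
            rc = gxio_mpipe_init_notif_group_and_buckets(&ctx, 0, 0, 1, 0, 1,
                    (gxio_mpipe_bucket_mode_t)
                    MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_STICKY);
            if (rc)
                    return rc;

            rc = gxio_mpipe_link_open(&link, &ctx, "xgbe0", 0);
            if (rc)
                    return rc;

            /* Steer that link's channel at bucket 0. */
            gxio_mpipe_rules_init(&rules, &ctx);
            gxio_mpipe_rules_begin(&rules, 0, 1, NULL);
            gxio_mpipe_rules_add_channel(&rules, link.channel);
            return gxio_mpipe_rules_commit(&rules);
    }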
diff --git a/arch/tile/gxio/trio.c b/arch/tile/gxio/trio.c new file mode 100644 index 000000000000..69f0b8df3ce3 --- /dev/null +++ b/arch/tile/gxio/trio.c | |||
@@ -0,0 +1,49 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* | ||
16 | * Implementation of trio gxio calls. | ||
17 | */ | ||
18 | |||
19 | #include <linux/errno.h> | ||
20 | #include <linux/io.h> | ||
21 | #include <linux/module.h> | ||
22 | |||
23 | #include <gxio/trio.h> | ||
24 | #include <gxio/iorpc_globals.h> | ||
25 | #include <gxio/iorpc_trio.h> | ||
26 | #include <gxio/kiorpc.h> | ||
27 | |||
28 | int gxio_trio_init(gxio_trio_context_t *context, unsigned int trio_index) | ||
29 | { | ||
30 | char file[32]; | ||
31 | int fd; | ||
32 | |||
33 | snprintf(file, sizeof(file), "trio/%d/iorpc", trio_index); | ||
34 | fd = hv_dev_open((HV_VirtAddr) file, 0); | ||
35 | if (fd < 0) { | ||
36 | context->fd = -1; | ||
37 | |||
38 | if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX) | ||
39 | return fd; | ||
40 | else | ||
41 | return -ENODEV; | ||
42 | } | ||
43 | |||
44 | context->fd = fd; | ||
45 | |||
46 | return 0; | ||
47 | } | ||
48 | |||
49 | EXPORT_SYMBOL_GPL(gxio_trio_init); | ||
diff --git a/arch/tile/gxio/usb_host.c b/arch/tile/gxio/usb_host.c new file mode 100644 index 000000000000..66b002f54ecc --- /dev/null +++ b/arch/tile/gxio/usb_host.c | |||
@@ -0,0 +1,91 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* | ||
16 | * | ||
17 | * Implementation of USB gxio calls. | ||
18 | */ | ||
19 | |||
20 | #include <linux/io.h> | ||
21 | #include <linux/errno.h> | ||
22 | #include <linux/module.h> | ||
23 | |||
24 | #include <gxio/iorpc_globals.h> | ||
25 | #include <gxio/iorpc_usb_host.h> | ||
26 | #include <gxio/kiorpc.h> | ||
27 | #include <gxio/usb_host.h> | ||
28 | |||
29 | int gxio_usb_host_init(gxio_usb_host_context_t *context, int usb_index, | ||
30 | int is_ehci) | ||
31 | { | ||
32 | char file[32]; | ||
33 | int fd; | ||
34 | |||
35 | if (is_ehci) | ||
36 | snprintf(file, sizeof(file), "usb_host/%d/iorpc/ehci", | ||
37 | usb_index); | ||
38 | else | ||
39 | snprintf(file, sizeof(file), "usb_host/%d/iorpc/ohci", | ||
40 | usb_index); | ||
41 | |||
42 | fd = hv_dev_open((HV_VirtAddr) file, 0); | ||
43 | if (fd < 0) { | ||
44 | if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX) | ||
45 | return fd; | ||
46 | else | ||
47 | return -ENODEV; | ||
48 | } | ||
49 | |||
50 | context->fd = fd; | ||
51 | |||
52 | /* Map in the MMIO space. */ | ||
53 | context->mmio_base = | ||
54 | (void __force *)iorpc_ioremap(fd, 0, HV_USB_HOST_MMIO_SIZE); | ||
55 | |||
56 | if (context->mmio_base == NULL) { | ||
57 | hv_dev_close(context->fd); | ||
58 | return -ENODEV; | ||
59 | } | ||
60 | |||
61 | return 0; | ||
62 | } | ||
63 | |||
64 | EXPORT_SYMBOL_GPL(gxio_usb_host_init); | ||
65 | |||
66 | int gxio_usb_host_destroy(gxio_usb_host_context_t *context) | ||
67 | { | ||
68 | iounmap((void __force __iomem *)(context->mmio_base)); | ||
69 | hv_dev_close(context->fd); | ||
70 | |||
71 | context->mmio_base = NULL; | ||
72 | context->fd = -1; | ||
73 | |||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | EXPORT_SYMBOL_GPL(gxio_usb_host_destroy); | ||
78 | |||
79 | void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t *context) | ||
80 | { | ||
81 | return context->mmio_base; | ||
82 | } | ||
83 | |||
84 | EXPORT_SYMBOL_GPL(gxio_usb_host_get_reg_start); | ||
85 | |||
86 | size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t *context) | ||
87 | { | ||
88 | return HV_USB_HOST_MMIO_SIZE; | ||
89 | } | ||
90 | |||
91 | EXPORT_SYMBOL_GPL(gxio_usb_host_get_reg_len); | ||
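These wrappers are everything a host-controller driver needs from the shim: open the context for the EHCI or OHCI interface, hand the mapped register window to the USB core, and tear the mapping down on remove. A minimal sketch of that pattern (the surrounding platform-device plumbing is elided; controller index 0 and EHCI mode are placeholders):

    /* Sketch: bring up tilegx USB host controller 0 in EHCI mode. */
    #include <linux/printk.h>
    #include <gxio/usb_host.h>

    static gxio_usb_host_context_t usb_ctx;

    static int example_usb_probe(void)
    {
            int rc = gxio_usb_host_init(&usb_ctx, 0, 1 /* is_ehci */);
            if (rc)
                    return rc;

            /* The register window is now mapped in kernel VA space. */
            pr_info("usb regs at %p, %zu bytes\n",
                    gxio_usb_host_get_reg_start(&usb_ctx),
                    gxio_usb_host_get_reg_len(&usb_ctx));
            return 0;
    }

    static void example_usb_remove(void)
    {
            gxio_usb_host_destroy(&usb_ctx);
    }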
diff --git a/arch/tile/include/arch/mpipe.h b/arch/tile/include/arch/mpipe.h new file mode 100644 index 000000000000..8a33912fd6cc --- /dev/null +++ b/arch/tile/include/arch/mpipe.h | |||
@@ -0,0 +1,359 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* Machine-generated file; do not edit. */ | ||
16 | |||
17 | #ifndef __ARCH_MPIPE_H__ | ||
18 | #define __ARCH_MPIPE_H__ | ||
19 | |||
20 | #include <arch/abi.h> | ||
21 | #include <arch/mpipe_def.h> | ||
22 | |||
23 | #ifndef __ASSEMBLER__ | ||
24 | |||
25 | /* | ||
26 | * MMIO Ingress DMA Release Region Address. | ||
27 | * This is a description of the physical addresses used to manipulate ingress | ||
28 | * credit counters. Accesses to this address space should use an address of | ||
29 | * this form and a value like that specified in IDMA_RELEASE_REGION_VAL. | ||
30 | */ | ||
31 | |||
32 | __extension__ | ||
33 | typedef union | ||
34 | { | ||
35 | struct | ||
36 | { | ||
37 | #ifndef __BIG_ENDIAN__ | ||
38 | /* Reserved. */ | ||
39 | uint_reg_t __reserved_0 : 3; | ||
40 | /* NotifRing to be released */ | ||
41 | uint_reg_t ring : 8; | ||
42 | /* Bucket to be released */ | ||
43 | uint_reg_t bucket : 13; | ||
44 | /* Enable NotifRing release */ | ||
45 | uint_reg_t ring_enable : 1; | ||
46 | /* Enable Bucket release */ | ||
47 | uint_reg_t bucket_enable : 1; | ||
48 | /* | ||
49 | * This field of the address selects the region (address space) to be | ||
50 | * accessed. For the iDMA release region, this field must be 4. | ||
51 | */ | ||
52 | uint_reg_t region : 3; | ||
53 | /* Reserved. */ | ||
54 | uint_reg_t __reserved_1 : 6; | ||
55 | /* This field of the address indexes the 32 entry service domain table. */ | ||
56 | uint_reg_t svc_dom : 5; | ||
57 | /* Reserved. */ | ||
58 | uint_reg_t __reserved_2 : 24; | ||
59 | #else /* __BIG_ENDIAN__ */ | ||
60 | uint_reg_t __reserved_2 : 24; | ||
61 | uint_reg_t svc_dom : 5; | ||
62 | uint_reg_t __reserved_1 : 6; | ||
63 | uint_reg_t region : 3; | ||
64 | uint_reg_t bucket_enable : 1; | ||
65 | uint_reg_t ring_enable : 1; | ||
66 | uint_reg_t bucket : 13; | ||
67 | uint_reg_t ring : 8; | ||
68 | uint_reg_t __reserved_0 : 3; | ||
69 | #endif | ||
70 | }; | ||
71 | |||
72 | uint_reg_t word; | ||
73 | } MPIPE_IDMA_RELEASE_REGION_ADDR_t; | ||
74 | |||
75 | /* | ||
76 | * MMIO Ingress DMA Release Region Value - Release NotifRing and/or Bucket. | ||
77 | * Provides release of the associated NotifRing. The address of the MMIO | ||
78 | * operation is described in IDMA_RELEASE_REGION_ADDR. | ||
79 | */ | ||
80 | |||
81 | __extension__ | ||
82 | typedef union | ||
83 | { | ||
84 | struct | ||
85 | { | ||
86 | #ifndef __BIG_ENDIAN__ | ||
87 | /* | ||
88 | * Number of packets being released. The load balancer's count of | ||
89 | * inflight packets will be decremented by this amount for the associated | ||
90 | * Bucket and/or NotifRing | ||
91 | */ | ||
92 | uint_reg_t count : 16; | ||
93 | /* Reserved. */ | ||
94 | uint_reg_t __reserved : 48; | ||
95 | #else /* __BIG_ENDIAN__ */ | ||
96 | uint_reg_t __reserved : 48; | ||
97 | uint_reg_t count : 16; | ||
98 | #endif | ||
99 | }; | ||
100 | |||
101 | uint_reg_t word; | ||
102 | } MPIPE_IDMA_RELEASE_REGION_VAL_t; | ||
103 | |||
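In practice a release is a single MMIO store: the address selects the NotifRing and/or bucket (region 4, the iDMA release region) and the stored value carries the packet count to credit back. The sketch below builds that store on top of the mmio_fast_base mapping created by gxio_mpipe_init(); the relative-offset arithmetic mirrors how gxio_mpipe_equeue_init() computes its eDMA offset and is an assumption of this sketch, since real callers normally use the gxio library's inline credit helpers:

    /* Sketch: return 1 packet's worth of credit to NotifRing 3 and bucket 17. */
    static void example_idma_release(gxio_mpipe_context_t *ctx)
    {
            MPIPE_IDMA_RELEASE_REGION_ADDR_t addr = { .word = 0 };
            MPIPE_IDMA_RELEASE_REGION_VAL_t val = { .word = 0 };

            /* mmio_fast_base is mapped at the iDMA region, so region and
             * svc_dom stay zero in this context-relative offset (assumption).
             */
            addr.ring = 3;
            addr.ring_enable = 1;
            addr.bucket = 17;
            addr.bucket_enable = 1;

            val.count = 1;

            __gxio_mmio_write(ctx->mmio_fast_base + addr.word, val.word);
    }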
104 | /* | ||
105 | * MMIO Buffer Stack Manager Region Address. | ||
106 | * This MMIO region is used for posting or fetching buffers to/from the | ||
107 | * buffer stack manager. On an MMIO load, this pops a buffer descriptor from | ||
108 | * the top of stack if one is available. On an MMIO store, this pushes a | ||
109 | * buffer to the stack. The value read or written is described in | ||
110 | * BSM_REGION_VAL. | ||
111 | */ | ||
112 | |||
113 | __extension__ | ||
114 | typedef union | ||
115 | { | ||
116 | struct | ||
117 | { | ||
118 | #ifndef __BIG_ENDIAN__ | ||
119 | /* Reserved. */ | ||
120 | uint_reg_t __reserved_0 : 3; | ||
121 | /* BufferStack being accessed. */ | ||
122 | uint_reg_t stack : 5; | ||
123 | /* Reserved. */ | ||
124 | uint_reg_t __reserved_1 : 18; | ||
125 | /* | ||
126 | * This field of the address selects the region (address space) to be | ||
127 | * accessed. For the buffer stack manager region, this field must be 6. | ||
128 | */ | ||
129 | uint_reg_t region : 3; | ||
130 | /* Reserved. */ | ||
131 | uint_reg_t __reserved_2 : 6; | ||
132 | /* This field of the address indexes the 32 entry service domain table. */ | ||
133 | uint_reg_t svc_dom : 5; | ||
134 | /* Reserved. */ | ||
135 | uint_reg_t __reserved_3 : 24; | ||
136 | #else /* __BIG_ENDIAN__ */ | ||
137 | uint_reg_t __reserved_3 : 24; | ||
138 | uint_reg_t svc_dom : 5; | ||
139 | uint_reg_t __reserved_2 : 6; | ||
140 | uint_reg_t region : 3; | ||
141 | uint_reg_t __reserved_1 : 18; | ||
142 | uint_reg_t stack : 5; | ||
143 | uint_reg_t __reserved_0 : 3; | ||
144 | #endif | ||
145 | }; | ||
146 | |||
147 | uint_reg_t word; | ||
148 | } MPIPE_BSM_REGION_ADDR_t; | ||
149 | |||
150 | /* | ||
151 | * MMIO Buffer Stack Manager Region Value. | ||
152 | * This MMIO region is used for posting or fetching buffers to/from the | ||
153 | * buffer stack manager. On an MMIO load, this pops a buffer descriptor from | ||
154 | * the top of stack if one is available. On an MMIO store, this pushes a | ||
155 | * buffer to the stack. The address of the MMIO operation is described in | ||
156 | * BSM_REGION_ADDR. | ||
157 | */ | ||
158 | |||
159 | __extension__ | ||
160 | typedef union | ||
161 | { | ||
162 | struct | ||
163 | { | ||
164 | #ifndef __BIG_ENDIAN__ | ||
165 | /* Reserved. */ | ||
166 | uint_reg_t __reserved_0 : 7; | ||
167 | /* | ||
168 | * Base virtual address of the buffer. Must be sign extended by consumer. | ||
169 | */ | ||
170 | int_reg_t va : 35; | ||
171 | /* Reserved. */ | ||
172 | uint_reg_t __reserved_1 : 6; | ||
173 | /* | ||
174 | * Index of the buffer stack to which this buffer belongs. Ignored on | ||
175 | * writes since the offset bits specify the stack being accessed. | ||
176 | */ | ||
177 | uint_reg_t stack_idx : 5; | ||
178 | /* Reserved. */ | ||
179 | uint_reg_t __reserved_2 : 5; | ||
180 | /* | ||
181 | * Reads as one to indicate that this is a hardware managed buffer. | ||
182 | * Ignored on writes since all buffers on a given stack are the same size. | ||
183 | */ | ||
184 | uint_reg_t hwb : 1; | ||
185 | /* | ||
186 | * Encoded size of buffer (ignored on writes): | ||
187 | * 0 = 128 bytes | ||
188 | * 1 = 256 bytes | ||
189 | * 2 = 512 bytes | ||
190 | * 3 = 1024 bytes | ||
191 | * 4 = 1664 bytes | ||
192 | * 5 = 4096 bytes | ||
193 | * 6 = 10368 bytes | ||
194 | * 7 = 16384 bytes | ||
195 | */ | ||
196 | uint_reg_t size : 3; | ||
197 | /* | ||
198 | * Valid indication for the buffer. Ignored on writes. | ||
199 | * 0 : Valid buffer descriptor popped from stack. | ||
200 | * 3 : Could not pop a buffer from the stack. Either the stack is empty, | ||
201 | * or the hardware's prefetch buffer is empty for this stack. | ||
202 | */ | ||
203 | uint_reg_t c : 2; | ||
204 | #else /* __BIG_ENDIAN__ */ | ||
205 | uint_reg_t c : 2; | ||
206 | uint_reg_t size : 3; | ||
207 | uint_reg_t hwb : 1; | ||
208 | uint_reg_t __reserved_2 : 5; | ||
209 | uint_reg_t stack_idx : 5; | ||
210 | uint_reg_t __reserved_1 : 6; | ||
211 | int_reg_t va : 35; | ||
212 | uint_reg_t __reserved_0 : 7; | ||
213 | #endif | ||
214 | }; | ||
215 | |||
216 | uint_reg_t word; | ||
217 | } MPIPE_BSM_REGION_VAL_t; | ||
218 | |||
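A buffer "pop" is thus an ordinary MMIO load from an address whose stack field names the stack, with the returned c field saying whether a descriptor actually came back, and a "push" is the mirror-image MMIO store. The sketch below shows both; as with the iDMA example above it assumes the offset is taken relative to mmio_fast_base (which this series maps at the iDMA region), and it assumes a __gxio_mmio_read() counterpart to the __gxio_mmio_write() helper used elsewhere in this series:

    /* Sketch: push one 128-byte-aligned buffer to stack 2, then pop one back. */
    static void *example_bsm_push_pop(gxio_mpipe_context_t *ctx, void *buf_va)
    {
            MPIPE_BSM_REGION_ADDR_t addr = { .word = 0 };
            MPIPE_BSM_REGION_VAL_t val = { .word = 0 };

            /* BSM is region 6; express it relative to the iDMA-based fast base. */
            addr.region = MPIPE_MMIO_ADDR__REGION_VAL_BSM -
                          MPIPE_MMIO_ADDR__REGION_VAL_IDMA;
            addr.stack = 2;

            /* Push: the VA field holds the buffer address shifted by VA_SHIFT. */
            val.va = (unsigned long)buf_va >> MPIPE_BSM_REGION_VAL__VA_SHIFT;
            __gxio_mmio_write(ctx->mmio_fast_base + addr.word, val.word);

            /* Pop: c == 0 means a valid buffer descriptor was returned. */
            val.word = __gxio_mmio_read(ctx->mmio_fast_base + addr.word);
            if (val.c != 0)
                    return NULL;
            return (void *)((long)val.va << MPIPE_BSM_REGION_VAL__VA_SHIFT);
    }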
219 | /* | ||
220 | * MMIO Egress DMA Post Region Address. | ||
221 | * Used to post descriptor locations to the eDMA descriptor engine. The | ||
222 | * value to be written is described in EDMA_POST_REGION_VAL | ||
223 | */ | ||
224 | |||
225 | __extension__ | ||
226 | typedef union | ||
227 | { | ||
228 | struct | ||
229 | { | ||
230 | #ifndef __BIG_ENDIAN__ | ||
231 | /* Reserved. */ | ||
232 | uint_reg_t __reserved_0 : 3; | ||
233 | /* eDMA ring being accessed */ | ||
234 | uint_reg_t ring : 5; | ||
235 | /* Reserved. */ | ||
236 | uint_reg_t __reserved_1 : 18; | ||
237 | /* | ||
238 | * This field of the address selects the region (address space) to be | ||
239 | * accessed. For the egress DMA post region, this field must be 5. | ||
240 | */ | ||
241 | uint_reg_t region : 3; | ||
242 | /* Reserved. */ | ||
243 | uint_reg_t __reserved_2 : 6; | ||
244 | /* This field of the address indexes the 32 entry service domain table. */ | ||
245 | uint_reg_t svc_dom : 5; | ||
246 | /* Reserved. */ | ||
247 | uint_reg_t __reserved_3 : 24; | ||
248 | #else /* __BIG_ENDIAN__ */ | ||
249 | uint_reg_t __reserved_3 : 24; | ||
250 | uint_reg_t svc_dom : 5; | ||
251 | uint_reg_t __reserved_2 : 6; | ||
252 | uint_reg_t region : 3; | ||
253 | uint_reg_t __reserved_1 : 18; | ||
254 | uint_reg_t ring : 5; | ||
255 | uint_reg_t __reserved_0 : 3; | ||
256 | #endif | ||
257 | }; | ||
258 | |||
259 | uint_reg_t word; | ||
260 | } MPIPE_EDMA_POST_REGION_ADDR_t; | ||
261 | |||
262 | /* | ||
263 | * MMIO Egress DMA Post Region Value. | ||
264 | * Used to post descriptor locations to the eDMA descriptor engine. The | ||
265 | * address is described in EDMA_POST_REGION_ADDR. | ||
266 | */ | ||
267 | |||
268 | __extension__ | ||
269 | typedef union | ||
270 | { | ||
271 | struct | ||
272 | { | ||
273 | #ifndef __BIG_ENDIAN__ | ||
274 | /* | ||
275 | * For writes, this specifies the current ring tail pointer prior to any | ||
276 | * post. For example, to post 1 or more descriptors starting at location | ||
277 | * 23, this would contain 23 (not 24). On writes, this index must be | ||
278 | * masked based on the ring size. The new tail pointer after this post | ||
279 | * is COUNT+RING_IDX (masked by the ring size). | ||
280 | * | ||
281 | * For reads, this provides the hardware descriptor fetcher's head | ||
282 | * pointer. The descriptors prior to the head pointer, however, may not | ||
283 | * yet have been processed so this indicator is only used to determine | ||
284 | * how full the ring is and if software may post more descriptors. | ||
285 | */ | ||
286 | uint_reg_t ring_idx : 16; | ||
287 | /* | ||
288 | * For writes, this specifies number of contiguous descriptors that are | ||
289 | * being posted. Software may post up to RingSize descriptors with a | ||
290 | * single MMIO store. A zero in this field on a write will "wake up" an | ||
291 | * eDMA ring and cause it to fetch descriptors regardless of the hardware's | ||
292 | * current view of the state of the tail pointer. | ||
293 | * | ||
294 | * For reads, this field provides a rolling count of the number of | ||
295 | * descriptors that have been completely processed. This may be used by | ||
296 | * software to determine when buffers associated with a descriptor may be | ||
297 | * returned or reused. When the ring's flush bit is cleared by software | ||
298 | * (after having been set by HW or SW), the COUNT will be cleared. | ||
299 | */ | ||
300 | uint_reg_t count : 16; | ||
301 | /* | ||
302 | * For writes, this specifies the generation number of the tail being | ||
303 | * posted. Note that if tail+cnt wraps to the beginning of the ring, the | ||
304 | * eDMA hardware assumes that the descriptors posted at the beginning of | ||
305 | * the ring are also valid so it is okay to post around the wrap point. | ||
306 | * | ||
307 | * For reads, this is the current generation number. Valid descriptors | ||
308 | * will have the inverse of this generation number. | ||
309 | */ | ||
310 | uint_reg_t gen : 1; | ||
311 | /* Reserved. */ | ||
312 | uint_reg_t __reserved : 31; | ||
313 | #else /* __BIG_ENDIAN__ */ | ||
314 | uint_reg_t __reserved : 31; | ||
315 | uint_reg_t gen : 1; | ||
316 | uint_reg_t count : 16; | ||
317 | uint_reg_t ring_idx : 16; | ||
318 | #endif | ||
319 | }; | ||
320 | |||
321 | uint_reg_t word; | ||
322 | } MPIPE_EDMA_POST_REGION_VAL_t; | ||
323 | |||
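Posting egress work therefore comes down to one MMIO store of this value at the ring's post address: ring_idx is the slot of the first newly written descriptor, count says how many follow, and gen must match the generation those descriptors carry. A sketch of the raw store (real code goes through the __gxio_dma_queue machinery that gxio_mpipe_equeue_init() sets up; the relative offset below mirrors that function's computation and is an assumption of this sketch):

    /* Sketch: tell ring 'ring' that 4 descriptors were written starting at 'slot'. */
    static void example_edma_post(gxio_mpipe_context_t *ctx, unsigned int ring,
                                  unsigned int slot, unsigned int gen)
    {
            MPIPE_EDMA_POST_REGION_ADDR_t addr = { .word = 0 };
            MPIPE_EDMA_POST_REGION_VAL_t val = { .word = 0 };

            addr.region = MPIPE_MMIO_ADDR__REGION_VAL_EDMA -
                          MPIPE_MMIO_ADDR__REGION_VAL_IDMA;  /* relative to fast base */
            addr.ring = ring;

            val.ring_idx = slot;    /* tail prior to this post, masked to ring size */
            val.count = 4;
            val.gen = gen;

            __gxio_mmio_write(ctx->mmio_fast_base + addr.word, val.word);
    }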
324 | /* | ||
325 | * Load Balancer Bucket Status Data. | ||
326 | * Read/Write data for load balancer Bucket-Status Table. 4160 entries | ||
327 | * indexed by LBL_INIT_CTL.IDX when LBL_INIT_CTL.STRUCT_SEL is BSTS_TBL | ||
328 | */ | ||
329 | |||
330 | __extension__ | ||
331 | typedef union | ||
332 | { | ||
333 | struct | ||
334 | { | ||
335 | #ifndef __BIG_ENDIAN__ | ||
336 | /* NotifRing currently assigned to this bucket. */ | ||
337 | uint_reg_t notifring : 8; | ||
338 | /* Current reference count. */ | ||
339 | uint_reg_t count : 16; | ||
340 | /* Group associated with this bucket. */ | ||
341 | uint_reg_t group : 5; | ||
342 | /* Mode select for this bucket. */ | ||
343 | uint_reg_t mode : 3; | ||
344 | /* Reserved. */ | ||
345 | uint_reg_t __reserved : 32; | ||
346 | #else /* __BIG_ENDIAN__ */ | ||
347 | uint_reg_t __reserved : 32; | ||
348 | uint_reg_t mode : 3; | ||
349 | uint_reg_t group : 5; | ||
350 | uint_reg_t count : 16; | ||
351 | uint_reg_t notifring : 8; | ||
352 | #endif | ||
353 | }; | ||
354 | |||
355 | uint_reg_t word; | ||
356 | } MPIPE_LBL_INIT_DAT_BSTS_TBL_t; | ||
357 | #endif /* !defined(__ASSEMBLER__) */ | ||
358 | |||
359 | #endif /* !defined(__ARCH_MPIPE_H__) */ | ||
diff --git a/arch/tile/include/arch/mpipe_constants.h b/arch/tile/include/arch/mpipe_constants.h new file mode 100644 index 000000000000..410a0400e055 --- /dev/null +++ b/arch/tile/include/arch/mpipe_constants.h | |||
@@ -0,0 +1,42 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | |||
16 | #ifndef __ARCH_MPIPE_CONSTANTS_H__ | ||
17 | #define __ARCH_MPIPE_CONSTANTS_H__ | ||
18 | |||
19 | #define MPIPE_NUM_CLASSIFIERS 10 | ||
20 | #define MPIPE_CLS_MHZ 1200 | ||
21 | |||
22 | #define MPIPE_NUM_EDMA_RINGS 32 | ||
23 | |||
24 | #define MPIPE_NUM_SGMII_MACS 16 | ||
25 | #define MPIPE_NUM_XAUI_MACS 4 | ||
26 | #define MPIPE_NUM_LOOPBACK_CHANNELS 4 | ||
27 | #define MPIPE_NUM_NON_LB_CHANNELS 28 | ||
28 | |||
29 | #define MPIPE_NUM_IPKT_BLOCKS 1536 | ||
30 | |||
31 | #define MPIPE_NUM_BUCKETS 4160 | ||
32 | |||
33 | #define MPIPE_NUM_NOTIF_RINGS 256 | ||
34 | |||
35 | #define MPIPE_NUM_NOTIF_GROUPS 32 | ||
36 | |||
37 | #define MPIPE_NUM_TLBS_PER_ASID 16 | ||
38 | #define MPIPE_TLB_IDX_WIDTH 4 | ||
39 | |||
40 | #define MPIPE_MMIO_NUM_SVC_DOM 32 | ||
41 | |||
42 | #endif /* __ARCH_MPIPE_CONSTANTS_H__ */ | ||
diff --git a/arch/tile/include/arch/mpipe_def.h b/arch/tile/include/arch/mpipe_def.h new file mode 100644 index 000000000000..c3d30217fc66 --- /dev/null +++ b/arch/tile/include/arch/mpipe_def.h | |||
@@ -0,0 +1,39 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* Machine-generated file; do not edit. */ | ||
16 | |||
17 | #ifndef __ARCH_MPIPE_DEF_H__ | ||
18 | #define __ARCH_MPIPE_DEF_H__ | ||
19 | #define MPIPE_MMIO_ADDR__REGION_SHIFT 26 | ||
20 | #define MPIPE_MMIO_ADDR__REGION_VAL_CFG 0x0 | ||
21 | #define MPIPE_MMIO_ADDR__REGION_VAL_IDMA 0x4 | ||
22 | #define MPIPE_MMIO_ADDR__REGION_VAL_EDMA 0x5 | ||
23 | #define MPIPE_MMIO_ADDR__REGION_VAL_BSM 0x6 | ||
24 | #define MPIPE_BSM_REGION_VAL__VA_SHIFT 7 | ||
25 | #define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_128 0x0 | ||
26 | #define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_256 0x1 | ||
27 | #define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_512 0x2 | ||
28 | #define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_1024 0x3 | ||
29 | #define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_1664 0x4 | ||
30 | #define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_4096 0x5 | ||
31 | #define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_10368 0x6 | ||
32 | #define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_16384 0x7 | ||
33 | #define MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_DFA 0x0 | ||
34 | #define MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_FIXED 0x1 | ||
35 | #define MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_ALWAYS_PICK 0x2 | ||
36 | #define MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_STICKY 0x3 | ||
37 | #define MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_STICKY_RAND 0x7 | ||
38 | #define MPIPE_LBL_NR_STATE__FIRST_WORD 0x2138 | ||
39 | #endif /* !defined(__ARCH_MPIPE_DEF_H__) */ | ||
diff --git a/arch/tile/include/arch/mpipe_shm.h b/arch/tile/include/arch/mpipe_shm.h new file mode 100644 index 000000000000..f2e9e122818d --- /dev/null +++ b/arch/tile/include/arch/mpipe_shm.h | |||
@@ -0,0 +1,509 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* Machine-generated file; do not edit. */ | ||
16 | |||
17 | |||
18 | #ifndef __ARCH_MPIPE_SHM_H__ | ||
19 | #define __ARCH_MPIPE_SHM_H__ | ||
20 | |||
21 | #include <arch/abi.h> | ||
22 | #include <arch/mpipe_shm_def.h> | ||
23 | |||
24 | #ifndef __ASSEMBLER__ | ||
25 | /** | ||
26 | * MPIPE eDMA Descriptor. | ||
27 | * The eDMA descriptor is written by software and consumed by hardware. It | ||
28 | * is used to specify the location of egress packet data to be sent out of | ||
29 | * the chip via one of the packet interfaces. | ||
30 | */ | ||
31 | |||
32 | __extension__ | ||
33 | typedef union | ||
34 | { | ||
35 | struct | ||
36 | { | ||
37 | /* Word 0 */ | ||
38 | |||
39 | #ifndef __BIG_ENDIAN__ | ||
40 | /** | ||
41 | * Generation number. Used to indicate a valid descriptor in ring. When | ||
42 | * a new descriptor is written into the ring, software must toggle this | ||
43 | * bit. The net effect is that the GEN bit being written into new | ||
44 | * descriptors toggles each time the ring tail pointer wraps. | ||
45 | */ | ||
46 | uint_reg_t gen : 1; | ||
47 | /** Reserved. Must be zero. */ | ||
48 | uint_reg_t r0 : 7; | ||
49 | /** Checksum generation enabled for this transfer. */ | ||
50 | uint_reg_t csum : 1; | ||
51 | /** | ||
52 | * Nothing to be sent. Used, for example, when software has dropped a | ||
53 | * packet but still wishes to return all of the associated buffers. | ||
54 | */ | ||
55 | uint_reg_t ns : 1; | ||
56 | /** | ||
57 | * Notification interrupt will be delivered when packet has been egressed. | ||
58 | */ | ||
59 | uint_reg_t notif : 1; | ||
60 | /** | ||
61 | * Boundary indicator. When 1, this transfer includes the EOP for this | ||
62 | * command. Must be clear on all but the last descriptor for an egress | ||
63 | * packet. | ||
64 | */ | ||
65 | uint_reg_t bound : 1; | ||
66 | /** Reserved. Must be zero. */ | ||
67 | uint_reg_t r1 : 4; | ||
68 | /** | ||
69 | * Number of bytes to be sent for this descriptor. When zero, no data | ||
70 | * will be moved and the buffer descriptor will be ignored. If the | ||
71 | * buffer descriptor indicates that it is chained, the low 7 bits of the | ||
72 | * VA indicate the offset within the first buffer (e.g. 127 bytes is the | ||
73 | * maximum offset into the first buffer). If the size exceeds a single | ||
74 | * buffer, subsequent buffer descriptors will be fetched prior to | ||
75 | * processing the next eDMA descriptor in the ring. | ||
76 | */ | ||
77 | uint_reg_t xfer_size : 14; | ||
78 | /** Reserved. Must be zero. */ | ||
79 | uint_reg_t r2 : 2; | ||
80 | /** | ||
81 | * Destination of checksum relative to CSUM_START relative to the first | ||
82 | * byte moved by this descriptor. Must be zero if CSUM=0 in this | ||
83 | * descriptor. Must be less than XFER_SIZE (e.g. the first byte of the | ||
84 | * CSUM_DEST must be within the span of this descriptor). | ||
85 | */ | ||
86 | uint_reg_t csum_dest : 8; | ||
87 | /** | ||
88 | * Start byte of checksum relative to the first byte moved by this | ||
89 | * descriptor. If this is not the first descriptor for the egress | ||
90 | * packet, CSUM_START is still relative to the first byte in this | ||
91 | * descriptor. Must be zero if CSUM=0 in this descriptor. | ||
92 | */ | ||
93 | uint_reg_t csum_start : 8; | ||
94 | /** | ||
95 | * Initial value for 16-bit 1's complement checksum if enabled via CSUM. | ||
96 | * Specified in network order. That is, bits[7:0] will be added to the | ||
97 | * byte pointed to by CSUM_START and bits[15:8] will be added to the byte | ||
98 | * pointed to by CSUM_START+1 (with appropriate 1's complement carries). | ||
99 | * Must be zero if CSUM=0 in this descriptor. | ||
100 | */ | ||
101 | uint_reg_t csum_seed : 16; | ||
102 | #else /* __BIG_ENDIAN__ */ | ||
103 | uint_reg_t csum_seed : 16; | ||
104 | uint_reg_t csum_start : 8; | ||
105 | uint_reg_t csum_dest : 8; | ||
106 | uint_reg_t r2 : 2; | ||
107 | uint_reg_t xfer_size : 14; | ||
108 | uint_reg_t r1 : 4; | ||
109 | uint_reg_t bound : 1; | ||
110 | uint_reg_t notif : 1; | ||
111 | uint_reg_t ns : 1; | ||
112 | uint_reg_t csum : 1; | ||
113 | uint_reg_t r0 : 7; | ||
114 | uint_reg_t gen : 1; | ||
115 | #endif | ||
116 | |||
117 | /* Word 1 */ | ||
118 | |||
119 | #ifndef __BIG_ENDIAN__ | ||
120 | /** Virtual address. Must be sign extended by consumer. */ | ||
121 | int_reg_t va : 42; | ||
122 | /** Reserved. */ | ||
123 | uint_reg_t __reserved_0 : 6; | ||
124 | /** Index of the buffer stack to which this buffer belongs. */ | ||
125 | uint_reg_t stack_idx : 5; | ||
126 | /** Reserved. */ | ||
127 | uint_reg_t __reserved_1 : 3; | ||
128 | /** | ||
129 | * Instance ID. For devices that support more than one mPIPE instance, | ||
130 | * this field indicates the buffer owner. If the INST field does not | ||
131 | * match the mPIPE's instance number when a packet is egressed, buffers | ||
132 | * with HWB set will be returned to the other mPIPE instance. | ||
133 | */ | ||
134 | uint_reg_t inst : 1; | ||
135 | /** Reserved. */ | ||
136 | uint_reg_t __reserved_2 : 1; | ||
137 | /** | ||
138 | * Always set to one by hardware in iDMA packet descriptors. For eDMA, | ||
139 | * indicates whether the buffer will be released to the buffer stack | ||
140 | * manager. When 0, software is responsible for releasing the buffer. | ||
141 | */ | ||
142 | uint_reg_t hwb : 1; | ||
143 | /** | ||
144 | * Encoded size of buffer. Set by the ingress hardware for iDMA packet | ||
145 | * descriptors. For eDMA descriptors, indicates the buffer size if .c | ||
146 | * indicates a chained packet. If an eDMA descriptor is not chained and | ||
147 | * the .hwb bit is not set, this field is ignored and the size is | ||
148 | * specified by the .xfer_size field. | ||
149 | * 0 = 128 bytes | ||
150 | * 1 = 256 bytes | ||
151 | * 2 = 512 bytes | ||
152 | * 3 = 1024 bytes | ||
153 | * 4 = 1664 bytes | ||
154 | * 5 = 4096 bytes | ||
155 | * 6 = 10368 bytes | ||
156 | * 7 = 16384 bytes | ||
157 | */ | ||
158 | uint_reg_t size : 3; | ||
159 | /** | ||
160 | * Chaining configuration for the buffer. Indicates that an ingress | ||
161 | * packet or egress command is chained across multiple buffers, with each | ||
162 | * buffer's size indicated by the .size field. | ||
163 | */ | ||
164 | uint_reg_t c : 2; | ||
165 | #else /* __BIG_ENDIAN__ */ | ||
166 | uint_reg_t c : 2; | ||
167 | uint_reg_t size : 3; | ||
168 | uint_reg_t hwb : 1; | ||
169 | uint_reg_t __reserved_2 : 1; | ||
170 | uint_reg_t inst : 1; | ||
171 | uint_reg_t __reserved_1 : 3; | ||
172 | uint_reg_t stack_idx : 5; | ||
173 | uint_reg_t __reserved_0 : 6; | ||
174 | int_reg_t va : 42; | ||
175 | #endif | ||
176 | |||
177 | }; | ||
178 | |||
179 | /** Word access */ | ||
180 | uint_reg_t words[2]; | ||
181 | } MPIPE_EDMA_DESC_t; | ||
182 | |||
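Concretely, software egresses a single-buffer packet by filling one of these descriptors in the ring and then posting it: word 0 carries the gen bit, the EOP/bound flag and the byte count, while word 1 points at the data and says whether hardware returns the buffer to its stack. A fill-in sketch (checksum offload disabled; cur_gen must be the ring's current generation, and the buffer word is written before the word containing the gen bit so the descriptor never looks valid prematurely, an ordering assumed by this sketch):

    /* Sketch: one 60-byte frame at kernel VA 'data', buffer owned by stack 0,
     * returned to the buffer stack manager by hardware after egress.
     */
    static void example_fill_edesc(MPIPE_EDMA_DESC_t *slot, void *data,
                                   unsigned int cur_gen)
    {
            MPIPE_EDMA_DESC_t d = { .words = { 0, 0 } };

            d.gen = cur_gen;        /* toggles each time the ring tail wraps */
            d.bound = 1;            /* last (and only) descriptor of the packet */
            d.xfer_size = 60;

            d.va = (long)data;      /* 42-bit field; consumer sign-extends */
            d.stack_idx = 0;
            d.hwb = 1;              /* hardware releases the buffer when done */
            d.size = gxio_mpipe_buffer_size_to_buffer_size_enum(1664);

            slot->words[1] = d.words[1];    /* buffer word first... */
            slot->words[0] = d.words[0];    /* ...then the word with the gen bit */
    }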
183 | /** | ||
184 | * MPIPE Packet Descriptor. | ||
185 | * The packet descriptor is filled by the mPIPE's classification, | ||
186 | * load-balancing, and buffer management services. Some fields are consumed | ||
187 | * by mPIPE hardware, and others are consumed by Tile software. | ||
188 | */ | ||
189 | |||
190 | __extension__ | ||
191 | typedef union | ||
192 | { | ||
193 | struct | ||
194 | { | ||
195 | /* Word 0 */ | ||
196 | |||
197 | #ifndef __BIG_ENDIAN__ | ||
198 | /** | ||
199 | * Notification ring into which this packet descriptor is written. | ||
200 | * Typically written by load balancer, but can be overridden by | ||
201 | * classification program if NR is asserted. | ||
202 | */ | ||
203 | uint_reg_t notif_ring : 8; | ||
204 | /** Source channel for this packet. Written by mPIPE DMA hardware. */ | ||
205 | uint_reg_t channel : 5; | ||
206 | /** Reserved. */ | ||
207 | uint_reg_t __reserved_0 : 1; | ||
208 | /** | ||
209 | * MAC Error. | ||
210 | * Generated by the MAC interface. Asserted if there was an overrun of | ||
211 | * the MAC's receive FIFO. This condition generally only occurs if the | ||
212 | * mPIPE clock is running too slowly. | ||
213 | */ | ||
214 | uint_reg_t me : 1; | ||
215 | /** | ||
216 | * Truncation Error. | ||
217 | * Written by the iDMA hardware. Asserted if packet was truncated due to | ||
219 | * insufficient space in the iPkt buffer. | ||
219 | */ | ||
220 | uint_reg_t tr : 1; | ||
221 | /** | ||
222 | * Written by the iDMA hardware. Indicates the number of bytes written | ||
223 | * to Tile memory. In general, this is the actual size of the packet as | ||
224 | * received from the MAC. But if the packet is truncated due to running | ||
225 | * out of buffers or due to the iPkt buffer filling up, then the L2_SIZE | ||
226 | * will be reduced to reflect the actual number of valid bytes written to | ||
227 | * Tile memory. | ||
228 | */ | ||
229 | uint_reg_t l2_size : 14; | ||
230 | /** | ||
231 | * CRC Error. | ||
232 | * Generated by the MAC. Asserted if MAC indicated an L2 CRC error or | ||
233 | * other L2 error (bad length etc.) on the packet. | ||
234 | */ | ||
235 | uint_reg_t ce : 1; | ||
236 | /** | ||
237 | * Cut Through. | ||
238 | * Written by the iDMA hardware. Asserted if packet was not completely | ||
239 | * received before being sent to classifier. L2_Size will indicate | ||
240 | * number of bytes received so far. | ||
241 | */ | ||
242 | uint_reg_t ct : 1; | ||
243 | /** | ||
244 | * Written by the classification program. Used by the load balancer to | ||
245 | * select the ring into which this packet descriptor is written. | ||
246 | */ | ||
247 | uint_reg_t bucket_id : 13; | ||
248 | /** Reserved. */ | ||
249 | uint_reg_t __reserved_1 : 3; | ||
250 | /** | ||
251 | * Checksum. | ||
252 | * Written by classification program. When 1, the checksum engine will | ||
253 | * perform checksum based on the CSUM_SEED, CSUM_START, and CSUM_BYTES | ||
254 | * fields. The result will be placed in CSUM_VAL. | ||
255 | */ | ||
256 | uint_reg_t cs : 1; | ||
257 | /** | ||
258 | * Notification Ring Select. | ||
259 | * Written by the classification program. When 1, the NotifRingIDX is | ||
260 | * set by classification program rather than being set by load balancer. | ||
261 | */ | ||
262 | uint_reg_t nr : 1; | ||
263 | /** | ||
264 | * Written by classification program. Indicates whether packet and | ||
265 | * descriptor should both be dropped, both be delivered, or only the | ||
266 | * descriptor should be delivered. | ||
267 | */ | ||
268 | uint_reg_t dest : 2; | ||
269 | /** | ||
270 | * General Purpose Sequence Number Enable. | ||
271 | * Written by the classification program. When 1, the GP_SQN_SEL field | ||
272 | * contains the sequence number selector and the GP_SQN field will be | ||
273 | * replaced with the associated sequence number. When clear, the GP_SQN | ||
274 | * field is left intact and can be used as "Custom" bytes. | ||
275 | */ | ||
276 | uint_reg_t sq : 1; | ||
277 | /** | ||
278 | * TimeStamp Enable. | ||
279 | * Enable TimeStamp insertion. When clear, timestamp field may be filled | ||
280 | * with custom data by classifier. When set, hardware inserts the | ||
281 | * timestamp when the start of packet is received from the MAC. | ||
282 | */ | ||
283 | uint_reg_t ts : 1; | ||
284 | /** | ||
285 | * Packet Sequence Number Enable. | ||
286 | * Enable PacketSQN insertion. When clear, PacketSQN field may be filled | ||
287 | * with custom data by classifier. When set, hardware inserts the packet | ||
288 | * sequence number when the packet descriptor is written to a | ||
289 | * notification ring. | ||
290 | */ | ||
291 | uint_reg_t ps : 1; | ||
292 | /** | ||
293 | * Buffer Error. | ||
294 | * Written by the iDMA hardware. Asserted if iDMA ran out of buffers | ||
295 | * while writing the packet. Software must still return any buffer | ||
296 | * descriptors whose C field indicates a valid descriptor was consumed. | ||
297 | */ | ||
298 | uint_reg_t be : 1; | ||
299 | /** | ||
300 | * Written by the classification program. The associated counter is | ||
301 | * incremented when the packet is sent. | ||
302 | */ | ||
303 | uint_reg_t ctr0 : 5; | ||
304 | /** Reserved. */ | ||
305 | uint_reg_t __reserved_2 : 3; | ||
306 | #else /* __BIG_ENDIAN__ */ | ||
307 | uint_reg_t __reserved_2 : 3; | ||
308 | uint_reg_t ctr0 : 5; | ||
309 | uint_reg_t be : 1; | ||
310 | uint_reg_t ps : 1; | ||
311 | uint_reg_t ts : 1; | ||
312 | uint_reg_t sq : 1; | ||
313 | uint_reg_t dest : 2; | ||
314 | uint_reg_t nr : 1; | ||
315 | uint_reg_t cs : 1; | ||
316 | uint_reg_t __reserved_1 : 3; | ||
317 | uint_reg_t bucket_id : 13; | ||
318 | uint_reg_t ct : 1; | ||
319 | uint_reg_t ce : 1; | ||
320 | uint_reg_t l2_size : 14; | ||
321 | uint_reg_t tr : 1; | ||
322 | uint_reg_t me : 1; | ||
323 | uint_reg_t __reserved_0 : 1; | ||
324 | uint_reg_t channel : 5; | ||
325 | uint_reg_t notif_ring : 8; | ||
326 | #endif | ||
327 | |||
328 | /* Word 1 */ | ||
329 | |||
330 | #ifndef __BIG_ENDIAN__ | ||
331 | /** | ||
332 | * Written by the classification program. The associated counter is | ||
333 | * incremented when the packet is sent. | ||
334 | */ | ||
335 | uint_reg_t ctr1 : 5; | ||
336 | /** Reserved. */ | ||
337 | uint_reg_t __reserved_3 : 3; | ||
338 | /** | ||
339 | * Written by classification program. Indicates the start byte for | ||
340 | * checksum. Relative to 1st byte received from MAC. | ||
341 | */ | ||
342 | uint_reg_t csum_start : 8; | ||
343 | /** | ||
344 | * Checksum seed written by classification program. Overwritten with | ||
345 | * resultant checksum if CS bit is asserted. The endianness of the CSUM | ||
346 | * value bits when viewed by Tile software match the packet byte order. | ||
347 | * That is, bits[7:0] of the resulting checksum value correspond to | ||
348 | * earlier (more significant) bytes in the packet. To avoid classifier | ||
349 | * software having to byte swap the CSUM_SEED, the iDMA checksum | ||
350 | * engine byte swaps the classifier's result before seeding the checksum | ||
351 | * calculation. Thus, the CSUM_START byte of packet data is added to | ||
352 | * bits[15:8] of the CSUM_SEED field generated by the classifier. This | ||
353 | * byte swap will be visible to Tile software if the CS bit is clear. | ||
354 | */ | ||
355 | uint_reg_t csum_seed_val : 16; | ||
356 | /** | ||
357 | * Written by the classification program. Not interpreted by mPIPE | ||
358 | * hardware. | ||
359 | */ | ||
360 | uint_reg_t custom0 : 32; | ||
361 | #else /* __BIG_ENDIAN__ */ | ||
362 | uint_reg_t custom0 : 32; | ||
363 | uint_reg_t csum_seed_val : 16; | ||
364 | uint_reg_t csum_start : 8; | ||
365 | uint_reg_t __reserved_3 : 3; | ||
366 | uint_reg_t ctr1 : 5; | ||
367 | #endif | ||
368 | |||
369 | /* Word 2 */ | ||
370 | |||
371 | #ifndef __BIG_ENDIAN__ | ||
372 | /** | ||
373 | * Written by the classification program. Not interpreted by mPIPE | ||
374 | * hardware. | ||
375 | */ | ||
376 | uint_reg_t custom1 : 64; | ||
377 | #else /* __BIG_ENDIAN__ */ | ||
378 | uint_reg_t custom1 : 64; | ||
379 | #endif | ||
380 | |||
381 | /* Word 3 */ | ||
382 | |||
383 | #ifndef __BIG_ENDIAN__ | ||
384 | /** | ||
385 | * Written by the classification program. Not interpreted by mPIPE | ||
386 | * hardware. | ||
387 | */ | ||
388 | uint_reg_t custom2 : 64; | ||
389 | #else /* __BIG_ENDIAN__ */ | ||
390 | uint_reg_t custom2 : 64; | ||
391 | #endif | ||
392 | |||
393 | /* Word 4 */ | ||
394 | |||
395 | #ifndef __BIG_ENDIAN__ | ||
396 | /** | ||
397 | * Written by the classification program. Not interpreted by mPIPE | ||
398 | * hardware. | ||
399 | */ | ||
400 | uint_reg_t custom3 : 64; | ||
401 | #else /* __BIG_ENDIAN__ */ | ||
402 | uint_reg_t custom3 : 64; | ||
403 | #endif | ||
404 | |||
405 | /* Word 5 */ | ||
406 | |||
407 | #ifndef __BIG_ENDIAN__ | ||
408 | /** | ||
409 | * Sequence number applied when packet is distributed. Classifier | ||
410 | * selects which sequence number is to be applied by writing the 13-bit | ||
411 | * SQN-selector into this field. | ||
412 | */ | ||
413 | uint_reg_t gp_sqn : 16; | ||
414 | /** | ||
415 | * Written by notification hardware. The packet sequence number is | ||
416 | * incremented for each packet that wasn't dropped. | ||
417 | */ | ||
418 | uint_reg_t packet_sqn : 48; | ||
419 | #else /* __BIG_ENDIAN__ */ | ||
420 | uint_reg_t packet_sqn : 48; | ||
421 | uint_reg_t gp_sqn : 16; | ||
422 | #endif | ||
423 | |||
424 | /* Word 6 */ | ||
425 | |||
426 | #ifndef __BIG_ENDIAN__ | ||
427 | /** | ||
428 | * Written by hardware when the start-of-packet is received by the mPIPE | ||
429 | * from the MAC. This is the nanoseconds part of the packet timestamp. | ||
430 | */ | ||
431 | uint_reg_t time_stamp_ns : 32; | ||
432 | /** | ||
433 | * Written by hardware when the start-of-packet is received by the mPIPE | ||
434 | * from the MAC. This is the seconds part of the packet timestamp. | ||
435 | */ | ||
436 | uint_reg_t time_stamp_sec : 32; | ||
437 | #else /* __BIG_ENDIAN__ */ | ||
438 | uint_reg_t time_stamp_sec : 32; | ||
439 | uint_reg_t time_stamp_ns : 32; | ||
440 | #endif | ||
441 | |||
442 | /* Word 7 */ | ||
443 | |||
444 | #ifndef __BIG_ENDIAN__ | ||
445 | /** Virtual address. Must be sign extended by consumer. */ | ||
446 | int_reg_t va : 42; | ||
447 | /** Reserved. */ | ||
448 | uint_reg_t __reserved_4 : 6; | ||
449 | /** Index of the buffer stack to which this buffer belongs. */ | ||
450 | uint_reg_t stack_idx : 5; | ||
451 | /** Reserved. */ | ||
452 | uint_reg_t __reserved_5 : 3; | ||
453 | /** | ||
454 | * Instance ID. For devices that support more than one mPIPE instance, | ||
455 | * this field indicates the buffer owner. If the INST field does not | ||
456 | * match the mPIPE's instance number when a packet is egressed, buffers | ||
457 | * with HWB set will be returned to the other mPIPE instance. | ||
458 | */ | ||
459 | uint_reg_t inst : 1; | ||
460 | /** Reserved. */ | ||
461 | uint_reg_t __reserved_6 : 1; | ||
462 | /** | ||
463 | * Always set to one by hardware in iDMA packet descriptors. For eDMA, | ||
464 | * indicates whether the buffer will be released to the buffer stack | ||
465 | * manager. When 0, software is responsible for releasing the buffer. | ||
466 | */ | ||
467 | uint_reg_t hwb : 1; | ||
468 | /** | ||
469 | * Encoded size of buffer. Set by the ingress hardware for iDMA packet | ||
470 | * descriptors. For eDMA descriptors, indicates the buffer size if .c | ||
471 | * indicates a chained packet. If an eDMA descriptor is not chained and | ||
472 | * the .hwb bit is not set, this field is ignored and the size is | ||
473 | * specified by the .xfer_size field. | ||
474 | * 0 = 128 bytes | ||
475 | * 1 = 256 bytes | ||
476 | * 2 = 512 bytes | ||
477 | * 3 = 1024 bytes | ||
478 | * 4 = 1664 bytes | ||
479 | * 5 = 4096 bytes | ||
480 | * 6 = 10368 bytes | ||
481 | * 7 = 16384 bytes | ||
482 | */ | ||
483 | uint_reg_t size : 3; | ||
484 | /** | ||
485 | * Chaining configuration for the buffer. Indicates that an ingress | ||
486 | * packet or egress command is chained across multiple buffers, with each | ||
487 | * buffer's size indicated by the .size field. | ||
488 | */ | ||
489 | uint_reg_t c : 2; | ||
490 | #else /* __BIG_ENDIAN__ */ | ||
491 | uint_reg_t c : 2; | ||
492 | uint_reg_t size : 3; | ||
493 | uint_reg_t hwb : 1; | ||
494 | uint_reg_t __reserved_6 : 1; | ||
495 | uint_reg_t inst : 1; | ||
496 | uint_reg_t __reserved_5 : 3; | ||
497 | uint_reg_t stack_idx : 5; | ||
498 | uint_reg_t __reserved_4 : 6; | ||
499 | int_reg_t va : 42; | ||
500 | #endif | ||
501 | |||
502 | }; | ||
503 | |||
504 | /** Word access */ | ||
505 | uint_reg_t words[8]; | ||
506 | } MPIPE_PDESC_t; | ||
507 | #endif /* !defined(__ASSEMBLER__) */ | ||
508 | |||
509 | #endif /* !defined(__ARCH_MPIPE_SHM_H__) */ | ||
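As a rough illustration of how a consumer might use the descriptor layout above (this sketch is not part of the commit, and the helper names are invented), the encoded .size field maps to a byte count via the table in the comment, and the 42-bit VA in word 7 must be sign-extended before use:

#include <arch/mpipe_shm.h>

/* Byte counts for the 3-bit encoded buffer size, per the table above. */
static const unsigned int mpipe_buf_bytes[8] = {
	128, 256, 512, 1024, 1664, 4096, 10368, 16384
};

/* Word 7 holds the 42-bit VA in its low bits; sign-extend it to 64 bits. */
static void *mpipe_pdesc_va(const MPIPE_PDESC_t *desc)
{
	return (void *)(((long)(desc->words[7] << 22)) >> 22);
}

/* Translate the encoded .size field into a byte count. */
static unsigned int mpipe_pdesc_buf_bytes(const MPIPE_PDESC_t *desc)
{
	return mpipe_buf_bytes[desc->size];
}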
diff --git a/arch/tile/include/arch/mpipe_shm_def.h b/arch/tile/include/arch/mpipe_shm_def.h new file mode 100644 index 000000000000..6124d39c8318 --- /dev/null +++ b/arch/tile/include/arch/mpipe_shm_def.h | |||
@@ -0,0 +1,23 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* Machine-generated file; do not edit. */ | ||
16 | |||
17 | #ifndef __ARCH_MPIPE_SHM_DEF_H__ | ||
18 | #define __ARCH_MPIPE_SHM_DEF_H__ | ||
19 | #define MPIPE_EDMA_DESC_WORD1__C_VAL_UNCHAINED 0x0 | ||
20 | #define MPIPE_EDMA_DESC_WORD1__C_VAL_CHAINED 0x1 | ||
21 | #define MPIPE_EDMA_DESC_WORD1__C_VAL_NOT_RDY 0x2 | ||
22 | #define MPIPE_EDMA_DESC_WORD1__C_VAL_INVALID 0x3 | ||
23 | #endif /* !defined(__ARCH_MPIPE_SHM_DEF_H__) */ | ||
diff --git a/arch/tile/include/arch/trio.h b/arch/tile/include/arch/trio.h new file mode 100644 index 000000000000..d3000a871a21 --- /dev/null +++ b/arch/tile/include/arch/trio.h | |||
@@ -0,0 +1,72 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* Machine-generated file; do not edit. */ | ||
16 | |||
17 | #ifndef __ARCH_TRIO_H__ | ||
18 | #define __ARCH_TRIO_H__ | ||
19 | |||
20 | #include <arch/abi.h> | ||
21 | #include <arch/trio_def.h> | ||
22 | |||
23 | #ifndef __ASSEMBLER__ | ||
24 | |||
25 | /* | ||
26 | * Tile PIO Region Configuration - CFG Address Format. | ||
27 | * This register describes the address format for PIO accesses when the | ||
28 | * associated region is set up with TYPE=CFG. | ||
29 | */ | ||
30 | |||
31 | __extension__ | ||
32 | typedef union | ||
33 | { | ||
34 | struct | ||
35 | { | ||
36 | #ifndef __BIG_ENDIAN__ | ||
37 | /* Register Address (full byte address). */ | ||
38 | uint_reg_t reg_addr : 12; | ||
39 | /* Function Number */ | ||
40 | uint_reg_t fn : 3; | ||
41 | /* Device Number */ | ||
42 | uint_reg_t dev : 5; | ||
43 | /* BUS Number */ | ||
44 | uint_reg_t bus : 8; | ||
45 | /* Config Type: 0 for access to directly-attached device. 1 otherwise. */ | ||
46 | uint_reg_t type : 1; | ||
47 | /* Reserved. */ | ||
48 | uint_reg_t __reserved_0 : 1; | ||
49 | /* | ||
50 | * MAC select. This must match the configuration in | ||
51 | * TILE_PIO_REGION_SETUP.MAC. | ||
52 | */ | ||
53 | uint_reg_t mac : 2; | ||
54 | /* Reserved. */ | ||
55 | uint_reg_t __reserved_1 : 32; | ||
56 | #else /* __BIG_ENDIAN__ */ | ||
57 | uint_reg_t __reserved_1 : 32; | ||
58 | uint_reg_t mac : 2; | ||
59 | uint_reg_t __reserved_0 : 1; | ||
60 | uint_reg_t type : 1; | ||
61 | uint_reg_t bus : 8; | ||
62 | uint_reg_t dev : 5; | ||
63 | uint_reg_t fn : 3; | ||
64 | uint_reg_t reg_addr : 12; | ||
65 | #endif | ||
66 | }; | ||
67 | |||
68 | uint_reg_t word; | ||
69 | } TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t; | ||
70 | #endif /* !defined(__ASSEMBLER__) */ | ||
71 | |||
72 | #endif /* !defined(__ARCH_TRIO_H__) */ | ||
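To make the CFG address format concrete, here is a hedged sketch (not taken from this commit; the TYPE heuristic and MAC value are illustrative) of packing a bus/device/function/register tuple into a PIO-region offset. The TYPE and MAC settings must agree with how TILE_PIO_REGION_SETUP was programmed, which is outside the scope of this header:

#include <arch/trio.h>

static uint_reg_t trio_cfg_addr(unsigned int bus, unsigned int dev,
				unsigned int fn, unsigned int reg,
				unsigned int mac)
{
	TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t addr;

	addr.word = 0;
	addr.reg_addr = reg;	/* full byte offset within config space */
	addr.fn = fn;
	addr.dev = dev;
	addr.bus = bus;
	addr.type = (bus != 0);	/* 0 only for the directly-attached device */
	addr.mac = mac;		/* must match TILE_PIO_REGION_SETUP.MAC */
	return addr.word;
}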
diff --git a/arch/tile/include/arch/trio_constants.h b/arch/tile/include/arch/trio_constants.h new file mode 100644 index 000000000000..628b045436b8 --- /dev/null +++ b/arch/tile/include/arch/trio_constants.h | |||
@@ -0,0 +1,36 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | |||
16 | #ifndef __ARCH_TRIO_CONSTANTS_H__ | ||
17 | #define __ARCH_TRIO_CONSTANTS_H__ | ||
18 | |||
19 | #define TRIO_NUM_ASIDS 16 | ||
20 | #define TRIO_NUM_TLBS_PER_ASID 16 | ||
21 | |||
22 | #define TRIO_NUM_TPIO_REGIONS 8 | ||
23 | #define TRIO_LOG2_NUM_TPIO_REGIONS 3 | ||
24 | |||
25 | #define TRIO_NUM_MAP_MEM_REGIONS 16 | ||
26 | #define TRIO_LOG2_NUM_MAP_MEM_REGIONS 4 | ||
27 | #define TRIO_NUM_MAP_SQ_REGIONS 8 | ||
28 | #define TRIO_LOG2_NUM_MAP_SQ_REGIONS 3 | ||
29 | |||
30 | #define TRIO_LOG2_NUM_SQ_FIFO_ENTRIES 6 | ||
31 | |||
32 | #define TRIO_NUM_PUSH_DMA_RINGS 32 | ||
33 | |||
34 | #define TRIO_NUM_PULL_DMA_RINGS 32 | ||
35 | |||
36 | #endif /* __ARCH_TRIO_CONSTANTS_H__ */ | ||
diff --git a/arch/tile/include/arch/trio_def.h b/arch/tile/include/arch/trio_def.h new file mode 100644 index 000000000000..e80500317dc4 --- /dev/null +++ b/arch/tile/include/arch/trio_def.h | |||
@@ -0,0 +1,41 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* Machine-generated file; do not edit. */ | ||
16 | |||
17 | #ifndef __ARCH_TRIO_DEF_H__ | ||
18 | #define __ARCH_TRIO_DEF_H__ | ||
19 | #define TRIO_CFG_REGION_ADDR__REG_SHIFT 0 | ||
20 | #define TRIO_CFG_REGION_ADDR__INTFC_SHIFT 16 | ||
21 | #define TRIO_CFG_REGION_ADDR__INTFC_VAL_TRIO 0x0 | ||
22 | #define TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE 0x1 | ||
23 | #define TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD 0x2 | ||
24 | #define TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED 0x3 | ||
25 | #define TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT 18 | ||
26 | #define TRIO_CFG_REGION_ADDR__PROT_SHIFT 20 | ||
27 | #define TRIO_PIO_REGIONS_ADDR__REGION_SHIFT 32 | ||
28 | #define TRIO_MAP_MEM_REG_INT0 0x1000000000 | ||
29 | #define TRIO_MAP_MEM_REG_INT1 0x1000000008 | ||
30 | #define TRIO_MAP_MEM_REG_INT2 0x1000000010 | ||
31 | #define TRIO_MAP_MEM_REG_INT3 0x1000000018 | ||
32 | #define TRIO_MAP_MEM_REG_INT4 0x1000000020 | ||
33 | #define TRIO_MAP_MEM_REG_INT5 0x1000000028 | ||
34 | #define TRIO_MAP_MEM_REG_INT6 0x1000000030 | ||
35 | #define TRIO_MAP_MEM_REG_INT7 0x1000000038 | ||
36 | #define TRIO_MAP_MEM_LIM__ADDR_SHIFT 12 | ||
37 | #define TRIO_MAP_MEM_SETUP__ORDER_MODE_VAL_UNORDERED 0x0 | ||
38 | #define TRIO_MAP_MEM_SETUP__ORDER_MODE_VAL_STRICT 0x1 | ||
39 | #define TRIO_MAP_MEM_SETUP__ORDER_MODE_VAL_REL_ORD 0x2 | ||
40 | #define TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT 30 | ||
41 | #endif /* !defined(__ARCH_TRIO_DEF_H__) */ | ||
diff --git a/arch/tile/include/arch/trio_pcie_intfc.h b/arch/tile/include/arch/trio_pcie_intfc.h new file mode 100644 index 000000000000..0487fdb9d581 --- /dev/null +++ b/arch/tile/include/arch/trio_pcie_intfc.h | |||
@@ -0,0 +1,229 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* Machine-generated file; do not edit. */ | ||
16 | |||
17 | #ifndef __ARCH_TRIO_PCIE_INTFC_H__ | ||
18 | #define __ARCH_TRIO_PCIE_INTFC_H__ | ||
19 | |||
20 | #include <arch/abi.h> | ||
21 | #include <arch/trio_pcie_intfc_def.h> | ||
22 | |||
23 | #ifndef __ASSEMBLER__ | ||
24 | |||
25 | /* | ||
26 | * Port Configuration. | ||
27 | * Configuration of the PCIe Port | ||
28 | */ | ||
29 | |||
30 | __extension__ | ||
31 | typedef union | ||
32 | { | ||
33 | struct | ||
34 | { | ||
35 | #ifndef __BIG_ENDIAN__ | ||
36 | /* Provides the state of the strapping pins for this port. */ | ||
37 | uint_reg_t strap_state : 3; | ||
38 | /* Reserved. */ | ||
39 | uint_reg_t __reserved_0 : 1; | ||
40 | /* | ||
41 | * When 1, the device type will be overridden using OVD_DEV_TYPE_VAL. | ||
42 | * When 0, the device type is determined based on the STRAP_STATE. | ||
43 | */ | ||
44 | uint_reg_t ovd_dev_type : 1; | ||
45 | /* Provides the device type when OVD_DEV_TYPE is 1. */ | ||
46 | uint_reg_t ovd_dev_type_val : 4; | ||
47 | /* Determines how link is trained. */ | ||
48 | uint_reg_t train_mode : 2; | ||
49 | /* Reserved. */ | ||
50 | uint_reg_t __reserved_1 : 1; | ||
51 | /* | ||
52 | * For PCIe, used to flip physical RX lanes that were not properly wired. | ||
53 | * This is not the same as lane reversal which is handled automatically | ||
54 | * during link training. When 0, RX Lane0 must be wired to the link | ||
55 | * partner (either to its Lane0 or its LaneN). When RX_LANE_FLIP is 1, | ||
56 | * the highest numbered lane for this port becomes Lane0 and Lane0 does | ||
57 | * NOT have to be wired to the link partner. | ||
58 | */ | ||
59 | uint_reg_t rx_lane_flip : 1; | ||
60 | /* | ||
61 | * For PCIe, used to flip physical TX lanes that were not properly wired. | ||
62 | * This is not the same as lane reversal which is handled automatically | ||
63 | * during link training. When 0, TX Lane0 must be wired to the link | ||
64 | * partner (either to its Lane0 or its LaneN). When TX_LANE_FLIP is 1, | ||
65 | * the highest numbered lane for this port becomes Lane0 and Lane0 does | ||
66 | * NOT have to be wired to the link partner. | ||
67 | */ | ||
68 | uint_reg_t tx_lane_flip : 1; | ||
69 | /* | ||
70 | * For StreamIO port, configures the width of the port when TRAIN_MODE is | ||
71 | * not STRAP. | ||
72 | */ | ||
73 | uint_reg_t stream_width : 2; | ||
74 | /* | ||
75 | * For StreamIO port, configures the rate of the port when TRAIN_MODE is | ||
76 | * not STRAP. | ||
77 | */ | ||
78 | uint_reg_t stream_rate : 2; | ||
79 | /* Reserved. */ | ||
80 | uint_reg_t __reserved_2 : 46; | ||
81 | #else /* __BIG_ENDIAN__ */ | ||
82 | uint_reg_t __reserved_2 : 46; | ||
83 | uint_reg_t stream_rate : 2; | ||
84 | uint_reg_t stream_width : 2; | ||
85 | uint_reg_t tx_lane_flip : 1; | ||
86 | uint_reg_t rx_lane_flip : 1; | ||
87 | uint_reg_t __reserved_1 : 1; | ||
88 | uint_reg_t train_mode : 2; | ||
89 | uint_reg_t ovd_dev_type_val : 4; | ||
90 | uint_reg_t ovd_dev_type : 1; | ||
91 | uint_reg_t __reserved_0 : 1; | ||
92 | uint_reg_t strap_state : 3; | ||
93 | #endif | ||
94 | }; | ||
95 | |||
96 | uint_reg_t word; | ||
97 | } TRIO_PCIE_INTFC_PORT_CONFIG_t; | ||
98 | |||
99 | /* | ||
100 | * Port Status. | ||
101 | * Status of the PCIe Port. This register applies to the StreamIO port when | ||
102 | * StreamIO is enabled. | ||
103 | */ | ||
104 | |||
105 | __extension__ | ||
106 | typedef union | ||
107 | { | ||
108 | struct | ||
109 | { | ||
110 | #ifndef __BIG_ENDIAN__ | ||
111 | /* | ||
112 | * Indicates the DL state of the port. When 1, the port is up and ready | ||
113 | * to receive traffic. | ||
114 | */ | ||
115 | uint_reg_t dl_up : 1; | ||
116 | /* | ||
117 | * Indicates the number of times the link has gone down. Clears on read. | ||
118 | */ | ||
119 | uint_reg_t dl_down_cnt : 7; | ||
120 | /* Indicates the SERDES PLL has spun up and is providing a valid clock. */ | ||
121 | uint_reg_t clock_ready : 1; | ||
122 | /* Reserved. */ | ||
123 | uint_reg_t __reserved_0 : 7; | ||
124 | /* Device revision ID. */ | ||
125 | uint_reg_t device_rev : 8; | ||
126 | /* Link state (PCIe). */ | ||
127 | uint_reg_t ltssm_state : 6; | ||
128 | /* Link power management state (PCIe). */ | ||
129 | uint_reg_t pm_state : 3; | ||
130 | /* Reserved. */ | ||
131 | uint_reg_t __reserved_1 : 31; | ||
132 | #else /* __BIG_ENDIAN__ */ | ||
133 | uint_reg_t __reserved_1 : 31; | ||
134 | uint_reg_t pm_state : 3; | ||
135 | uint_reg_t ltssm_state : 6; | ||
136 | uint_reg_t device_rev : 8; | ||
137 | uint_reg_t __reserved_0 : 7; | ||
138 | uint_reg_t clock_ready : 1; | ||
139 | uint_reg_t dl_down_cnt : 7; | ||
140 | uint_reg_t dl_up : 1; | ||
141 | #endif | ||
142 | }; | ||
143 | |||
144 | uint_reg_t word; | ||
145 | } TRIO_PCIE_INTFC_PORT_STATUS_t; | ||
146 | |||
147 | /* | ||
148 | * Transmit FIFO Control. | ||
149 | * Contains TX FIFO thresholds. These registers are for diagnostic purposes | ||
150 | * only. Changing these values causes undefined behavior. | ||
151 | */ | ||
152 | |||
153 | __extension__ | ||
154 | typedef union | ||
155 | { | ||
156 | struct | ||
157 | { | ||
158 | #ifndef __BIG_ENDIAN__ | ||
159 | /* | ||
160 | * Almost-Empty level for TX0 data. Typically set to at least | ||
161 | * roundup(38.0*M/N) where N=tclk frequency and M=MAC symbol rate in MHz | ||
162 | * for a x4 port (250MHz). | ||
163 | */ | ||
164 | uint_reg_t tx0_data_ae_lvl : 7; | ||
165 | /* Reserved. */ | ||
166 | uint_reg_t __reserved_0 : 1; | ||
167 | /* Almost-Empty level for TX1 data. */ | ||
168 | uint_reg_t tx1_data_ae_lvl : 7; | ||
169 | /* Reserved. */ | ||
170 | uint_reg_t __reserved_1 : 1; | ||
171 | /* Almost-Full level for TX0 data. */ | ||
172 | uint_reg_t tx0_data_af_lvl : 7; | ||
173 | /* Reserved. */ | ||
174 | uint_reg_t __reserved_2 : 1; | ||
175 | /* Almost-Full level for TX1 data. */ | ||
176 | uint_reg_t tx1_data_af_lvl : 7; | ||
177 | /* Reserved. */ | ||
178 | uint_reg_t __reserved_3 : 1; | ||
179 | /* Almost-Full level for TX0 info. */ | ||
180 | uint_reg_t tx0_info_af_lvl : 5; | ||
181 | /* Reserved. */ | ||
182 | uint_reg_t __reserved_4 : 3; | ||
183 | /* Almost-Full level for TX1 info. */ | ||
184 | uint_reg_t tx1_info_af_lvl : 5; | ||
185 | /* Reserved. */ | ||
186 | uint_reg_t __reserved_5 : 3; | ||
187 | /* | ||
188 | * This register provides performance adjustment for high bandwidth | ||
189 | * flows. The MAC will assert almost-full to TRIO if non-posted credits | ||
190 | * fall below this level. Note that setting this larger than the initial | ||
191 | * PORT_CREDIT.NPH value will cause READS to never be sent. If the | ||
192 | * initial credit value from the link partner is smaller than this value | ||
193 | * when the link comes up, the value will be reset to the initial credit | ||
194 | * value to prevent lockup. | ||
195 | */ | ||
196 | uint_reg_t min_np_credits : 8; | ||
197 | /* | ||
198 | * This register provides performance adjustment for high bandwidth | ||
199 | * flows. The MAC will assert almost-full to TRIO if posted credits fall | ||
200 | * below this level. Note that setting this larger than the initial | ||
201 | * PORT_CREDIT.PH value will cause WRITES to never be sent. If the | ||
202 | * initial credit value from the link partner is smaller than this value | ||
203 | * when the link comes up, the value will be reset to the initial credit | ||
204 | * value to prevent lockup. | ||
205 | */ | ||
206 | uint_reg_t min_p_credits : 8; | ||
207 | #else /* __BIG_ENDIAN__ */ | ||
208 | uint_reg_t min_p_credits : 8; | ||
209 | uint_reg_t min_np_credits : 8; | ||
210 | uint_reg_t __reserved_5 : 3; | ||
211 | uint_reg_t tx1_info_af_lvl : 5; | ||
212 | uint_reg_t __reserved_4 : 3; | ||
213 | uint_reg_t tx0_info_af_lvl : 5; | ||
214 | uint_reg_t __reserved_3 : 1; | ||
215 | uint_reg_t tx1_data_af_lvl : 7; | ||
216 | uint_reg_t __reserved_2 : 1; | ||
217 | uint_reg_t tx0_data_af_lvl : 7; | ||
218 | uint_reg_t __reserved_1 : 1; | ||
219 | uint_reg_t tx1_data_ae_lvl : 7; | ||
220 | uint_reg_t __reserved_0 : 1; | ||
221 | uint_reg_t tx0_data_ae_lvl : 7; | ||
222 | #endif | ||
223 | }; | ||
224 | |||
225 | uint_reg_t word; | ||
226 | } TRIO_PCIE_INTFC_TX_FIFO_CTL_t; | ||
227 | #endif /* !defined(__ASSEMBLER__) */ | ||
228 | |||
229 | #endif /* !defined(__ARCH_TRIO_PCIE_INTFC_H__) */ | ||
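A minimal sketch of how these definitions might be used together (assumed, not taken from this commit): given an already-mapped MMIO pointer to the MAC interface register space, PORT_STATUS can be read as one 64-bit word and the DL_UP bit tested through the bitfield overlay:

#include <linux/io.h>
#include <arch/trio_pcie_intfc.h>

static int trio_pcie_link_up(void __iomem *intfc_regs)
{
	TRIO_PCIE_INTFC_PORT_STATUS_t status;

	/* PORT_STATUS sits at a fixed offset within the interface space. */
	status.word = __raw_readq(intfc_regs + TRIO_PCIE_INTFC_PORT_STATUS);
	return status.dl_up;
}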
diff --git a/arch/tile/include/arch/trio_pcie_intfc_def.h b/arch/tile/include/arch/trio_pcie_intfc_def.h new file mode 100644 index 000000000000..d3fd6781fb24 --- /dev/null +++ b/arch/tile/include/arch/trio_pcie_intfc_def.h | |||
@@ -0,0 +1,32 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* Machine-generated file; do not edit. */ | ||
16 | |||
17 | #ifndef __ARCH_TRIO_PCIE_INTFC_DEF_H__ | ||
18 | #define __ARCH_TRIO_PCIE_INTFC_DEF_H__ | ||
19 | #define TRIO_PCIE_INTFC_MAC_INT_STS 0x0000 | ||
20 | #define TRIO_PCIE_INTFC_MAC_INT_STS__INT_LEVEL_MASK 0xf000 | ||
21 | #define TRIO_PCIE_INTFC_PORT_CONFIG 0x0018 | ||
22 | #define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_DISABLED 0x0 | ||
23 | #define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT 0x1 | ||
24 | #define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC 0x2 | ||
25 | #define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT_G1 0x3 | ||
26 | #define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC_G1 0x4 | ||
27 | #define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_XLINK 0x5 | ||
28 | #define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_STREAM_X1 0x6 | ||
29 | #define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_STREAM_X4 0x7 | ||
30 | #define TRIO_PCIE_INTFC_PORT_STATUS 0x0020 | ||
31 | #define TRIO_PCIE_INTFC_TX_FIFO_CTL 0x0050 | ||
32 | #endif /* !defined(__ARCH_TRIO_PCIE_INTFC_DEF_H__) */ | ||
diff --git a/arch/tile/include/arch/trio_pcie_rc.h b/arch/tile/include/arch/trio_pcie_rc.h new file mode 100644 index 000000000000..6a25d0aca857 --- /dev/null +++ b/arch/tile/include/arch/trio_pcie_rc.h | |||
@@ -0,0 +1,156 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* Machine-generated file; do not edit. */ | ||
16 | |||
17 | #ifndef __ARCH_TRIO_PCIE_RC_H__ | ||
18 | #define __ARCH_TRIO_PCIE_RC_H__ | ||
19 | |||
20 | #include <arch/abi.h> | ||
21 | #include <arch/trio_pcie_rc_def.h> | ||
22 | |||
23 | #ifndef __ASSEMBLER__ | ||
24 | |||
25 | /* Device Capabilities Register. */ | ||
26 | |||
27 | __extension__ | ||
28 | typedef union | ||
29 | { | ||
30 | struct | ||
31 | { | ||
32 | #ifndef __BIG_ENDIAN__ | ||
33 | /* | ||
34 | * Max_Payload_Size Supported, writable through the MAC_STANDARD interface | ||
35 | */ | ||
36 | uint_reg_t mps_sup : 3; | ||
37 | /* | ||
38 | * This field is writable through the MAC_STANDARD interface. However, | ||
39 | * Phantom Function is not supported. Therefore, the application must | ||
40 | * not write any value other than 0x0 to this field. | ||
41 | */ | ||
42 | uint_reg_t phantom_function_supported : 2; | ||
43 | /* This bit is writable through the MAC_STANDARD interface. */ | ||
44 | uint_reg_t ext_tag_field_supported : 1; | ||
45 | /* Reserved. */ | ||
46 | uint_reg_t __reserved_0 : 3; | ||
47 | /* Endpoint L1 Acceptable Latency. Must be 0x0 for non-Endpoint devices. */ | ||
48 | uint_reg_t l1_lat : 3; | ||
49 | /* | ||
50 | * Undefined since PCI Express 1.1 (Was Attention Button Present for PCI | ||
51 | * Express 1.0a) | ||
52 | */ | ||
53 | uint_reg_t r1 : 1; | ||
54 | /* | ||
55 | * Undefined since PCI Express 1.1 (Was Attention Indicator Present for | ||
56 | * PCI Express 1.0a) | ||
57 | */ | ||
58 | uint_reg_t r2 : 1; | ||
59 | /* | ||
60 | * Undefined since PCI Express 1.1 (Was Power Indicator Present for PCI | ||
61 | * Express 1.0a) | ||
62 | */ | ||
63 | uint_reg_t r3 : 1; | ||
64 | /* | ||
65 | * Role-Based Error Reporting, writable through the MAC_STANDARD | ||
66 | * interface. Required to be set for devices compliant with the 1.1 spec and | ||
67 | * later. | ||
68 | */ | ||
69 | uint_reg_t rer : 1; | ||
70 | /* Reserved. */ | ||
71 | uint_reg_t __reserved_1 : 2; | ||
72 | /* Captured Slot Power Limit Value. Upstream port only. */ | ||
73 | uint_reg_t slot_pwr_lim : 8; | ||
74 | /* Captured Slot Power Limit Scale. Upstream port only. */ | ||
75 | uint_reg_t slot_pwr_scale : 2; | ||
76 | /* Reserved. */ | ||
77 | uint_reg_t __reserved_2 : 4; | ||
78 | /* Endpoint L0s Acceptable Latency. Must be 0x0 for non-Endpoint devices. */ | ||
79 | uint_reg_t l0s_lat : 1; | ||
80 | /* Reserved. */ | ||
81 | uint_reg_t __reserved_3 : 31; | ||
82 | #else /* __BIG_ENDIAN__ */ | ||
83 | uint_reg_t __reserved_3 : 31; | ||
84 | uint_reg_t l0s_lat : 1; | ||
85 | uint_reg_t __reserved_2 : 4; | ||
86 | uint_reg_t slot_pwr_scale : 2; | ||
87 | uint_reg_t slot_pwr_lim : 8; | ||
88 | uint_reg_t __reserved_1 : 2; | ||
89 | uint_reg_t rer : 1; | ||
90 | uint_reg_t r3 : 1; | ||
91 | uint_reg_t r2 : 1; | ||
92 | uint_reg_t r1 : 1; | ||
93 | uint_reg_t l1_lat : 3; | ||
94 | uint_reg_t __reserved_0 : 3; | ||
95 | uint_reg_t ext_tag_field_supported : 1; | ||
96 | uint_reg_t phantom_function_supported : 2; | ||
97 | uint_reg_t mps_sup : 3; | ||
98 | #endif | ||
99 | }; | ||
100 | |||
101 | uint_reg_t word; | ||
102 | } TRIO_PCIE_RC_DEVICE_CAP_t; | ||
103 | |||
104 | /* Device Control Register. */ | ||
105 | |||
106 | __extension__ | ||
107 | typedef union | ||
108 | { | ||
109 | struct | ||
110 | { | ||
111 | #ifndef __BIG_ENDIAN__ | ||
112 | /* Correctable Error Reporting Enable */ | ||
113 | uint_reg_t cor_err_ena : 1; | ||
114 | /* Non-Fatal Error Reporting Enable */ | ||
115 | uint_reg_t nf_err_ena : 1; | ||
116 | /* Fatal Error Reporting Enable */ | ||
117 | uint_reg_t fatal_err_ena : 1; | ||
118 | /* Unsupported Request Reporting Enable */ | ||
119 | uint_reg_t ur_ena : 1; | ||
120 | /* Relaxed ordering enable */ | ||
121 | uint_reg_t ro_ena : 1; | ||
122 | /* Max Payload Size */ | ||
123 | uint_reg_t max_payload_size : 3; | ||
124 | /* Extended Tag Field Enable */ | ||
125 | uint_reg_t ext_tag : 1; | ||
126 | /* Phantom Function Enable */ | ||
127 | uint_reg_t ph_fn_ena : 1; | ||
128 | /* AUX Power PM Enable */ | ||
129 | uint_reg_t aux_pm_ena : 1; | ||
130 | /* Enable NoSnoop */ | ||
131 | uint_reg_t no_snoop : 1; | ||
132 | /* Max read request size */ | ||
133 | uint_reg_t max_read_req_sz : 3; | ||
134 | /* Reserved. */ | ||
135 | uint_reg_t __reserved : 49; | ||
136 | #else /* __BIG_ENDIAN__ */ | ||
137 | uint_reg_t __reserved : 49; | ||
138 | uint_reg_t max_read_req_sz : 3; | ||
139 | uint_reg_t no_snoop : 1; | ||
140 | uint_reg_t aux_pm_ena : 1; | ||
141 | uint_reg_t ph_fn_ena : 1; | ||
142 | uint_reg_t ext_tag : 1; | ||
143 | uint_reg_t max_payload_size : 3; | ||
144 | uint_reg_t ro_ena : 1; | ||
145 | uint_reg_t ur_ena : 1; | ||
146 | uint_reg_t fatal_err_ena : 1; | ||
147 | uint_reg_t nf_err_ena : 1; | ||
148 | uint_reg_t cor_err_ena : 1; | ||
149 | #endif | ||
150 | }; | ||
151 | |||
152 | uint_reg_t word; | ||
153 | } TRIO_PCIE_RC_DEVICE_CONTROL_t; | ||
154 | #endif /* !defined(__ASSEMBLER__) */ | ||
155 | |||
156 | #endif /* !defined(__ARCH_TRIO_PCIE_RC_H__) */ | ||
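As a hedged example of the Device Control layout (illustrative only; how the value actually reaches the MAC, normally through the RC config space at offset TRIO_PCIE_RC_DEVICE_CONTROL, is not shown here):

#include <arch/trio_pcie_rc.h>
#include <arch/trio_pcie_rc_def.h>

static uint_reg_t rc_device_control_value(void)
{
	TRIO_PCIE_RC_DEVICE_CONTROL_t ctl;

	ctl.word = 0;
	ctl.cor_err_ena = 1;		/* report correctable errors */
	ctl.nf_err_ena = 1;		/* report non-fatal errors */
	ctl.fatal_err_ena = 1;		/* report fatal errors */
	ctl.ur_ena = 1;			/* report unsupported requests */
	ctl.max_payload_size = 1;	/* standard PCIe encoding: 1 => 256 bytes */
	ctl.max_read_req_sz = 2;	/* standard PCIe encoding: 2 => 512 bytes */
	return ctl.word;
}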
diff --git a/arch/tile/include/arch/trio_pcie_rc_def.h b/arch/tile/include/arch/trio_pcie_rc_def.h new file mode 100644 index 000000000000..74081a65b6f2 --- /dev/null +++ b/arch/tile/include/arch/trio_pcie_rc_def.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* Machine-generated file; do not edit. */ | ||
16 | |||
17 | #ifndef __ARCH_TRIO_PCIE_RC_DEF_H__ | ||
18 | #define __ARCH_TRIO_PCIE_RC_DEF_H__ | ||
19 | #define TRIO_PCIE_RC_DEVICE_CAP 0x0074 | ||
20 | #define TRIO_PCIE_RC_DEVICE_CONTROL 0x0078 | ||
21 | #define TRIO_PCIE_RC_DEVICE_ID_VEN_ID 0x0000 | ||
22 | #define TRIO_PCIE_RC_DEVICE_ID_VEN_ID__DEV_ID_SHIFT 16 | ||
23 | #define TRIO_PCIE_RC_REVISION_ID 0x0008 | ||
24 | #endif /* !defined(__ARCH_TRIO_PCIE_RC_DEF_H__) */ | ||
diff --git a/arch/tile/include/arch/trio_shm.h b/arch/tile/include/arch/trio_shm.h new file mode 100644 index 000000000000..3382e38245af --- /dev/null +++ b/arch/tile/include/arch/trio_shm.h | |||
@@ -0,0 +1,125 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* Machine-generated file; do not edit. */ | ||
16 | |||
17 | |||
18 | #ifndef __ARCH_TRIO_SHM_H__ | ||
19 | #define __ARCH_TRIO_SHM_H__ | ||
20 | |||
21 | #include <arch/abi.h> | ||
22 | #include <arch/trio_shm_def.h> | ||
23 | |||
24 | #ifndef __ASSEMBLER__ | ||
25 | /** | ||
26 | * TRIO DMA Descriptor. | ||
27 | * The TRIO DMA descriptor is written by software and consumed by hardware. | ||
28 | * It is used to specify the location of transaction data in the IO and Tile | ||
29 | * domains. | ||
30 | */ | ||
31 | |||
32 | __extension__ | ||
33 | typedef union | ||
34 | { | ||
35 | struct | ||
36 | { | ||
37 | /* Word 0 */ | ||
38 | |||
39 | #ifndef __BIG_ENDIAN__ | ||
40 | /** Tile side virtual address. */ | ||
41 | int_reg_t va : 42; | ||
42 | /** | ||
43 | * Encoded size of buffer used on push DMA when C=1: | ||
44 | * 0 = 128 bytes | ||
45 | * 1 = 256 bytes | ||
46 | * 2 = 512 bytes | ||
47 | * 3 = 1024 bytes | ||
48 | * 4 = 1664 bytes | ||
49 | * 5 = 4096 bytes | ||
50 | * 6 = 10368 bytes | ||
51 | * 7 = 16384 bytes | ||
52 | */ | ||
53 | uint_reg_t bsz : 3; | ||
54 | /** | ||
55 | * Chaining designation. Always zero for pull DMA | ||
56 | * 0 : Unchained buffer pointer | ||
57 | * 1 : Chained buffer pointer. Next buffer descriptor (e.g. VA) stored | ||
58 | * in the first 8 bytes of the buffer. For chained buffers, the first 8 bytes | ||
59 | * buffer contain the next buffer descriptor formatted exactly like a PDE | ||
60 | * buffer descriptor. This allows a chained PDE buffer to be sent using | ||
61 | * push DMA. | ||
62 | */ | ||
63 | uint_reg_t c : 1; | ||
64 | /** | ||
65 | * Notification interrupt will be delivered when the transaction has | ||
66 | * completed (all data has been read from or written to the Tile-side | ||
67 | * buffer). | ||
68 | */ | ||
69 | uint_reg_t notif : 1; | ||
70 | /** | ||
71 | * When 0, the XSIZE field specifies the total byte count for the | ||
72 | * transaction. When 1, the XSIZE field is encoded as 2^(N+14) for N in | ||
73 | * {0..6}: | ||
74 | * 0 = 16KB | ||
75 | * 1 = 32KB | ||
76 | * 2 = 64KB | ||
77 | * 3 = 128KB | ||
78 | * 4 = 256KB | ||
79 | * 5 = 512KB | ||
80 | * 6 = 1MB | ||
81 | * All other encodings of the XSIZE field are reserved when SMOD=1 | ||
82 | */ | ||
83 | uint_reg_t smod : 1; | ||
84 | /** | ||
85 | * Total number of bytes to move for this transaction. When SMOD=1, | ||
86 | * this field is encoded - see SMOD description. | ||
87 | */ | ||
88 | uint_reg_t xsize : 14; | ||
89 | /** Reserved. */ | ||
90 | uint_reg_t __reserved_0 : 1; | ||
91 | /** | ||
92 | * Generation number. Used to indicate a valid descriptor in ring. When | ||
93 | * a new descriptor is written into the ring, software must toggle this | ||
94 | * bit. The net effect is that the GEN bit being written into new | ||
95 | * descriptors toggles each time the ring tail pointer wraps. | ||
96 | */ | ||
97 | uint_reg_t gen : 1; | ||
98 | #else /* __BIG_ENDIAN__ */ | ||
99 | uint_reg_t gen : 1; | ||
100 | uint_reg_t __reserved_0 : 1; | ||
101 | uint_reg_t xsize : 14; | ||
102 | uint_reg_t smod : 1; | ||
103 | uint_reg_t notif : 1; | ||
104 | uint_reg_t c : 1; | ||
105 | uint_reg_t bsz : 3; | ||
106 | int_reg_t va : 42; | ||
107 | #endif | ||
108 | |||
109 | /* Word 1 */ | ||
110 | |||
111 | #ifndef __BIG_ENDIAN__ | ||
112 | /** IO-side address */ | ||
113 | uint_reg_t io_address : 64; | ||
114 | #else /* __BIG_ENDIAN__ */ | ||
115 | uint_reg_t io_address : 64; | ||
116 | #endif | ||
117 | |||
118 | }; | ||
119 | |||
120 | /** Word access */ | ||
121 | uint_reg_t words[2]; | ||
122 | } TRIO_DMA_DESC_t; | ||
123 | #endif /* !defined(__ASSEMBLER__) */ | ||
124 | |||
125 | #endif /* !defined(__ARCH_TRIO_SHM_H__) */ | ||
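A sketch of what filling in a push/pull DMA descriptor looks like given the field descriptions above (the ring bookkeeping structure and names here are invented for illustration). Note how the GEN value written into new descriptors toggles each time the ring tail wraps:

#include <arch/trio_shm.h>

struct trio_dma_ring {
	TRIO_DMA_DESC_t *desc;	/* descriptor ring base */
	unsigned int tail;	/* next slot to fill */
	unsigned int entries;	/* number of ring entries */
	unsigned int gen;	/* generation bit for new descriptors */
};

/* Post one unchained transfer; 'bytes' must fit the 14-bit XSIZE (SMOD=0). */
static void trio_dma_post(struct trio_dma_ring *ring, void *tile_va,
			  uint_reg_t io_addr, unsigned int bytes)
{
	TRIO_DMA_DESC_t d;

	d.words[0] = 0;
	d.words[1] = 0;
	d.va = (long)tile_va;	/* Tile-side virtual address */
	d.c = 0;		/* unchained buffer */
	d.notif = 1;		/* interrupt when the transfer completes */
	d.smod = 0;		/* XSIZE is a plain byte count */
	d.xsize = bytes;
	d.gen = ring->gen;	/* marks this slot as a valid descriptor */
	d.io_address = io_addr;

	ring->desc[ring->tail] = d;
	if (++ring->tail == ring->entries) {
		ring->tail = 0;
		ring->gen ^= 1;	/* GEN toggles each time the tail wraps */
	}
}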
diff --git a/arch/tile/include/arch/trio_shm_def.h b/arch/tile/include/arch/trio_shm_def.h new file mode 100644 index 000000000000..72a59c88b06a --- /dev/null +++ b/arch/tile/include/arch/trio_shm_def.h | |||
@@ -0,0 +1,19 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* Machine-generated file; do not edit. */ | ||
16 | |||
17 | #ifndef __ARCH_TRIO_SHM_DEF_H__ | ||
18 | #define __ARCH_TRIO_SHM_DEF_H__ | ||
19 | #endif /* !defined(__ARCH_TRIO_SHM_DEF_H__) */ | ||
diff --git a/arch/tile/include/arch/usb_host.h b/arch/tile/include/arch/usb_host.h new file mode 100644 index 000000000000..d09f32683962 --- /dev/null +++ b/arch/tile/include/arch/usb_host.h | |||
@@ -0,0 +1,26 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* Machine-generated file; do not edit. */ | ||
16 | |||
17 | #ifndef __ARCH_USB_HOST_H__ | ||
18 | #define __ARCH_USB_HOST_H__ | ||
19 | |||
20 | #include <arch/abi.h> | ||
21 | #include <arch/usb_host_def.h> | ||
22 | |||
23 | #ifndef __ASSEMBLER__ | ||
24 | #endif /* !defined(__ASSEMBLER__) */ | ||
25 | |||
26 | #endif /* !defined(__ARCH_USB_HOST_H__) */ | ||
diff --git a/arch/tile/include/arch/usb_host_def.h b/arch/tile/include/arch/usb_host_def.h new file mode 100644 index 000000000000..aeed7753e8e1 --- /dev/null +++ b/arch/tile/include/arch/usb_host_def.h | |||
@@ -0,0 +1,19 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* Machine-generated file; do not edit. */ | ||
16 | |||
17 | #ifndef __ARCH_USB_HOST_DEF_H__ | ||
18 | #define __ARCH_USB_HOST_DEF_H__ | ||
19 | #endif /* !defined(__ARCH_USB_HOST_DEF_H__) */ | ||
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild index 143473e3a0bb..fb7c65ae8de0 100644 --- a/arch/tile/include/asm/Kbuild +++ b/arch/tile/include/asm/Kbuild | |||
@@ -9,7 +9,6 @@ header-y += hardwall.h | |||
9 | generic-y += bug.h | 9 | generic-y += bug.h |
10 | generic-y += bugs.h | 10 | generic-y += bugs.h |
11 | generic-y += cputime.h | 11 | generic-y += cputime.h |
12 | generic-y += device.h | ||
13 | generic-y += div64.h | 12 | generic-y += div64.h |
14 | generic-y += emergency-restart.h | 13 | generic-y += emergency-restart.h |
15 | generic-y += errno.h | 14 | generic-y += errno.h |
diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h index 392e5333dd8b..a9a529964e07 100644 --- a/arch/tile/include/asm/cache.h +++ b/arch/tile/include/asm/cache.h | |||
@@ -27,11 +27,17 @@ | |||
27 | #define L2_CACHE_ALIGN(x) (((x)+(L2_CACHE_BYTES-1)) & -L2_CACHE_BYTES) | 27 | #define L2_CACHE_ALIGN(x) (((x)+(L2_CACHE_BYTES-1)) & -L2_CACHE_BYTES) |
28 | 28 | ||
29 | /* | 29 | /* |
30 | * TILE-Gx is fully coherent so we don't need to define ARCH_DMA_MINALIGN. | 30 | * TILEPro I/O is not always coherent (networking typically uses coherent |
31 | * I/O, but PCI traffic does not) and setting ARCH_DMA_MINALIGN to the | ||
32 | * L2 cacheline size helps ensure that kernel heap allocations are aligned. | ||
33 | * TILE-Gx I/O is always coherent when used on hash-for-home pages. | ||
34 | * | ||
35 | * However, it's possible at runtime to request not to use hash-for-home | ||
36 | * for the kernel heap, in which case the kernel will use flush-and-inval | ||
37 | * to manage coherence. As a result, we use L2_CACHE_BYTES for the | ||
38 | * DMA minimum alignment to avoid false sharing in the kernel heap. | ||
31 | */ | 39 | */ |
32 | #ifndef __tilegx__ | ||
33 | #define ARCH_DMA_MINALIGN L2_CACHE_BYTES | 40 | #define ARCH_DMA_MINALIGN L2_CACHE_BYTES |
34 | #endif | ||
35 | 41 | ||
36 | /* use the cache line size for the L2, which is where it counts */ | 42 | /* use the cache line size for the L2, which is where it counts */ |
37 | #define SMP_CACHE_BYTES_SHIFT L2_CACHE_SHIFT | 43 | #define SMP_CACHE_BYTES_SHIFT L2_CACHE_SHIFT |
diff --git a/arch/tile/include/asm/checksum.h b/arch/tile/include/asm/checksum.h index a120766c7264..b21a2fdec9f7 100644 --- a/arch/tile/include/asm/checksum.h +++ b/arch/tile/include/asm/checksum.h | |||
@@ -21,4 +21,22 @@ | |||
21 | __wsum do_csum(const unsigned char *buff, int len); | 21 | __wsum do_csum(const unsigned char *buff, int len); |
22 | #define do_csum do_csum | 22 | #define do_csum do_csum |
23 | 23 | ||
24 | /* | ||
25 | * Return the sum of all the 16-bit subwords in a long. | ||
26 | * This sums two subwords on a 32-bit machine, and four on 64 bits. | ||
27 | * The implementation does two vector adds to capture any overflow. | ||
28 | */ | ||
29 | static inline unsigned int csum_long(unsigned long x) | ||
30 | { | ||
31 | unsigned long ret; | ||
32 | #ifdef __tilegx__ | ||
33 | ret = __insn_v2sadu(x, 0); | ||
34 | ret = __insn_v2sadu(ret, 0); | ||
35 | #else | ||
36 | ret = __insn_sadh_u(x, 0); | ||
37 | ret = __insn_sadh_u(ret, 0); | ||
38 | #endif | ||
39 | return ret; | ||
40 | } | ||
41 | |||
24 | #endif /* _ASM_TILE_CHECKSUM_H */ | 42 | #endif /* _ASM_TILE_CHECKSUM_H */ |
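A hedged usage example (not from this commit): csum_long() can fold a word-wide running sum down to 16 bits, so an internet-style checksum over a word-aligned buffer might look like the following, where the carry-accumulating loop is an assumption about how the caller builds up the sum:

#include <asm/checksum.h>

/* Ones'-complement checksum of a word-aligned buffer of 'nwords' longs. */
static unsigned short csum_words(const unsigned long *p, int nwords)
{
	unsigned long sum = 0;
	int i;

	for (i = 0; i < nwords; i++) {
		sum += p[i];
		sum += (sum < p[i]);	/* fold the carry back in */
	}
	/* csum_long() sums the 16-bit subwords; complement for the result. */
	return ~csum_long(sum) & 0xffff;
}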
diff --git a/arch/tile/include/asm/device.h b/arch/tile/include/asm/device.h new file mode 100644 index 000000000000..5182705bd056 --- /dev/null +++ b/arch/tile/include/asm/device.h | |||
@@ -0,0 +1,33 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * Arch specific extensions to struct device | ||
14 | */ | ||
15 | |||
16 | #ifndef _ASM_TILE_DEVICE_H | ||
17 | #define _ASM_TILE_DEVICE_H | ||
18 | |||
19 | struct dev_archdata { | ||
20 | /* DMA operations on that device */ | ||
21 | struct dma_map_ops *dma_ops; | ||
22 | |||
23 | /* Offset of the DMA address from the PA. */ | ||
24 | dma_addr_t dma_offset; | ||
25 | |||
26 | /* Highest DMA address that can be generated by this device. */ | ||
27 | dma_addr_t max_direct_dma_addr; | ||
28 | }; | ||
29 | |||
30 | struct pdev_archdata { | ||
31 | }; | ||
32 | |||
33 | #endif /* _ASM_TILE_DEVICE_H */ | ||
diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h index eaa06d175b39..4b6247d1a315 100644 --- a/arch/tile/include/asm/dma-mapping.h +++ b/arch/tile/include/asm/dma-mapping.h | |||
@@ -20,69 +20,80 @@ | |||
20 | #include <linux/cache.h> | 20 | #include <linux/cache.h> |
21 | #include <linux/io.h> | 21 | #include <linux/io.h> |
22 | 22 | ||
23 | /* | 23 | extern struct dma_map_ops *tile_dma_map_ops; |
24 | * Note that on x86 and powerpc, there is a "struct dma_mapping_ops" | 24 | extern struct dma_map_ops *gx_pci_dma_map_ops; |
25 | * that is used for all the DMA operations. For now, we don't have an | 25 | extern struct dma_map_ops *gx_legacy_pci_dma_map_ops; |
26 | * equivalent on tile, because we only have a single way of doing DMA. | 26 | |
27 | * (Tilera bug 7994 to use dma_mapping_ops.) | 27 | static inline struct dma_map_ops *get_dma_ops(struct device *dev) |
28 | */ | 28 | { |
29 | if (dev && dev->archdata.dma_ops) | ||
30 | return dev->archdata.dma_ops; | ||
31 | else | ||
32 | return tile_dma_map_ops; | ||
33 | } | ||
34 | |||
35 | static inline dma_addr_t get_dma_offset(struct device *dev) | ||
36 | { | ||
37 | return dev->archdata.dma_offset; | ||
38 | } | ||
39 | |||
40 | static inline void set_dma_offset(struct device *dev, dma_addr_t off) | ||
41 | { | ||
42 | dev->archdata.dma_offset = off; | ||
43 | } | ||
29 | 44 | ||
30 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | 45 | static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) |
31 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | 46 | { |
32 | 47 | return paddr + get_dma_offset(dev); | |
33 | extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, | 48 | } |
34 | enum dma_data_direction); | 49 | |
35 | extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, | 50 | static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) |
36 | size_t size, enum dma_data_direction); | 51 | { |
37 | extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | 52 | return daddr - get_dma_offset(dev); |
38 | enum dma_data_direction); | 53 | } |
39 | extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg, | 54 | |
40 | int nhwentries, enum dma_data_direction); | 55 | static inline void dma_mark_clean(void *addr, size_t size) {} |
41 | extern dma_addr_t dma_map_page(struct device *dev, struct page *page, | 56 | |
42 | unsigned long offset, size_t size, | 57 | #include <asm-generic/dma-mapping-common.h> |
43 | enum dma_data_direction); | 58 | |
44 | extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address, | 59 | static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) |
45 | size_t size, enum dma_data_direction); | 60 | { |
46 | extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | 61 | dev->archdata.dma_ops = ops; |
47 | int nelems, enum dma_data_direction); | 62 | } |
48 | extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | 63 | |
49 | int nelems, enum dma_data_direction); | 64 | static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) |
50 | 65 | { | |
51 | 66 | if (!dev->dma_mask) | |
52 | void *dma_alloc_coherent(struct device *dev, size_t size, | 67 | return 0; |
53 | dma_addr_t *dma_handle, gfp_t flag); | 68 | |
54 | 69 | return addr + size - 1 <= *dev->dma_mask; | |
55 | void dma_free_coherent(struct device *dev, size_t size, | 70 | } |
56 | void *vaddr, dma_addr_t dma_handle); | ||
57 | |||
58 | extern void dma_sync_single_for_cpu(struct device *, dma_addr_t, size_t, | ||
59 | enum dma_data_direction); | ||
60 | extern void dma_sync_single_for_device(struct device *, dma_addr_t, | ||
61 | size_t, enum dma_data_direction); | ||
62 | extern void dma_sync_single_range_for_cpu(struct device *, dma_addr_t, | ||
63 | unsigned long offset, size_t, | ||
64 | enum dma_data_direction); | ||
65 | extern void dma_sync_single_range_for_device(struct device *, dma_addr_t, | ||
66 | unsigned long offset, size_t, | ||
67 | enum dma_data_direction); | ||
68 | extern void dma_cache_sync(struct device *dev, void *vaddr, size_t, | ||
69 | enum dma_data_direction); | ||
70 | 71 | ||
71 | static inline int | 72 | static inline int |
72 | dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | 73 | dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
73 | { | 74 | { |
74 | return 0; | 75 | return get_dma_ops(dev)->mapping_error(dev, dma_addr); |
75 | } | 76 | } |
76 | 77 | ||
77 | static inline int | 78 | static inline int |
78 | dma_supported(struct device *dev, u64 mask) | 79 | dma_supported(struct device *dev, u64 mask) |
79 | { | 80 | { |
80 | return 1; | 81 | return get_dma_ops(dev)->dma_supported(dev, mask); |
81 | } | 82 | } |
82 | 83 | ||
83 | static inline int | 84 | static inline int |
84 | dma_set_mask(struct device *dev, u64 mask) | 85 | dma_set_mask(struct device *dev, u64 mask) |
85 | { | 86 | { |
87 | struct dma_map_ops *dma_ops = get_dma_ops(dev); | ||
88 | |||
89 | /* Handle legacy PCI devices with limited memory addressability. */ | ||
90 | if ((dma_ops == gx_pci_dma_map_ops) && (mask <= DMA_BIT_MASK(32))) { | ||
91 | set_dma_ops(dev, gx_legacy_pci_dma_map_ops); | ||
92 | set_dma_offset(dev, 0); | ||
93 | if (mask > dev->archdata.max_direct_dma_addr) | ||
94 | mask = dev->archdata.max_direct_dma_addr; | ||
95 | } | ||
96 | |||
86 | if (!dev->dma_mask || !dma_supported(dev, mask)) | 97 | if (!dev->dma_mask || !dma_supported(dev, mask)) |
87 | return -EIO; | 98 | return -EIO; |
88 | 99 | ||
@@ -91,4 +102,43 @@ dma_set_mask(struct device *dev, u64 mask) | |||
91 | return 0; | 102 | return 0; |
92 | } | 103 | } |
93 | 104 | ||
105 | static inline void *dma_alloc_attrs(struct device *dev, size_t size, | ||
106 | dma_addr_t *dma_handle, gfp_t flag, | ||
107 | struct dma_attrs *attrs) | ||
108 | { | ||
109 | struct dma_map_ops *dma_ops = get_dma_ops(dev); | ||
110 | void *cpu_addr; | ||
111 | |||
112 | cpu_addr = dma_ops->alloc(dev, size, dma_handle, flag, attrs); | ||
113 | |||
114 | debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); | ||
115 | |||
116 | return cpu_addr; | ||
117 | } | ||
118 | |||
119 | static inline void dma_free_attrs(struct device *dev, size_t size, | ||
120 | void *cpu_addr, dma_addr_t dma_handle, | ||
121 | struct dma_attrs *attrs) | ||
122 | { | ||
123 | struct dma_map_ops *dma_ops = get_dma_ops(dev); | ||
124 | |||
125 | debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); | ||
126 | |||
127 | dma_ops->free(dev, size, cpu_addr, dma_handle, attrs); | ||
128 | } | ||
129 | |||
130 | #define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) | ||
131 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) | ||
132 | #define dma_free_coherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL) | ||
133 | #define dma_free_noncoherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL) | ||
134 | |||
135 | /* | ||
136 | * dma_alloc_noncoherent() is #defined to return coherent memory, | ||
137 | * so there's no need to do any flushing here. | ||
138 | */ | ||
139 | static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | ||
140 | enum dma_data_direction direction) | ||
141 | { | ||
142 | } | ||
143 | |||
94 | #endif /* _ASM_TILE_DMA_MAPPING_H */ | 144 | #endif /* _ASM_TILE_DMA_MAPPING_H */ |
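From a driver's point of view the new dma_set_mask() behavior is transparent; a hedged sketch of the usual probe-time pattern that would now switch a 32-bit-only device onto gx_legacy_pci_dma_map_ops (the function name and error handling are illustrative):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_probe_dma(struct pci_dev *pdev)
{
	/* Prefer full 64-bit addressing. */
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
		return 0;

	/*
	 * On tilegx this path switches the device to the bounce-buffered
	 * legacy ops and clamps the mask to max_direct_dma_addr.
	 */
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return 0;

	dev_err(&pdev->dev, "no usable DMA configuration\n");
	return -EIO;
}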
diff --git a/arch/tile/include/asm/fixmap.h b/arch/tile/include/asm/fixmap.h index c66f7933beaa..e16dbf929cb5 100644 --- a/arch/tile/include/asm/fixmap.h +++ b/arch/tile/include/asm/fixmap.h | |||
@@ -45,15 +45,23 @@ | |||
45 | * | 45 | * |
46 | * TLB entries of such buffers will not be flushed across | 46 | * TLB entries of such buffers will not be flushed across |
47 | * task switches. | 47 | * task switches. |
48 | * | ||
49 | * We don't bother with a FIX_HOLE since above the fixmaps | ||
50 | * is unmapped memory in any case. | ||
51 | */ | 48 | */ |
52 | enum fixed_addresses { | 49 | enum fixed_addresses { |
50 | #ifdef __tilegx__ | ||
51 | /* | ||
52 | * TILEPro has unmapped memory above so the hole isn't needed, | ||
53 | * and in any case the hole pushes us over a single 16MB pmd. | ||
54 | */ | ||
55 | FIX_HOLE, | ||
56 | #endif | ||
53 | #ifdef CONFIG_HIGHMEM | 57 | #ifdef CONFIG_HIGHMEM |
54 | FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ | 58 | FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ |
55 | FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, | 59 | FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, |
56 | #endif | 60 | #endif |
61 | #ifdef __tilegx__ /* see homecache.c */ | ||
62 | FIX_HOMECACHE_BEGIN, | ||
63 | FIX_HOMECACHE_END = FIX_HOMECACHE_BEGIN+(NR_CPUS)-1, | ||
64 | #endif | ||
57 | __end_of_permanent_fixed_addresses, | 65 | __end_of_permanent_fixed_addresses, |
58 | 66 | ||
59 | /* | 67 | /* |
diff --git a/arch/tile/include/asm/homecache.h b/arch/tile/include/asm/homecache.h index a8243865d49e..7b7771328642 100644 --- a/arch/tile/include/asm/homecache.h +++ b/arch/tile/include/asm/homecache.h | |||
@@ -79,10 +79,17 @@ extern void homecache_change_page_home(struct page *, int order, int home); | |||
79 | /* | 79 | /* |
80 | * Flush a page out of whatever cache(s) it is in. | 80 | * Flush a page out of whatever cache(s) it is in. |
81 | * This is more than just finv, since it properly handles waiting | 81 | * This is more than just finv, since it properly handles waiting |
82 | * for the data to reach memory on tilepro, but it can be quite | 82 | * for the data to reach memory, but it can be quite |
83 | * heavyweight, particularly on hash-for-home memory. | 83 | * heavyweight, particularly on incoherent or immutable memory. |
84 | */ | 84 | */ |
85 | extern void homecache_flush_cache(struct page *, int order); | 85 | extern void homecache_finv_page(struct page *); |
86 | |||
87 | /* | ||
88 | * Flush a page out of the specified home cache. | ||
89 | * Note that the specified home need not be the actual home of the page, | ||
90 | * as for example might be the case when coordinating with I/O devices. | ||
91 | */ | ||
92 | extern void homecache_finv_map_page(struct page *, int home); | ||
86 | 93 | ||
87 | /* | 94 | /* |
88 | * Allocate a page with the given GFP flags, home, and optionally | 95 | * Allocate a page with the given GFP flags, home, and optionally |
@@ -104,10 +111,10 @@ extern struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask, | |||
104 | * routines use homecache_change_page_home() to reset the home | 111 | * routines use homecache_change_page_home() to reset the home |
105 | * back to the default before returning the page to the allocator. | 112 | * back to the default before returning the page to the allocator. |
106 | */ | 113 | */ |
114 | void __homecache_free_pages(struct page *, unsigned int order); | ||
107 | void homecache_free_pages(unsigned long addr, unsigned int order); | 115 | void homecache_free_pages(unsigned long addr, unsigned int order); |
108 | #define homecache_free_page(page) \ | 116 | #define __homecache_free_page(page) __homecache_free_pages((page), 0) |
109 | homecache_free_pages((page), 0) | 117 | #define homecache_free_page(page) homecache_free_pages((page), 0) |
110 | |||
111 | 118 | ||
112 | 119 | ||
113 | /* | 120 | /* |
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h index d2152deb1f3c..2a9b293fece6 100644 --- a/arch/tile/include/asm/io.h +++ b/arch/tile/include/asm/io.h | |||
@@ -62,6 +62,92 @@ extern void iounmap(volatile void __iomem *addr); | |||
62 | #define mm_ptov(addr) ((void *)phys_to_virt(addr)) | 62 | #define mm_ptov(addr) ((void *)phys_to_virt(addr)) |
63 | #define mm_vtop(addr) ((unsigned long)virt_to_phys(addr)) | 63 | #define mm_vtop(addr) ((unsigned long)virt_to_phys(addr)) |
64 | 64 | ||
65 | #if CHIP_HAS_MMIO() | ||
66 | |||
67 | /* | ||
68 | * We use inline assembly to guarantee that the compiler does not | ||
69 | * split an access into multiple byte-sized accesses as it might | ||
70 | * sometimes do if a register data structure is marked "packed". | ||
71 | * Obviously on tile we can't tolerate such an access being | ||
72 | * actually unaligned, but we want to avoid the case where the | ||
73 | * compiler conservatively would generate multiple accesses even | ||
74 | * for an aligned read or write. | ||
75 | */ | ||
76 | |||
77 | static inline u8 __raw_readb(const volatile void __iomem *addr) | ||
78 | { | ||
79 | return *(const volatile u8 __force *)addr; | ||
80 | } | ||
81 | |||
82 | static inline u16 __raw_readw(const volatile void __iomem *addr) | ||
83 | { | ||
84 | u16 ret; | ||
85 | asm volatile("ld2u %0, %1" : "=r" (ret) : "r" (addr)); | ||
86 | barrier(); | ||
87 | return le16_to_cpu(ret); | ||
88 | } | ||
89 | |||
90 | static inline u32 __raw_readl(const volatile void __iomem *addr) | ||
91 | { | ||
92 | u32 ret; | ||
93 | /* Sign-extend to conform to u32 ABI sign-extension convention. */ | ||
94 | asm volatile("ld4s %0, %1" : "=r" (ret) : "r" (addr)); | ||
95 | barrier(); | ||
96 | return le32_to_cpu(ret); | ||
97 | } | ||
98 | |||
99 | static inline u64 __raw_readq(const volatile void __iomem *addr) | ||
100 | { | ||
101 | u64 ret; | ||
102 | asm volatile("ld %0, %1" : "=r" (ret) : "r" (addr)); | ||
103 | barrier(); | ||
104 | return le64_to_cpu(ret); | ||
105 | } | ||
106 | |||
107 | static inline void __raw_writeb(u8 val, volatile void __iomem *addr) | ||
108 | { | ||
109 | *(volatile u8 __force *)addr = val; | ||
110 | } | ||
111 | |||
112 | static inline void __raw_writew(u16 val, volatile void __iomem *addr) | ||
113 | { | ||
114 | asm volatile("st2 %0, %1" :: "r" (addr), "r" (cpu_to_le16(val))); | ||
115 | } | ||
116 | |||
117 | static inline void __raw_writel(u32 val, volatile void __iomem *addr) | ||
118 | { | ||
119 | asm volatile("st4 %0, %1" :: "r" (addr), "r" (cpu_to_le32(val))); | ||
120 | } | ||
121 | |||
122 | static inline void __raw_writeq(u64 val, volatile void __iomem *addr) | ||
123 | { | ||
124 | asm volatile("st %0, %1" :: "r" (addr), "r" (cpu_to_le64(val))); | ||
125 | } | ||
126 | |||
127 | /* | ||
128 | * The on-chip I/O hardware on tilegx is configured with VA=PA for the | ||
129 | * kernel's PA range. The low-level APIs and field names use "va" and | ||
130 | * "void *" nomenclature, to be consistent with the general notion | ||
131 | * that the addresses in question are virtualizable, but in the kernel | ||
132 | * context we are actually manipulating PA values. (In other contexts, | ||
133 | * e.g. access from user space, we do in fact use real virtual addresses | ||
134 | * in the va fields.) To allow readers of the code to understand what's | ||
135 | * happening, we direct their attention to this comment by using the | ||
136 | * following two functions that just duplicate __va() and __pa(). | ||
137 | */ | ||
138 | typedef unsigned long tile_io_addr_t; | ||
139 | static inline tile_io_addr_t va_to_tile_io_addr(void *va) | ||
140 | { | ||
141 | BUILD_BUG_ON(sizeof(phys_addr_t) != sizeof(tile_io_addr_t)); | ||
142 | return __pa(va); | ||
143 | } | ||
144 | static inline void *tile_io_addr_to_va(tile_io_addr_t tile_io_addr) | ||
145 | { | ||
146 | return __va(tile_io_addr); | ||
147 | } | ||
148 | |||
149 | #else /* CHIP_HAS_MMIO() */ | ||
150 | |||
65 | #ifdef CONFIG_PCI | 151 | #ifdef CONFIG_PCI |
66 | 152 | ||
67 | extern u8 _tile_readb(unsigned long addr); | 153 | extern u8 _tile_readb(unsigned long addr); |
@@ -73,10 +159,19 @@ extern void _tile_writew(u16 val, unsigned long addr); | |||
73 | extern void _tile_writel(u32 val, unsigned long addr); | 159 | extern void _tile_writel(u32 val, unsigned long addr); |
74 | extern void _tile_writeq(u64 val, unsigned long addr); | 160 | extern void _tile_writeq(u64 val, unsigned long addr); |
75 | 161 | ||
76 | #else | 162 | #define __raw_readb(addr) _tile_readb((unsigned long)addr) |
163 | #define __raw_readw(addr) _tile_readw((unsigned long)addr) | ||
164 | #define __raw_readl(addr) _tile_readl((unsigned long)addr) | ||
165 | #define __raw_readq(addr) _tile_readq((unsigned long)addr) | ||
166 | #define __raw_writeb(val, addr) _tile_writeb(val, (unsigned long)addr) | ||
167 | #define __raw_writew(val, addr) _tile_writew(val, (unsigned long)addr) | ||
168 | #define __raw_writel(val, addr) _tile_writel(val, (unsigned long)addr) | ||
169 | #define __raw_writeq(val, addr) _tile_writeq(val, (unsigned long)addr) | ||
170 | |||
171 | #else /* CONFIG_PCI */ | ||
77 | 172 | ||
78 | /* | 173 | /* |
79 | * The Tile architecture does not support IOMEM unless PCI is enabled. | 174 | * The tilepro architecture does not support IOMEM unless PCI is enabled. |
80 | * Unfortunately we can't yet simply not declare these methods, | 175 | * Unfortunately we can't yet simply not declare these methods, |
81 | * since some generic code that compiles into the kernel, but | 176 | * since some generic code that compiles into the kernel, but |
82 | * we never run, uses them unconditionally. | 177 | * we never run, uses them unconditionally. |
@@ -88,65 +183,58 @@ static inline int iomem_panic(void) | |||
88 | return 0; | 183 | return 0; |
89 | } | 184 | } |
90 | 185 | ||
91 | static inline u8 _tile_readb(unsigned long addr) | 186 | static inline u8 readb(unsigned long addr) |
92 | { | 187 | { |
93 | return iomem_panic(); | 188 | return iomem_panic(); |
94 | } | 189 | } |
95 | 190 | ||
96 | static inline u16 _tile_readw(unsigned long addr) | 191 | static inline u16 _readw(unsigned long addr) |
97 | { | 192 | { |
98 | return iomem_panic(); | 193 | return iomem_panic(); |
99 | } | 194 | } |
100 | 195 | ||
101 | static inline u32 _tile_readl(unsigned long addr) | 196 | static inline u32 readl(unsigned long addr) |
102 | { | 197 | { |
103 | return iomem_panic(); | 198 | return iomem_panic(); |
104 | } | 199 | } |
105 | 200 | ||
106 | static inline u64 _tile_readq(unsigned long addr) | 201 | static inline u64 readq(unsigned long addr) |
107 | { | 202 | { |
108 | return iomem_panic(); | 203 | return iomem_panic(); |
109 | } | 204 | } |
110 | 205 | ||
111 | static inline void _tile_writeb(u8 val, unsigned long addr) | 206 | static inline void writeb(u8 val, unsigned long addr) |
112 | { | 207 | { |
113 | iomem_panic(); | 208 | iomem_panic(); |
114 | } | 209 | } |
115 | 210 | ||
116 | static inline void _tile_writew(u16 val, unsigned long addr) | 211 | static inline void writew(u16 val, unsigned long addr) |
117 | { | 212 | { |
118 | iomem_panic(); | 213 | iomem_panic(); |
119 | } | 214 | } |
120 | 215 | ||
121 | static inline void _tile_writel(u32 val, unsigned long addr) | 216 | static inline void writel(u32 val, unsigned long addr) |
122 | { | 217 | { |
123 | iomem_panic(); | 218 | iomem_panic(); |
124 | } | 219 | } |
125 | 220 | ||
126 | static inline void _tile_writeq(u64 val, unsigned long addr) | 221 | static inline void writeq(u64 val, unsigned long addr) |
127 | { | 222 | { |
128 | iomem_panic(); | 223 | iomem_panic(); |
129 | } | 224 | } |
130 | 225 | ||
131 | #endif | 226 | #endif /* CONFIG_PCI */ |
227 | |||
228 | #endif /* CHIP_HAS_MMIO() */ | ||
132 | 229 | ||
133 | #define readb(addr) _tile_readb((unsigned long)addr) | 230 | #define readb __raw_readb |
134 | #define readw(addr) _tile_readw((unsigned long)addr) | 231 | #define readw __raw_readw |
135 | #define readl(addr) _tile_readl((unsigned long)addr) | 232 | #define readl __raw_readl |
136 | #define readq(addr) _tile_readq((unsigned long)addr) | 233 | #define readq __raw_readq |
137 | #define writeb(val, addr) _tile_writeb(val, (unsigned long)addr) | 234 | #define writeb __raw_writeb |
138 | #define writew(val, addr) _tile_writew(val, (unsigned long)addr) | 235 | #define writew __raw_writew |
139 | #define writel(val, addr) _tile_writel(val, (unsigned long)addr) | 236 | #define writel __raw_writel |
140 | #define writeq(val, addr) _tile_writeq(val, (unsigned long)addr) | 237 | #define writeq __raw_writeq |
141 | |||
142 | #define __raw_readb readb | ||
143 | #define __raw_readw readw | ||
144 | #define __raw_readl readl | ||
145 | #define __raw_readq readq | ||
146 | #define __raw_writeb writeb | ||
147 | #define __raw_writew writew | ||
148 | #define __raw_writel writel | ||
149 | #define __raw_writeq writeq | ||
150 | 238 | ||
151 | #define readb_relaxed readb | 239 | #define readb_relaxed readb |
152 | #define readw_relaxed readw | 240 | #define readw_relaxed readw |
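With the accessors above in place, tilegx drivers can use the ordinary readl()/writel() style against an ioremap()'d region, and the inline assembly guarantees each access is issued as a single load or store of the requested width. A minimal sketch follows; the register offsets and the ENABLE bit are hypothetical, chosen only to illustrate the calls.

#include <linux/io.h>
#include <linux/errno.h>

#define EX_STATUS_REG	0x0	/* hypothetical 32-bit status register */
#define EX_CTRL_REG	0x4	/* hypothetical 32-bit control register */
#define EX_CTRL_ENABLE	(1 << 0)

static int example_enable_device(resource_size_t phys, unsigned long len)
{
	void __iomem *regs = ioremap(phys, len);
	u32 status;

	if (!regs)
		return -ENOMEM;

	status = readl(regs + EX_STATUS_REG);			/* one ld4s on tilegx */
	writel(status | EX_CTRL_ENABLE, regs + EX_CTRL_REG);	/* one st4 on tilegx */

	iounmap(regs);
	return 0;
}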
diff --git a/arch/tile/include/asm/memprof.h b/arch/tile/include/asm/memprof.h deleted file mode 100644 index 359949be28c1..000000000000 --- a/arch/tile/include/asm/memprof.h +++ /dev/null | |||
@@ -1,33 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * The hypervisor's memory controller profiling infrastructure allows | ||
15 | * the programmer to find out what fraction of the available memory | ||
16 | * bandwidth is being consumed at each memory controller. The | ||
17 | * profiler provides start, stop, and clear operations to allows | ||
18 | * profiling over a specific time window, as well as an interface for | ||
19 | * reading the most recent profile values. | ||
20 | * | ||
21 | * This header declares IOCTL codes necessary to control memprof. | ||
22 | */ | ||
23 | #ifndef _ASM_TILE_MEMPROF_H | ||
24 | #define _ASM_TILE_MEMPROF_H | ||
25 | |||
26 | #include <linux/ioctl.h> | ||
27 | |||
28 | #define MEMPROF_IOCTL_TYPE 0xB4 | ||
29 | #define MEMPROF_IOCTL_START _IO(MEMPROF_IOCTL_TYPE, 0) | ||
30 | #define MEMPROF_IOCTL_STOP _IO(MEMPROF_IOCTL_TYPE, 1) | ||
31 | #define MEMPROF_IOCTL_CLEAR _IO(MEMPROF_IOCTL_TYPE, 2) | ||
32 | |||
33 | #endif /* _ASM_TILE_MEMPROF_H */ | ||
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h index 9d9131e5c552..dd033a4fd627 100644 --- a/arch/tile/include/asm/page.h +++ b/arch/tile/include/asm/page.h | |||
@@ -174,7 +174,9 @@ static inline __attribute_const__ int get_order(unsigned long size) | |||
174 | #define MEM_LOW_END (HALF_VA_SPACE - 1) /* low half */ | 174 | #define MEM_LOW_END (HALF_VA_SPACE - 1) /* low half */ |
175 | #define MEM_HIGH_START (-HALF_VA_SPACE) /* high half */ | 175 | #define MEM_HIGH_START (-HALF_VA_SPACE) /* high half */ |
176 | #define PAGE_OFFSET MEM_HIGH_START | 176 | #define PAGE_OFFSET MEM_HIGH_START |
177 | #define _VMALLOC_START _AC(0xfffffff500000000, UL) /* 4 GB */ | 177 | #define FIXADDR_BASE _AC(0xfffffff400000000, UL) /* 4 GB */ |
178 | #define FIXADDR_TOP _AC(0xfffffff500000000, UL) /* 4 GB */ | ||
179 | #define _VMALLOC_START FIXADDR_TOP | ||
178 | #define HUGE_VMAP_BASE _AC(0xfffffff600000000, UL) /* 4 GB */ | 180 | #define HUGE_VMAP_BASE _AC(0xfffffff600000000, UL) /* 4 GB */ |
179 | #define MEM_SV_START _AC(0xfffffff700000000, UL) /* 256 MB */ | 181 | #define MEM_SV_START _AC(0xfffffff700000000, UL) /* 256 MB */ |
180 | #define MEM_SV_INTRPT MEM_SV_START | 182 | #define MEM_SV_INTRPT MEM_SV_START |
@@ -185,9 +187,6 @@ static inline __attribute_const__ int get_order(unsigned long size) | |||
185 | /* Highest DTLB address we will use */ | 187 | /* Highest DTLB address we will use */ |
186 | #define KERNEL_HIGH_VADDR MEM_SV_START | 188 | #define KERNEL_HIGH_VADDR MEM_SV_START |
187 | 189 | ||
188 | /* Since we don't currently provide any fixmaps, we use an impossible VA. */ | ||
189 | #define FIXADDR_TOP MEM_HV_START | ||
190 | |||
191 | #else /* !__tilegx__ */ | 190 | #else /* !__tilegx__ */ |
192 | 191 | ||
193 | /* | 192 | /* |
diff --git a/arch/tile/include/asm/pci.h b/arch/tile/include/asm/pci.h index 32e6cbe8dff3..302cdf71ceed 100644 --- a/arch/tile/include/asm/pci.h +++ b/arch/tile/include/asm/pci.h | |||
@@ -15,9 +15,13 @@ | |||
15 | #ifndef _ASM_TILE_PCI_H | 15 | #ifndef _ASM_TILE_PCI_H |
16 | #define _ASM_TILE_PCI_H | 16 | #define _ASM_TILE_PCI_H |
17 | 17 | ||
18 | #include <linux/dma-mapping.h> | ||
18 | #include <linux/pci.h> | 19 | #include <linux/pci.h> |
20 | #include <linux/numa.h> | ||
19 | #include <asm-generic/pci_iomap.h> | 21 | #include <asm-generic/pci_iomap.h> |
20 | 22 | ||
23 | #ifndef __tilegx__ | ||
24 | |||
21 | /* | 25 | /* |
22 | * Structure of a PCI controller (host bridge) | 26 | * Structure of a PCI controller (host bridge) |
23 | */ | 27 | */ |
@@ -41,21 +45,151 @@ struct pci_controller { | |||
41 | }; | 45 | }; |
42 | 46 | ||
43 | /* | 47 | /* |
48 | * This flag tells if the platform is TILEmpower that needs | ||
49 | * special configuration for the PLX switch chip. | ||
50 | */ | ||
51 | extern int tile_plx_gen1; | ||
52 | |||
53 | static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {} | ||
54 | |||
55 | #define TILE_NUM_PCIE 2 | ||
56 | |||
57 | /* | ||
44 | * The hypervisor maps the entirety of CPA-space as bus addresses, so | 58 | * The hypervisor maps the entirety of CPA-space as bus addresses, so |
45 | * bus addresses are physical addresses. The networking and block | 59 | * bus addresses are physical addresses. The networking and block |
46 | * device layers use this boolean for bounce buffer decisions. | 60 | * device layers use this boolean for bounce buffer decisions. |
47 | */ | 61 | */ |
48 | #define PCI_DMA_BUS_IS_PHYS 1 | 62 | #define PCI_DMA_BUS_IS_PHYS 1 |
49 | 63 | ||
64 | /* generic pci stuff */ | ||
65 | #include <asm-generic/pci.h> | ||
66 | |||
67 | #else | ||
68 | |||
69 | #include <asm/page.h> | ||
70 | #include <gxio/trio.h> | ||
71 | |||
72 | /** | ||
73 | * We reserve the hugepage-size address range at the top of the 64-bit address | ||
74 | * space to serve as the PCI window, emulating the BAR0 space of an endpoint | ||
75 | * device. This window is used by the chip-to-chip applications running on | ||
76 | * the RC node. The reason for carving out this window is that Mem-Maps that | ||
77 | * back up this window will not overlap with those that map the real physical | ||
78 | * memory. | ||
79 | */ | ||
80 | #define PCIE_HOST_BAR0_SIZE HPAGE_SIZE | ||
81 | #define PCIE_HOST_BAR0_START HPAGE_MASK | ||
82 | |||
83 | /** | ||
84 | * The first PAGE_SIZE of the above "BAR" window is mapped to the | ||
85 | * gxpci_host_regs structure. | ||
86 | */ | ||
87 | #define PCIE_HOST_REGS_SIZE PAGE_SIZE | ||
88 | |||
89 | /* | ||
90 | * This is the PCI address where the Mem-Map interrupt regions start. | ||
91 | * We use the 2nd to the last huge page of the 64-bit address space. | ||
92 | * The last huge page is used for the rootcomplex "bar", for C2C purpose. | ||
93 | */ | ||
94 | #define MEM_MAP_INTR_REGIONS_BASE (HPAGE_MASK - HPAGE_SIZE) | ||
95 | |||
96 | /* | ||
97 | * Each Mem-Map interrupt region occupies 4KB. | ||
98 | */ | ||
99 | #define MEM_MAP_INTR_REGION_SIZE (1 << TRIO_MAP_MEM_LIM__ADDR_SHIFT) | ||
100 | |||
101 | /* | ||
102 | * Allocate the PCI BAR window right below 4GB. | ||
103 | */ | ||
104 | #define TILE_PCI_BAR_WINDOW_TOP (1ULL << 32) | ||
105 | |||
106 | /* | ||
107 | * Allocate 1GB for the PCI BAR window. | ||
108 | */ | ||
109 | #define TILE_PCI_BAR_WINDOW_SIZE (1 << 30) | ||
110 | |||
111 | /* | ||
112 | * This is the highest bus address targeting the host memory that | ||
113 | * can be generated by legacy PCI devices with 32-bit or less | ||
114 | * DMA capability, dictated by the BAR window size and location. | ||
115 | */ | ||
116 | #define TILE_PCI_MAX_DIRECT_DMA_ADDRESS \ | ||
117 | (TILE_PCI_BAR_WINDOW_TOP - TILE_PCI_BAR_WINDOW_SIZE - 1) | ||
118 | |||
119 | /* | ||
120 | * We shift the PCI bus range for all the physical memory up by the whole PA | ||
121 | * range. The corresponding CPA of an incoming PCI request will be the PCI | ||
122 | * address minus TILE_PCI_MEM_MAP_BASE_OFFSET. This also implies | ||
123 | * that the 64-bit capable devices will be given DMA addresses as | ||
124 | * the CPA plus TILE_PCI_MEM_MAP_BASE_OFFSET. To support 32-bit | ||
125 | * devices, we create a separate map region that handles the low | ||
126 | * 4GB. | ||
127 | */ | ||
128 | #define TILE_PCI_MEM_MAP_BASE_OFFSET (1ULL << CHIP_PA_WIDTH()) | ||
129 | |||
130 | /* | ||
131 | * Start of the PCI memory resource, which starts at the end of the | ||
132 | * maximum system physical RAM address. | ||
133 | */ | ||
134 | #define TILE_PCI_MEM_START (1ULL << CHIP_PA_WIDTH()) | ||
135 | |||
136 | /* | ||
137 | * Structure of a PCI controller (host bridge) on Gx. | ||
138 | */ | ||
139 | struct pci_controller { | ||
140 | |||
141 | /* Pointer back to the TRIO that this PCIe port is connected to. */ | ||
142 | gxio_trio_context_t *trio; | ||
143 | int mac; /* PCIe mac index on the TRIO shim */ | ||
144 | int trio_index; /* Index of TRIO shim that contains the MAC. */ | ||
145 | |||
146 | int pio_mem_index; /* PIO region index for memory access */ | ||
147 | |||
148 | /* | ||
149 | * Mem-Map regions for all the memory controllers so that Linux can | ||
150 | * map all of its physical memory space to the PCI bus. | ||
151 | */ | ||
152 | int mem_maps[MAX_NUMNODES]; | ||
153 | |||
154 | int index; /* PCI domain number */ | ||
155 | struct pci_bus *root_bus; | ||
156 | |||
157 | /* PCI memory space resource for this controller. */ | ||
158 | struct resource mem_space; | ||
159 | char mem_space_name[32]; | ||
160 | |||
161 | uint64_t mem_offset; /* cpu->bus memory mapping offset. */ | ||
162 | |||
163 | int first_busno; | ||
164 | |||
165 | struct pci_ops *ops; | ||
166 | |||
167 | /* Table that maps the INTx numbers to Linux irq numbers. */ | ||
168 | int irq_intx_table[4]; | ||
169 | |||
170 | /* Address ranges that are routed to this controller/bridge. */ | ||
171 | struct resource mem_resources[3]; | ||
172 | }; | ||
173 | |||
174 | extern struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES]; | ||
175 | extern gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO]; | ||
176 | |||
177 | extern void pci_iounmap(struct pci_dev *dev, void __iomem *); | ||
178 | |||
179 | /* | ||
180 | * The PCI address space does not equal the physical memory address | ||
181 | * space (we have an IOMMU). The IDE and SCSI device layers use this | ||
182 | * boolean for bounce buffer decisions. | ||
183 | */ | ||
184 | #define PCI_DMA_BUS_IS_PHYS 0 | ||
185 | |||
186 | #endif /* __tilegx__ */ | ||
187 | |||
50 | int __init tile_pci_init(void); | 188 | int __init tile_pci_init(void); |
51 | int __init pcibios_init(void); | 189 | int __init pcibios_init(void); |
52 | 190 | ||
53 | static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {} | ||
54 | |||
55 | void __devinit pcibios_fixup_bus(struct pci_bus *bus); | 191 | void __devinit pcibios_fixup_bus(struct pci_bus *bus); |
56 | 192 | ||
57 | #define TILE_NUM_PCIE 2 | ||
58 | |||
59 | #define pci_domain_nr(bus) (((struct pci_controller *)(bus)->sysdata)->index) | 193 | #define pci_domain_nr(bus) (((struct pci_controller *)(bus)->sysdata)->index) |
60 | 194 | ||
61 | /* | 195 | /* |
@@ -79,19 +213,10 @@ static inline int pcibios_assign_all_busses(void) | |||
79 | #define PCIBIOS_MIN_MEM 0 | 213 | #define PCIBIOS_MIN_MEM 0 |
80 | #define PCIBIOS_MIN_IO 0 | 214 | #define PCIBIOS_MIN_IO 0 |
81 | 215 | ||
82 | /* | ||
83 | * This flag tells if the platform is TILEmpower that needs | ||
84 | * special configuration for the PLX switch chip. | ||
85 | */ | ||
86 | extern int tile_plx_gen1; | ||
87 | |||
88 | /* Use any cpu for PCI. */ | 216 | /* Use any cpu for PCI. */ |
89 | #define cpumask_of_pcibus(bus) cpu_online_mask | 217 | #define cpumask_of_pcibus(bus) cpu_online_mask |
90 | 218 | ||
91 | /* implement the pci_ DMA API in terms of the generic device dma_ one */ | 219 | /* implement the pci_ DMA API in terms of the generic device dma_ one */ |
92 | #include <asm-generic/pci-dma-compat.h> | 220 | #include <asm-generic/pci-dma-compat.h> |
93 | 221 | ||
94 | /* generic pci stuff */ | ||
95 | #include <asm-generic/pci.h> | ||
96 | |||
97 | #endif /* _ASM_TILE_PCI_H */ | 222 | #endif /* _ASM_TILE_PCI_H */ |
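The constants above encode a simple split of the bus address space: all host memory is exposed to 64-bit-capable devices at its CPA plus TILE_PCI_MEM_MAP_BASE_OFFSET, a 1 GB window just below 4 GB is reserved for device BARs, and a legacy 32-bit device can therefore DMA directly only to bus addresses at or below TILE_PCI_MAX_DIRECT_DMA_ADDRESS (0xbfffffff). The small user-space sketch below only makes that arithmetic concrete; the 40-bit CHIP_PA_WIDTH() is an assumed value for illustration, the real one comes from <arch/chip.h>.

#include <stdio.h>
#include <stdint.h>

/* Assumed for illustration only; the real value comes from <arch/chip.h>. */
#define EX_CHIP_PA_WIDTH		40

#define EX_BAR_WINDOW_TOP		(1ULL << 32)
#define EX_BAR_WINDOW_SIZE		(1ULL << 30)
#define EX_MAX_DIRECT_DMA_ADDRESS	(EX_BAR_WINDOW_TOP - EX_BAR_WINDOW_SIZE - 1)
#define EX_MEM_MAP_BASE_OFFSET		(1ULL << EX_CHIP_PA_WIDTH)

int main(void)
{
	uint64_t cpa = 0x12345000ULL;	/* some host physical (CPA) address */

	printf("32-bit direct DMA limit: 0x%llx\n",
	       (unsigned long long)EX_MAX_DIRECT_DMA_ADDRESS);
	printf("64-bit device sees CPA 0x%llx at bus address 0x%llx\n",
	       (unsigned long long)cpa,
	       (unsigned long long)(cpa + EX_MEM_MAP_BASE_OFFSET));
	return 0;
}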
diff --git a/arch/tile/include/gxio/common.h b/arch/tile/include/gxio/common.h new file mode 100644 index 000000000000..724595a24d04 --- /dev/null +++ b/arch/tile/include/gxio/common.h | |||
@@ -0,0 +1,40 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _GXIO_COMMON_H_ | ||
16 | #define _GXIO_COMMON_H_ | ||
17 | |||
18 | /* | ||
19 | * Routines shared between the various GXIO device components. | ||
20 | */ | ||
21 | |||
22 | #include <hv/iorpc.h> | ||
23 | |||
24 | #include <linux/types.h> | ||
25 | #include <linux/compiler.h> | ||
26 | #include <linux/io.h> | ||
27 | |||
28 | /* Define the standard gxio MMIO functions using kernel functions. */ | ||
29 | #define __gxio_mmio_read8(addr) readb(addr) | ||
30 | #define __gxio_mmio_read16(addr) readw(addr) | ||
31 | #define __gxio_mmio_read32(addr) readl(addr) | ||
32 | #define __gxio_mmio_read64(addr) readq(addr) | ||
33 | #define __gxio_mmio_write8(addr, val) writeb((val), (addr)) | ||
34 | #define __gxio_mmio_write16(addr, val) writew((val), (addr)) | ||
35 | #define __gxio_mmio_write32(addr, val) writel((val), (addr)) | ||
36 | #define __gxio_mmio_write64(addr, val) writeq((val), (addr)) | ||
37 | #define __gxio_mmio_read(addr) __gxio_mmio_read64(addr) | ||
38 | #define __gxio_mmio_write(addr, val) __gxio_mmio_write64((addr), (val)) | ||
39 | |||
40 | #endif /* !_GXIO_COMMON_H_ */ | ||
diff --git a/arch/tile/include/gxio/dma_queue.h b/arch/tile/include/gxio/dma_queue.h new file mode 100644 index 000000000000..00654feb7db0 --- /dev/null +++ b/arch/tile/include/gxio/dma_queue.h | |||
@@ -0,0 +1,161 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _GXIO_DMA_QUEUE_H_ | ||
16 | #define _GXIO_DMA_QUEUE_H_ | ||
17 | |||
18 | /* | ||
19 | * DMA queue management APIs shared between TRIO and mPIPE. | ||
20 | */ | ||
21 | |||
22 | #include "common.h" | ||
23 | |||
24 | /* The credit counter lives in the high 32 bits. */ | ||
25 | #define DMA_QUEUE_CREDIT_SHIFT 32 | ||
26 | |||
27 | /* | ||
28 | * State object that tracks a DMA queue's head and tail indices, as | ||
29 | * well as the number of commands posted and completed. The | ||
30 | * structure is accessed via a thread-safe, lock-free algorithm. | ||
31 | */ | ||
32 | typedef struct { | ||
33 | /* | ||
34 | * Address of a MPIPE_EDMA_POST_REGION_VAL_t, | ||
35 | * TRIO_PUSH_DMA_REGION_VAL_t, or TRIO_PULL_DMA_REGION_VAL_t | ||
36 | * register. These registers have identical encodings and provide | ||
37 | * information about how many commands have been processed. | ||
38 | */ | ||
39 | void *post_region_addr; | ||
40 | |||
41 | /* | ||
42 | * A lazily-updated count of how many edescs the hardware has | ||
43 | * completed. | ||
44 | */ | ||
45 | uint64_t hw_complete_count __attribute__ ((aligned(64))); | ||
46 | |||
47 | /* | ||
48 | * High 32 bits are a count of available egress command credits, | ||
49 | * low 24 bits are the next egress "slot". | ||
50 | */ | ||
51 | int64_t credits_and_next_index; | ||
52 | |||
53 | } __gxio_dma_queue_t; | ||
54 | |||
55 | /* Initialize a dma queue. */ | ||
56 | extern void __gxio_dma_queue_init(__gxio_dma_queue_t *dma_queue, | ||
57 | void *post_region_addr, | ||
58 | unsigned int num_entries); | ||
59 | |||
60 | /* | ||
61 | * Update the "credits_and_next_index" and "hw_complete_count" fields | ||
62 | * based on pending hardware completions. Note that some other thread | ||
63 | * may have already done this and, importantly, may still be in the | ||
64 | * process of updating "credits_and_next_index". | ||
65 | */ | ||
66 | extern void __gxio_dma_queue_update_credits(__gxio_dma_queue_t *dma_queue); | ||
67 | |||
68 | /* Wait for credits to become available. */ | ||
69 | extern int64_t __gxio_dma_queue_wait_for_credits(__gxio_dma_queue_t *dma_queue, | ||
70 | int64_t modifier); | ||
71 | |||
72 | /* Reserve slots in the queue, optionally waiting for slots to become | ||
73 | * available, and optionally returning a "completion_slot" suitable for | ||
74 | * direct comparison to "hw_complete_count". | ||
75 | */ | ||
76 | static inline int64_t __gxio_dma_queue_reserve(__gxio_dma_queue_t *dma_queue, | ||
77 | unsigned int num, bool wait, | ||
78 | bool completion) | ||
79 | { | ||
80 | uint64_t slot; | ||
81 | |||
82 | /* | ||
83 | * Try to reserve 'num' egress command slots. We do this by | ||
84 | * constructing a constant that subtracts N credits and adds N to | ||
85 | * the index, and using fetchaddgez to only apply it if the credits | ||
86 | * count doesn't go negative. | ||
87 | */ | ||
88 | int64_t modifier = (((int64_t)(-num)) << DMA_QUEUE_CREDIT_SHIFT) | num; | ||
89 | int64_t old = | ||
90 | __insn_fetchaddgez(&dma_queue->credits_and_next_index, | ||
91 | modifier); | ||
92 | |||
93 | if (unlikely(old + modifier < 0)) { | ||
94 | /* | ||
95 | * We're out of credits. Try once to get more by checking for | ||
96 | * completed egress commands. If that fails, wait or fail. | ||
97 | */ | ||
98 | __gxio_dma_queue_update_credits(dma_queue); | ||
99 | old = __insn_fetchaddgez(&dma_queue->credits_and_next_index, | ||
100 | modifier); | ||
101 | if (old + modifier < 0) { | ||
102 | if (wait) | ||
103 | old = __gxio_dma_queue_wait_for_credits | ||
104 | (dma_queue, modifier); | ||
105 | else | ||
106 | return GXIO_ERR_DMA_CREDITS; | ||
107 | } | ||
108 | } | ||
109 | |||
110 | /* The bottom 24 bits of old encode the "slot". */ | ||
111 | slot = (old & 0xffffff); | ||
112 | |||
113 | if (completion) { | ||
114 | /* | ||
115 | * A "completion_slot" is a "slot" which can be compared to | ||
116 | * "hw_complete_count" at any time in the future. To convert | ||
117 | * "slot" into a "completion_slot", we access "hw_complete_count" | ||
118 | * once (knowing that we have reserved a slot, and thus, it will | ||
119 | * be "basically" accurate), and combine its high 40 bits with | ||
120 | * the 24 bit "slot", and handle "wrapping" by adding "1 << 24" | ||
121 | * if the result is LESS than "hw_complete_count". | ||
122 | */ | ||
123 | uint64_t complete; | ||
124 | complete = ACCESS_ONCE(dma_queue->hw_complete_count); | ||
125 | slot |= (complete & 0xffffffffff000000); | ||
126 | if (slot < complete) | ||
127 | slot += 0x1000000; | ||
128 | } | ||
129 | |||
130 | /* | ||
131 | * If any of our slots mod 256 were equivalent to 0, go ahead and | ||
132 | * collect some egress credits, and update "hw_complete_count", and | ||
133 | * make sure the index doesn't overflow into the credits. | ||
134 | */ | ||
135 | if (unlikely(((old + num) & 0xff) < num)) { | ||
136 | __gxio_dma_queue_update_credits(dma_queue); | ||
137 | |||
138 | /* Make sure the index doesn't overflow into the credits. */ | ||
139 | #ifdef __BIG_ENDIAN__ | ||
140 | *(((uint8_t *)&dma_queue->credits_and_next_index) + 4) = 0; | ||
141 | #else | ||
142 | *(((uint8_t *)&dma_queue->credits_and_next_index) + 3) = 0; | ||
143 | #endif | ||
144 | } | ||
145 | |||
146 | return slot; | ||
147 | } | ||
148 | |||
149 | /* Non-inlinable "__gxio_dma_queue_reserve(..., true)". */ | ||
150 | extern int64_t __gxio_dma_queue_reserve_aux(__gxio_dma_queue_t *dma_queue, | ||
151 | unsigned int num, int wait); | ||
152 | |||
153 | /* Check whether a particular "completion slot" has completed. | ||
154 | * | ||
155 | * Note that this function requires a "completion slot", and thus | ||
156 | * cannot be used with the result of any "reserve_fast" function. | ||
157 | */ | ||
158 | extern int __gxio_dma_queue_is_complete(__gxio_dma_queue_t *dma_queue, | ||
159 | int64_t completion_slot, int update); | ||
160 | |||
161 | #endif /* !_GXIO_DMA_QUEUE_H_ */ | ||
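The reservation path above packs a 32-bit credit count into the high half of credits_and_next_index and a 24-bit slot index into the low half, so a single fetchaddgez can atomically take N credits and advance the index, refusing the update whenever the credit count would go negative. The sketch below models just that packing in portable, single-threaded C; it deliberately omits the atomic intrinsic, the credit-refill path, and the index-overflow fixup, so it illustrates the arithmetic rather than a usable queue.

#include <stdio.h>
#include <stdint.h>

#define EX_CREDIT_SHIFT	32
#define EX_SLOT_MASK	0xffffff

/* Take "num" credits from the high half and add "num" to the low index. */
static int64_t example_reserve(int64_t *credits_and_next_index, unsigned int num)
{
	int64_t modifier = -((int64_t)num << EX_CREDIT_SHIFT) + num;
	int64_t old = *credits_and_next_index;

	if (old + modifier < 0)
		return -1;			/* out of credits */

	*credits_and_next_index = old + modifier;
	return old & EX_SLOT_MASK;		/* the slot just claimed */
}

int main(void)
{
	int64_t state = 4LL << EX_CREDIT_SHIFT;	/* 4 credits, index 0 */
	unsigned int i;

	/* Prints slots 0..3, then -1 twice once the credits run out. */
	for (i = 0; i < 6; i++)
		printf("reserve -> %lld\n", (long long)example_reserve(&state, 1));
	return 0;
}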
diff --git a/arch/tile/include/gxio/iorpc_globals.h b/arch/tile/include/gxio/iorpc_globals.h new file mode 100644 index 000000000000..52c721f8dad9 --- /dev/null +++ b/arch/tile/include/gxio/iorpc_globals.h | |||
@@ -0,0 +1,38 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* This file is machine-generated; DO NOT EDIT! */ | ||
16 | #ifndef __IORPC_LINUX_RPC_H__ | ||
17 | #define __IORPC_LINUX_RPC_H__ | ||
18 | |||
19 | #include <hv/iorpc.h> | ||
20 | |||
21 | #include <linux/string.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <asm/pgtable.h> | ||
24 | |||
25 | #define IORPC_OP_ARM_POLLFD IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9000) | ||
26 | #define IORPC_OP_CLOSE_POLLFD IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9001) | ||
27 | #define IORPC_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000) | ||
28 | #define IORPC_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001) | ||
29 | |||
30 | int __iorpc_arm_pollfd(int fd, int pollfd_cookie); | ||
31 | |||
32 | int __iorpc_close_pollfd(int fd, int pollfd_cookie); | ||
33 | |||
34 | int __iorpc_get_mmio_base(int fd, HV_PTE *base); | ||
35 | |||
36 | int __iorpc_check_mmio_offset(int fd, unsigned long offset, unsigned long size); | ||
37 | |||
38 | #endif /* !__IORPC_LINUX_RPC_H__ */ | ||
diff --git a/arch/tile/include/gxio/iorpc_mpipe.h b/arch/tile/include/gxio/iorpc_mpipe.h new file mode 100644 index 000000000000..9d50fce1b1a7 --- /dev/null +++ b/arch/tile/include/gxio/iorpc_mpipe.h | |||
@@ -0,0 +1,136 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* This file is machine-generated; DO NOT EDIT! */ | ||
16 | #ifndef __GXIO_MPIPE_LINUX_RPC_H__ | ||
17 | #define __GXIO_MPIPE_LINUX_RPC_H__ | ||
18 | |||
19 | #include <hv/iorpc.h> | ||
20 | |||
21 | #include <hv/drv_mpipe_intf.h> | ||
22 | #include <asm/page.h> | ||
23 | #include <gxio/kiorpc.h> | ||
24 | #include <gxio/mpipe.h> | ||
25 | #include <linux/string.h> | ||
26 | #include <linux/module.h> | ||
27 | #include <asm/pgtable.h> | ||
28 | |||
29 | #define GXIO_MPIPE_OP_ALLOC_BUFFER_STACKS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1200) | ||
30 | #define GXIO_MPIPE_OP_INIT_BUFFER_STACK_AUX IORPC_OPCODE(IORPC_FORMAT_KERNEL_MEM, 0x1201) | ||
31 | |||
32 | #define GXIO_MPIPE_OP_ALLOC_NOTIF_RINGS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1203) | ||
33 | #define GXIO_MPIPE_OP_INIT_NOTIF_RING_AUX IORPC_OPCODE(IORPC_FORMAT_KERNEL_MEM, 0x1204) | ||
34 | #define GXIO_MPIPE_OP_REQUEST_NOTIF_RING_INTERRUPT IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1205) | ||
35 | #define GXIO_MPIPE_OP_ENABLE_NOTIF_RING_INTERRUPT IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1206) | ||
36 | #define GXIO_MPIPE_OP_ALLOC_NOTIF_GROUPS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1207) | ||
37 | #define GXIO_MPIPE_OP_INIT_NOTIF_GROUP IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1208) | ||
38 | #define GXIO_MPIPE_OP_ALLOC_BUCKETS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1209) | ||
39 | #define GXIO_MPIPE_OP_INIT_BUCKET IORPC_OPCODE(IORPC_FORMAT_NONE, 0x120a) | ||
40 | #define GXIO_MPIPE_OP_ALLOC_EDMA_RINGS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x120b) | ||
41 | #define GXIO_MPIPE_OP_INIT_EDMA_RING_AUX IORPC_OPCODE(IORPC_FORMAT_KERNEL_MEM, 0x120c) | ||
42 | |||
43 | #define GXIO_MPIPE_OP_COMMIT_RULES IORPC_OPCODE(IORPC_FORMAT_NONE, 0x120f) | ||
44 | #define GXIO_MPIPE_OP_REGISTER_CLIENT_MEMORY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1210) | ||
45 | #define GXIO_MPIPE_OP_LINK_OPEN_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1211) | ||
46 | #define GXIO_MPIPE_OP_LINK_CLOSE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1212) | ||
47 | |||
48 | #define GXIO_MPIPE_OP_GET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x121e) | ||
49 | #define GXIO_MPIPE_OP_SET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x121f) | ||
50 | #define GXIO_MPIPE_OP_ADJUST_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1220) | ||
51 | #define GXIO_MPIPE_OP_ARM_POLLFD IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9000) | ||
52 | #define GXIO_MPIPE_OP_CLOSE_POLLFD IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9001) | ||
53 | #define GXIO_MPIPE_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000) | ||
54 | #define GXIO_MPIPE_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001) | ||
55 | |||
56 | int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t * context, | ||
57 | unsigned int count, unsigned int first, | ||
58 | unsigned int flags); | ||
59 | |||
60 | int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t * context, | ||
61 | void *mem_va, size_t mem_size, | ||
62 | unsigned int mem_flags, unsigned int stack, | ||
63 | unsigned int buffer_size_enum); | ||
64 | |||
65 | |||
66 | int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t * context, | ||
67 | unsigned int count, unsigned int first, | ||
68 | unsigned int flags); | ||
69 | |||
70 | int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t * context, void *mem_va, | ||
71 | size_t mem_size, unsigned int mem_flags, | ||
72 | unsigned int ring); | ||
73 | |||
74 | int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t * context, | ||
75 | int inter_x, int inter_y, | ||
76 | int inter_ipi, int inter_event, | ||
77 | unsigned int ring); | ||
78 | |||
79 | int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t * context, | ||
80 | unsigned int ring); | ||
81 | |||
82 | int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t * context, | ||
83 | unsigned int count, unsigned int first, | ||
84 | unsigned int flags); | ||
85 | |||
86 | int gxio_mpipe_init_notif_group(gxio_mpipe_context_t * context, | ||
87 | unsigned int group, | ||
88 | gxio_mpipe_notif_group_bits_t bits); | ||
89 | |||
90 | int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t * context, unsigned int count, | ||
91 | unsigned int first, unsigned int flags); | ||
92 | |||
93 | int gxio_mpipe_init_bucket(gxio_mpipe_context_t * context, unsigned int bucket, | ||
94 | MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info); | ||
95 | |||
96 | int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t * context, | ||
97 | unsigned int count, unsigned int first, | ||
98 | unsigned int flags); | ||
99 | |||
100 | int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t * context, void *mem_va, | ||
101 | size_t mem_size, unsigned int mem_flags, | ||
102 | unsigned int ring, unsigned int channel); | ||
103 | |||
104 | |||
105 | int gxio_mpipe_commit_rules(gxio_mpipe_context_t * context, const void *blob, | ||
106 | size_t blob_size); | ||
107 | |||
108 | int gxio_mpipe_register_client_memory(gxio_mpipe_context_t * context, | ||
109 | unsigned int iotlb, HV_PTE pte, | ||
110 | unsigned int flags); | ||
111 | |||
112 | int gxio_mpipe_link_open_aux(gxio_mpipe_context_t * context, | ||
113 | _gxio_mpipe_link_name_t name, unsigned int flags); | ||
114 | |||
115 | int gxio_mpipe_link_close_aux(gxio_mpipe_context_t * context, int mac); | ||
116 | |||
117 | |||
118 | int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t * context, uint64_t * sec, | ||
119 | uint64_t * nsec, uint64_t * cycles); | ||
120 | |||
121 | int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t * context, uint64_t sec, | ||
122 | uint64_t nsec, uint64_t cycles); | ||
123 | |||
124 | int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context, | ||
125 | int64_t nsec); | ||
126 | |||
127 | int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie); | ||
128 | |||
129 | int gxio_mpipe_close_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie); | ||
130 | |||
131 | int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t * context, HV_PTE *base); | ||
132 | |||
133 | int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t * context, | ||
134 | unsigned long offset, unsigned long size); | ||
135 | |||
136 | #endif /* !__GXIO_MPIPE_LINUX_RPC_H__ */ | ||
diff --git a/arch/tile/include/gxio/iorpc_mpipe_info.h b/arch/tile/include/gxio/iorpc_mpipe_info.h new file mode 100644 index 000000000000..0bcf3f71ce8b --- /dev/null +++ b/arch/tile/include/gxio/iorpc_mpipe_info.h | |||
@@ -0,0 +1,46 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* This file is machine-generated; DO NOT EDIT! */ | ||
16 | #ifndef __GXIO_MPIPE_INFO_LINUX_RPC_H__ | ||
17 | #define __GXIO_MPIPE_INFO_LINUX_RPC_H__ | ||
18 | |||
19 | #include <hv/iorpc.h> | ||
20 | |||
21 | #include <hv/drv_mpipe_intf.h> | ||
22 | #include <asm/page.h> | ||
23 | #include <gxio/kiorpc.h> | ||
24 | #include <gxio/mpipe.h> | ||
25 | #include <linux/string.h> | ||
26 | #include <linux/module.h> | ||
27 | #include <asm/pgtable.h> | ||
28 | |||
29 | |||
30 | #define GXIO_MPIPE_INFO_OP_ENUMERATE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1251) | ||
31 | #define GXIO_MPIPE_INFO_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000) | ||
32 | #define GXIO_MPIPE_INFO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001) | ||
33 | |||
34 | |||
35 | int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t * context, | ||
36 | unsigned int idx, | ||
37 | _gxio_mpipe_link_name_t * name, | ||
38 | _gxio_mpipe_link_mac_t * mac); | ||
39 | |||
40 | int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t * context, | ||
41 | HV_PTE *base); | ||
42 | |||
43 | int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t * context, | ||
44 | unsigned long offset, unsigned long size); | ||
45 | |||
46 | #endif /* !__GXIO_MPIPE_INFO_LINUX_RPC_H__ */ | ||
diff --git a/arch/tile/include/gxio/iorpc_trio.h b/arch/tile/include/gxio/iorpc_trio.h new file mode 100644 index 000000000000..15fb77992083 --- /dev/null +++ b/arch/tile/include/gxio/iorpc_trio.h | |||
@@ -0,0 +1,97 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* This file is machine-generated; DO NOT EDIT! */ | ||
16 | #ifndef __GXIO_TRIO_LINUX_RPC_H__ | ||
17 | #define __GXIO_TRIO_LINUX_RPC_H__ | ||
18 | |||
19 | #include <hv/iorpc.h> | ||
20 | |||
21 | #include <hv/drv_trio_intf.h> | ||
22 | #include <gxio/trio.h> | ||
23 | #include <gxio/kiorpc.h> | ||
24 | #include <linux/string.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <asm/pgtable.h> | ||
27 | |||
28 | #define GXIO_TRIO_OP_ALLOC_ASIDS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1400) | ||
29 | |||
30 | #define GXIO_TRIO_OP_ALLOC_MEMORY_MAPS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1402) | ||
31 | |||
32 | #define GXIO_TRIO_OP_ALLOC_PIO_REGIONS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x140e) | ||
33 | #define GXIO_TRIO_OP_INIT_PIO_REGION_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x140f) | ||
34 | |||
35 | #define GXIO_TRIO_OP_INIT_MEMORY_MAP_MMU_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1417) | ||
36 | #define GXIO_TRIO_OP_GET_PORT_PROPERTY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1418) | ||
37 | #define GXIO_TRIO_OP_CONFIG_LEGACY_INTR IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1419) | ||
38 | #define GXIO_TRIO_OP_CONFIG_MSI_INTR IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x141a) | ||
39 | |||
40 | #define GXIO_TRIO_OP_SET_MPS_MRS IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141c) | ||
41 | #define GXIO_TRIO_OP_FORCE_RC_LINK_UP IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141d) | ||
42 | #define GXIO_TRIO_OP_FORCE_EP_LINK_UP IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141e) | ||
43 | #define GXIO_TRIO_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000) | ||
44 | #define GXIO_TRIO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001) | ||
45 | |||
46 | int gxio_trio_alloc_asids(gxio_trio_context_t * context, unsigned int count, | ||
47 | unsigned int first, unsigned int flags); | ||
48 | |||
49 | |||
50 | int gxio_trio_alloc_memory_maps(gxio_trio_context_t * context, | ||
51 | unsigned int count, unsigned int first, | ||
52 | unsigned int flags); | ||
53 | |||
54 | |||
55 | int gxio_trio_alloc_pio_regions(gxio_trio_context_t * context, | ||
56 | unsigned int count, unsigned int first, | ||
57 | unsigned int flags); | ||
58 | |||
59 | int gxio_trio_init_pio_region_aux(gxio_trio_context_t * context, | ||
60 | unsigned int pio_region, unsigned int mac, | ||
61 | uint32_t bus_address_hi, unsigned int flags); | ||
62 | |||
63 | |||
64 | int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t * context, | ||
65 | unsigned int map, unsigned long va, | ||
66 | uint64_t size, unsigned int asid, | ||
67 | unsigned int mac, uint64_t bus_address, | ||
68 | unsigned int node, | ||
69 | unsigned int order_mode); | ||
70 | |||
71 | int gxio_trio_get_port_property(gxio_trio_context_t * context, | ||
72 | struct pcie_trio_ports_property *trio_ports); | ||
73 | |||
74 | int gxio_trio_config_legacy_intr(gxio_trio_context_t * context, int inter_x, | ||
75 | int inter_y, int inter_ipi, int inter_event, | ||
76 | unsigned int mac, unsigned int intx); | ||
77 | |||
78 | int gxio_trio_config_msi_intr(gxio_trio_context_t * context, int inter_x, | ||
79 | int inter_y, int inter_ipi, int inter_event, | ||
80 | unsigned int mac, unsigned int mem_map, | ||
81 | uint64_t mem_map_base, uint64_t mem_map_limit, | ||
82 | unsigned int asid); | ||
83 | |||
84 | |||
85 | int gxio_trio_set_mps_mrs(gxio_trio_context_t * context, uint16_t mps, | ||
86 | uint16_t mrs, unsigned int mac); | ||
87 | |||
88 | int gxio_trio_force_rc_link_up(gxio_trio_context_t * context, unsigned int mac); | ||
89 | |||
90 | int gxio_trio_force_ep_link_up(gxio_trio_context_t * context, unsigned int mac); | ||
91 | |||
92 | int gxio_trio_get_mmio_base(gxio_trio_context_t * context, HV_PTE *base); | ||
93 | |||
94 | int gxio_trio_check_mmio_offset(gxio_trio_context_t * context, | ||
95 | unsigned long offset, unsigned long size); | ||
96 | |||
97 | #endif /* !__GXIO_TRIO_LINUX_RPC_H__ */ | ||
diff --git a/arch/tile/include/gxio/iorpc_usb_host.h b/arch/tile/include/gxio/iorpc_usb_host.h new file mode 100644 index 000000000000..8622e7d126ad --- /dev/null +++ b/arch/tile/include/gxio/iorpc_usb_host.h | |||
@@ -0,0 +1,46 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* This file is machine-generated; DO NOT EDIT! */ | ||
16 | #ifndef __GXIO_USB_HOST_LINUX_RPC_H__ | ||
17 | #define __GXIO_USB_HOST_LINUX_RPC_H__ | ||
18 | |||
19 | #include <hv/iorpc.h> | ||
20 | |||
21 | #include <hv/drv_usb_host_intf.h> | ||
22 | #include <asm/page.h> | ||
23 | #include <gxio/kiorpc.h> | ||
24 | #include <gxio/usb_host.h> | ||
25 | #include <linux/string.h> | ||
26 | #include <linux/module.h> | ||
27 | #include <asm/pgtable.h> | ||
28 | |||
29 | #define GXIO_USB_HOST_OP_CFG_INTERRUPT IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1800) | ||
30 | #define GXIO_USB_HOST_OP_REGISTER_CLIENT_MEMORY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1801) | ||
31 | #define GXIO_USB_HOST_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000) | ||
32 | #define GXIO_USB_HOST_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001) | ||
33 | |||
34 | int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t * context, int inter_x, | ||
35 | int inter_y, int inter_ipi, int inter_event); | ||
36 | |||
37 | int gxio_usb_host_register_client_memory(gxio_usb_host_context_t * context, | ||
38 | HV_PTE pte, unsigned int flags); | ||
39 | |||
40 | int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t * context, | ||
41 | HV_PTE *base); | ||
42 | |||
43 | int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t * context, | ||
44 | unsigned long offset, unsigned long size); | ||
45 | |||
46 | #endif /* !__GXIO_USB_HOST_LINUX_RPC_H__ */ | ||
diff --git a/arch/tile/include/gxio/kiorpc.h b/arch/tile/include/gxio/kiorpc.h new file mode 100644 index 000000000000..ee5820979ff3 --- /dev/null +++ b/arch/tile/include/gxio/kiorpc.h | |||
@@ -0,0 +1,29 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Support routines for kernel IORPC drivers. | ||
15 | */ | ||
16 | |||
17 | #ifndef _GXIO_KIORPC_H | ||
18 | #define _GXIO_KIORPC_H | ||
19 | |||
20 | #include <linux/types.h> | ||
21 | #include <asm/page.h> | ||
22 | #include <arch/chip.h> | ||
23 | |||
24 | #if CHIP_HAS_MMIO() | ||
25 | void __iomem *iorpc_ioremap(int hv_fd, resource_size_t offset, | ||
26 | unsigned long size); | ||
27 | #endif | ||
28 | |||
29 | #endif /* _GXIO_KIORPC_H */ | ||
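Together with the __gxio_mmio_*() wrappers in <gxio/common.h>, iorpc_ioremap() is how a kernel GXIO driver reaches a shim's registers: the hypervisor device handle is mapped and then accessed with ordinary MMIO reads and writes. A heavily hedged sketch follows; the already-open handle hv_fd, the register offset, and the assumption that iorpc_ioremap() returns NULL on failure are all illustrative, not taken from this patch.

#include <linux/errno.h>
#include <gxio/common.h>
#include <gxio/kiorpc.h>

#define EX_REG_OFFSET	0x40	/* hypothetical register offset */

#if CHIP_HAS_MMIO()
static int example_read_shim_reg(int hv_fd, uint64_t *val)
{
	void __iomem *mmio = iorpc_ioremap(hv_fd, 0, PAGE_SIZE);

	if (!mmio)
		return -ENODEV;	/* assumes NULL on failure */

	*val = __gxio_mmio_read(mmio + EX_REG_OFFSET);
	/* Unmapping is left to the caller's teardown path in this sketch. */
	return 0;
}
#endif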
diff --git a/arch/tile/include/gxio/mpipe.h b/arch/tile/include/gxio/mpipe.h new file mode 100644 index 000000000000..78c598618c97 --- /dev/null +++ b/arch/tile/include/gxio/mpipe.h | |||
@@ -0,0 +1,1736 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _GXIO_MPIPE_H_ | ||
16 | #define _GXIO_MPIPE_H_ | ||
17 | |||
18 | /* | ||
19 | * | ||
20 | * An API for allocating, configuring, and manipulating mPIPE hardware | ||
21 | * resources. | ||
22 | */ | ||
23 | |||
24 | #include "common.h" | ||
25 | #include "dma_queue.h" | ||
26 | |||
27 | #include <linux/time.h> | ||
28 | |||
29 | #include <arch/mpipe_def.h> | ||
30 | #include <arch/mpipe_shm.h> | ||
31 | |||
32 | #include <hv/drv_mpipe_intf.h> | ||
33 | #include <hv/iorpc.h> | ||
34 | |||
35 | /* | ||
36 | * | ||
37 | * The TILE-Gx mPIPE&tm; shim provides Ethernet connectivity, packet | ||
38 | * classification, and packet load balancing services. The | ||
39 | * gxio_mpipe_ API, declared in <gxio/mpipe.h>, allows applications to | ||
40 | * allocate mPIPE IO channels, configure packet distribution | ||
41 | * parameters, and send and receive Ethernet packets. The API is | ||
42 | * designed to be a minimal wrapper around the mPIPE hardware, making | ||
43 | * system calls only where necessary to preserve inter-process | ||
44 | * protection guarantees. | ||
45 | * | ||
46 | * The APIs described below allow the programmer to allocate and | ||
47 | * configure mPIPE resources. As described below, the mPIPE is a | ||
48 | * single shared hardware device that provides partitionable resources | ||
49 | * that are shared between all applications in the system. The | ||
50 | * gxio_mpipe_ API allows userspace code to make resource request | ||
51 | * calls to the hypervisor, which in turn keeps track of the | ||
52 | * resources in use by all applications, maintains protection | ||
53 | * guarantees, and resets resources upon application shutdown. | ||
54 | * | ||
55 | * We strongly recommend reading the mPIPE section of the IO Device | ||
56 | * Guide (UG404) before working with this API. Most functions in the | ||
57 | * gxio_mpipe_ API are directly analogous to hardware interfaces and | ||
58 | * the documentation assumes that the reader understands those | ||
59 | * hardware interfaces. | ||
60 | * | ||
61 | * @section mpipe__ingress mPIPE Ingress Hardware Resources | ||
62 | * | ||
63 | * The mPIPE ingress hardware provides extensive hardware offload for | ||
64 | * tasks like packet header parsing, load balancing, and memory | ||
65 | * management. This section provides a brief introduction to the | ||
66 | * hardware components and the gxio_mpipe_ calls used to manage them; | ||
67 | * see the IO Device Guide for a much more detailed description of the | ||
68 | * mPIPE's capabilities. | ||
69 | * | ||
70 | * When a packet arrives at one of the mPIPE's Ethernet MACs, it is | ||
71 | * assigned a channel number indicating which MAC received it. It | ||
72 | * then proceeds through the following hardware pipeline: | ||
73 | * | ||
74 | * @subsection mpipe__classification Classification | ||
75 | * | ||
76 | * A set of classification processors run header parsing code on each | ||
77 | * incoming packet, extracting information including the destination | ||
78 | * MAC address, VLAN, Ethernet type, and five-tuple hash. Some of | ||
79 | * this information is then used to choose which buffer stack will be | ||
80 | * used to hold the packet, and which bucket will be used by the load | ||
81 | * balancer to determine which application will receive the packet. | ||
82 | * | ||
83 | * The rules by which the buffer stack and bucket are chosen can be | ||
84 | * configured via the @ref gxio_mpipe_classifier API. A given app can | ||
85 | * specify multiple rules, each one specifying a bucket range, and a | ||
86 | * set of buffer stacks, to be used for packets matching the rule. | ||
87 | * Each rule can optionally specify a restricted set of channels, | ||
88 | * VLANs, and/or dMACs, in which it is interested. By default, a | ||
89 | * given rule starts out matching all channels associated with the | ||
90 | * mPIPE context's set of open links; all VLANs; and all dMACs. | ||
91 | * Subsequent restrictions can then be added. | ||
92 | * | ||
93 | * @subsection mpipe__load_balancing Load Balancing | ||
94 | * | ||
95 | * The mPIPE load balancer is responsible for choosing the NotifRing | ||
96 | * to which the packet will be delivered. This decision is based on | ||
97 | * the bucket number indicated by the classification program. In | ||
98 | * general, the bucket number is based on some number of low bits of | ||
99 | * the packet's flow hash (applications that aren't interested in flow | ||
100 | * hashing use a single bucket). Each load balancer bucket keeps a | ||
101 | * record of the NotifRing to which packets directed to that bucket | ||
102 | * are currently being delivered. Based on the bucket's load | ||
103 | * balancing mode (@ref gxio_mpipe_bucket_mode_t), the load balancer | ||
104 | * either forwards the packet to the previously assigned NotifRing or | ||
105 | * decides to choose a new NotifRing. If a new NotifRing is required, | ||
106 | * the load balancer chooses the least loaded ring in the NotifGroup | ||
107 | * associated with the bucket. | ||
108 | * | ||
109 | * The load balancer is a shared resource. Each application needs to | ||
110 | * explicitly allocate NotifRings, NotifGroups, and buckets, using | ||
111 | * gxio_mpipe_alloc_notif_rings(), gxio_mpipe_alloc_notif_groups(), | ||
112 | * and gxio_mpipe_alloc_buckets(). Then the application needs to | ||
113 | * configure them using gxio_mpipe_init_notif_ring() and | ||
114 | * gxio_mpipe_init_notif_group_and_buckets(). | ||
115 | * | ||
116 | * @subsection mpipe__buffers Buffer Selection and Packet Delivery | ||
117 | * | ||
118 | * Once the load balancer has chosen the destination NotifRing, the | ||
119 | * mPIPE DMA engine pops at least one buffer off of the 'buffer stack' | ||
120 | * chosen by the classification program and DMAs the packet data into | ||
121 | * that buffer. Each buffer stack provides a hardware-accelerated | ||
122 | * stack of data buffers with the same size. If the packet data is | ||
123 | * larger than the buffers provided by the chosen buffer stack, the | ||
124 | * mPIPE hardware pops off multiple buffers and chains the packet data | ||
125 | * through a multi-buffer linked list. Once the packet data is | ||
126 | * delivered to the buffer(s), the mPIPE hardware writes the | ||
127 | * ::gxio_mpipe_idesc_t metadata object (calculated by the classifier) | ||
128 | * into the NotifRing and increments the number of packets delivered | ||
129 | * to that ring. | ||
130 | * | ||
131 | * Applications can push buffers onto a buffer stack by calling | ||
132 | * gxio_mpipe_push_buffer() or by egressing a packet with the | ||
133 | * ::gxio_mpipe_edesc_t::hwb bit set, indicating that the egressed | ||
134 | * buffers should be returned to the stack. | ||
135 | * | ||
136 | * Applications can allocate and initialize buffer stacks with the | ||
137 | * gxio_mpipe_alloc_buffer_stacks() and gxio_mpipe_init_buffer_stack() | ||
138 | * APIs. | ||
139 | * | ||
140 | * The application must also register the memory pages that will hold | ||
141 | * packets. This requires calling gxio_mpipe_register_page() for each | ||
142 | * memory page that will hold packets allocated by the application for | ||
143 | * a given buffer stack. Since each buffer stack is limited to 16 | ||
144 | * registered pages, it may be necessary to use huge pages, or even | ||
145 | * extremely huge pages, to hold all the buffers. | ||
146 | * | ||
147 | * @subsection mpipe__iqueue NotifRings | ||
148 | * | ||
149 | * Each NotifRing is a region of shared memory, allocated by the | ||
150 | * application, to which the mPIPE delivers packet descriptors | ||
151 | * (::gxio_mpipe_idesc_t). The application can allocate them via | ||
152 | * gxio_mpipe_alloc_notif_rings(). The application can then either | ||
153 | * explicitly initialize them with gxio_mpipe_init_notif_ring() and | ||
154 | * then read from them manually, or can make use of the convenience | ||
155 | * wrappers provided by @ref gxio_mpipe_wrappers. | ||
156 | * | ||
157 | * @section mpipe__egress mPIPE Egress Hardware | ||
158 | * | ||
159 | * Applications use eDMA rings to queue packets for egress. The | ||
160 | * application can allocate them via gxio_mpipe_alloc_edma_rings(). | ||
161 | * The application can then either explicitly initialize them with | ||
162 | * gxio_mpipe_init_edma_ring() and then write to them manually, or | ||
163 | * can make use of the convenience wrappers provided by | ||
164 | * @ref gxio_mpipe_wrappers. | ||
165 | * | ||
166 | * @section gxio__shortcomings Plans for Future API Revisions | ||
167 | * | ||
168 | * The API defined here is only an initial version of the mPIPE API. | ||
169 | * Future plans include: | ||
170 | * | ||
171 | * - Higher level wrapper functions to provide common initialization | ||
172 | * patterns. This should help users start writing mPIPE programs | ||
173 | * without having to learn the details of the hardware. | ||
174 | * | ||
175 | * - Support for reset and deallocation of resources, including | ||
176 | * cleanup upon application shutdown. | ||
177 | * | ||
178 | * - Support for calling these APIs in the BME. | ||
179 | * | ||
180 | * - Support for IO interrupts. | ||
181 | * | ||
182 | * - Clearer definitions of thread safety guarantees. | ||
183 | * | ||
184 | * @section gxio__mpipe_examples Examples | ||
185 | * | ||
186 | * See the following mPIPE example programs for more information about | ||
187 | * allocating mPIPE resources and using them in real applications: | ||
188 | * | ||
189 | * - @ref mpipe/ingress/app.c : Receiving packets. | ||
190 | * | ||
191 | * - @ref mpipe/forward/app.c : Forwarding packets. | ||
192 | * | ||
193 | * Note that there are several more examples. | ||
194 | */ | ||
195 | |||
196 | /* Flags that can be passed to resource allocation functions. */ | ||
197 | enum gxio_mpipe_alloc_flags_e { | ||
198 | /* Require an allocation to start at a specified resource index. */ | ||
199 | GXIO_MPIPE_ALLOC_FIXED = HV_MPIPE_ALLOC_FIXED, | ||
200 | }; | ||
201 | |||
202 | /* Flags that can be passed to memory registration functions. */ | ||
203 | enum gxio_mpipe_mem_flags_e { | ||
204 | /* Do not fill L3 when writing, and invalidate lines upon egress. */ | ||
205 | GXIO_MPIPE_MEM_FLAG_NT_HINT = IORPC_MEM_BUFFER_FLAG_NT_HINT, | ||
206 | |||
207 | /* L3 cache fills should only populate IO cache ways. */ | ||
208 | GXIO_MPIPE_MEM_FLAG_IO_PIN = IORPC_MEM_BUFFER_FLAG_IO_PIN, | ||
209 | }; | ||
210 | |||
211 | /* An ingress packet descriptor. When a packet arrives, the mPIPE | ||
212 | * hardware generates this structure and writes it into a NotifRing. | ||
213 | */ | ||
214 | typedef MPIPE_PDESC_t gxio_mpipe_idesc_t; | ||
215 | |||
216 | /* An egress command descriptor. Applications write this structure | ||
217 | * into eDMA rings and the hardware performs the indicated operation | ||
218 | * (normally involving egressing some bytes). Note that egressing a | ||
219 | * single packet may involve multiple egress command descriptors. | ||
220 | */ | ||
221 | typedef MPIPE_EDMA_DESC_t gxio_mpipe_edesc_t; | ||
222 | |||
223 | /* Get the "va" field from an "idesc". | ||
224 | * | ||
225 | * This is the address at which the ingress hardware copied the first | ||
226 | * byte of the packet. | ||
227 | * | ||
228 | * If the classifier detected a custom header, then this will point to | ||
229 | * the custom header, and gxio_mpipe_idesc_get_l2_start() will point | ||
230 | * to the actual L2 header. | ||
231 | * | ||
232 | * Note that this value may be misleading if "idesc->be" is set. | ||
233 | * | ||
234 | * @param idesc An ingress packet descriptor. | ||
235 | */ | ||
236 | static inline unsigned char *gxio_mpipe_idesc_get_va(gxio_mpipe_idesc_t *idesc) | ||
237 | { | ||
238 | return (unsigned char *)(long)idesc->va; | ||
239 | } | ||
240 | |||
241 | /* Get the "xfer_size" from an "idesc". | ||
242 | * | ||
243 | * This is the actual number of packet bytes transferred into memory | ||
244 | * by the hardware. | ||
245 | * | ||
246 | * Note that this value may be misleading if "idesc->be" is set. | ||
247 | * | ||
248 | * @param idesc An ingress packet descriptor. | ||
249 | * | ||
250 | * ISSUE: Is this the best name for this? | ||
251 | * FIXME: Add more docs about chaining, clipping, etc. | ||
252 | */ | ||
253 | static inline unsigned int gxio_mpipe_idesc_get_xfer_size(gxio_mpipe_idesc_t | ||
254 | *idesc) | ||
255 | { | ||
256 | return idesc->l2_size; | ||
257 | } | ||
258 | |||
259 | /* Get the "l2_offset" from an "idesc". | ||
260 | * | ||
261 | * Extremely customized classifiers might not support this function. | ||
262 | * | ||
263 | * This is the number of bytes between the "va" and the L2 header. | ||
264 | * | ||
265 | * The L2 header consists of a destination mac address, a source mac | ||
266 | * address, and an initial ethertype. Various initial ethertypes | ||
267 | * allow encoding extra information in the L2 header, often including | ||
268 | * a vlan, and/or a new ethertype. | ||
269 | * | ||
270 | * Note that the "l2_offset" will be non-zero if (and only if) the | ||
271 | * classifier processed a custom header for the packet. | ||
272 | * | ||
273 | * @param idesc An ingress packet descriptor. | ||
274 | */ | ||
275 | static inline uint8_t gxio_mpipe_idesc_get_l2_offset(gxio_mpipe_idesc_t *idesc) | ||
276 | { | ||
277 | return (idesc->custom1 >> 32) & 0xFF; | ||
278 | } | ||
279 | |||
280 | /* Get the "l2_start" from an "idesc". | ||
281 | * | ||
282 | * This is simply gxio_mpipe_idesc_get_va() plus | ||
283 | * gxio_mpipe_idesc_get_l2_offset(). | ||
284 | * | ||
285 | * @param idesc An ingress packet descriptor. | ||
286 | */ | ||
287 | static inline unsigned char *gxio_mpipe_idesc_get_l2_start(gxio_mpipe_idesc_t | ||
288 | *idesc) | ||
289 | { | ||
290 | unsigned char *va = gxio_mpipe_idesc_get_va(idesc); | ||
291 | return va + gxio_mpipe_idesc_get_l2_offset(idesc); | ||
292 | } | ||
293 | |||
294 | /* Get the "l2_length" from an "idesc". | ||
295 | * | ||
296 | * This is simply gxio_mpipe_idesc_get_xfer_size() minus | ||
297 | * gxio_mpipe_idesc_get_l2_offset(). | ||
298 | * | ||
299 | * @param idesc An ingress packet descriptor. | ||
300 | */ | ||
301 | static inline unsigned int gxio_mpipe_idesc_get_l2_length(gxio_mpipe_idesc_t | ||
302 | *idesc) | ||
303 | { | ||
304 | unsigned int xfer_size = idesc->l2_size; | ||
305 | return xfer_size - gxio_mpipe_idesc_get_l2_offset(idesc); | ||
306 | } | ||
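
/* Editor's example (not part of the original header): a hedged sketch
 * of how the accessors above are typically combined when examining a
 * received descriptor.  The "handle_frame()" helper and the source of
 * "idesc" are hypothetical; the "be" (buffer error) check mirrors the
 * one in gxio_mpipe_iqueue_drop() further below.
 *
 *   void example_handle_idesc(gxio_mpipe_idesc_t *idesc)
 *   {
 *           if (!idesc->be) {
 *                   unsigned char *l2 = gxio_mpipe_idesc_get_l2_start(idesc);
 *                   unsigned int len = gxio_mpipe_idesc_get_l2_length(idesc);
 *                   handle_frame(l2, len);
 *           }
 *   }
 */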
307 | |||
308 | /* A context object used to manage mPIPE hardware resources. */ | ||
309 | typedef struct { | ||
310 | |||
311 | /* File descriptor for calling up to Linux (and thus the HV). */ | ||
312 | int fd; | ||
313 | |||
314 | /* The VA at which configuration registers are mapped. */ | ||
315 | char *mmio_cfg_base; | ||
316 | |||
317 | /* The VA at which IDMA, EDMA, and buffer manager are mapped. */ | ||
318 | char *mmio_fast_base; | ||
319 | |||
320 | /* The "initialized" buffer stacks. */ | ||
321 | gxio_mpipe_rules_stacks_t __stacks; | ||
322 | |||
323 | } gxio_mpipe_context_t; | ||
324 | |||
325 | /* This is only used internally, but it's most easily made visible here. */ | ||
326 | typedef gxio_mpipe_context_t gxio_mpipe_info_context_t; | ||
327 | |||
328 | /* Initialize an mPIPE context. | ||
329 | * | ||
330 | * This function allocates an mPIPE "service domain" and maps the MMIO | ||
331 | * registers into the caller's VA space. | ||
332 | * | ||
333 | * @param context Context object to be initialized. | ||
334 | * @param mpipe_instance Instance number of mPIPE shim to be controlled via | ||
335 | * context. | ||
336 | */ | ||
337 | extern int gxio_mpipe_init(gxio_mpipe_context_t *context, | ||
338 | unsigned int mpipe_instance); | ||
339 | |||
340 | /* Destroy an mPIPE context. | ||
341 | * | ||
342 | * This function frees the mPIPE "service domain" and unmaps the MMIO | ||
343 | * registers from the caller's VA space. | ||
344 | * | ||
345 | * If a user process exits without calling this routine, the kernel | ||
346 | * will destroy the mPIPE context as part of process teardown. | ||
347 | * | ||
348 | * @param context Context object to be destroyed. | ||
349 | */ | ||
350 | extern int gxio_mpipe_destroy(gxio_mpipe_context_t *context); | ||
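
/* Editor's example (not part of the original header): a minimal sketch
 * of the context lifecycle.  The instance number 0 and the error
 * handling style are illustrative only.
 *
 *   gxio_mpipe_context_t context;
 *   int rc = gxio_mpipe_init(&context, 0);
 *   if (rc < 0)
 *           return rc;
 *   ... allocate resources, configure rules, process packets ...
 *   gxio_mpipe_destroy(&context);
 */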
351 | |||
352 | /***************************************************************** | ||
353 | * Buffer Stacks * | ||
354 | ******************************************************************/ | ||
355 | |||
356 | /* Allocate a set of buffer stacks. | ||
357 | * | ||
358 | * The return value is NOT interesting if count is zero. | ||
359 | * | ||
360 | * @param context An initialized mPIPE context. | ||
361 | * @param count Number of stacks required. | ||
362 | * @param first Index of first stack if ::GXIO_MPIPE_ALLOC_FIXED flag is set, | ||
363 | * otherwise ignored. | ||
364 | * @param flags Flag bits from ::gxio_mpipe_alloc_flags_e. | ||
365 | * @return Index of first allocated buffer stack, or | ||
366 | * ::GXIO_MPIPE_ERR_NO_BUFFER_STACK if allocation failed. | ||
367 | */ | ||
368 | extern int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t *context, | ||
369 | unsigned int count, | ||
370 | unsigned int first, | ||
371 | unsigned int flags); | ||
372 | |||
373 | /* Enum codes for buffer sizes supported by mPIPE. */ | ||
374 | typedef enum { | ||
375 | /* 128 byte packet data buffer. */ | ||
376 | GXIO_MPIPE_BUFFER_SIZE_128 = MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_128, | ||
377 | /* 256 byte packet data buffer. */ | ||
378 | GXIO_MPIPE_BUFFER_SIZE_256 = MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_256, | ||
379 | /* 512 byte packet data buffer. */ | ||
380 | GXIO_MPIPE_BUFFER_SIZE_512 = MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_512, | ||
381 | /* 1024 byte packet data buffer. */ | ||
382 | GXIO_MPIPE_BUFFER_SIZE_1024 = MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_1024, | ||
383 | /* 1664 byte packet data buffer. */ | ||
384 | GXIO_MPIPE_BUFFER_SIZE_1664 = MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_1664, | ||
385 | /* 4096 byte packet data buffer. */ | ||
386 | GXIO_MPIPE_BUFFER_SIZE_4096 = MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_4096, | ||
387 | /* 10368 byte packet data buffer. */ | ||
388 | GXIO_MPIPE_BUFFER_SIZE_10368 = | ||
389 | MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_10368, | ||
390 | /* 16384 byte packet data buffer. */ | ||
391 | GXIO_MPIPE_BUFFER_SIZE_16384 = MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_16384 | ||
392 | } gxio_mpipe_buffer_size_enum_t; | ||
393 | |||
394 | /* Convert a buffer size in bytes into a buffer size enum. */ | ||
395 | extern gxio_mpipe_buffer_size_enum_t | ||
396 | gxio_mpipe_buffer_size_to_buffer_size_enum(size_t size); | ||
397 | |||
398 | /* Convert a buffer size enum into a buffer size in bytes. */ | ||
399 | extern size_t | ||
400 | gxio_mpipe_buffer_size_enum_to_buffer_size(gxio_mpipe_buffer_size_enum_t | ||
401 | buffer_size_enum); | ||
402 | |||
403 | /* Calculate the number of bytes required to store a given number of | ||
404 | * buffers in the memory registered with a buffer stack via | ||
405 | * gxio_mpipe_init_buffer_stack(). | ||
406 | */ | ||
407 | extern size_t gxio_mpipe_calc_buffer_stack_bytes(unsigned long buffers); | ||
408 | |||
409 | /* Initialize a buffer stack. This function binds a region of memory | ||
410 | * to be used by the hardware for storing buffer addresses pushed via | ||
411 | * gxio_mpipe_push_buffer() or as the result of sending a buffer out | ||
412 | * the egress with the 'push to stack when done' bit set. Once this | ||
413 | * function returns, the memory region's contents may be arbitrarily | ||
414 | * modified by the hardware at any time and software should not access | ||
415 | * the memory region again. | ||
416 | * | ||
417 | * @param context An initialized mPIPE context. | ||
418 | * @param stack The buffer stack index. | ||
419 | * @param buffer_size_enum The size of each buffer in the buffer stack, | ||
420 | * as an enum. | ||
421 | * @param mem The address of the buffer stack. This memory must be | ||
422 | * physically contiguous and aligned to a 64kB boundary. | ||
423 | * @param mem_size The size of the buffer stack, in bytes. | ||
424 | * @param mem_flags ::gxio_mpipe_mem_flags_e memory flags. | ||
425 | * @return Zero on success, ::GXIO_MPIPE_ERR_INVAL_BUFFER_SIZE if | ||
426 | * buffer_size_enum is invalid, ::GXIO_MPIPE_ERR_BAD_BUFFER_STACK if | ||
427 | * stack has not been allocated. | ||
428 | */ | ||
429 | extern int gxio_mpipe_init_buffer_stack(gxio_mpipe_context_t *context, | ||
430 | unsigned int stack, | ||
431 | gxio_mpipe_buffer_size_enum_t | ||
432 | buffer_size_enum, void *mem, | ||
433 | size_t mem_size, | ||
434 | unsigned int mem_flags); | ||
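
/* Editor's example (not part of the original header): a hedged sketch
 * of allocating and initializing one buffer stack for 1000 buffers of
 * 1664 bytes.  "context" is an initialized mPIPE context; "stack_mem"
 * is assumed to be a physically contiguous, 64 kB aligned region
 * within memory registered via gxio_mpipe_register_page(); error
 * checks are abbreviated.
 *
 *   int stack = gxio_mpipe_alloc_buffer_stacks(&context, 1, 0, 0);
 *   size_t stack_bytes = gxio_mpipe_calc_buffer_stack_bytes(1000);
 *   int rc = gxio_mpipe_init_buffer_stack(&context, stack,
 *                                         GXIO_MPIPE_BUFFER_SIZE_1664,
 *                                         stack_mem, stack_bytes, 0);
 */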
435 | |||
436 | /* Push a buffer onto a previously initialized buffer stack. | ||
437 | * | ||
438 | * The size of the buffer being pushed must match the size that was | ||
439 | * registered with gxio_mpipe_init_buffer_stack(). All packet buffer | ||
440 | * addresses are 128-byte aligned; the low 7 bits of the specified | ||
441 | * buffer address will be ignored. | ||
442 | * | ||
443 | * @param context An initialized mPIPE context. | ||
444 | * @param stack The buffer stack index. | ||
445 | * @param buffer The buffer (the low seven bits are ignored). | ||
446 | */ | ||
447 | static inline void gxio_mpipe_push_buffer(gxio_mpipe_context_t *context, | ||
448 | unsigned int stack, void *buffer) | ||
449 | { | ||
450 | MPIPE_BSM_REGION_ADDR_t offset = { {0} }; | ||
451 | MPIPE_BSM_REGION_VAL_t val = { {0} }; | ||
452 | |||
453 | /* | ||
454 | * The mmio_fast_base region starts at the IDMA region, so subtract | ||
455 | * off that initial offset. | ||
456 | */ | ||
457 | offset.region = | ||
458 | MPIPE_MMIO_ADDR__REGION_VAL_BSM - | ||
459 | MPIPE_MMIO_ADDR__REGION_VAL_IDMA; | ||
460 | offset.stack = stack; | ||
461 | |||
462 | #if __SIZEOF_POINTER__ == 4 | ||
463 | val.va = ((ulong) buffer) >> MPIPE_BSM_REGION_VAL__VA_SHIFT; | ||
464 | #else | ||
465 | val.va = ((long)buffer) >> MPIPE_BSM_REGION_VAL__VA_SHIFT; | ||
466 | #endif | ||
467 | |||
468 | __gxio_mmio_write(context->mmio_fast_base + offset.word, val.word); | ||
469 | } | ||
470 | |||
471 | /* Pop a buffer off of a previously initialized buffer stack. | ||
472 | * | ||
473 | * @param context An initialized mPIPE context. | ||
474 | * @param stack The buffer stack index. | ||
475 | * @return The buffer, or NULL if the stack is empty. | ||
476 | */ | ||
477 | static inline void *gxio_mpipe_pop_buffer(gxio_mpipe_context_t *context, | ||
478 | unsigned int stack) | ||
479 | { | ||
480 | MPIPE_BSM_REGION_ADDR_t offset = { {0} }; | ||
481 | |||
482 | /* | ||
483 | * The mmio_fast_base region starts at the IDMA region, so subtract | ||
484 | * off that initial offset. | ||
485 | */ | ||
486 | offset.region = | ||
487 | MPIPE_MMIO_ADDR__REGION_VAL_BSM - | ||
488 | MPIPE_MMIO_ADDR__REGION_VAL_IDMA; | ||
489 | offset.stack = stack; | ||
490 | |||
491 | while (1) { | ||
492 | /* | ||
493 | * Case 1: val.c == ..._UNCHAINED, va is non-zero. | ||
494 | * Case 2: val.c == ..._INVALID, va is zero. | ||
495 | * Case 3: val.c == ..._NOT_RDY, va is zero. | ||
496 | */ | ||
497 | MPIPE_BSM_REGION_VAL_t val; | ||
498 | val.word = | ||
499 | __gxio_mmio_read(context->mmio_fast_base + | ||
500 | offset.word); | ||
501 | |||
502 | /* | ||
503 | * Handle case 1 and 2 by returning the buffer (or NULL). | ||
504 | * Handle case 3 by waiting for the prefetch buffer to refill. | ||
505 | */ | ||
506 | if (val.c != MPIPE_EDMA_DESC_WORD1__C_VAL_NOT_RDY) | ||
507 | return (void *)((unsigned long)val. | ||
508 | va << MPIPE_BSM_REGION_VAL__VA_SHIFT); | ||
509 | } | ||
510 | } | ||
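
/* Editor's example (not part of the original header): seeding a newly
 * initialized stack with buffers and popping one back.  "buffer_mem"
 * is assumed to be 128 byte aligned memory within pages registered via
 * gxio_mpipe_register_page(), carved into 1664 byte buffers to match
 * the stack's buffer size; "num_buffers" and "stack" are hypothetical.
 *
 *   unsigned int i;
 *   for (i = 0; i < num_buffers; i++)
 *           gxio_mpipe_push_buffer(&context, stack,
 *                                  buffer_mem + i * 1664);
 *
 *   void *buf = gxio_mpipe_pop_buffer(&context, stack);
 *   if (buf != NULL)
 *           ... fill the buffer and egress it, or push it back ...
 */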
511 | |||
512 | /***************************************************************** | ||
513 | * NotifRings * | ||
514 | ******************************************************************/ | ||
515 | |||
516 | /* Allocate a set of NotifRings. | ||
517 | * | ||
518 | * The return value is NOT interesting if count is zero. | ||
519 | * | ||
520 | * Note that NotifRings are allocated in chunks, so allocating one at | ||
521 | * a time is much less efficient than allocating several at once. | ||
522 | * | ||
523 | * @param context An initialized mPIPE context. | ||
524 | * @param count Number of NotifRings required. | ||
525 | * @param first Index of first NotifRing if ::GXIO_MPIPE_ALLOC_FIXED flag | ||
526 | * is set, otherwise ignored. | ||
527 | * @param flags Flag bits from ::gxio_mpipe_alloc_flags_e. | ||
528 | * @return Index of first allocated NotifRing, or | ||
529 | * ::GXIO_MPIPE_ERR_NO_NOTIF_RING if allocation failed. | ||
530 | */ | ||
531 | extern int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t *context, | ||
532 | unsigned int count, unsigned int first, | ||
533 | unsigned int flags); | ||
534 | |||
535 | /* Initialize a NotifRing, using the given memory and size. | ||
536 | * | ||
537 | * @param context An initialized mPIPE context. | ||
538 | * @param ring The NotifRing index. | ||
539 | * @param mem A physically contiguous region of memory to be filled | ||
540 | * with a ring of ::gxio_mpipe_idesc_t structures. | ||
541 | * @param mem_size Number of bytes in the ring. Must be 128, 512, | ||
542 | * 2048, or 65536, times sizeof(gxio_mpipe_idesc_t). | ||
543 | * @param mem_flags ::gxio_mpipe_mem_flags_e memory flags. | ||
544 | * | ||
545 | * @return 0 on success, ::GXIO_MPIPE_ERR_BAD_NOTIF_RING or | ||
546 | * ::GXIO_ERR_INVAL_MEMORY_SIZE on failure. | ||
547 | */ | ||
548 | extern int gxio_mpipe_init_notif_ring(gxio_mpipe_context_t *context, | ||
549 | unsigned int ring, | ||
550 | void *mem, size_t mem_size, | ||
551 | unsigned int mem_flags); | ||
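
/* Editor's example (not part of the original header): allocating and
 * initializing a 512 entry NotifRing.  "ring_mem" is assumed to be a
 * physically contiguous region of the required size; error checks are
 * abbreviated.
 *
 *   int ring = gxio_mpipe_alloc_notif_rings(&context, 1, 0, 0);
 *   size_t ring_bytes = 512 * sizeof(gxio_mpipe_idesc_t);
 *   int rc = gxio_mpipe_init_notif_ring(&context, ring, ring_mem,
 *                                       ring_bytes, 0);
 */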
552 | |||
553 | /* Configure an interrupt to be sent to a tile on incoming NotifRing | ||
554 | * traffic. Once an interrupt is sent for a particular ring, no more | ||
555 | * will be sent until gxio_mpipe_enable_notif_ring_interrupt() is called. | ||
556 | * | ||
557 | * @param context An initialized mPIPE context. | ||
558 | * @param x X coordinate of interrupt target tile. | ||
559 | * @param y Y coordinate of interrupt target tile. | ||
560 | * @param i Index of the IPI register which will receive the interrupt. | ||
561 | * @param e Specific event which will be set in the target IPI register when | ||
562 | * the interrupt occurs. | ||
563 | * @param ring The NotifRing index. | ||
564 | * @return Zero on success, GXIO_ERR_INVAL if params are out of range. | ||
565 | */ | ||
566 | extern int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t | ||
567 | *context, int x, int y, | ||
568 | int i, int e, | ||
569 | unsigned int ring); | ||
570 | |||
571 | /* Enable an interrupt on incoming NotifRing traffic. | ||
572 | * | ||
573 | * @param context An initialized mPIPE context. | ||
574 | * @param ring The NotifRing index. | ||
575 | * @return Zero on success, GXIO_ERR_INVAL if params are out of range. | ||
576 | */ | ||
577 | extern int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t | ||
578 | *context, unsigned int ring); | ||
579 | |||
580 | /* Map all of a client's memory via the given IOTLB. | ||
581 | * @param context An initialized mPIPE context. | ||
582 | * @param iotlb IOTLB index. | ||
583 | * @param pte Page table entry. | ||
584 | * @param flags Flags. | ||
585 | * @return Zero on success, or a negative error code. | ||
586 | */ | ||
587 | extern int gxio_mpipe_register_client_memory(gxio_mpipe_context_t *context, | ||
588 | unsigned int iotlb, HV_PTE pte, | ||
589 | unsigned int flags); | ||
590 | |||
591 | /***************************************************************** | ||
592 | * Notif Groups * | ||
593 | ******************************************************************/ | ||
594 | |||
595 | /* Allocate a set of NotifGroups. | ||
596 | * | ||
597 | * The return value is NOT interesting if count is zero. | ||
598 | * | ||
599 | * @param context An initialized mPIPE context. | ||
600 | * @param count Number of NotifGroups required. | ||
601 | * @param first Index of first NotifGroup if ::GXIO_MPIPE_ALLOC_FIXED flag | ||
602 | * is set, otherwise ignored. | ||
603 | * @param flags Flag bits from ::gxio_mpipe_alloc_flags_e. | ||
604 | * @return Index of first allocated NotifGroup, or | ||
605 | * ::GXIO_MPIPE_ERR_NO_NOTIF_GROUP if allocation failed. | ||
606 | */ | ||
607 | extern int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t *context, | ||
608 | unsigned int count, | ||
609 | unsigned int first, | ||
610 | unsigned int flags); | ||
611 | |||
612 | /* Add a NotifRing to a NotifGroup. This only sets a bit in the | ||
613 | * application's 'group' object; the hardware NotifGroup can be | ||
614 | * initialized by passing 'group' to gxio_mpipe_init_notif_group() or | ||
615 | * gxio_mpipe_init_notif_group_and_buckets(). | ||
616 | */ | ||
617 | static inline void | ||
618 | gxio_mpipe_notif_group_add_ring(gxio_mpipe_notif_group_bits_t *bits, int ring) | ||
619 | { | ||
620 | bits->ring_mask[ring / 64] |= (1ull << (ring % 64)); | ||
621 | } | ||
622 | |||
623 | /* Set a particular NotifGroup bitmask. Since the load balancer | ||
624 | * makes decisions based on both bucket and NotifGroup state, most | ||
625 | * applications should use gxio_mpipe_init_notif_group_and_buckets() | ||
626 | * rather than using this function to configure just a NotifGroup. | ||
627 | */ | ||
628 | extern int gxio_mpipe_init_notif_group(gxio_mpipe_context_t *context, | ||
629 | unsigned int group, | ||
630 | gxio_mpipe_notif_group_bits_t bits); | ||
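
/* Editor's example (not part of the original header): building a
 * NotifGroup bitmask by hand.  Most applications should prefer
 * gxio_mpipe_init_notif_group_and_buckets() below; this sketch only
 * illustrates the bitmask idiom.  "ring" is the first of "num_rings"
 * previously allocated NotifRings, and the zero initializer assumes
 * the usual struct-wrapped-array layout of the bits type.
 *
 *   gxio_mpipe_notif_group_bits_t bits = { { 0 } };
 *   int i;
 *   for (i = 0; i < num_rings; i++)
 *           gxio_mpipe_notif_group_add_ring(&bits, ring + i);
 *   int rc = gxio_mpipe_init_notif_group(&context, group, bits);
 */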
631 | |||
632 | /***************************************************************** | ||
633 | * Load Balancer * | ||
634 | ******************************************************************/ | ||
635 | |||
636 | /* Allocate a set of load balancer buckets. | ||
637 | * | ||
638 | * The return value is NOT interesting if count is zero. | ||
639 | * | ||
640 | * Note that buckets are allocated in chunks, so allocating one at | ||
641 | * a time is much less efficient than allocating several at once. | ||
642 | * | ||
643 | * Note that the buckets are actually divided into two sub-ranges, of | ||
644 | * different sizes, and different chunk sizes, and the range you get | ||
645 | * by default is determined by the size of the request. Allocations | ||
646 | * cannot span the two sub-ranges. | ||
647 | * | ||
648 | * @param context An initialized mPIPE context. | ||
649 | * @param count Number of buckets required. | ||
650 | * @param first Index of first bucket if ::GXIO_MPIPE_ALLOC_FIXED flag is set, | ||
651 | * otherwise ignored. | ||
652 | * @param flags Flag bits from ::gxio_mpipe_alloc_flags_e. | ||
653 | * @return Index of first allocated bucket, or | ||
654 | * ::GXIO_MPIPE_ERR_NO_BUCKET if allocation failed. | ||
655 | */ | ||
656 | extern int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t *context, | ||
657 | unsigned int count, unsigned int first, | ||
658 | unsigned int flags); | ||
659 | |||
660 | /* The legal modes for gxio_mpipe_bucket_info_t and | ||
661 | * gxio_mpipe_init_notif_group_and_buckets(). | ||
662 | * | ||
663 | * All modes except ::GXIO_MPIPE_BUCKET_ROUND_ROBIN expect that the user | ||
664 | * will allocate a power-of-two number of buckets and initialize them | ||
665 | * to the same mode. The classifier program then uses the appropriate | ||
666 | * number of low bits from the incoming packet's flow hash to choose a | ||
667 | * load balancer bucket. Based on that bucket's load balancing mode, | ||
668 | * reference count, and currently active NotifRing, the load balancer | ||
669 | * chooses the NotifRing to which the packet will be delivered. | ||
670 | */ | ||
671 | typedef enum { | ||
672 | /* All packets for a bucket go to the same NotifRing unless the | ||
673 | * NotifRing gets full, in which case packets will be dropped. If | ||
674 | * the bucket reference count ever reaches zero, a new NotifRing may | ||
675 | * be chosen. | ||
676 | */ | ||
677 | GXIO_MPIPE_BUCKET_DYNAMIC_FLOW_AFFINITY = | ||
678 | MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_DFA, | ||
679 | |||
680 | /* All packets for a bucket always go to the same NotifRing. | ||
681 | */ | ||
682 | GXIO_MPIPE_BUCKET_STATIC_FLOW_AFFINITY = | ||
683 | MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_FIXED, | ||
684 | |||
685 | /* All packets for a bucket go to the least full NotifRing in the | ||
686 | * group, providing load balancing round robin behavior. | ||
687 | */ | ||
688 | GXIO_MPIPE_BUCKET_ROUND_ROBIN = | ||
689 | MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_ALWAYS_PICK, | ||
690 | |||
691 | /* All packets for a bucket go to the same NotifRing unless the | ||
692 | * NotifRing gets full, at which point the bucket starts using the | ||
693 | * least full NotifRing in the group. If all NotifRings in the | ||
694 | * group are full, packets will be dropped. | ||
695 | */ | ||
696 | GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY = | ||
697 | MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_STICKY, | ||
698 | |||
699 | /* All packets for a bucket go to the same NotifRing unless the | ||
700 | * NotifRing gets full, or a random timer fires, at which point the | ||
701 | * bucket starts using the least full NotifRing in the group. If | ||
702 | * all NotifRings in the group are full, packets will be dropped. | ||
703 | * WARNING: This mode is BROKEN on chips with fewer than 64 tiles. | ||
704 | */ | ||
705 | GXIO_MPIPE_BUCKET_PREFER_FLOW_LOCALITY = | ||
706 | MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_STICKY_RAND, | ||
707 | |||
708 | } gxio_mpipe_bucket_mode_t; | ||
709 | |||
710 | /* Copy a set of bucket initialization values into the mPIPE | ||
711 | * hardware. Since the load balancer makes decisions based on both | ||
712 | * bucket and NotifGroup state, most applications should use | ||
713 | * gxio_mpipe_init_notif_group_and_buckets() rather than using this | ||
714 | * function to configure a single bucket. | ||
715 | * | ||
716 | * @param context An initialized mPIPE context. | ||
717 | * @param bucket Bucket index to be initialized. | ||
718 | * @param bucket_info Initial reference count, NotifRing index, and mode. | ||
719 | * @return 0 on success, ::GXIO_MPIPE_ERR_BAD_BUCKET on failure. | ||
720 | */ | ||
721 | extern int gxio_mpipe_init_bucket(gxio_mpipe_context_t *context, | ||
722 | unsigned int bucket, | ||
723 | gxio_mpipe_bucket_info_t bucket_info); | ||
724 | |||
725 | /* Initializes a group and range of buckets and range of rings such | ||
726 | * that the load balancer runs a particular load balancing function. | ||
727 | * | ||
728 | * First, the group is initialized with the given rings. | ||
729 | * | ||
730 | * Second, each bucket is initialized with the mode and group, and a | ||
731 | * ring chosen round-robin from the given rings. | ||
732 | * | ||
733 | * Normally, the classifier picks a bucket, and then the load balancer | ||
734 | * picks a ring, based on the bucket's mode, group, and current ring, | ||
735 | * possibly updating the bucket's ring. | ||
736 | * | ||
737 | * @param context An initialized mPIPE context. | ||
738 | * @param group The group. | ||
739 | * @param ring The first ring. | ||
740 | * @param num_rings The number of rings. | ||
741 | * @param bucket The first bucket. | ||
742 | * @param num_buckets The number of buckets. | ||
743 | * @param mode The load balancing mode. | ||
744 | * | ||
745 | * @return 0 on success, ::GXIO_MPIPE_ERR_BAD_BUCKET, | ||
746 | * ::GXIO_MPIPE_ERR_BAD_NOTIF_GROUP, or | ||
747 | * ::GXIO_MPIPE_ERR_BAD_NOTIF_RING on failure. | ||
748 | */ | ||
749 | extern int gxio_mpipe_init_notif_group_and_buckets(gxio_mpipe_context_t | ||
750 | *context, | ||
751 | unsigned int group, | ||
752 | unsigned int ring, | ||
753 | unsigned int num_rings, | ||
754 | unsigned int bucket, | ||
755 | unsigned int num_buckets, | ||
756 | gxio_mpipe_bucket_mode_t | ||
757 | mode); | ||
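
/* Editor's example (not part of the original header): the typical
 * one-call load balancer setup.  The counts (4 rings, 256 buckets) and
 * the chosen mode are illustrative; the rings are assumed to have been
 * initialized as shown earlier, and error checks are abbreviated.
 *
 *   int ring   = gxio_mpipe_alloc_notif_rings(&context, 4, 0, 0);
 *   int group  = gxio_mpipe_alloc_notif_groups(&context, 1, 0, 0);
 *   int bucket = gxio_mpipe_alloc_buckets(&context, 256, 0, 0);
 *   int rc = gxio_mpipe_init_notif_group_and_buckets(&context, group,
 *                   ring, 4, bucket, 256,
 *                   GXIO_MPIPE_BUCKET_STATIC_FLOW_AFFINITY);
 */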
758 | |||
759 | /* Return credits to a NotifRing and/or bucket. | ||
760 | * | ||
761 | * @param context An initialized mPIPE context. | ||
762 | * @param ring The NotifRing index, or -1. | ||
763 | * @param bucket The bucket, or -1. | ||
764 | * @param count The number of credits to return. | ||
765 | */ | ||
766 | static inline void gxio_mpipe_credit(gxio_mpipe_context_t *context, | ||
767 | int ring, int bucket, unsigned int count) | ||
768 | { | ||
769 | /* NOTE: Fancy struct initialization would break "C89" header test. */ | ||
770 | |||
771 | MPIPE_IDMA_RELEASE_REGION_ADDR_t offset = { {0} }; | ||
772 | MPIPE_IDMA_RELEASE_REGION_VAL_t val = { {0} }; | ||
773 | |||
774 | /* | ||
775 | * The mmio_fast_base region starts at the IDMA region, so subtract | ||
776 | * off that initial offset. | ||
777 | */ | ||
778 | offset.region = | ||
779 | MPIPE_MMIO_ADDR__REGION_VAL_IDMA - | ||
780 | MPIPE_MMIO_ADDR__REGION_VAL_IDMA; | ||
781 | offset.ring = ring; | ||
782 | offset.bucket = bucket; | ||
783 | offset.ring_enable = (ring >= 0); | ||
784 | offset.bucket_enable = (bucket >= 0); | ||
785 | val.count = count; | ||
786 | |||
787 | __gxio_mmio_write(context->mmio_fast_base + offset.word, val.word); | ||
788 | } | ||
789 | |||
790 | /***************************************************************** | ||
791 | * Egress Rings * | ||
792 | ******************************************************************/ | ||
793 | |||
794 | /* Allocate a set of eDMA rings. | ||
795 | * | ||
796 | * The return value is NOT interesting if count is zero. | ||
797 | * | ||
798 | * @param context An initialized mPIPE context. | ||
799 | * @param count Number of eDMA rings required. | ||
800 | * @param first Index of first eDMA ring if ::GXIO_MPIPE_ALLOC_FIXED flag | ||
801 | * is set, otherwise ignored. | ||
802 | * @param flags Flag bits from ::gxio_mpipe_alloc_flags_e. | ||
803 | * @return Index of first allocated eDMA ring, or | ||
804 | * ::GXIO_MPIPE_ERR_NO_EDMA_RING if allocation failed. | ||
805 | */ | ||
806 | extern int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context, | ||
807 | unsigned int count, unsigned int first, | ||
808 | unsigned int flags); | ||
809 | |||
810 | /* Initialize an eDMA ring, using the given memory and size. | ||
811 | * | ||
812 | * @param context An initialized mPIPE context. | ||
813 | * @param ring The eDMA ring index. | ||
814 | * @param channel The channel to use. This must be one of the channels | ||
815 | * associated with the context's set of open links. | ||
816 | * @param mem A physically contiguous region of memory to be filled | ||
817 | * with a ring of ::gxio_mpipe_edesc_t structures. | ||
818 | * @param mem_size Number of bytes in the ring. Must be 512, 2048, | ||
819 | * 8192 or 65536, times 16 (i.e. sizeof(gxio_mpipe_edesc_t)). | ||
820 | * @param mem_flags ::gxio_mpipe_mem_flags_e memory flags. | ||
821 | * | ||
822 | * @return 0 on success, ::GXIO_MPIPE_ERR_BAD_EDMA_RING or | ||
823 | * ::GXIO_ERR_INVAL_MEMORY_SIZE on failure. | ||
824 | */ | ||
825 | extern int gxio_mpipe_init_edma_ring(gxio_mpipe_context_t *context, | ||
826 | unsigned int ring, unsigned int channel, | ||
827 | void *mem, size_t mem_size, | ||
828 | unsigned int mem_flags); | ||
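
/* Editor's example (not part of the original header): allocating and
 * initializing a 512 entry eDMA ring.  "channel" is assumed to come
 * from a link opened on this context (the link API is not shown in
 * this excerpt), and "ering_mem" is a physically contiguous region of
 * the required size.
 *
 *   int ering = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0);
 *   size_t ering_bytes = 512 * sizeof(gxio_mpipe_edesc_t);
 *   int rc = gxio_mpipe_init_edma_ring(&context, ering, channel,
 *                                      ering_mem, ering_bytes, 0);
 */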
829 | |||
830 | /***************************************************************** | ||
831 | * Classifier Program * | ||
832 | ******************************************************************/ | ||
833 | |||
834 | /* | ||
835 | * | ||
836 | * Functions for loading or configuring the mPIPE classifier program. | ||
837 | * | ||
838 | * The mPIPE classification processors all run a special "classifier" | ||
839 | * program.  For each incoming packet, this program parses the packet | ||
840 | * headers, encodes some packet metadata in the "idesc", and either | ||
841 | * drops the packet or picks a NotifRing to handle it and a buffer | ||
842 | * stack to contain it, usually based on the channel, VLAN, dMAC, | ||
843 | * flow hash, and packet size, under the guidance of the "rules" API | ||
844 | * described below. | ||
845 | * | ||
846 | * @section gxio_mpipe_classifier_default Default Classifier | ||
847 | * | ||
848 | * The MDE provides a simple "default" classifier program. It is | ||
849 | * shipped as source in "$TILERA_ROOT/src/sys/mpipe/classifier.c", | ||
850 | * which serves as its official documentation. It is shipped as a | ||
851 | * binary program in "$TILERA_ROOT/tile/boot/classifier", which is | ||
852 | * automatically included in bootroms created by "tile-monitor", and | ||
853 | * is automatically loaded by the hypervisor at boot time. | ||
854 | * | ||
855 | * The L2 analysis handles LLC packets, SNAP packets, and "VLAN | ||
856 | * wrappers" (keeping the outer VLAN). | ||
857 | * | ||
858 | * The L3 analysis handles IPv4 and IPv6, dropping packets with bad | ||
859 | * IPv4 header checksums, requesting computation of a TCP/UDP checksum | ||
860 | * if appropriate, and hashing the dest and src IP addresses, plus the | ||
861 | * ports for TCP/UDP packets, into the flow hash. No special analysis | ||
862 | * is done for "fragmented" packets or "tunneling" protocols. Thus, | ||
863 | * the first fragment of a fragmented TCP/UDP packet is hashed using | ||
864 | * the src/dest IP addresses and ports, while all subsequent fragments | ||
865 | * are hashed using only the src/dest IP addresses. | ||
866 | * | ||
867 | * The L3 analysis handles other packets too, hashing the dMAC and | ||
868 | * sMAC into a flow hash. | ||
869 | * | ||
870 | * The channel, VLAN, and dMAC are used to pick a "rule" (see the | ||
871 | * "rules" APIs below), which in turn is used to pick a buffer stack | ||
872 | * (based on the packet size) and a bucket (based on the flow hash). | ||
873 | * | ||
874 | * To receive traffic matching a particular channel/VLAN/dMAC | ||
875 | * pattern, an application should allocate its own buffer stacks and | ||
876 | * load balancer buckets, and map traffic to those stacks and buckets, | ||
877 | * as described by the "rules" API below. | ||
878 | * | ||
879 | * Various packet metadata is encoded in the idesc. The flow hash is | ||
880 | * four bytes at 0x0C. The VLAN is two bytes at 0x10. The ethtype is | ||
881 | * two bytes at 0x12. The l3 start is one byte at 0x14. The l4 start | ||
882 | * is one byte at 0x15 for IPv4 and IPv6 packets, and otherwise zero. | ||
883 | * The protocol is one byte at 0x16 for IPv4 and IPv6 packets, and | ||
884 | * otherwise zero. | ||
885 | * | ||
886 | * @section gxio_mpipe_classifier_custom Custom Classifiers | ||
887 | * | ||
888 | * A custom classifier may be created using "tile-mpipe-cc" with a | ||
889 | * customized version of the default classifier sources. | ||
890 | * | ||
891 | * The custom classifier may be included in bootroms using the | ||
892 | * "--classifier" option to "tile-monitor", or loaded dynamically | ||
893 | * using gxio_mpipe_classifier_load_from_file(). | ||
894 | * | ||
895 | * Be aware that "extreme" customizations may break the assumptions of | ||
896 | * the "rules" APIs described below, but simple customizations, such | ||
897 | * as adding new packet metadata, should be fine. | ||
898 | */ | ||
899 | |||
900 | /* A set of classifier rules, plus a context. */ | ||
901 | typedef struct { | ||
902 | |||
903 | /* The context. */ | ||
904 | gxio_mpipe_context_t *context; | ||
905 | |||
906 | /* The actual rules. */ | ||
907 | gxio_mpipe_rules_list_t list; | ||
908 | |||
909 | } gxio_mpipe_rules_t; | ||
910 | |||
911 | /* Initialize a classifier program rules list. | ||
912 | * | ||
913 | * This function can be called on a previously initialized rules list | ||
914 | * to discard any previously added rules. | ||
915 | * | ||
916 | * @param rules Rules list to initialize. | ||
917 | * @param context An initialized mPIPE context. | ||
918 | */ | ||
919 | extern void gxio_mpipe_rules_init(gxio_mpipe_rules_t *rules, | ||
920 | gxio_mpipe_context_t *context); | ||
921 | |||
922 | /* Begin a new rule on the indicated rules list. | ||
923 | * | ||
924 | * Note that an empty rule matches all packets, but an empty rule list | ||
925 | * matches no packets. | ||
926 | * | ||
927 | * @param rules Rules list to which new rule is appended. | ||
928 | * @param bucket First load balancer bucket to which packets will be | ||
929 | * delivered. | ||
930 | * @param num_buckets Number of buckets (must be a power of two) across | ||
931 | * which packets will be distributed based on the "flow hash". | ||
932 | * @param stacks Either NULL, to assign each packet to the smallest | ||
933 | * initialized buffer stack which does not induce chaining (and to | ||
934 | * drop packets which exceed the largest initialized buffer stack | ||
935 | * buffer size), or an array, with each entry indicating which buffer | ||
936 | * stack should be used for packets up to that size (with 255 | ||
937 | * indicating that those packets should be dropped). | ||
938 | * @return 0 on success, or a negative error code on failure. | ||
939 | */ | ||
940 | extern int gxio_mpipe_rules_begin(gxio_mpipe_rules_t *rules, | ||
941 | unsigned int bucket, | ||
942 | unsigned int num_buckets, | ||
943 | gxio_mpipe_rules_stacks_t *stacks); | ||
944 | |||
945 | /* Set the headroom of the current rule. | ||
946 | * | ||
947 | * @param rules Rules list whose current rule will be modified. | ||
948 | * @param headroom The headroom. | ||
949 | * @return 0 on success, or a negative error code on failure. | ||
950 | */ | ||
951 | extern int gxio_mpipe_rules_set_headroom(gxio_mpipe_rules_t *rules, | ||
952 | uint8_t headroom); | ||
953 | |||
954 | /* Indicate that packets from a particular channel can be delivered | ||
955 | * to the buckets and buffer stacks associated with the current rule. | ||
956 | * | ||
957 | * Channels added must be associated with links opened by the mPIPE context | ||
958 | * used in gxio_mpipe_rules_init(). A rule with no channels is equivalent | ||
959 | * to a rule naming all such associated channels. | ||
960 | * | ||
961 | * @param rules Rules list whose current rule will be modified. | ||
962 | * @param channel The channel to add. | ||
963 | * @return 0 on success, or a negative error code on failure. | ||
964 | */ | ||
965 | extern int gxio_mpipe_rules_add_channel(gxio_mpipe_rules_t *rules, | ||
966 | unsigned int channel); | ||
967 | |||
968 | /* Commit rules. | ||
969 | * | ||
970 | * The rules are sent to the hypervisor, where they are combined with | ||
971 | * the rules from other apps, and used to program the hardware classifier. | ||
972 | * | ||
973 | * Note that if this function returns an error, then the rules will NOT | ||
974 | * have been committed, even if the error is due to interactions with | ||
975 | * rules from another app. | ||
976 | * | ||
977 | * @param rules Rules list to commit. | ||
978 | * @return 0 on success, or a negative error code on failure. | ||
979 | */ | ||
980 | extern int gxio_mpipe_rules_commit(gxio_mpipe_rules_t *rules); | ||
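
/* Editor's example (not part of the original header): a hedged sketch
 * of a complete rules sequence directing one channel's traffic to 256
 * previously allocated buckets, using the default buffer stack
 * selection (stacks == NULL).  "bucket" and "channel" come from the
 * allocation and link APIs; error checks are abbreviated.
 *
 *   gxio_mpipe_rules_t rules;
 *   gxio_mpipe_rules_init(&rules, &context);
 *   gxio_mpipe_rules_begin(&rules, bucket, 256, NULL);
 *   gxio_mpipe_rules_add_channel(&rules, channel);
 *   int rc = gxio_mpipe_rules_commit(&rules);
 */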
981 | |||
982 | /***************************************************************** | ||
983 | * Ingress Queue Wrapper * | ||
984 | ******************************************************************/ | ||
985 | |||
986 | /* | ||
987 | * | ||
988 | * Convenience functions for receiving packets from a NotifRing and | ||
989 | * sending packets via an eDMA ring. | ||
990 | * | ||
991 | * The mpipe ingress and egress hardware uses shared memory packet | ||
992 | * descriptors to describe packets that have arrived on ingress or | ||
993 | * are destined for egress. These descriptors are stored in shared | ||
994 | * memory ring buffers and written or read by hardware as necessary. | ||
995 | * The gxio library provides wrapper functions that manage the head and | ||
996 | * tail pointers for these rings, allowing the user to easily read or | ||
997 | * write packet descriptors. | ||
998 | * | ||
999 | * The initialization interface for ingress and egress rings is quite | ||
1000 | * similar. For example, to create an ingress queue, the user passes | ||
1001 | * a ::gxio_mpipe_iqueue_t state object, a ring number from | ||
1002 | * gxio_mpipe_alloc_notif_rings(), and the address of memory to hold a | ||
1003 | * ring buffer to the gxio_mpipe_iqueue_init() function. The function | ||
1004 | * returns success when the state object has been initialized and the | ||
1005 | * hardware configured to deliver packets to the specified ring | ||
1006 | * buffer. Similarly, gxio_mpipe_equeue_init() takes a | ||
1007 | * ::gxio_mpipe_equeue_t state object, a ring number from | ||
1008 | * gxio_mpipe_alloc_edma_rings(), and a shared memory buffer. | ||
1009 | * | ||
1010 | * @section gxio_mpipe_iqueue Working with Ingress Queues | ||
1011 | * | ||
1012 | * Once initialized, the gxio_mpipe_iqueue_t API provides two flows | ||
1013 | * for getting the ::gxio_mpipe_idesc_t packet descriptor associated | ||
1014 | * with incoming packets. The simplest is to call | ||
1015 | * gxio_mpipe_iqueue_get() or gxio_mpipe_iqueue_try_get(). These | ||
1016 | * functions copy the oldest packet descriptor out of the NotifRing and | ||
1017 | * into a descriptor provided by the caller. They also immediately | ||
1018 | * inform the hardware that a descriptor has been processed. | ||
1019 | * | ||
1020 | * For applications with stringent performance requirements, higher | ||
1021 | * efficiency can be achieved by avoiding the packet descriptor copy | ||
1022 | * and processing multiple descriptors at once. The | ||
1023 | * gxio_mpipe_iqueue_peek() and gxio_mpipe_iqueue_try_peek() functions | ||
1024 | * allow such optimizations. These functions provide a pointer to the | ||
1025 | * next valid ingress descriptor in the NotifRing's shared memory ring | ||
1026 | * buffer, and a count of how many contiguous descriptors are ready to | ||
1027 | * be processed. The application can then process any number of those | ||
1028 | * descriptors in place, calling gxio_mpipe_iqueue_consume() to inform | ||
1029 | * the hardware after each one has been processed. | ||
1030 | * | ||
1031 | * @section gxio_mpipe_equeue Working with Egress Queues | ||
1032 | * | ||
1033 | * Similarly, the egress queue API provides a high-performance | ||
1034 | * interface plus a simple wrapper for use in posting | ||
1035 | * ::gxio_mpipe_edesc_t egress packet descriptors. The simple | ||
1036 | * version, gxio_mpipe_equeue_put(), allows the programmer to wait for | ||
1037 | * an eDMA ring slot to become available and write a single descriptor | ||
1038 | * into the ring. | ||
1039 | * | ||
1040 | * Alternatively, you can reserve slots in the eDMA ring using | ||
1041 | * gxio_mpipe_equeue_reserve() or gxio_mpipe_equeue_try_reserve(), and | ||
1042 | * then fill in each slot using gxio_mpipe_equeue_put_at(). This | ||
1043 | * capability can be used to amortize the cost of reserving slots | ||
1044 | * across several packets. It also allows gather operations to be | ||
1045 | * performed on a shared equeue, by ensuring that the edescs for all | ||
1046 | * the fragments are all contiguous in the eDMA ring. | ||
1047 | * | ||
1048 | * The gxio_mpipe_equeue_reserve() and gxio_mpipe_equeue_try_reserve() | ||
1049 | * functions return a 63-bit "completion slot", which is actually a | ||
1050 | * sequence number, the low bits of which indicate the ring buffer | ||
1051 | * index and the high bits the number of times the application has | ||
1052 | * gone around the egress ring buffer. The extra bits allow an | ||
1053 | * application to check for egress completion by calling | ||
1054 | * gxio_mpipe_equeue_is_complete() to see whether a particular 'slot' | ||
1055 | * number has finished. Given the maximum packet rates of the Gx | ||
1056 | * processor, the 63-bit slot number will never wrap. | ||
1057 | * | ||
1058 | * In practice, most applications use the ::gxio_mpipe_edesc_t::hwb | ||
1059 | * bit to indicate that the buffers containing egress packet data | ||
1060 | * should be pushed onto a buffer stack when egress is complete. Such | ||
1061 | * applications generally do not need to know when an egress operation | ||
1062 | * completes (since there is no need to free a buffer post-egress), | ||
1063 | * and thus can use the optimized gxio_mpipe_equeue_reserve_fast() or | ||
1064 | * gxio_mpipe_equeue_try_reserve_fast() functions, which return a 24 | ||
1065 | * bit "slot", instead of a 63-bit "completion slot". | ||
1066 | * | ||
1067 | * Once a slot has been "reserved", it MUST be filled. If the | ||
1068 | * application reserves a slot and then decides that it does not | ||
1069 | * actually need it, it can set the ::gxio_mpipe_edesc_t::ns (no send) | ||
1070 | * bit on the descriptor passed to gxio_mpipe_equeue_put_at() to | ||
1071 | * indicate that no data should be sent. This technique can also be | ||
1072 | * used to drop an incoming packet, instead of forwarding it, since | ||
1073 | * any buffer will still be pushed onto the buffer stack when the | ||
1074 | * egress descriptor is processed. | ||
1075 | */ | ||
1076 | |||
1077 | /* A convenient interface to a NotifRing, for use by a single thread. | ||
1078 | */ | ||
1079 | typedef struct { | ||
1080 | |||
1081 | /* The context. */ | ||
1082 | gxio_mpipe_context_t *context; | ||
1083 | |||
1084 | /* The actual NotifRing. */ | ||
1085 | gxio_mpipe_idesc_t *idescs; | ||
1086 | |||
1087 | /* The number of entries. */ | ||
1088 | unsigned long num_entries; | ||
1089 | |||
1090 | /* The number of entries minus one. */ | ||
1091 | unsigned long mask_num_entries; | ||
1092 | |||
1093 | /* The log2() of the number of entries. */ | ||
1094 | unsigned long log2_num_entries; | ||
1095 | |||
1096 | /* The next entry. */ | ||
1097 | unsigned int head; | ||
1098 | |||
1099 | /* The NotifRing id. */ | ||
1100 | unsigned int ring; | ||
1101 | |||
1102 | #ifdef __BIG_ENDIAN__ | ||
1103 | /* The number of byteswapped entries. */ | ||
1104 | unsigned int swapped; | ||
1105 | #endif | ||
1106 | |||
1107 | } gxio_mpipe_iqueue_t; | ||
1108 | |||
1109 | /* Initialize an "iqueue". | ||
1110 | * | ||
1111 | * Takes the iqueue plus the same args as gxio_mpipe_init_notif_ring(). | ||
1112 | */ | ||
1113 | extern int gxio_mpipe_iqueue_init(gxio_mpipe_iqueue_t *iqueue, | ||
1114 | gxio_mpipe_context_t *context, | ||
1115 | unsigned int ring, | ||
1116 | void *mem, size_t mem_size, | ||
1117 | unsigned int mem_flags); | ||
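
/* Editor's example (not part of the original header): initializing an
 * iqueue on a freshly allocated NotifRing.  This replaces the explicit
 * gxio_mpipe_init_notif_ring() call; "ring_mem" is assumed to be a
 * physically contiguous region of the required size.
 *
 *   gxio_mpipe_iqueue_t iqueue;
 *   int ring = gxio_mpipe_alloc_notif_rings(&context, 1, 0, 0);
 *   size_t ring_bytes = 512 * sizeof(gxio_mpipe_idesc_t);
 *   int rc = gxio_mpipe_iqueue_init(&iqueue, &context, ring,
 *                                   ring_mem, ring_bytes, 0);
 */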
1118 | |||
1119 | /* Advance over some old entries in an iqueue. | ||
1120 | * | ||
1121 | * Please see the documentation for gxio_mpipe_iqueue_consume(). | ||
1122 | * | ||
1123 | * @param iqueue An ingress queue initialized via gxio_mpipe_iqueue_init(). | ||
1124 | * @param count The number of entries to advance over. | ||
1125 | */ | ||
1126 | static inline void gxio_mpipe_iqueue_advance(gxio_mpipe_iqueue_t *iqueue, | ||
1127 | int count) | ||
1128 | { | ||
1129 | /* Advance with proper wrap. */ | ||
1130 | int head = iqueue->head + count; | ||
1131 | iqueue->head = | ||
1132 | (head & iqueue->mask_num_entries) + | ||
1133 | (head >> iqueue->log2_num_entries); | ||
1134 | |||
1135 | #ifdef __BIG_ENDIAN__ | ||
1136 | /* HACK: Track swapped entries. */ | ||
1137 | iqueue->swapped -= count; | ||
1138 | #endif | ||
1139 | } | ||
1140 | |||
1141 | /* Release the ring and bucket for an old entry in an iqueue. | ||
1142 | * | ||
1143 | * Releasing the ring allows more packets to be delivered to the ring. | ||
1144 | * | ||
1145 | * Releasing the bucket allows flows using the bucket to be moved to a | ||
1146 | * new ring when using GXIO_MPIPE_BUCKET_DYNAMIC_FLOW_AFFINITY. | ||
1147 | * | ||
1148 | * This function is shorthand for "gxio_mpipe_credit(iqueue->context, | ||
1149 | * iqueue->ring, idesc->bucket_id, 1)", and it may be more convenient | ||
1150 | * to make that underlying call, using those values, instead of | ||
1151 | * tracking the entire "idesc". | ||
1152 | * | ||
1153 | * If packet processing is deferred, optimal performance requires that | ||
1154 | * the releasing be deferred as well. | ||
1155 | * | ||
1156 | * Please see the documentation for gxio_mpipe_iqueue_consume(). | ||
1157 | * | ||
1158 | * @param iqueue An ingress queue initialized via gxio_mpipe_iqueue_init(). | ||
1159 | * @param idesc The descriptor which was processed. | ||
1160 | */ | ||
1161 | static inline void gxio_mpipe_iqueue_release(gxio_mpipe_iqueue_t *iqueue, | ||
1162 | gxio_mpipe_idesc_t *idesc) | ||
1163 | { | ||
1164 | gxio_mpipe_credit(iqueue->context, iqueue->ring, idesc->bucket_id, 1); | ||
1165 | } | ||
1166 | |||
1167 | /* Consume a packet from an "iqueue". | ||
1168 | * | ||
1169 | * After processing packets peeked at via gxio_mpipe_iqueue_peek() | ||
1170 | * or gxio_mpipe_iqueue_try_peek(), you must call this function, or | ||
1171 | * gxio_mpipe_iqueue_advance() plus gxio_mpipe_iqueue_release(), to | ||
1172 | * advance over those entries, and release their rings and buckets. | ||
1173 | * | ||
1174 | * You may call this function as each packet is processed, or you can | ||
1175 | * wait until several packets have been processed. | ||
1176 | * | ||
1177 | * Note that if you are using a single bucket, and you are handling | ||
1178 | * batches of N packets, then you can replace several calls to this | ||
1179 | * function with calls to "gxio_mpipe_iqueue_advance(iqueue, N)" and | ||
1180 | * "gxio_mpipe_credit(iqueue->context, iqueue->ring, bucket, N)". | ||
1181 | * | ||
1182 | * Note that if your classifier sets "idesc->nr", then you should | ||
1183 | * explicitly call "gxio_mpipe_iqueue_advance(iqueue, 1)" plus | ||
1184 | * "gxio_mpipe_credit(iqueue->context, iqueue->ring, -1, 1)", to | ||
1185 | * avoid incorrectly crediting the (unused) bucket. | ||
1186 | * | ||
1187 | * @param iqueue An ingress queue initialized via gxio_mpipe_iqueue_init(). | ||
1188 | * @param idesc The descriptor which was processed. | ||
1189 | */ | ||
1190 | static inline void gxio_mpipe_iqueue_consume(gxio_mpipe_iqueue_t *iqueue, | ||
1191 | gxio_mpipe_idesc_t *idesc) | ||
1192 | { | ||
1193 | gxio_mpipe_iqueue_advance(iqueue, 1); | ||
1194 | gxio_mpipe_iqueue_release(iqueue, idesc); | ||
1195 | } | ||
1196 | |||
1197 | /* Peek at the next packet(s) in an "iqueue", without waiting. | ||
1198 | * | ||
1199 | * If no packets are available, fills idesc_ref with NULL, and then | ||
1200 | * returns ::GXIO_MPIPE_ERR_IQUEUE_EMPTY. Otherwise, fills idesc_ref | ||
1201 | * with the address of the next valid packet descriptor, and returns | ||
1202 | * the maximum number of valid descriptors which can be processed. | ||
1203 | * You may process fewer descriptors if desired. | ||
1204 | * | ||
1205 | * Call gxio_mpipe_iqueue_consume() on each packet once it has been | ||
1206 | * processed (or dropped), to allow more packets to be delivered. | ||
1207 | * | ||
1208 | * @param iqueue An ingress queue initialized via gxio_mpipe_iqueue_init(). | ||
1209 | * @param idesc_ref A pointer to a packet descriptor pointer. | ||
1210 | * @return The (positive) number of packets which can be processed, | ||
1211 | * or ::GXIO_MPIPE_ERR_IQUEUE_EMPTY if no packets are available. | ||
1212 | */ | ||
1213 | static inline int gxio_mpipe_iqueue_try_peek(gxio_mpipe_iqueue_t *iqueue, | ||
1214 | gxio_mpipe_idesc_t **idesc_ref) | ||
1215 | { | ||
1216 | gxio_mpipe_idesc_t *next; | ||
1217 | |||
1218 | uint64_t head = iqueue->head; | ||
1219 | uint64_t tail = __gxio_mmio_read(iqueue->idescs); | ||
1220 | |||
1221 | /* Available entries. */ | ||
1222 | uint64_t avail = | ||
1223 | (tail >= head) ? (tail - head) : (iqueue->num_entries - head); | ||
1224 | |||
1225 | if (avail == 0) { | ||
1226 | *idesc_ref = NULL; | ||
1227 | return GXIO_MPIPE_ERR_IQUEUE_EMPTY; | ||
1228 | } | ||
1229 | |||
1230 | next = &iqueue->idescs[head]; | ||
1231 | |||
1232 | /* ISSUE: Is this helpful? */ | ||
1233 | __insn_prefetch(next); | ||
1234 | |||
1235 | #ifdef __BIG_ENDIAN__ | ||
1236 | /* HACK: Swap new entries directly in memory. */ | ||
1237 | { | ||
1238 | int i, j; | ||
1239 | for (i = iqueue->swapped; i < avail; i++) { | ||
1240 | for (j = 0; j < 8; j++) | ||
1241 | next[i].words[j] = | ||
1242 | __builtin_bswap64(next[i].words[j]); | ||
1243 | } | ||
1244 | iqueue->swapped = avail; | ||
1245 | } | ||
1246 | #endif | ||
1247 | |||
1248 | *idesc_ref = next; | ||
1249 | |||
1250 | return avail; | ||
1251 | } | ||
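
/* Editor's example (not part of the original header): a hedged sketch
 * of a batch receive loop built on gxio_mpipe_iqueue_try_peek() and
 * gxio_mpipe_iqueue_consume().  The per-packet processing step is
 * hypothetical.
 *
 *   gxio_mpipe_idesc_t *idesc;
 *   int i;
 *   int n = gxio_mpipe_iqueue_try_peek(&iqueue, &idesc);
 *   if (n > 0) {
 *           for (i = 0; i < n; i++) {
 *                   ... process idesc[i] in place ...
 *                   gxio_mpipe_iqueue_consume(&iqueue, &idesc[i]);
 *           }
 *   }
 */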
1252 | |||
1253 | /* Drop a packet by pushing its buffer (if appropriate). | ||
1254 | * | ||
1255 | * NOTE: The caller must still call gxio_mpipe_iqueue_consume() if idesc | ||
1256 | * came from gxio_mpipe_iqueue_try_peek() or gxio_mpipe_iqueue_peek(). | ||
1257 | * | ||
1258 | * @param iqueue An ingress queue initialized via gxio_mpipe_iqueue_init(). | ||
1259 | * @param idesc A packet descriptor. | ||
1260 | */ | ||
1261 | static inline void gxio_mpipe_iqueue_drop(gxio_mpipe_iqueue_t *iqueue, | ||
1262 | gxio_mpipe_idesc_t *idesc) | ||
1263 | { | ||
1264 | /* FIXME: Handle "chaining" properly. */ | ||
1265 | |||
1266 | if (!idesc->be) { | ||
1267 | unsigned char *va = gxio_mpipe_idesc_get_va(idesc); | ||
1268 | gxio_mpipe_push_buffer(iqueue->context, idesc->stack_idx, va); | ||
1269 | } | ||
1270 | } | ||
1271 | |||
1272 | /***************************************************************** | ||
1273 | * Egress Queue Wrapper * | ||
1274 | ******************************************************************/ | ||
1275 | |||
1276 | /* A convenient, thread-safe interface to an eDMA ring. */ | ||
1277 | typedef struct { | ||
1278 | |||
1279 | /* State object for tracking head and tail pointers. */ | ||
1280 | __gxio_dma_queue_t dma_queue; | ||
1281 | |||
1282 | /* The ring entries. */ | ||
1283 | gxio_mpipe_edesc_t *edescs; | ||
1284 | |||
1285 | /* The number of entries minus one. */ | ||
1286 | unsigned long mask_num_entries; | ||
1287 | |||
1288 | /* The log2() of the number of entries. */ | ||
1289 | unsigned long log2_num_entries; | ||
1290 | |||
1291 | } gxio_mpipe_equeue_t; | ||
1292 | |||
1293 | /* Initialize an "equeue". | ||
1294 | * | ||
1295 | * Takes the equeue plus the same args as gxio_mpipe_init_edma_ring(). | ||
1296 | */ | ||
1297 | extern int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue, | ||
1298 | gxio_mpipe_context_t *context, | ||
1299 | unsigned int edma_ring_id, | ||
1300 | unsigned int channel, | ||
1301 | void *mem, unsigned int mem_size, | ||
1302 | unsigned int mem_flags); | ||
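
/* Editor's example (not part of the original header): initializing an
 * equeue on a freshly allocated eDMA ring.  This replaces the explicit
 * gxio_mpipe_init_edma_ring() call; "channel" and "ering_mem" are as
 * in the earlier eDMA ring sketch.
 *
 *   gxio_mpipe_equeue_t equeue;
 *   int ering = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0);
 *   size_t ering_bytes = 512 * sizeof(gxio_mpipe_edesc_t);
 *   int rc = gxio_mpipe_equeue_init(&equeue, &context, ering, channel,
 *                                   ering_mem, ering_bytes, 0);
 */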
1303 | |||
1304 | /* Reserve completion slots for edescs. | ||
1305 | * | ||
1306 | * Use gxio_mpipe_equeue_put_at() to actually populate the slots. | ||
1307 | * | ||
1308 | * This function is slower than gxio_mpipe_equeue_reserve_fast(), but | ||
1309 | * returns a full 64 bit completion slot, which can be used with | ||
1310 | * gxio_mpipe_equeue_is_complete(). | ||
1311 | * | ||
1312 | * @param equeue An egress queue initialized via gxio_mpipe_equeue_init(). | ||
1313 | * @param num Number of slots to reserve (must be non-zero). | ||
1314 | * @return The first reserved completion slot, or a negative error code. | ||
1315 | */ | ||
1316 | static inline int64_t gxio_mpipe_equeue_reserve(gxio_mpipe_equeue_t *equeue, | ||
1317 | unsigned int num) | ||
1318 | { | ||
1319 | return __gxio_dma_queue_reserve_aux(&equeue->dma_queue, num, true); | ||
1320 | } | ||
1321 | |||
1322 | /* Reserve completion slots for edescs, if possible. | ||
1323 | * | ||
1324 | * Use gxio_mpipe_equeue_put_at() to actually populate the slots. | ||
1325 | * | ||
1326 | * This function is slower than gxio_mpipe_equeue_try_reserve_fast(), | ||
1327 | * but returns a full 64 bit completion slot, which can be used with | ||
1328 | * gxio_mpipe_equeue_is_complete(). | ||
1329 | * | ||
1330 | * @param equeue An egress queue initialized via gxio_mpipe_equeue_init(). | ||
1331 | * @param num Number of slots to reserve (must be non-zero). | ||
1332 | * @return The first reserved completion slot, or a negative error code. | ||
1333 | */ | ||
1334 | static inline int64_t gxio_mpipe_equeue_try_reserve(gxio_mpipe_equeue_t | ||
1335 | *equeue, unsigned int num) | ||
1336 | { | ||
1337 | return __gxio_dma_queue_reserve_aux(&equeue->dma_queue, num, false); | ||
1338 | } | ||
1339 | |||
1340 | /* Reserve slots for edescs. | ||
1341 | * | ||
1342 | * Use gxio_mpipe_equeue_put_at() to actually populate the slots. | ||
1343 | * | ||
1344 | * This function is faster than gxio_mpipe_equeue_reserve(), but | ||
1345 | * returns a 24 bit slot (instead of a 64 bit completion slot), which | ||
1346 | * thus cannot be used with gxio_mpipe_equeue_is_complete(). | ||
1347 | * | ||
1348 | * @param equeue An egress queue initialized via gxio_mpipe_equeue_init(). | ||
1349 | * @param num Number of slots to reserve (should be non-zero). | ||
1350 | * @return The first reserved slot, or a negative error code. | ||
1351 | */ | ||
1352 | static inline int64_t gxio_mpipe_equeue_reserve_fast(gxio_mpipe_equeue_t | ||
1353 | *equeue, unsigned int num) | ||
1354 | { | ||
1355 | return __gxio_dma_queue_reserve(&equeue->dma_queue, num, true, false); | ||
1356 | } | ||
1357 | |||
1358 | /* Reserve slots for edescs, if possible. | ||
1359 | * | ||
1360 | * Use gxio_mpipe_equeue_put_at() to actually populate the slots. | ||
1361 | * | ||
1362 | * This function is faster than gxio_mpipe_equeue_try_reserve(), but | ||
1363 | * returns a 24 bit slot (instead of a 64 bit completion slot), which | ||
1364 | * thus cannot be used with gxio_mpipe_equeue_is_complete(). | ||
1365 | * | ||
1366 | * @param equeue An egress queue initialized via gxio_mpipe_equeue_init(). | ||
1367 | * @param num Number of slots to reserve (should be non-zero). | ||
1368 | * @return The first reserved slot, or a negative error code. | ||
1369 | */ | ||
1370 | static inline int64_t gxio_mpipe_equeue_try_reserve_fast(gxio_mpipe_equeue_t | ||
1371 | *equeue, | ||
1372 | unsigned int num) | ||
1373 | { | ||
1374 | return __gxio_dma_queue_reserve(&equeue->dma_queue, num, false, false); | ||
1375 | } | ||
1376 | |||
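For callers that do not need completion tracking, the fast-path reservation above is normally paired with gxio_mpipe_equeue_put_at() (declared below). The following minimal sketch batches several pre-built descriptors into consecutive slots; it assumes an equeue already set up with gxio_mpipe_equeue_init() and descriptors filled in by the caller.

/* Sketch: post "count" pre-built edescs via the 24-bit fast path.
 * No completion tracking is possible with slots reserved this way. */
static int equeue_post_batch(gxio_mpipe_equeue_t *equeue,
			     gxio_mpipe_edesc_t *edescs, unsigned int count)
{
	int64_t slot = gxio_mpipe_equeue_reserve_fast(equeue, count);
	unsigned int i;

	if (slot < 0)
		return (int)slot;

	/* Slots are consecutive, so post each descriptor at slot + i. */
	for (i = 0; i < count; i++)
		gxio_mpipe_equeue_put_at(equeue, edescs[i], slot + i);

	return 0;
}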
1377 | /* | ||
1378 | * HACK: This helper function tricks gcc 4.6 into avoiding saving | ||
1379 | * a copy of "edesc->words[0]" on the stack for no obvious reason. | ||
1380 | */ | ||
1381 | |||
1382 | static inline void gxio_mpipe_equeue_put_at_aux(gxio_mpipe_equeue_t *equeue, | ||
1383 | uint_reg_t ew[2], | ||
1384 | unsigned long slot) | ||
1385 | { | ||
1386 | unsigned long edma_slot = slot & equeue->mask_num_entries; | ||
1387 | gxio_mpipe_edesc_t *edesc_p = &equeue->edescs[edma_slot]; | ||
1388 | |||
1389 | /* | ||
1390 | * ISSUE: Could set eDMA ring to be on generation 1 at start, which | ||
1391 | * would avoid the negation here, perhaps allowing "__insn_bfins()". | ||
1392 | */ | ||
1393 | ew[0] |= !((slot >> equeue->log2_num_entries) & 1); | ||
1394 | |||
1395 | /* | ||
1396 | * NOTE: We use "__gxio_mmio_write64()", plus the fact that the eDMA | ||
1397 | * queue alignment restrictions ensure that these two words are on | ||
1398 | * the same cacheline, to force proper ordering between the stores. | ||
1399 | */ | ||
1400 | __gxio_mmio_write64(&edesc_p->words[1], ew[1]); | ||
1401 | __gxio_mmio_write64(&edesc_p->words[0], ew[0]); | ||
1402 | } | ||
1403 | |||
1404 | /* Post an edesc to a given slot in an equeue. | ||
1405 | * | ||
1406 | * This function copies the supplied edesc into entry "slot mod N" in | ||
1407 | * the underlying ring, setting the "gen" bit to the appropriate value | ||
1408 | * based on "(slot mod N*2)", where "N" is the size of the ring. Note | ||
1409 | * that the higher bits of slot are unused, and thus, this function | ||
1410 | * can handle "slots" as well as "completion slots". | ||
1411 | * | ||
1412 | * Normally this function is used to fill in slots reserved by | ||
1413 | * gxio_mpipe_equeue_try_reserve(), gxio_mpipe_equeue_reserve(), | ||
1414 | * gxio_mpipe_equeue_try_reserve_fast(), or | ||
1415 | * gxio_mpipe_equeue_reserve_fast(). | ||
1416 | * | ||
1417 | * This function can also be used without "reserving" slots, if the | ||
1418 | * application KNOWS that the ring can never overflow, for example, by | ||
1419 | * pushing fewer buffers into the buffer stacks than there are total | ||
1420 | * slots in the equeue, but this is NOT recommended. | ||
1421 | * | ||
1422 | * @param equeue An egress queue initialized via gxio_mpipe_equeue_init(). | ||
1423 | * @param edesc The egress descriptor to be posted. | ||
1424 | * @param slot An egress slot (only the low bits are actually used). | ||
1425 | */ | ||
1426 | static inline void gxio_mpipe_equeue_put_at(gxio_mpipe_equeue_t *equeue, | ||
1427 | gxio_mpipe_edesc_t edesc, | ||
1428 | unsigned long slot) | ||
1429 | { | ||
1430 | gxio_mpipe_equeue_put_at_aux(equeue, edesc.words, slot); | ||
1431 | } | ||
1432 | |||
1433 | /* Post an edesc to the next slot in an equeue. | ||
1434 | * | ||
1435 | * This is a convenience wrapper around | ||
1436 | * gxio_mpipe_equeue_reserve_fast() and gxio_mpipe_equeue_put_at(). | ||
1437 | * | ||
1438 | * @param equeue An egress queue initialized via gxio_mpipe_equeue_init(). | ||
1439 | * @param edesc The egress descriptor to be posted. | ||
1440 | * @return 0 on success. | ||
1441 | */ | ||
1442 | static inline int gxio_mpipe_equeue_put(gxio_mpipe_equeue_t *equeue, | ||
1443 | gxio_mpipe_edesc_t edesc) | ||
1444 | { | ||
1445 | int64_t slot = gxio_mpipe_equeue_reserve_fast(equeue, 1); | ||
1446 | if (slot < 0) | ||
1447 | return (int)slot; | ||
1448 | |||
1449 | gxio_mpipe_equeue_put_at(equeue, edesc, slot); | ||
1450 | |||
1451 | return 0; | ||
1452 | } | ||
1453 | |||
1454 | /* Ask the mPIPE hardware to egress outstanding packets immediately. | ||
1455 | * | ||
1456 | * This call is not necessary, but may slightly reduce overall latency. | ||
1457 | * | ||
1458 | * Technically, you should flush all gxio_mpipe_equeue_put_at() writes | ||
1459 | * to memory before calling this function, to ensure the descriptors | ||
1460 | * are visible in memory before the mPIPE hardware actually looks for | ||
1461 | * them. But this should be very rare, and the only side effect would | ||
1462 | * be increased latency, so it is up to the caller to decide whether | ||
1463 | * or not to flush memory. | ||
1464 | * | ||
1465 | * @param equeue An egress queue initialized via gxio_mpipe_equeue_init(). | ||
1466 | */ | ||
1467 | static inline void gxio_mpipe_equeue_flush(gxio_mpipe_equeue_t *equeue) | ||
1468 | { | ||
1469 | /* Use "ring_idx = 0" and "count = 0" to "wake up" the eDMA ring. */ | ||
1470 | MPIPE_EDMA_POST_REGION_VAL_t val = { {0} }; | ||
1471 | /* Flush the write buffers. */ | ||
1472 | __insn_flushwb(); | ||
1473 | __gxio_mmio_write(equeue->dma_queue.post_region_addr, val.word); | ||
1474 | } | ||
1475 | |||
1476 | /* Determine if a given edesc has been completed. | ||
1477 | * | ||
1478 | * Note that this function requires a "completion slot", and thus may | ||
1479 | * NOT be used with a "slot" from gxio_mpipe_equeue_reserve_fast() or | ||
1480 | * gxio_mpipe_equeue_try_reserve_fast(). | ||
1481 | * | ||
1482 | * @param equeue An egress queue initialized via gxio_mpipe_equeue_init(). | ||
1483 | * @param completion_slot The completion slot used by the edesc. | ||
1484 | * @param update If true, and the desc does not appear to have completed | ||
1485 | * yet, then update any software cache of the hardware completion counter, | ||
1486 | * and check again. This should normally be true. | ||
1487 | * @return True iff the given edesc has been completed. | ||
1488 | */ | ||
1489 | static inline int gxio_mpipe_equeue_is_complete(gxio_mpipe_equeue_t *equeue, | ||
1490 | int64_t completion_slot, | ||
1491 | int update) | ||
1492 | { | ||
1493 | return __gxio_dma_queue_is_complete(&equeue->dma_queue, | ||
1494 | completion_slot, update); | ||
1495 | } | ||
1496 | |||
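Putting the wrappers above together, the sketch below sends one descriptor and then polls for its completion. It is illustrative only: the egress descriptor field names used (bound, va, xfer_size) are assumptions about the gxio_mpipe_edesc_t layout, which is defined elsewhere and not shown in this excerpt, and memset()/cpu_relax() are the usual kernel helpers.

/* Sketch: send one packet with completion tracking.  Field names
 * bound/va/xfer_size are assumed; adjust to the real edesc layout. */
static int equeue_send_and_wait(gxio_mpipe_equeue_t *equeue,
				void *buf, size_t len)
{
	gxio_mpipe_edesc_t edesc;
	int64_t cslot;

	memset(&edesc, 0, sizeof(edesc));
	edesc.bound = 1;		/* assumed: end-of-packet marker */
	edesc.va = (unsigned long)buf;	/* assumed: buffer virtual address */
	edesc.xfer_size = len;		/* assumed: bytes to transmit */

	/* Slow path: returns a full 64-bit completion slot. */
	cslot = gxio_mpipe_equeue_reserve(equeue, 1);
	if (cslot < 0)
		return (int)cslot;

	gxio_mpipe_equeue_put_at(equeue, edesc, cslot);

	/* Optional: prod the hardware rather than waiting for it to notice. */
	gxio_mpipe_equeue_flush(equeue);

	/* Poll; "update = 1" refreshes the cached completion counter. */
	while (!gxio_mpipe_equeue_is_complete(equeue, cslot, 1))
		cpu_relax();

	return 0;
}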
1497 | /***************************************************************** | ||
1498 | * Link Management * | ||
1499 | ******************************************************************/ | ||
1500 | |||
1501 | /* | ||
1502 | * | ||
1503 | * Functions for manipulating and sensing the state and configuration | ||
1504 | * of physical network links. | ||
1505 | * | ||
1506 | * @section gxio_mpipe_link_perm Link Permissions | ||
1507 | * | ||
1508 | * Opening a link (with gxio_mpipe_link_open()) requests a set of link | ||
1509 | * permissions, which control what may be done with the link, and potentially | ||
1510 | * what permissions may be granted to other processes. | ||
1511 | * | ||
1512 | * Data permission allows the process to receive packets from the link by | ||
1513 | * specifying the link's channel number in mPIPE packet distribution rules, | ||
1514 | * and to send packets to the link by using the link's channel number as | ||
1515 | * the target for an eDMA ring. | ||
1516 | * | ||
1517 | * Stats permission allows the process to retrieve link attributes (such as | ||
1518 | * the speeds it is capable of running at, or whether it is currently up), and | ||
1519 | * to read and write certain statistics-related registers in the link's MAC. | ||
1520 | * | ||
1521 | * Control permission allows the process to retrieve and modify link attributes | ||
1522 | * (so that it may, for example, bring the link up and take it down), and | ||
1523 | * read and write many registers in the link's MAC and PHY. | ||
1524 | * | ||
1525 | * Any permission may be requested as shared, which allows other processes | ||
1526 | * to also request shared permission, or exclusive, which prevents other | ||
1527 | * processes from requesting it. In keeping with GXIO's typical usage in | ||
1528 | * an embedded environment, the defaults for all permissions are shared. | ||
1529 | * | ||
1530 | * Permissions are granted on a first-come, first-served basis, so if two | ||
1531 | * applications request an exclusive permission on the same link, the one | ||
1532 | * to run first will win. Note, however, that some system components, like | ||
1533 | * the kernel Ethernet driver, may get an opportunity to open links before | ||
1534 | * any applications run. | ||
1535 | * | ||
1536 | * @section gxio_mpipe_link_names Link Names | ||
1537 | * | ||
1538 | * Link names are of the form gbe<em>number</em> (for Gigabit Ethernet), | ||
1539 | * xgbe<em>number</em> (for 10 Gigabit Ethernet), loop<em>number</em> (for | ||
1540 | * internal mPIPE loopback), or ilk<em>number</em>/<em>channel</em> | ||
1541 | * (for Interlaken links); for instance, gbe0, xgbe1, loop3, and | ||
1542 | * ilk0/12 are all possible link names. The correspondence between | ||
1543 | * the link name and an mPIPE instance number or mPIPE channel number is | ||
1544 | * system-dependent; not all links will exist on all systems, and the set | ||
1545 | * of numbers used for a particular link type may not start at zero and may | ||
1546 | * not be contiguous. Use gxio_mpipe_link_enumerate() to retrieve the set of | ||
1547 | * links which exist on a system, and always use gxio_mpipe_link_instance() | ||
1548 | * to determine which mPIPE controls a particular link. | ||
1549 | * | ||
1550 | * Note that in some cases, links may share hardware, such as PHYs, or | ||
1551 | * internal mPIPE buffers; in these cases, only one of the links may be | ||
1552 | * opened at a time. This is especially common with xgbe and gbe ports, | ||
1553 | * since each xgbe port uses 4 SERDES lanes, each of which may also be | ||
1554 | * configured as one gbe port. | ||
1555 | * | ||
1556 | * @section gxio_mpipe_link_states Link States | ||
1557 | * | ||
1558 | * The mPIPE link management model revolves around three different states, | ||
1559 | * which are maintained for each link: | ||
1560 | * | ||
1561 | * 1. The <em>current</em> link state: is the link up now, and if so, at | ||
1562 | * what speed? | ||
1563 | * | ||
1564 | * 2. The <em>desired</em> link state: what do we want the link state to be? | ||
1565 | * The system is always working to make this state the current state; | ||
1566 | * thus, if the desired state is up, and the link is down, we'll be | ||
1567 | * constantly trying to bring it up, automatically. | ||
1568 | * | ||
1569 | * 3. The <em>possible</em> link state: what speeds are valid for this | ||
1570 | * particular link? Or, in other words, what are the capabilities of | ||
1571 | * the link hardware? | ||
1572 | * | ||
1573 | * These link states are not, strictly speaking, related to application | ||
1574 | * state; they may be manipulated at any time, whether or not the link | ||
1575 | * is currently being used for data transfer. However, for convenience, | ||
1576 | * gxio_mpipe_link_open() and gxio_mpipe_link_close() (or application exit) | ||
1577 | * can affect the link state. These implicit link management operations | ||
1578 | * may be modified or disabled by the use of link open flags. | ||
1579 | * | ||
1580 | * From an application, you can use gxio_mpipe_link_get_attr() | ||
1581 | * and gxio_mpipe_link_set_attr() to manipulate the link states. | ||
1582 | * gxio_mpipe_link_get_attr() with ::GXIO_MPIPE_LINK_POSSIBLE_STATE | ||
1583 | * gets you the possible link state. gxio_mpipe_link_get_attr() with | ||
1584 | * ::GXIO_MPIPE_LINK_CURRENT_STATE gets you the current link state. | ||
1585 | * Finally, gxio_mpipe_link_set_attr() and gxio_mpipe_link_get_attr() | ||
1586 | * with ::GXIO_MPIPE_LINK_DESIRED_STATE allow you to modify or retrieve | ||
1587 | * the desired link state. | ||
1588 | * | ||
1589 | * If you want to manage a link from a part of your application which isn't | ||
1590 | * involved in packet processing, you can use the ::GXIO_MPIPE_LINK_NO_DATA | ||
1591 | * flag on a gxio_mpipe_link_open() call. This opens the link, but does | ||
1592 | * not request data permission, so it does not conflict with any exclusive | ||
1593 | * permissions which may be held by other processes. You can then use | ||
1594 | * gxio_mpipe_link_get_attr() and gxio_mpipe_link_set_attr() on this link | ||
1595 | * object to bring up or take down the link. | ||
1596 | * | ||
1597 | * Some links support link state bits which enable various loopback | ||
1598 | * modes. ::GXIO_MPIPE_LINK_LOOP_MAC tests datapaths within the Tile | ||
1599 | * Processor itself; ::GXIO_MPIPE_LINK_LOOP_PHY tests the datapath between | ||
1600 | * the Tile Processor and the external physical layer interface chip; and | ||
1601 | * ::GXIO_MPIPE_LINK_LOOP_EXT tests the entire network datapath with the | ||
1602 | * aid of an external loopback connector. In addition to enabling hardware | ||
1603 | * testing, such configuration can be useful for software testing, as well. | ||
1604 | * | ||
1605 | * When LOOP_MAC or LOOP_PHY is enabled, packets transmitted on a channel | ||
1606 | * will be received by that channel, instead of being emitted on the | ||
1607 | * physical link, and packets received on the physical link will be ignored. | ||
1608 | * Other than that, all standard GXIO operations work as you might expect. | ||
1609 | * Note that loopback operation requires that the link be brought up using | ||
1610 | * one or more of the GXIO_MPIPE_LINK_SPEED_xxx link state bits. | ||
1611 | * | ||
1612 | * Those familiar with previous versions of the MDE on TILEPro hardware | ||
1613 | * will notice significant similarities between the NetIO link management | ||
1614 | * model and the mPIPE link management model. However, the NetIO model | ||
1615 | * was developed in stages, and some of its features -- for instance, | ||
1616 | * the default setting of certain flags -- were shaped by the need to be | ||
1617 | * compatible with previous versions of NetIO. Since the features provided | ||
1618 | * by the mPIPE hardware and the mPIPE GXIO library are significantly | ||
1619 | * different than those provided by NetIO, in some cases, we have made | ||
1620 | * different choices in the mPIPE link management API. Thus, please read | ||
1621 | * this documentation carefully before assuming that mPIPE link management | ||
1622 | * operations are exactly equivalent to their NetIO counterparts. | ||
1623 | */ | ||
1624 | |||
1625 | /* An object used to manage mPIPE link state and resources. */ | ||
1626 | typedef struct { | ||
1627 | /* The overall mPIPE context. */ | ||
1628 | gxio_mpipe_context_t *context; | ||
1629 | |||
1630 | /* The channel number used by this link. */ | ||
1631 | uint8_t channel; | ||
1632 | |||
1633 | /* The MAC index used by this link. */ | ||
1634 | uint8_t mac; | ||
1635 | } gxio_mpipe_link_t; | ||
1636 | |||
1637 | /* Retrieve one of this system's legal link names, and its MAC address. | ||
1638 | * | ||
1639 | * @param index Link name index. If a system supports N legal link names, | ||
1640 | * then indices between 0 and N - 1, inclusive, each correspond to one of | ||
1641 | * those names. Thus, to retrieve all of a system's legal link names, | ||
1642 | * call this function in a loop, starting with an index of zero, and | ||
1643 | * incrementing it once per iteration until -1 is returned. | ||
1644 | * @param link_name Pointer to the buffer which will receive the retrieved | ||
1645 | * link name. The buffer should contain space for at least | ||
1646 | * ::GXIO_MPIPE_LINK_NAME_LEN bytes; the returned name, including the | ||
1647 | * terminating null byte, will be no longer than that. | ||
1648 | * @param mac_addr Pointer to the buffer which will receive the retrieved | ||
1649 | * MAC address. The buffer should contain space for at least 6 bytes. | ||
1650 | * @return Zero if a link name was successfully retrieved; -1 if one was | ||
1651 | * not. | ||
1652 | */ | ||
1653 | extern int gxio_mpipe_link_enumerate_mac(int index, char *link_name, | ||
1654 | uint8_t *mac_addr); | ||
1655 | |||
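A typical way to discover which links exist is to walk the index space until this routine reports failure. A minimal sketch, using the GXIO_MPIPE_LINK_NAME_LEN constant referenced above and the standard kernel pr_info() helper:

/* Sketch: print every link name and MAC address the system reports. */
static void list_links(void)
{
	char name[GXIO_MPIPE_LINK_NAME_LEN];
	uint8_t mac[6];
	int i;

	for (i = 0; gxio_mpipe_link_enumerate_mac(i, name, mac) == 0; i++)
		pr_info("link %d: %s %02x:%02x:%02x:%02x:%02x:%02x\n",
			i, name, mac[0], mac[1], mac[2],
			mac[3], mac[4], mac[5]);
}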
1656 | /* Open an mPIPE link. | ||
1657 | * | ||
1658 | * A link must be opened before it may be used to send or receive packets, | ||
1659 | * and before its state may be examined or changed. Depending upon the | ||
1660 | * link's intended use, one or more link permissions may be requested via | ||
1661 | * the flags parameter; see @ref gxio_mpipe_link_perm. In addition, flags | ||
1662 | * may request that the link's state be modified at open time. See @ref | ||
1663 | * gxio_mpipe_link_states and @ref gxio_mpipe_link_open_flags for more detail. | ||
1664 | * | ||
1665 | * @param link A link state object, which will be initialized if this | ||
1666 | * function completes successfully. | ||
1667 | * @param context An initialized mPIPE context. | ||
1668 | * @param link_name Name of the link. | ||
1669 | * @param flags Zero or more @ref gxio_mpipe_link_open_flags, ORed together. | ||
1670 | * @return 0 if the link was successfully opened, or a negative error code. | ||
1671 | * | ||
1672 | */ | ||
1673 | extern int gxio_mpipe_link_open(gxio_mpipe_link_t *link, | ||
1674 | gxio_mpipe_context_t *context, | ||
1675 | const char *link_name, unsigned int flags); | ||
1676 | |||
1677 | /* Close an mPIPE link. | ||
1678 | * | ||
1679 | * Closing a link makes it available for use by other processes. Once | ||
1680 | * a link has been closed, packets may no longer be sent on or received | ||
1681 | * from the link, and its state may not be examined or changed. | ||
1682 | * | ||
1683 | * @param link A link state object, which will no longer be initialized | ||
1684 | * if this function completes successfully. | ||
1685 | * @return 0 if the link was successfully closed, or a negative error code. | ||
1686 | * | ||
1687 | */ | ||
1688 | extern int gxio_mpipe_link_close(gxio_mpipe_link_t *link); | ||
1689 | |||
1690 | /* Return a link's channel number. | ||
1691 | * | ||
1692 | * @param link A properly initialized link state object. | ||
1693 | * @return The channel number for the link. | ||
1694 | */ | ||
1695 | static inline int gxio_mpipe_link_channel(gxio_mpipe_link_t *link) | ||
1696 | { | ||
1697 | return link->channel; | ||
1698 | } | ||
1699 | |||
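Taken together, the calls above support a simple open/use/close pattern, sketched here. The link name "gbe0" is only an example (see the link-name discussion earlier), and passing zero for the flags selects the documented defaults (shared data, stats, and control permission, with automatic up/down state management).

/* Sketch: open an example link, note its channel, then release it. */
static int probe_link(gxio_mpipe_context_t *context)
{
	gxio_mpipe_link_t link;
	int rc, channel;

	rc = gxio_mpipe_link_open(&link, context, "gbe0", 0);
	if (rc < 0)
		return rc;

	/* The channel number is what distribution rules and eDMA rings use. */
	channel = gxio_mpipe_link_channel(&link);
	pr_info("gbe0 is on mPIPE channel %d\n", channel);

	return gxio_mpipe_link_close(&link);
}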
1700 | /////////////////////////////////////////////////////////////////// | ||
1701 | // Timestamp // | ||
1702 | /////////////////////////////////////////////////////////////////// | ||
1703 | |||
1704 | /* Get the timestamp of mPIPE when this routine is called. | ||
1705 | * | ||
1706 | * @param context An initialized mPIPE context. | ||
1707 | * @param ts A timespec structure in which to store the current time. | ||
1708 | * @return If the call was successful, zero; otherwise, a negative error | ||
1709 | * code. | ||
1710 | */ | ||
1711 | extern int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context, | ||
1712 | struct timespec *ts); | ||
1713 | |||
1714 | /* Set the timestamp of mPIPE. | ||
1715 | * | ||
1716 | * @param context An initialized mPIPE context. | ||
1717 | * @param ts A timespec structure containing the requested time. | ||
1718 | * @return If the call was successful, zero; otherwise, a negative error | ||
1719 | * code. | ||
1720 | */ | ||
1721 | extern int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context, | ||
1722 | const struct timespec *ts); | ||
1723 | |||
1724 | /* Adjust the timestamp of mPIPE. | ||
1725 | * | ||
1726 | * @param context An initialized mPIPE context. | ||
1727 | * @param delta A signed time offset to adjust, in nanoseconds. | ||
1728 | * The absolute value of this parameter must be less than or | ||
1729 | * equal to 1000000000. | ||
1730 | * @return If the call was successful, zero; otherwise, a negative error | ||
1731 | * code. | ||
1732 | */ | ||
1733 | extern int gxio_mpipe_adjust_timestamp(gxio_mpipe_context_t *context, | ||
1734 | int64_t delta); | ||
1735 | |||
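For example, a clock-servo style user of these calls might read the mPIPE clock and then apply a small signed correction, roughly as sketched below; the 1000000000 ns bound on the adjustment comes from the description above.

/* Sketch: report the current mPIPE time, then slew it by offset_ns. */
static int nudge_mpipe_clock(gxio_mpipe_context_t *context, int64_t offset_ns)
{
	struct timespec ts;
	int rc;

	rc = gxio_mpipe_get_timestamp(context, &ts);
	if (rc != 0)
		return rc;

	pr_info("mPIPE time: %ld.%09ld\n", ts.tv_sec, ts.tv_nsec);

	/* |offset_ns| must not exceed 1000000000, per the constraint above. */
	return gxio_mpipe_adjust_timestamp(context, offset_ns);
}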
1736 | #endif /* !_GXIO_MPIPE_H_ */ | ||
diff --git a/arch/tile/include/gxio/trio.h b/arch/tile/include/gxio/trio.h new file mode 100644 index 000000000000..77b80cdd46d8 --- /dev/null +++ b/arch/tile/include/gxio/trio.h | |||
@@ -0,0 +1,298 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* | ||
16 | * | ||
17 | * An API for allocating, configuring, and manipulating TRIO hardware | ||
18 | * resources | ||
19 | */ | ||
20 | |||
21 | /* | ||
22 | * | ||
23 | * The TILE-Gx TRIO shim provides connections to external devices via | ||
24 | * PCIe or other transaction IO standards. The gxio_trio_ API, | ||
25 | * declared in <gxio/trio.h>, allows applications to allocate and | ||
26 | * configure TRIO IO resources like DMA command rings, memory map | ||
27 | * windows, and device interrupts. The following sections introduce | ||
28 | * the various components of the API. We strongly recommend reading | ||
29 | * the TRIO section of the IO Device Guide (UG404) before working with | ||
30 | * this API. | ||
31 | * | ||
32 | * @section trio__ingress TRIO Ingress Hardware Resources | ||
33 | * | ||
34 | * The TRIO ingress hardware is responsible for examining incoming | ||
35 | * PCIe or StreamIO packets and choosing a processing mechanism based | ||
36 | * on the packets' bus address. The gxio_trio_ API can be used to | ||
37 | * configure different handlers for different ranges of bus address | ||
38 | * space. The user can configure "mapped memory" and "scatter queue" | ||
39 | * regions to match incoming packets within 4kB-aligned ranges of bus | ||
40 | * addresses. Each range specifies a different set of mapping | ||
41 | * parameters to be applied when handling the ingress packet. The | ||
42 | * following sections describe how to work with MapMem and scatter | ||
43 | * queue regions. | ||
44 | * | ||
45 | * @subsection trio__mapmem TRIO MapMem Regions | ||
46 | * | ||
47 | * TRIO mapped memory (or MapMem) regions allow the user to map | ||
48 | * incoming read and write requests directly to the application's | ||
49 | * memory space. MapMem regions are allocated via | ||
50 | * gxio_trio_alloc_memory_maps(). Given an integer MapMem number, | ||
51 | * applications can use gxio_trio_init_memory_map() to specify the | ||
52 | * range of bus addresses that will match the region and the range of | ||
53 | * virtual addresses to which those packets will be applied. | ||
54 | * | ||
55 | * As with many other gxio APIs, the programmer must be sure to | ||
56 | * register memory pages that will be used with MapMem regions. Pages | ||
57 | * can be registered with TRIO by allocating an ASID (address space | ||
58 | * identifier) and then using gxio_trio_register_page() to register up to | ||
59 | * 16 pages with the hardware. The initialization functions for | ||
60 | * resources that require registered memory (MapMem, scatter queues, | ||
61 | * push DMA, and pull DMA) then take an 'asid' parameter in order to | ||
62 | * configure which set of registered pages is used by each resource. | ||
63 | * | ||
64 | * @subsection trio__scatter_queue TRIO Scatter Queues | ||
65 | * | ||
66 | * The TRIO shim's scatter queue regions allow users to dynamically | ||
67 | * map buffers from a large address space into a small range of bus | ||
68 | * addresses. This is particularly helpful for PCIe endpoint devices, | ||
69 | * where the host generally limits the size of BARs to tens of | ||
70 | * megabytes. | ||
71 | * | ||
72 | * Each scatter queue consists of a memory map region, a queue of | ||
73 | * tile-side buffer VAs to be mapped to that region, and a bus-mapped | ||
74 | * "doorbell" register that the remote endpoint can write to trigger a | ||
75 | * dequeue of the current buffer VA, thus swapping in a new buffer. | ||
76 | * The VAs pushed onto a scatter queue must be 4kB aligned, so | ||
77 | * applications may need to use higher-level protocols to inform | ||
78 | * remote entities that they should apply some additional, sub-4kB | ||
79 | * offset when reading or writing the scatter queue region. For more | ||
80 | * information, see the IO Device Guide (UG404). | ||
81 | * | ||
82 | * @section trio__egress TRIO Egress Hardware Resources | ||
83 | * | ||
84 | * The TRIO shim supports two mechanisms for egress packet generation: | ||
85 | * programmed IO (PIO) and push/pull DMA. PIO allows applications to | ||
86 | * create MMIO mappings for PCIe or StreamIO address space, such that | ||
87 | * the application can generate word-sized read or write transactions | ||
88 | * by issuing load or store instructions. Push and pull DMA are tuned | ||
89 | * for larger transactions; they use specialized hardware engines to | ||
90 | * transfer large blocks of data at line rate. | ||
91 | * | ||
92 | * @subsection trio__pio TRIO Programmed IO | ||
93 | * | ||
94 | * Programmed IO allows applications to create MMIO mappings for PCIe | ||
95 | * or StreamIO address space. The hardware PIO regions support access | ||
96 | * to PCIe configuration, IO, and memory space, but the gxio_trio API | ||
97 | * only supports memory space accesses. PIO regions are allocated | ||
98 | * with gxio_trio_alloc_pio_regions() and initialized via | ||
99 | * gxio_trio_init_pio_region(). Once a region is bound to a range of | ||
100 | * bus addresses via the initialization function, the application can | ||
101 | * use gxio_trio_map_pio_region() to create MMIO mappings from its VA | ||
102 | * space onto the range of bus addresses supported by the PIO region. | ||
103 | * | ||
104 | * @subsection trio_dma TRIO Push and Pull DMA | ||
105 | * | ||
106 | * The TRIO push and pull DMA engines allow users to copy blocks of | ||
107 | * data between application memory and the bus. Push DMA generates | ||
108 | * write packets that copy from application memory to the bus and pull | ||
109 | * DMA generates read packets that copy from the bus into application | ||
110 | * memory. The DMA engines are managed via an API that is very | ||
111 | * similar to the mPIPE eDMA interface. For a detailed explanation of | ||
112 | * the eDMA queue API, see @ref gxio_mpipe_wrappers. | ||
113 | * | ||
114 | * Push and pull DMA queues are allocated via | ||
115 | * gxio_trio_alloc_push_dma_ring() / gxio_trio_alloc_pull_dma_ring(). | ||
116 | * Once allocated, users generally use a ::gxio_trio_dma_queue_t | ||
117 | * object to manage the queue, providing easy wrappers for reserving | ||
118 | * command slots in the DMA command ring, filling those slots, and | ||
119 | * waiting for commands to complete. DMA queues can be initialized | ||
120 | * via gxio_trio_init_push_dma_queue() or | ||
121 | * gxio_trio_init_pull_dma_queue(). | ||
122 | * | ||
123 | * See @ref trio/push_dma/app.c for an example of how to use push DMA. | ||
124 | * | ||
125 | * @section trio_shortcomings Plans for Future API Revisions | ||
126 | * | ||
127 | * The simulation framework is incomplete. Future features include: | ||
128 | * | ||
129 | * - Support for reset and deallocation of resources. | ||
130 | * | ||
131 | * - Support for pull DMA. | ||
132 | * | ||
133 | * - Support for interrupt regions and user-space interrupt delivery. | ||
134 | * | ||
135 | * - Support for getting BAR mappings and reserving regions of BAR | ||
136 | * address space. | ||
137 | */ | ||
138 | #ifndef _GXIO_TRIO_H_ | ||
139 | #define _GXIO_TRIO_H_ | ||
140 | |||
141 | #include <linux/types.h> | ||
142 | |||
143 | #include "common.h" | ||
144 | #include "dma_queue.h" | ||
145 | |||
146 | #include <arch/trio_constants.h> | ||
147 | #include <arch/trio.h> | ||
148 | #include <arch/trio_pcie_intfc.h> | ||
149 | #include <arch/trio_pcie_rc.h> | ||
150 | #include <arch/trio_shm.h> | ||
151 | #include <hv/drv_trio_intf.h> | ||
152 | #include <hv/iorpc.h> | ||
153 | |||
154 | /* A context object used to manage TRIO hardware resources. */ | ||
155 | typedef struct { | ||
156 | |||
157 | /* File descriptor for calling up to Linux (and thus the HV). */ | ||
158 | int fd; | ||
159 | |||
160 | /* The VA at which the MAC MMIO registers are mapped. */ | ||
161 | char *mmio_base_mac; | ||
162 | |||
163 | /* The VA at which the PIO config space is mapped for each PCIe MAC. | ||
164 | Gx36 has max 3 PCIe MACs per TRIO shim. */ | ||
165 | char *mmio_base_pio_cfg[TILEGX_TRIO_PCIES]; | ||
166 | |||
167 | #ifdef USE_SHARED_PCIE_CONFIG_REGION | ||
168 | /* Index of the shared PIO region for PCI config access. */ | ||
169 | int pio_cfg_index; | ||
170 | #else | ||
171 | /* Index of the PIO region for PCI config access per MAC. */ | ||
172 | int pio_cfg_index[TILEGX_TRIO_PCIES]; | ||
173 | #endif | ||
174 | |||
175 | /* The VA at which the push DMA MMIO registers are mapped. */ | ||
176 | char *mmio_push_dma[TRIO_NUM_PUSH_DMA_RINGS]; | ||
177 | |||
178 | /* The VA at which the pull DMA MMIO registers are mapped. */ | ||
179 | char *mmio_pull_dma[TRIO_NUM_PUSH_DMA_RINGS]; | ||
180 | |||
181 | /* Application space ID. */ | ||
182 | unsigned int asid; | ||
183 | |||
184 | } gxio_trio_context_t; | ||
185 | |||
186 | /* Command descriptor for push or pull DMA. */ | ||
187 | typedef TRIO_DMA_DESC_t gxio_trio_dma_desc_t; | ||
188 | |||
189 | /* A convenient, thread-safe interface to an eDMA ring. */ | ||
190 | typedef struct { | ||
191 | |||
192 | /* State object for tracking head and tail pointers. */ | ||
193 | __gxio_dma_queue_t dma_queue; | ||
194 | |||
195 | /* The ring entries. */ | ||
196 | gxio_trio_dma_desc_t *dma_descs; | ||
197 | |||
198 | /* The number of entries minus one. */ | ||
199 | unsigned long mask_num_entries; | ||
200 | |||
201 | /* The log2() of the number of entries. */ | ||
202 | unsigned int log2_num_entries; | ||
203 | |||
204 | } gxio_trio_dma_queue_t; | ||
205 | |||
206 | /* Initialize a TRIO context. | ||
207 | * | ||
208 | * This function allocates a TRIO "service domain" and maps the MMIO | ||
209 | * registers into the caller's VA space. | ||
210 | * | ||
211 | * @param trio_index Which TRIO shim; Gx36 must pass 0. | ||
212 | * @param context Context object to be initialized. | ||
213 | */ | ||
214 | extern int gxio_trio_init(gxio_trio_context_t *context, | ||
215 | unsigned int trio_index); | ||
216 | |||
217 | /* This indicates that an ASID hasn't been allocated. */ | ||
218 | #define GXIO_ASID_NULL -1 | ||
219 | |||
220 | /* Ordering modes for map memory regions and scatter queue regions. */ | ||
221 | typedef enum gxio_trio_order_mode_e { | ||
222 | /* Writes are not ordered. Reads always wait for previous writes. */ | ||
223 | GXIO_TRIO_ORDER_MODE_UNORDERED = | ||
224 | TRIO_MAP_MEM_SETUP__ORDER_MODE_VAL_UNORDERED, | ||
225 | /* Both writes and reads wait for previous transactions to complete. */ | ||
226 | GXIO_TRIO_ORDER_MODE_STRICT = | ||
227 | TRIO_MAP_MEM_SETUP__ORDER_MODE_VAL_STRICT, | ||
228 | /* Writes are ordered unless the incoming packet has the | ||
229 | relaxed-ordering attributes set. */ | ||
230 | GXIO_TRIO_ORDER_MODE_OBEY_PACKET = | ||
231 | TRIO_MAP_MEM_SETUP__ORDER_MODE_VAL_REL_ORD | ||
232 | } gxio_trio_order_mode_t; | ||
233 | |||
234 | /* Initialize a memory mapping region. | ||
235 | * | ||
236 | * @param context An initialized TRIO context. | ||
237 | * @param map A memory map region allocated by gxio_trio_alloc_memory_maps(). | ||
238 | * @param target_mem VA of backing memory, should be registered via | ||
239 | * gxio_trio_register_page() and aligned to 4kB. | ||
240 | * @param target_size Length of the memory mapping, must be a multiple | ||
241 | * of 4kB. | ||
242 | * @param asid ASID to be used for Tile-side address translation. | ||
243 | * @param mac MAC number. | ||
244 | * @param bus_address Bus address at which the mapping starts. | ||
245 | * @param order_mode Memory ordering mode for this mapping. | ||
246 | * @return Zero on success, else ::GXIO_TRIO_ERR_BAD_MEMORY_MAP, | ||
247 | * ::GXIO_TRIO_ERR_BAD_ASID, or ::GXIO_TRIO_ERR_BAD_BUS_RANGE. | ||
248 | */ | ||
249 | extern int gxio_trio_init_memory_map(gxio_trio_context_t *context, | ||
250 | unsigned int map, void *target_mem, | ||
251 | size_t target_size, unsigned int asid, | ||
252 | unsigned int mac, uint64_t bus_address, | ||
253 | gxio_trio_order_mode_t order_mode); | ||
254 | |||
255 | /* Flags that can be passed to resource allocation functions. */ | ||
256 | enum gxio_trio_alloc_flags_e { | ||
257 | GXIO_TRIO_ALLOC_FIXED = HV_TRIO_ALLOC_FIXED, | ||
258 | }; | ||
259 | |||
260 | /* Flags that can be passed to memory registration functions. */ | ||
261 | enum gxio_trio_mem_flags_e { | ||
262 | /* Do not fill L3 when writing, and invalidate lines upon egress. */ | ||
263 | GXIO_TRIO_MEM_FLAG_NT_HINT = IORPC_MEM_BUFFER_FLAG_NT_HINT, | ||
264 | |||
265 | /* L3 cache fills should only populate IO cache ways. */ | ||
266 | GXIO_TRIO_MEM_FLAG_IO_PIN = IORPC_MEM_BUFFER_FLAG_IO_PIN, | ||
267 | }; | ||
268 | |||
269 | /* Flag indicating a request generator uses a special traffic | ||
270 | class. */ | ||
271 | #define GXIO_TRIO_FLAG_TRAFFIC_CLASS(N) HV_TRIO_FLAG_TC(N) | ||
272 | |||
273 | /* Flag indicating a request generator uses a virtual function | ||
274 | number. */ | ||
275 | #define GXIO_TRIO_FLAG_VFUNC(N) HV_TRIO_FLAG_VFUNC(N) | ||
276 | |||
277 | /***************************************************************** | ||
278 | * Memory Registration * | ||
279 | ******************************************************************/ | ||
280 | |||
281 | /* Allocate Application Space Identifiers (ASIDs). Each ASID can | ||
282 | * register up to 16 page translations. ASIDs are used by memory map | ||
283 | * regions, scatter queues, and DMA queues to translate application | ||
284 | * VAs into memory system PAs. | ||
285 | * | ||
286 | * @param context An initialized TRIO context. | ||
287 | * @param count Number of ASIDs required. | ||
288 | * @param first Index of first ASID if ::GXIO_TRIO_ALLOC_FIXED flag | ||
289 | * is set, otherwise ignored. | ||
290 | * @param flags Flag bits, including bits from ::gxio_trio_alloc_flags_e. | ||
291 | * @return Index of first ASID, or ::GXIO_TRIO_ERR_NO_ASID if allocation | ||
292 | * failed. | ||
293 | */ | ||
294 | extern int gxio_trio_alloc_asids(gxio_trio_context_t *context, | ||
295 | unsigned int count, unsigned int first, | ||
296 | unsigned int flags); | ||
297 | |||
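As a rough illustration of the MapMem flow described in the overview comment, the sketch below allocates an ASID and binds a 64 kB, 4 kB-aligned buffer to a bus-address window on MAC 0. The context is assumed to have been set up with gxio_trio_init(), the MapMem index is assumed to come from gxio_trio_alloc_memory_maps(), and registering the buffer's pages via gxio_trio_register_page() is mentioned but omitted, since neither of those two calls is declared in this excerpt.

/* Sketch: map one registered buffer at a fixed bus address on MAC 0. */
static int trio_map_buffer(gxio_trio_context_t *context, unsigned int map,
			   void *buf, uint64_t bus_address)
{
	int asid;

	/* One ASID is enough to cover up to 16 registered pages. */
	asid = gxio_trio_alloc_asids(context, 1, 0, 0);
	if (asid < 0)
		return asid;

	/* Here the pages backing "buf" would be registered against "asid"
	 * with gxio_trio_register_page(); that call is not part of this
	 * excerpt, so it is omitted. */

	return gxio_trio_init_memory_map(context, map, buf, 64 * 1024,
					 asid, 0 /* mac */, bus_address,
					 GXIO_TRIO_ORDER_MODE_UNORDERED);
}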
298 | #endif /* ! _GXIO_TRIO_H_ */ | ||
diff --git a/arch/tile/include/gxio/usb_host.h b/arch/tile/include/gxio/usb_host.h new file mode 100644 index 000000000000..a60a126e4565 --- /dev/null +++ b/arch/tile/include/gxio/usb_host.h | |||
@@ -0,0 +1,87 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | #ifndef _GXIO_USB_H_ | ||
15 | #define _GXIO_USB_H_ | ||
16 | |||
17 | #include "common.h" | ||
18 | |||
19 | #include <hv/drv_usb_host_intf.h> | ||
20 | #include <hv/iorpc.h> | ||
21 | |||
22 | /* | ||
23 | * | ||
24 | * An API for accessing the processor's on-chip USB host controllers. | ||
25 | */ | ||
26 | |||
27 | /* | ||
28 | * | ||
29 | * The USB shim allows access to the processor's Universal Serial Bus | ||
30 | * connections. | ||
31 | */ | ||
32 | |||
33 | /* A context object used to manage USB hardware resources. */ | ||
34 | typedef struct { | ||
35 | |||
36 | /* File descriptor for calling up to the hypervisor. */ | ||
37 | int fd; | ||
38 | |||
39 | /* The VA at which our MMIO registers are mapped. */ | ||
40 | char *mmio_base; | ||
41 | } gxio_usb_host_context_t; | ||
42 | |||
43 | /* Initialize a USB context. | ||
44 | * | ||
45 | * A properly initialized context must be obtained before any of the other | ||
46 | * gxio_usb_host routines may be used. | ||
47 | * | ||
48 | * @param context Pointer to a gxio_usb_host_context_t, which will be | ||
49 | * initialized by this routine, if it succeeds. | ||
50 | * @param usb_index Index of the USB shim to use. | ||
51 | * @param is_ehci Nonzero to use the EHCI interface; zero to use the OHCI | ||
52 | * interface. | ||
53 | * @return Zero if the context was successfully initialized, else a | ||
54 | * GXIO_ERR_xxx error code. | ||
55 | */ | ||
56 | extern int gxio_usb_host_init(gxio_usb_host_context_t * context, int usb_index, | ||
57 | int is_ehci); | ||
58 | |||
59 | /* Destroy a USB context. | ||
60 | * | ||
61 | * Once destroyed, a context may not be used with any gxio_usb_host routines | ||
62 | * other than gxio_usb_host_init(). After this routine returns, no further | ||
63 | * interrupts or signals requested on this context will be delivered. The | ||
64 | * state and configuration of the USB shim which had been attached to this | ||
65 | * context are unchanged by this operation. | ||
66 | * | ||
67 | * @param context Pointer to a gxio_usb_host_context_t. | ||
68 | * @return Zero if the context was successfully destroyed, else a | ||
69 | * GXIO_ERR_xxx error code. | ||
70 | */ | ||
71 | extern int gxio_usb_host_destroy(gxio_usb_host_context_t * context); | ||
72 | |||
73 | /* Retrieve the address of the shim's MMIO registers. | ||
74 | * | ||
75 | * @param context Pointer to a properly initialized gxio_usb_host_context_t. | ||
76 | * @return The address of the shim's MMIO registers. | ||
77 | */ | ||
78 | extern void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t * context); | ||
79 | |||
80 | /* Retrieve the length of the shim's MMIO registers. | ||
81 | * | ||
82 | * @param context Pointer to a properly initialized gxio_usb_host_context_t. | ||
83 | * @return The length of the shim's MMIO registers. | ||
84 | */ | ||
85 | extern size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t * context); | ||
86 | |||
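The three routines above are typically used together at driver setup time, roughly as in this sketch; the choice of shim index 0 and the EHCI interface is only an example.

/* Sketch: bring up USB shim 0 in EHCI mode and report its register window. */
static int usb_probe_example(void)
{
	gxio_usb_host_context_t ctx;
	int rc;

	rc = gxio_usb_host_init(&ctx, 0, 1 /* EHCI */);
	if (rc != 0)
		return rc;

	pr_info("USB MMIO at %p, length %zu\n",
		gxio_usb_host_get_reg_start(&ctx),
		gxio_usb_host_get_reg_len(&ctx));

	return gxio_usb_host_destroy(&ctx);
}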
87 | #endif /* _GXIO_USB_H_ */ | ||
diff --git a/arch/tile/include/hv/drv_mpipe_intf.h b/arch/tile/include/hv/drv_mpipe_intf.h new file mode 100644 index 000000000000..6cdae3bf046e --- /dev/null +++ b/arch/tile/include/hv/drv_mpipe_intf.h | |||
@@ -0,0 +1,602 @@ | |||
1 | /* | ||
2 | * Copyright 2011 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /** | ||
16 | * Interface definitions for the mpipe driver. | ||
17 | */ | ||
18 | |||
19 | #ifndef _SYS_HV_DRV_MPIPE_INTF_H | ||
20 | #define _SYS_HV_DRV_MPIPE_INTF_H | ||
21 | |||
22 | #include <arch/mpipe.h> | ||
23 | #include <arch/mpipe_constants.h> | ||
24 | |||
25 | |||
26 | /** Number of buffer stacks (32). */ | ||
27 | #define HV_MPIPE_NUM_BUFFER_STACKS \ | ||
28 | (MPIPE_MMIO_INIT_DAT_GX36_1__BUFFER_STACK_MASK_WIDTH) | ||
29 | |||
30 | /** Number of NotifRings (256). */ | ||
31 | #define HV_MPIPE_NUM_NOTIF_RINGS (MPIPE_NUM_NOTIF_RINGS) | ||
32 | |||
33 | /** Number of NotifGroups (32). */ | ||
34 | #define HV_MPIPE_NUM_NOTIF_GROUPS (MPIPE_NUM_NOTIF_GROUPS) | ||
35 | |||
36 | /** Number of buckets (4160). */ | ||
37 | #define HV_MPIPE_NUM_BUCKETS (MPIPE_NUM_BUCKETS) | ||
38 | |||
39 | /** Number of "lo" buckets (4096). */ | ||
40 | #define HV_MPIPE_NUM_LO_BUCKETS 4096 | ||
41 | |||
42 | /** Number of "hi" buckets (64). */ | ||
43 | #define HV_MPIPE_NUM_HI_BUCKETS \ | ||
44 | (HV_MPIPE_NUM_BUCKETS - HV_MPIPE_NUM_LO_BUCKETS) | ||
45 | |||
46 | /** Number of edma rings (24). */ | ||
47 | #define HV_MPIPE_NUM_EDMA_RINGS \ | ||
48 | (MPIPE_MMIO_INIT_DAT_GX36_1__EDMA_POST_MASK_WIDTH) | ||
49 | |||
50 | |||
51 | |||
52 | |||
53 | /** A flag bit indicating a fixed resource allocation. */ | ||
54 | #define HV_MPIPE_ALLOC_FIXED 0x01 | ||
55 | |||
56 | /** Offset for the config register MMIO region. */ | ||
57 | #define HV_MPIPE_CONFIG_MMIO_OFFSET \ | ||
58 | (MPIPE_MMIO_ADDR__REGION_VAL_CFG << MPIPE_MMIO_ADDR__REGION_SHIFT) | ||
59 | |||
60 | /** Size of the config register MMIO region. */ | ||
61 | #define HV_MPIPE_CONFIG_MMIO_SIZE (64 * 1024) | ||
62 | |||
63 | /** Offset for the fast register MMIO region (IDMA, EDMA, buffer stack). */ | ||
64 | #define HV_MPIPE_FAST_MMIO_OFFSET \ | ||
65 | (MPIPE_MMIO_ADDR__REGION_VAL_IDMA << MPIPE_MMIO_ADDR__REGION_SHIFT) | ||
66 | |||
67 | /** Size of the fast register MMIO region (IDMA, EDMA, buffer stack). */ | ||
68 | #define HV_MPIPE_FAST_MMIO_SIZE \ | ||
69 | ((MPIPE_MMIO_ADDR__REGION_VAL_BSM + 1 - MPIPE_MMIO_ADDR__REGION_VAL_IDMA) \ | ||
70 | << MPIPE_MMIO_ADDR__REGION_SHIFT) | ||
71 | |||
72 | |||
73 | /* | ||
74 | * Each type of resource allocation comes in quantized chunks, where | ||
75 | * XXX_BITS is the number of chunks, and XXX_RES_PER_BIT is the number | ||
76 | * of resources in each chunk. | ||
77 | */ | ||
78 | |||
79 | /** Number of buffer stack chunks available (32). */ | ||
80 | #define HV_MPIPE_ALLOC_BUFFER_STACKS_BITS \ | ||
81 | MPIPE_MMIO_INIT_DAT_GX36_1__BUFFER_STACK_MASK_WIDTH | ||
82 | |||
83 | /** Granularity of buffer stack allocation (1). */ | ||
84 | #define HV_MPIPE_ALLOC_BUFFER_STACKS_RES_PER_BIT \ | ||
85 | (HV_MPIPE_NUM_BUFFER_STACKS / HV_MPIPE_ALLOC_BUFFER_STACKS_BITS) | ||
86 | |||
87 | /** Number of NotifRing chunks available (32). */ | ||
88 | #define HV_MPIPE_ALLOC_NOTIF_RINGS_BITS \ | ||
89 | MPIPE_MMIO_INIT_DAT_GX36_0__NOTIF_RING_MASK_WIDTH | ||
90 | |||
91 | /** Granularity of NotifRing allocation (8). */ | ||
92 | #define HV_MPIPE_ALLOC_NOTIF_RINGS_RES_PER_BIT \ | ||
93 | (HV_MPIPE_NUM_NOTIF_RINGS / HV_MPIPE_ALLOC_NOTIF_RINGS_BITS) | ||
94 | |||
95 | /** Number of NotifGroup chunks available (32). */ | ||
96 | #define HV_MPIPE_ALLOC_NOTIF_GROUPS_BITS \ | ||
97 | HV_MPIPE_NUM_NOTIF_GROUPS | ||
98 | |||
99 | /** Granularity of NotifGroup allocation (1). */ | ||
100 | #define HV_MPIPE_ALLOC_NOTIF_GROUPS_RES_PER_BIT \ | ||
101 | (HV_MPIPE_NUM_NOTIF_GROUPS / HV_MPIPE_ALLOC_NOTIF_GROUPS_BITS) | ||
102 | |||
103 | /** Number of lo bucket chunks available (16). */ | ||
104 | #define HV_MPIPE_ALLOC_LO_BUCKETS_BITS \ | ||
105 | MPIPE_MMIO_INIT_DAT_GX36_0__BUCKET_RELEASE_MASK_LO_WIDTH | ||
106 | |||
107 | /** Granularity of lo bucket allocation (256). */ | ||
108 | #define HV_MPIPE_ALLOC_LO_BUCKETS_RES_PER_BIT \ | ||
109 | (HV_MPIPE_NUM_LO_BUCKETS / HV_MPIPE_ALLOC_LO_BUCKETS_BITS) | ||
110 | |||
111 | /** Number of hi bucket chunks available (16). */ | ||
112 | #define HV_MPIPE_ALLOC_HI_BUCKETS_BITS \ | ||
113 | MPIPE_MMIO_INIT_DAT_GX36_0__BUCKET_RELEASE_MASK_HI_WIDTH | ||
114 | |||
115 | /** Granularity of hi bucket allocation (4). */ | ||
116 | #define HV_MPIPE_ALLOC_HI_BUCKETS_RES_PER_BIT \ | ||
117 | (HV_MPIPE_NUM_HI_BUCKETS / HV_MPIPE_ALLOC_HI_BUCKETS_BITS) | ||
118 | |||
119 | /** Number of eDMA ring chunks available (24). */ | ||
120 | #define HV_MPIPE_ALLOC_EDMA_RINGS_BITS \ | ||
121 | MPIPE_MMIO_INIT_DAT_GX36_1__EDMA_POST_MASK_WIDTH | ||
122 | |||
123 | /** Granularity of eDMA ring allocation (1). */ | ||
124 | #define HV_MPIPE_ALLOC_EDMA_RINGS_RES_PER_BIT \ | ||
125 | (HV_MPIPE_NUM_EDMA_RINGS / HV_MPIPE_ALLOC_EDMA_RINGS_BITS) | ||
126 | |||
127 | |||
128 | |||
129 | |||
130 | /** Bit vector encoding which NotifRings are in a NotifGroup. */ | ||
131 | typedef struct | ||
132 | { | ||
133 | /** The actual bits. */ | ||
134 | uint64_t ring_mask[4]; | ||
135 | |||
136 | } gxio_mpipe_notif_group_bits_t; | ||
137 | |||
138 | |||
139 | /** Another name for MPIPE_LBL_INIT_DAT_BSTS_TBL_t. */ | ||
140 | typedef MPIPE_LBL_INIT_DAT_BSTS_TBL_t gxio_mpipe_bucket_info_t; | ||
141 | |||
142 | |||
143 | |||
144 | /** Eight buffer stack ids. */ | ||
145 | typedef struct | ||
146 | { | ||
147 | /** The stacks. */ | ||
148 | uint8_t stacks[8]; | ||
149 | |||
150 | } gxio_mpipe_rules_stacks_t; | ||
151 | |||
152 | |||
153 | /** A destination mac address. */ | ||
154 | typedef struct | ||
155 | { | ||
156 | /** The octets. */ | ||
157 | uint8_t octets[6]; | ||
158 | |||
159 | } gxio_mpipe_rules_dmac_t; | ||
160 | |||
161 | |||
162 | /** A vlan. */ | ||
163 | typedef uint16_t gxio_mpipe_rules_vlan_t; | ||
164 | |||
165 | |||
166 | |||
167 | /** Maximum number of characters in a link name. */ | ||
168 | #define GXIO_MPIPE_LINK_NAME_LEN 32 | ||
169 | |||
170 | |||
171 | /** Structure holding a link name. Only needed, and only typedef'ed, | ||
172 | * because the IORPC stub generator only handles types which are single | ||
173 | * words coming before the parameter name. */ | ||
174 | typedef struct | ||
175 | { | ||
176 | /** The name itself. */ | ||
177 | char name[GXIO_MPIPE_LINK_NAME_LEN]; | ||
178 | } | ||
179 | _gxio_mpipe_link_name_t; | ||
180 | |||
181 | /** Maximum number of characters in a symbol name. */ | ||
182 | #define GXIO_MPIPE_SYMBOL_NAME_LEN 128 | ||
183 | |||
184 | |||
185 | /** Structure holding a symbol name. Only needed, and only typedef'ed, | ||
186 | * because the IORPC stub generator only handles types which are single | ||
187 | * words coming before the parameter name. */ | ||
188 | typedef struct | ||
189 | { | ||
190 | /** The name itself. */ | ||
191 | char name[GXIO_MPIPE_SYMBOL_NAME_LEN]; | ||
192 | } | ||
193 | _gxio_mpipe_symbol_name_t; | ||
194 | |||
195 | |||
196 | /** Structure holding a MAC address. */ | ||
197 | typedef struct | ||
198 | { | ||
199 | /** The address. */ | ||
200 | uint8_t mac[6]; | ||
201 | } | ||
202 | _gxio_mpipe_link_mac_t; | ||
203 | |||
204 | |||
205 | |||
206 | /** Request shared data permission -- that is, the ability to send and | ||
207 | * receive packets -- on the specified link. Other processes may also | ||
208 | * request shared data permission on the same link. | ||
209 | * | ||
210 | * No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA, | ||
211 | * or ::GXIO_MPIPE_LINK_EXCL_DATA may be specified in a gxio_mpipe_link_open() | ||
212 | * call. If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed. | ||
213 | */ | ||
214 | #define GXIO_MPIPE_LINK_DATA 0x00000001UL | ||
215 | |||
216 | /** Do not request data permission on the specified link. | ||
217 | * | ||
218 | * No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA, | ||
219 | * or ::GXIO_MPIPE_LINK_EXCL_DATA may be specified in a gxio_mpipe_link_open() | ||
220 | * call. If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed. | ||
221 | */ | ||
222 | #define GXIO_MPIPE_LINK_NO_DATA 0x00000002UL | ||
223 | |||
224 | /** Request exclusive data permission -- that is, the ability to send and | ||
225 | * receive packets -- on the specified link. No other processes may | ||
226 | * request data permission on this link, and if any process already has | ||
227 | * data permission on it, this open will fail. | ||
228 | * | ||
229 | * No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA, | ||
230 | * or ::GXIO_MPIPE_LINK_EXCL_DATA may be specified in a gxio_mpipe_link_open() | ||
231 | * call. If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed. | ||
232 | */ | ||
233 | #define GXIO_MPIPE_LINK_EXCL_DATA 0x00000004UL | ||
234 | |||
235 | /** Request shared stats permission -- that is, the ability to read and write | ||
236 | * registers which contain link statistics, and to get link attributes -- | ||
237 | * on the specified link. Other processes may also request shared stats | ||
238 | * permission on the same link. | ||
239 | * | ||
240 | * No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS, | ||
241 | * or ::GXIO_MPIPE_LINK_EXCL_STATS may be specified in a gxio_mpipe_link_open() | ||
242 | * call. If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed. | ||
243 | */ | ||
244 | #define GXIO_MPIPE_LINK_STATS 0x00000008UL | ||
245 | |||
246 | /** Do not request stats permission on the specified link. | ||
247 | * | ||
248 | * No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS, | ||
249 | * or ::GXIO_MPIPE_LINK_EXCL_STATS may be specified in a gxio_mpipe_link_open() | ||
250 | * call. If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed. | ||
251 | */ | ||
252 | #define GXIO_MPIPE_LINK_NO_STATS 0x00000010UL | ||
253 | |||
254 | /** Request exclusive stats permission -- that is, the ability to read and | ||
255 | * write registers which contain link statistics, and to get link | ||
256 | * attributes -- on the specified link. No other processes may request | ||
257 | * stats permission on this link, and if any process already | ||
258 | * has stats permission on it, this open will fail. | ||
259 | * | ||
260 | * Requesting exclusive stats permission is normally a very bad idea, since | ||
261 | * it prevents programs like mpipe-stat from providing information on this | ||
262 | * link. Applications should only do this if they use MAC statistics | ||
263 | * registers, and cannot tolerate any of the clear-on-read registers being | ||
264 | * reset by other statistics programs. | ||
265 | * | ||
266 | * No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS, | ||
267 | * or ::GXIO_MPIPE_LINK_EXCL_STATS may be specified in a gxio_mpipe_link_open() | ||
268 | * call. If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed. | ||
269 | */ | ||
270 | #define GXIO_MPIPE_LINK_EXCL_STATS 0x00000020UL | ||
271 | |||
272 | /** Request shared control permission -- that is, the ability to modify link | ||
273 | * attributes, and read and write MAC and MDIO registers -- on the | ||
274 | * specified link. Other processes may also request shared control | ||
275 | * permission on the same link. | ||
276 | * | ||
277 | * No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL, | ||
278 | * or ::GXIO_MPIPE_LINK_EXCL_CTL may be specified in a gxio_mpipe_link_open() | ||
279 | * call. If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed. | ||
280 | */ | ||
281 | #define GXIO_MPIPE_LINK_CTL 0x00000040UL | ||
282 | |||
283 | /** Do not request control permission on the specified link. | ||
284 | * | ||
285 | * No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL, | ||
286 | * or ::GXIO_MPIPE_LINK_EXCL_CTL may be specified in a gxio_mpipe_link_open() | ||
287 | * call. If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed. | ||
288 | */ | ||
289 | #define GXIO_MPIPE_LINK_NO_CTL 0x00000080UL | ||
290 | |||
291 | /** Request exclusive control permission -- that is, the ability to modify | ||
292 | * link attributes, and read and write MAC and MDIO registers -- on the | ||
293 | * specified link. No other processes may request control permission on | ||
294 | * this link, and if any process already has control permission on it, | ||
295 | * this open will fail. | ||
296 | * | ||
297 | * Requesting exclusive control permission is not always a good idea, since | ||
298 | * it prevents programs like mpipe-link from configuring the link. | ||
299 | * | ||
300 | * No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL, | ||
301 | * or ::GXIO_MPIPE_LINK_EXCL_CTL may be specified in a gxio_mpipe_link_open() | ||
302 | * call. If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed. | ||
303 | */ | ||
304 | #define GXIO_MPIPE_LINK_EXCL_CTL 0x00000100UL | ||
305 | |||
306 | /** Set the desired state of the link to up, allowing any speeds which are | ||
307 | * supported by the link hardware, as part of this open operation; do not | ||
308 | * change the desired state of the link when it is closed or the process | ||
309 | * exits. No more than one of ::GXIO_MPIPE_LINK_AUTO_UP, | ||
310 | * ::GXIO_MPIPE_LINK_AUTO_UPDOWN, ::GXIO_MPIPE_LINK_AUTO_DOWN, or | ||
311 | * ::GXIO_MPIPE_LINK_AUTO_NONE may be specified in a gxio_mpipe_link_open() | ||
312 | * call. If none are specified, ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed. | ||
313 | */ | ||
314 | #define GXIO_MPIPE_LINK_AUTO_UP 0x00000200UL | ||
315 | |||
316 | /** Set the desired state of the link to up, allowing any speeds which are | ||
317 | * supported by the link hardware, as part of this open operation; when the | ||
318 | * link is closed or this process exits, if no other process has the link | ||
319 | * open, set the desired state of the link to down. No more than one of | ||
320 | * ::GXIO_MPIPE_LINK_AUTO_UP, ::GXIO_MPIPE_LINK_AUTO_UPDOWN, | ||
321 | * ::GXIO_MPIPE_LINK_AUTO_DOWN, or ::GXIO_MPIPE_LINK_AUTO_NONE may be | ||
322 | * specified in a gxio_mpipe_link_open() call. If none are specified, | ||
323 | * ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed. | ||
324 | */ | ||
325 | #define GXIO_MPIPE_LINK_AUTO_UPDOWN 0x00000400UL | ||
326 | |||
327 | /** Do not change the desired state of the link as part of the open | ||
328 | * operation; when the link is closed or this process exits, if no other | ||
329 | * process has the link open, set the desired state of the link to down. | ||
330 | * No more than one of ::GXIO_MPIPE_LINK_AUTO_UP, | ||
331 | * ::GXIO_MPIPE_LINK_AUTO_UPDOWN, ::GXIO_MPIPE_LINK_AUTO_DOWN, or | ||
332 | * ::GXIO_MPIPE_LINK_AUTO_NONE may be specified in a gxio_mpipe_link_open() | ||
333 | * call. If none are specified, ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed. | ||
334 | */ | ||
335 | #define GXIO_MPIPE_LINK_AUTO_DOWN 0x00000800UL | ||
336 | |||
337 | /** Do not change the desired state of the link as part of the open | ||
338 | * operation; do not change the desired state of the link when it is | ||
339 | * closed or the process exits. No more than one of | ||
340 | * ::GXIO_MPIPE_LINK_AUTO_UP, ::GXIO_MPIPE_LINK_AUTO_UPDOWN, | ||
341 | * ::GXIO_MPIPE_LINK_AUTO_DOWN, or ::GXIO_MPIPE_LINK_AUTO_NONE may be | ||
342 | * specified in a gxio_mpipe_link_open() call. If none are specified, | ||
343 | * ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed. | ||
344 | */ | ||
345 | #define GXIO_MPIPE_LINK_AUTO_NONE 0x00001000UL | ||
346 | |||
347 | /** Request that this open call not complete until the network link is up. | ||
348 | * The process will wait as long as necessary for this to happen; | ||
349 | * applications which wish to abandon waiting for the link after a | ||
350 | * specific time period should not specify this flag when opening a link, | ||
351 | * but should instead call gxio_mpipe_link_wait() afterward. The link | ||
352 | * must be opened with stats permission. Note that this flag by itself | ||
353 | * does not change the desired link state; if other open flags or previous | ||
354 | * link state changes have not requested a desired state of up, the open | ||
355 | * call will never complete. This flag is not available to kernel | ||
356 | * clients. | ||
357 | */ | ||
358 | #define GXIO_MPIPE_LINK_WAIT 0x00002000UL | ||
359 | |||
360 | |||
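The permission and auto-state flags above are ORed together into the single flags argument of gxio_mpipe_link_open(). As a sketch, a stats-only monitoring process of the kind described earlier might open a link like this:

/* Sketch: stats-only open that neither moves packets nor touches the
 * desired link state -- suitable for an mpipe-stat style monitor. */
static int open_for_monitoring(gxio_mpipe_link_t *link,
			       gxio_mpipe_context_t *context,
			       const char *name)
{
	unsigned int flags = GXIO_MPIPE_LINK_NO_DATA |
			     GXIO_MPIPE_LINK_STATS |
			     GXIO_MPIPE_LINK_NO_CTL |
			     GXIO_MPIPE_LINK_AUTO_NONE;

	return gxio_mpipe_link_open(link, context, name, flags);
}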
361 | /* | ||
362 | * Note: link attributes must fit in 24 bits, since we use the top 8 bits | ||
363 | * of the IORPC offset word for the channel number. | ||
364 | */ | ||
365 | |||
366 | /** Determine whether jumbo frames may be received. If this attribute's | ||
367 | * value is nonzero, the MAC will accept frames of up to 10240 bytes. | ||
368 | * If the value is zero, the MAC will only accept frames of up to 1544 | ||
369 | * bytes. The default value is zero. */ | ||
370 | #define GXIO_MPIPE_LINK_RECEIVE_JUMBO 0x010000 | ||
371 | |||
372 | /** Determine whether to send pause frames on this link if the mPIPE packet | ||
373 | * FIFO is nearly full. If the value is zero, pause frames are not sent. | ||
374 | * If the value is nonzero, it is the delay value which will be sent in any | ||
375 | * pause frames which are output, in units of 512 bit times. | ||
376 | * | ||
377 | * Bear in mind that in almost all circumstances, the mPIPE packet FIFO | ||
378 | * will never fill up, since mPIPE will empty it as fast as or faster than | ||
379 | * the incoming data rate, by either delivering or dropping packets. The | ||
380 | * only situation in which this is not true is if the memory and cache | ||
381 | * subsystem is extremely heavily loaded, and mPIPE cannot perform DMA of | ||
382 | * packet data to memory in a timely fashion. In particular, pause frames | ||
383 | * will <em>not</em> be sent if packets cannot be delivered because | ||
384 | * NotifRings are full, buckets are full, or buffers are not available in | ||
385 | * a buffer stack. */ | ||
386 | #define GXIO_MPIPE_LINK_SEND_PAUSE 0x020000 | ||
387 | |||
388 | /** Determine whether to suspend output on the receipt of pause frames. | ||
389 | * If the value is nonzero, the mPIPE shim will suspend output on the link's | ||
390 | * channel when a pause frame is received. If the value is zero, pause | ||
391 | * frames will be ignored. The default value is zero. */ | ||
392 | #define GXIO_MPIPE_LINK_RECEIVE_PAUSE 0x030000 | ||
393 | |||
394 | /** Interface MAC address. The value is a 6-byte MAC address, in the least | ||
395 | * significant 48 bits of the value; in other words, an address which would | ||
396 | * be printed as '12:34:56:78:90:AB' in IEEE 802 canonical format would | ||
397 | * be returned as 0x1234567890ab. | ||
398 | * | ||
399 | * Depending upon the overall system design, a MAC address may or may not | ||
400 | * be available for each interface. Note that the interface's MAC address | ||
401 | * does not limit the packets received on its channel, although the | ||
402 | * classifier's rules could be configured to do that. Similarly, the MAC | ||
403 | * address is not used when transmitting packets, although applications | ||
404 | * could certainly decide to use the assigned address as a source MAC | ||
405 | * address when doing so. This attribute may only be retrieved with | ||
406 | * gxio_mpipe_link_get_attr(); it may not be modified. | ||
407 | */ | ||
408 | #define GXIO_MPIPE_LINK_MAC 0x040000 | ||
409 | |||
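A sketch of the byte order described above, assuming an already-open link and a gxio_mpipe_link_get_attr() routine that returns the attribute value through an int64_t pointer (that prototype is an assumption here, not part of this header):

    int64_t mac;
    if (gxio_mpipe_link_get_attr(&link, GXIO_MPIPE_LINK_MAC, &mac) == 0) {
        uint8_t addr[6];
        int i;
        /* 0x1234567890ab unpacks to 12:34:56:78:90:ab. */
        for (i = 0; i < 6; i++)
            addr[i] = (mac >> (8 * (5 - i))) & 0xff;
    }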
410 | /** Determine whether to discard egress packets on link down. If this value | ||
411 | * is nonzero, packets sent on this link while the link is down will be | ||
412 | * discarded. If this value is zero, no packets will be sent on this link | ||
413 | * while it is down. The default value is one. */ | ||
414 | #define GXIO_MPIPE_LINK_DISCARD_IF_DOWN 0x050000 | ||
415 | |||
416 | /** Possible link state. The value is a combination of link state flags, | ||
417 | * ORed together, that indicate link modes which are actually supported by | ||
418 | * the hardware. This attribute may only be retrieved with | ||
419 | * gxio_mpipe_link_get_attr(); it may not be modified. */ | ||
420 | #define GXIO_MPIPE_LINK_POSSIBLE_STATE 0x060000 | ||
421 | |||
422 | /** Current link state. The value is a combination of link state flags, | ||
423 | * ORed together, that indicate the current state of the hardware. If the | ||
424 | * link is down, the value ANDed with ::GXIO_MPIPE_LINK_SPEED_MASK will be zero; | ||
425 | * if the link is up, the value ANDed with ::GXIO_MPIPE_LINK_SPEED_MASK will | ||
426 | * result in exactly one of the speed values, indicating the current speed. | ||
427 | * This attribute may only be retrieved with gxio_mpipe_link_get_attr(); it | ||
428 | * may not be modified. */ | ||
429 | #define GXIO_MPIPE_LINK_CURRENT_STATE 0x070000 | ||
430 | |||
431 | /** Desired link state. The value is a combination of flags, which specify | ||
432 | * the desired state for the link. With gxio_mpipe_link_set_attr(), this | ||
433 | * will, in the background, attempt to bring up the link using whichever of | ||
434 | * the requested flags are reasonable, or take down the link if the flags | ||
435 | * are zero. The actual link up or down operation may happen after this | ||
436 | * call completes. If the link state changes in the future, the system | ||
437 | * will continue to try to get back to the desired link state; for | ||
438 | * instance, if the link is brought up successfully, and then the network | ||
439 | * cable is disconnected, the link will go down. However, the desired | ||
440 | * state of the link is still up, so if the cable is reconnected, the link | ||
441 | * will be brought up again. | ||
442 | * | ||
443 | * With gxio_mpipe_link_get_attr(), this will indicate the desired state | ||
444 | * for the link, as set with a previous gxio_mpipe_link_set_attr() call, | ||
445 | * or implicitly by a gxio_mpipe_link_open() or link close operation. | ||
446 | * This may not reflect the current state of the link; to get that, use | ||
447 | * ::GXIO_MPIPE_LINK_CURRENT_STATE. | ||
448 | */ | ||
449 | #define GXIO_MPIPE_LINK_DESIRED_STATE 0x080000 | ||
450 | |||
451 | |||
452 | |||
453 | /** Link can run, should run, or is running at 10 Mbps. */ | ||
454 | #define GXIO_MPIPE_LINK_10M 0x0000000000000001UL | ||
455 | |||
456 | /** Link can run, should run, or is running at 100 Mbps. */ | ||
457 | #define GXIO_MPIPE_LINK_100M 0x0000000000000002UL | ||
458 | |||
459 | /** Link can run, should run, or is running at 1 Gbps. */ | ||
460 | #define GXIO_MPIPE_LINK_1G 0x0000000000000004UL | ||
461 | |||
462 | /** Link can run, should run, or is running at 10 Gbps. */ | ||
463 | #define GXIO_MPIPE_LINK_10G 0x0000000000000008UL | ||
464 | |||
465 | /** Link can run, should run, or is running at 20 Gbps. */ | ||
466 | #define GXIO_MPIPE_LINK_20G 0x0000000000000010UL | ||
467 | |||
468 | /** Link can run, should run, or is running at 25 Gbps. */ | ||
469 | #define GXIO_MPIPE_LINK_25G 0x0000000000000020UL | ||
470 | |||
471 | /** Link can run, should run, or is running at 50 Gbps. */ | ||
472 | #define GXIO_MPIPE_LINK_50G 0x0000000000000040UL | ||
473 | |||
474 | /** Link should run at the highest speed supported by the link and by | ||
475 | * the device connected to the link. Only usable as a value for | ||
476 | * the link's desired state; never returned as a value for the current | ||
477 | * or possible states. */ | ||
478 | #define GXIO_MPIPE_LINK_ANYSPEED 0x0000000000000800UL | ||
479 | |||
480 | /** All legal link speeds. This value is provided for use in extracting | ||
481 | * the speed-related subset of the link state flags; it is not intended | ||
482 | * to be set directly as a value for one of the GXIO_MPIPE_LINK_xxx_STATE | ||
483 | * attributes. A link is up or is requested to be up if its current or | ||
484 | * desired state, respectively, ANDed with this value, is nonzero. */ | ||
485 | #define GXIO_MPIPE_LINK_SPEED_MASK 0x0000000000000FFFUL | ||
486 | |||
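A minimal sketch of reading the current state and classifying it with the mask above (same assumed gxio_mpipe_link_get_attr() prototype and already-open link as in the earlier sketch):

    int64_t state;
    if (gxio_mpipe_link_get_attr(&link, GXIO_MPIPE_LINK_CURRENT_STATE,
                                 &state) == 0) {
        uint64_t speed = state & GXIO_MPIPE_LINK_SPEED_MASK;
        if (speed == 0)
            pr_info("link is down\n");
        else if (speed == GXIO_MPIPE_LINK_10G)
            pr_info("link is up at 10 Gbps\n");
    }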
487 | /** Link can run, should run, or is running in MAC loopback mode. This | ||
488 | * loops transmitted packets back to the receiver, inside the Tile | ||
489 | * Processor. */ | ||
490 | #define GXIO_MPIPE_LINK_LOOP_MAC 0x0000000000001000UL | ||
491 | |||
492 | /** Link can run, should run, or is running in PHY loopback mode. This | ||
493 | * loops transmitted packets back to the receiver, inside the external | ||
494 | * PHY chip. */ | ||
495 | #define GXIO_MPIPE_LINK_LOOP_PHY 0x0000000000002000UL | ||
496 | |||
497 | /** Link can run, should run, or is running in external loopback mode. | ||
498 | * This requires that an external loopback plug be installed on the | ||
499 | * Ethernet port. Note that only some links require that this be | ||
500 | * configured via the gxio_mpipe_link routines; other links can do | ||
501 | * external loopback with the plug and no special configuration. */ | ||
502 | #define GXIO_MPIPE_LINK_LOOP_EXT 0x0000000000004000UL | ||
503 | |||
504 | /** All legal loopback types. */ | ||
505 | #define GXIO_MPIPE_LINK_LOOP_MASK 0x000000000000F000UL | ||
506 | |||
507 | /** Link can run, should run, or is running in full-duplex mode. | ||
508 | * If neither ::GXIO_MPIPE_LINK_FDX nor ::GXIO_MPIPE_LINK_HDX are | ||
509 | * specified in a set of desired state flags, both are assumed. */ | ||
510 | #define GXIO_MPIPE_LINK_FDX 0x0000000000010000UL | ||
511 | |||
512 | /** Link can run, should run, or is running in half-duplex mode. | ||
513 | * If neither ::GXIO_MPIPE_LINK_FDX nor ::GXIO_MPIPE_LINK_HDX are | ||
514 | * specified in a set of desired state flags, both are assumed. */ | ||
515 | #define GXIO_MPIPE_LINK_HDX 0x0000000000020000UL | ||
516 | |||
517 | |||
518 | /** An individual rule. */ | ||
519 | typedef struct | ||
520 | { | ||
521 | /** The total size. */ | ||
522 | uint16_t size; | ||
523 | |||
524 | /** The priority. */ | ||
525 | int16_t priority; | ||
526 | |||
527 | /** The "headroom" in each buffer. */ | ||
528 | uint8_t headroom; | ||
529 | |||
530 | /** The "tailroom" in each buffer. */ | ||
531 | uint8_t tailroom; | ||
532 | |||
533 | /** The "capacity" of the largest buffer. */ | ||
534 | uint16_t capacity; | ||
535 | |||
536 | /** The mask for converting a flow hash into a bucket. */ | ||
537 | uint16_t bucket_mask; | ||
538 | |||
539 | /** The offset for converting a flow hash into a bucket. */ | ||
540 | uint16_t bucket_first; | ||
541 | |||
542 | /** The buffer stack ids. */ | ||
543 | gxio_mpipe_rules_stacks_t stacks; | ||
544 | |||
545 | /** The actual channels. */ | ||
546 | uint32_t channel_bits; | ||
547 | |||
548 | /** The number of dmacs. */ | ||
549 | uint16_t num_dmacs; | ||
550 | |||
551 | /** The number of vlans. */ | ||
552 | uint16_t num_vlans; | ||
553 | |||
554 | /** The actual dmacs and vlans. */ | ||
555 | uint8_t dmacs_and_vlans[]; | ||
556 | |||
557 | } gxio_mpipe_rules_rule_t; | ||
558 | |||
559 | |||
560 | /** A list of classifier rules. */ | ||
561 | typedef struct | ||
562 | { | ||
563 | /** The offset to the end of the current rule. */ | ||
564 | uint16_t tail; | ||
565 | |||
566 | /** The offset to the start of the current rule. */ | ||
567 | uint16_t head; | ||
568 | |||
569 | /** The actual rules. */ | ||
570 | uint8_t rules[4096 - 4]; | ||
571 | |||
572 | } gxio_mpipe_rules_list_t; | ||
573 | |||
574 | |||
575 | |||
576 | |||
577 | /** mPIPE statistics structure. These counters include all relevant | ||
578 | * events occurring on all links within the mPIPE shim. */ | ||
579 | typedef struct | ||
580 | { | ||
581 | /** Number of ingress packets dropped for any reason. */ | ||
582 | uint64_t ingress_drops; | ||
583 | /** Number of ingress packets dropped because a buffer stack was empty. */ | ||
584 | uint64_t ingress_drops_no_buf; | ||
585 | /** Number of ingress packets dropped or truncated due to lack of space in | ||
586 | * the iPkt buffer. */ | ||
587 | uint64_t ingress_drops_ipkt; | ||
588 | /** Number of ingress packets dropped by the classifier or load balancer */ | ||
589 | uint64_t ingress_drops_cls_lb; | ||
590 | /** Total number of ingress packets. */ | ||
591 | uint64_t ingress_packets; | ||
592 | /** Total number of egress packets. */ | ||
593 | uint64_t egress_packets; | ||
594 | /** Total number of ingress bytes. */ | ||
595 | uint64_t ingress_bytes; | ||
596 | /** Total number of egress bytes. */ | ||
597 | uint64_t egress_bytes; | ||
598 | } | ||
599 | gxio_mpipe_stats_t; | ||
600 | |||
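A short sketch of how these counters might be consumed; how the structure gets populated (for instance by an mPIPE "get stats" IORPC call) is outside this header and is only assumed here.

    gxio_mpipe_stats_t stats = { 0 };   /* assumed to be filled in by the driver */
    if (stats.ingress_packets || stats.ingress_drops)
        pr_info("mpipe: %llu ingress packets, %llu dropped (%llu for lack of buffers)\n",
                (unsigned long long)stats.ingress_packets,
                (unsigned long long)stats.ingress_drops,
                (unsigned long long)stats.ingress_drops_no_buf);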
601 | |||
602 | #endif /* _SYS_HV_DRV_MPIPE_INTF_H */ | ||
diff --git a/arch/tile/include/hv/drv_trio_intf.h b/arch/tile/include/hv/drv_trio_intf.h new file mode 100644 index 000000000000..ef9f3f52ee27 --- /dev/null +++ b/arch/tile/include/hv/drv_trio_intf.h | |||
@@ -0,0 +1,195 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /** | ||
16 | * Interface definitions for the trio driver. | ||
17 | */ | ||
18 | |||
19 | #ifndef _SYS_HV_DRV_TRIO_INTF_H | ||
20 | #define _SYS_HV_DRV_TRIO_INTF_H | ||
21 | |||
22 | #include <arch/trio.h> | ||
23 | |||
24 | /** The vendor ID for all Tilera processors. */ | ||
25 | #define TILERA_VENDOR_ID 0x1a41 | ||
26 | |||
27 | /** The device ID for the Gx36 processor. */ | ||
28 | #define TILERA_GX36_DEV_ID 0x0200 | ||
29 | |||
30 | /** Device ID for our internal bridge when running as RC. */ | ||
31 | #define TILERA_GX36_RC_DEV_ID 0x2000 | ||
32 | |||
33 | /** Maximum number of TRIO interfaces. */ | ||
34 | #define TILEGX_NUM_TRIO 2 | ||
35 | |||
36 | /** Gx36 has max 3 PCIe MACs per TRIO interface. */ | ||
37 | #define TILEGX_TRIO_PCIES 3 | ||
38 | |||
39 | /** Specify port properties for a PCIe MAC. */ | ||
40 | struct pcie_port_property | ||
41 | { | ||
42 | /** If true, the link can be configured in PCIe root complex mode. */ | ||
43 | uint8_t allow_rc: 1; | ||
44 | |||
45 | /** If true, the link can be configured in PCIe endpoint mode. */ | ||
46 | uint8_t allow_ep: 1; | ||
47 | |||
48 | /** If true, the link can be configured in StreamIO mode. */ | ||
49 | uint8_t allow_sio: 1; | ||
50 | |||
51 | /** If true, the link is allowed to support 1-lane operation. Software | ||
52 | * will not consider it an error if the link comes up as a x1 link. */ | ||
53 | uint8_t allow_x1: 1; | ||
54 | |||
55 | /** If true, the link is allowed to support 2-lane operation. Software | ||
56 | * will not consider it an error if the link comes up as a x2 link. */ | ||
57 | uint8_t allow_x2: 1; | ||
58 | |||
59 | /** If true, the link is allowed to support 4-lane operation. Software | ||
60 | * will not consider it an error if the link comes up as a x4 link. */ | ||
61 | uint8_t allow_x4: 1; | ||
62 | |||
63 | /** If true, the link is allowed to support 8-lane operation. Software | ||
64 | * will not consider it an error if the link comes up as a x8 link. */ | ||
65 | uint8_t allow_x8: 1; | ||
66 | |||
67 | /** Reserved. */ | ||
68 | uint8_t reserved: 1; | ||
69 | |||
70 | }; | ||
71 | |||
72 | /** Selects which resource type a char stream interrupt configuration applies to. */ | ||
73 | typedef enum pcie_stream_intr_config_sel_e | ||
74 | { | ||
75 | /** Interrupt configuration for memory map regions. */ | ||
76 | MEM_MAP_SEL, | ||
77 | |||
78 | /** Interrupt configuration for push DMAs. */ | ||
79 | PUSH_DMA_SEL, | ||
80 | |||
81 | /** Interrupt configuration for pull DMAs. */ | ||
82 | PULL_DMA_SEL, | ||
83 | } | ||
84 | pcie_stream_intr_config_sel_t; | ||
85 | |||
86 | |||
87 | /** The mmap file offset (PA) of the TRIO config region. */ | ||
88 | #define HV_TRIO_CONFIG_OFFSET \ | ||
89 | ((unsigned long long)TRIO_MMIO_ADDRESS_SPACE__REGION_VAL_CFG << \ | ||
90 | TRIO_MMIO_ADDRESS_SPACE__REGION_SHIFT) | ||
91 | |||
92 | /** The maximum size of the TRIO config region. */ | ||
93 | #define HV_TRIO_CONFIG_SIZE \ | ||
94 | (1ULL << TRIO_CFG_REGION_ADDR__REGION_SHIFT) | ||
95 | |||
96 | /** Size of the config region mapped into the client. We can't use | ||
97 | * TRIO_MMIO_ADDRESS_SPACE__OFFSET_WIDTH because that | ||
98 | * would require the kernel to allocate 4GB of VA space | ||
99 | * from the VMALLOC region, which has a total range | ||
100 | * of only 4GB. | ||
101 | */ | ||
102 | #define HV_TRIO_CONFIG_IOREMAP_SIZE \ | ||
103 | ((uint64_t) 1 << TRIO_CFG_REGION_ADDR__PROT_SHIFT) | ||
104 | |||
105 | /** The mmap file offset (PA) of a scatter queue region. */ | ||
106 | #define HV_TRIO_SQ_OFFSET(queue) \ | ||
107 | (((unsigned long long)TRIO_MMIO_ADDRESS_SPACE__REGION_VAL_MAP_SQ << \ | ||
108 | TRIO_MMIO_ADDRESS_SPACE__REGION_SHIFT) | \ | ||
109 | ((queue) << TRIO_MAP_SQ_REGION_ADDR__SQ_SEL_SHIFT)) | ||
110 | |||
111 | /** The maximum size of a scatter queue region. */ | ||
112 | #define HV_TRIO_SQ_SIZE \ | ||
113 | (1ULL << TRIO_MAP_SQ_REGION_ADDR__SQ_SEL_SHIFT) | ||
114 | |||
115 | |||
116 | /** The "hardware MMIO region" of the first PIO region. */ | ||
117 | #define HV_TRIO_FIRST_PIO_REGION 8 | ||
118 | |||
119 | /** The mmap file offset (PA) of a PIO region. */ | ||
120 | #define HV_TRIO_PIO_OFFSET(region) \ | ||
121 | (((unsigned long long)(region) + HV_TRIO_FIRST_PIO_REGION) \ | ||
122 | << TRIO_PIO_REGIONS_ADDR__REGION_SHIFT) | ||
123 | |||
124 | /** The maximum size of a PIO region. */ | ||
125 | #define HV_TRIO_PIO_SIZE (1ULL << TRIO_PIO_REGIONS_ADDR__ADDR_WIDTH) | ||
126 | |||
127 | |||
128 | /** The mmap file offset (PA) of a push DMA region. */ | ||
129 | #define HV_TRIO_PUSH_DMA_OFFSET(ring) \ | ||
130 | (((unsigned long long)TRIO_MMIO_ADDRESS_SPACE__REGION_VAL_PUSH_DMA << \ | ||
131 | TRIO_MMIO_ADDRESS_SPACE__REGION_SHIFT) | \ | ||
132 | ((ring) << TRIO_PUSH_DMA_REGION_ADDR__RING_SEL_SHIFT)) | ||
133 | |||
134 | /** The mmap file offset (PA) of a pull DMA region. */ | ||
135 | #define HV_TRIO_PULL_DMA_OFFSET(ring) \ | ||
136 | (((unsigned long long)TRIO_MMIO_ADDRESS_SPACE__REGION_VAL_PULL_DMA << \ | ||
137 | TRIO_MMIO_ADDRESS_SPACE__REGION_SHIFT) | \ | ||
138 | ((ring) << TRIO_PULL_DMA_REGION_ADDR__RING_SEL_SHIFT)) | ||
139 | |||
140 | /** The maximum size of a DMA region. */ | ||
141 | #define HV_TRIO_DMA_REGION_SIZE \ | ||
142 | (1ULL << TRIO_PUSH_DMA_REGION_ADDR__RING_SEL_SHIFT) | ||
143 | |||
144 | |||
145 | /** The mmap file offset (PA) of a Mem-Map interrupt region. */ | ||
146 | #define HV_TRIO_MEM_MAP_INTR_OFFSET(map) \ | ||
147 | (((unsigned long long)TRIO_MMIO_ADDRESS_SPACE__REGION_VAL_MAP_MEM << \ | ||
148 | TRIO_MMIO_ADDRESS_SPACE__REGION_SHIFT) | \ | ||
149 | ((map) << TRIO_MAP_MEM_REGION_ADDR__MAP_SEL_SHIFT)) | ||
150 | |||
151 | /** The maximum size of a Mem-Map interrupt region. */ | ||
152 | #define HV_TRIO_MEM_MAP_INTR_SIZE \ | ||
153 | (1ULL << TRIO_MAP_MEM_REGION_ADDR__MAP_SEL_SHIFT) | ||
154 | |||
155 | |||
156 | /** A flag bit indicating a fixed resource allocation. */ | ||
157 | #define HV_TRIO_ALLOC_FIXED 0x01 | ||
158 | |||
159 | /** TRIO requires that all mappings have 4kB aligned start addresses. */ | ||
160 | #define HV_TRIO_PAGE_SHIFT 12 | ||
161 | |||
162 | /** TRIO requires that all mappings have 4kB aligned start addresses. */ | ||
163 | #define HV_TRIO_PAGE_SIZE (1ull << HV_TRIO_PAGE_SHIFT) | ||
164 | |||
165 | |||
166 | /* Specify all PCIe port properties for a TRIO. */ | ||
167 | struct pcie_trio_ports_property | ||
168 | { | ||
169 | struct pcie_port_property ports[TILEGX_TRIO_PCIES]; | ||
170 | }; | ||
171 | |||
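A hedged sketch of scanning the per-MAC properties for one TRIO; how the structure is obtained from the hypervisor (typically via an IORPC query) is assumed rather than shown.

    struct pcie_trio_ports_property props;   /* assumed filled in by an HV query */
    int mac;
    for (mac = 0; mac < TILEGX_TRIO_PCIES; mac++) {
        if (props.ports[mac].allow_rc)
            pr_info("TRIO MAC %d may be configured as a PCIe root complex\n", mac);
    }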
172 | /* Flags indicating traffic class. */ | ||
173 | #define HV_TRIO_FLAG_TC_SHIFT 4 | ||
174 | #define HV_TRIO_FLAG_TC_RMASK 0xf | ||
175 | #define HV_TRIO_FLAG_TC(N) \ | ||
176 | ((((N) & HV_TRIO_FLAG_TC_RMASK) + 1) << HV_TRIO_FLAG_TC_SHIFT) | ||
177 | |||
178 | /* Flags indicating virtual functions. */ | ||
179 | #define HV_TRIO_FLAG_VFUNC_SHIFT 8 | ||
180 | #define HV_TRIO_FLAG_VFUNC_RMASK 0xff | ||
181 | #define HV_TRIO_FLAG_VFUNC(N) \ | ||
182 | ((((N) & HV_TRIO_FLAG_VFUNC_RMASK) + 1) << HV_TRIO_FLAG_VFUNC_SHIFT) | ||
183 | |||
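A worked example of the encodings above; the "+ 1" lets an all-zero flags word mean "no traffic class / no virtual function specified" (an interpretation suggested by the encoding, not stated explicitly in this header):

    /* HV_TRIO_FLAG_TC(3)    == ((3 & 0xf) + 1)  << 4 == 0x040 */
    /* HV_TRIO_FLAG_VFUNC(0) == ((0 & 0xff) + 1) << 8 == 0x100 */
    unsigned int flags = HV_TRIO_FLAG_TC(3) | HV_TRIO_FLAG_VFUNC(0);   /* 0x140 */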
184 | |||
185 | /* Flag indicating an ordered PIO region. */ | ||
186 | #define HV_TRIO_PIO_FLAG_ORDERED (1 << 16) | ||
187 | |||
188 | /* Flags indicating special types of PIO regions. */ | ||
189 | #define HV_TRIO_PIO_FLAG_SPACE_SHIFT 17 | ||
190 | #define HV_TRIO_PIO_FLAG_SPACE_MASK (0x3 << HV_TRIO_PIO_FLAG_SPACE_SHIFT) | ||
191 | #define HV_TRIO_PIO_FLAG_CONFIG_SPACE (0x1 << HV_TRIO_PIO_FLAG_SPACE_SHIFT) | ||
192 | #define HV_TRIO_PIO_FLAG_IO_SPACE (0x2 << HV_TRIO_PIO_FLAG_SPACE_SHIFT) | ||
193 | |||
194 | |||
195 | #endif /* _SYS_HV_DRV_TRIO_INTF_H */ | ||
diff --git a/arch/tile/include/hv/drv_usb_host_intf.h b/arch/tile/include/hv/drv_usb_host_intf.h new file mode 100644 index 000000000000..24ce774a3f1d --- /dev/null +++ b/arch/tile/include/hv/drv_usb_host_intf.h | |||
@@ -0,0 +1,39 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /** | ||
16 | * Interface definitions for the USB host driver. | ||
17 | */ | ||
18 | |||
19 | #ifndef _SYS_HV_DRV_USB_HOST_INTF_H | ||
20 | #define _SYS_HV_DRV_USB_HOST_INTF_H | ||
21 | |||
22 | #include <arch/usb_host.h> | ||
23 | |||
24 | |||
25 | /** Offset for the EHCI register MMIO region. */ | ||
26 | #define HV_USB_HOST_MMIO_OFFSET_EHCI ((uint64_t) USB_HOST_HCCAPBASE_REG) | ||
27 | |||
28 | /** Offset for the OHCI register MMIO region. */ | ||
29 | #define HV_USB_HOST_MMIO_OFFSET_OHCI ((uint64_t) USB_HOST_OHCD_HC_REVISION_REG) | ||
30 | |||
31 | /** Size of the register MMIO region. This turns out to be the same for | ||
32 | * both EHCI and OHCI. */ | ||
33 | #define HV_USB_HOST_MMIO_SIZE ((uint64_t) 0x1000) | ||
34 | |||
35 | /** The number of service domains supported by the USB host shim. */ | ||
36 | #define HV_USB_HOST_NUM_SVC_DOM 1 | ||
37 | |||
38 | |||
39 | #endif /* _SYS_HV_DRV_USB_HOST_INTF_H */ | ||
diff --git a/arch/tile/include/hv/iorpc.h b/arch/tile/include/hv/iorpc.h new file mode 100644 index 000000000000..89c72a5d9341 --- /dev/null +++ b/arch/tile/include/hv/iorpc.h | |||
@@ -0,0 +1,714 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | #ifndef _HV_IORPC_H_ | ||
15 | #define _HV_IORPC_H_ | ||
16 | |||
17 | /** | ||
18 | * | ||
19 | * Error codes and struct definitions for the IO RPC library. | ||
20 | * | ||
21 | * The hypervisor's IO RPC component provides a convenient way for | ||
22 | * driver authors to proxy system calls between user space, linux, and | ||
23 | * the hypervisor driver. The core of the system is a set of Python | ||
25 | * files that take ".idl" files as input and generate the following | ||
25 | * source code: | ||
26 | * | ||
27 | * - _rpc_call() routines for use in userspace IO libraries. These | ||
28 | * routines take an argument list specified in the .idl file, pack the | ||
29 | * arguments in to a buffer, and read or write that buffer via the | ||
30 | * Linux iorpc driver. | ||
31 | * | ||
32 | * - dispatch_read() and dispatch_write() routines that hypervisor | ||
33 | * drivers can use to implement most of their dev_pread() and | ||
34 | * dev_pwrite() methods. These routines decode the incoming parameter | ||
35 | * blob, permission check and translate parameters where appropriate, | ||
36 | * and then invoke a callback routine for whichever RPC call has | ||
37 | * arrived. The driver simply implements the set of callback | ||
38 | * routines. | ||
39 | * | ||
40 | * The IO RPC system also includes the Linux 'iorpc' driver, which | ||
41 | * proxies calls between the userspace library and the hypervisor | ||
42 | * driver. The Linux driver is almost entirely device agnostic; it | ||
43 | * watches for special flags indicating cases where a memory buffer | ||
44 | * address might need to be translated, etc. As a result, driver | ||
45 | * writers can avoid many of the problem cases related to registering | ||
46 | * hardware resources like memory pages or interrupts. However, the | ||
47 | * drivers must be careful to obey the conventions documented below in | ||
48 | * order to work properly with the generic Linux iorpc driver. | ||
49 | * | ||
50 | * @section iorpc_domains Service Domains | ||
51 | * | ||
52 | * All iorpc-based drivers must support a notion of service domains. | ||
53 | * A service domain is basically an application context - state | ||
54 | * indicating resources that are allocated to that particular app | ||
55 | * which it may access and (perhaps) other applications may not | ||
56 | * access. Drivers can support any number of service domains they | ||
57 | * choose. In some cases the design is limited by a number of service | ||
58 | * domains supported by the IO hardware; in other cases the service | ||
59 | * domains are a purely software concept and the driver chooses a | ||
60 | * maximum number of domains based on how much state memory it is | ||
61 | * willing to preallocate. | ||
62 | * | ||
63 | * For example, the mPIPE driver only supports as many service domains | ||
64 | * as are supported by the mPIPE hardware. This limitation is | ||
65 | * required because the hardware implements its own MMIO protection | ||
66 | * scheme to allow large MMIO mappings while still protecting small | ||
67 | * register ranges within the page that should only be accessed by the | ||
68 | * hypervisor. | ||
69 | * | ||
70 | * In contrast, drivers with no hardware service domain limitations | ||
71 | * (for instance the TRIO shim) can implement an arbitrary number of | ||
72 | * service domains. In these cases, each service domain is limited to | ||
73 | * a carefully restricted set of legal MMIO addresses if necessary to | ||
74 | * keep one application from corrupting another application's state. | ||
75 | * | ||
76 | * @section iorpc_conventions System Call Conventions | ||
77 | * | ||
78 | * The driver's open routine is responsible for allocating a new | ||
79 | * service domain for each hv_dev_open() call. By convention, the | ||
80 | * return value from open() should be the service domain number on | ||
81 | * success, or GXIO_ERR_NO_SVC_DOM if no more service domains are | ||
82 | * available. | ||
83 | * | ||
84 | * The implementations of hv_dev_pread() and hv_dev_pwrite() are | ||
85 | * responsible for validating the devhdl value passed up by the | ||
86 | * client. Since the device handle returned by hv_dev_open() should | ||
87 | * embed the positive service domain number, drivers should make sure | ||
88 | * that DRV_HDL2BITS(devhdl) is a legal service domain. If the client | ||
89 | * passes an illegal service domain number, the routine should return | ||
90 | * GXIO_ERR_INVAL_SVC_DOM. Once the service domain number has been | ||
91 | * validated, the driver can copy to/from the client buffer and call | ||
92 | * the dispatch_read() or dispatch_write() methods created by the RPC | ||
93 | * generator. | ||
94 | * | ||
95 | * The hv_dev_close() implementation should reset all service domain | ||
96 | * state and put the service domain back on a free list for | ||
97 | * reallocation by a future application. In most cases, this will | ||
98 | * require executing a hardware reset or drain flow and denying any | ||
99 | * MMIO regions that were created for the service domain. | ||
100 | * | ||
101 | * @section iorpc_data Special Data Types | ||
102 | * | ||
103 | * The .idl file syntax allows the creation of syscalls with special | ||
104 | * parameters that require permission checks or translations as part | ||
105 | * of the system call path. Because of limitations in the code | ||
106 | * generator, APIs are generally limited to just one of these special | ||
107 | * parameters per system call, and they are sometimes required to be | ||
108 | * the first or last parameter to the call. Special parameters | ||
109 | * include: | ||
110 | * | ||
111 | * @subsection iorpc_mem_buffer MEM_BUFFER | ||
112 | * | ||
113 | * The MEM_BUFFER() datatype allows user space to "register" memory | ||
114 | * buffers with a device. Registering memory accomplishes two tasks: | ||
115 | * Linux keeps track of all buffers that might be modified by a | ||
116 | * hardware device, and the hardware device drivers bind registered | ||
117 | * buffers to particular hardware resources like ingress NotifRings. | ||
118 | * The MEM_BUFFER() idl syntax can take extra flags like ALIGN_64KB, | ||
119 | * ALIGN_SELF_SIZE, and FLAGS indicating that memory buffers must have | ||
120 | * certain alignment or that the user should be able to pass a "memory | ||
121 | * flags" word specifying attributes like nt_hint or IO cache pinning. | ||
122 | * The parser will accept multiple MEM_BUFFER() flags. | ||
123 | * | ||
124 | * Implementations must obey the following conventions when | ||
125 | * registering memory buffers via the iorpc flow. These rules are a | ||
126 | * result of the Linux driver implementation, which needs to keep | ||
127 | * track of how many times a particular page has been registered with | ||
128 | * the hardware so that it can release the page when all those | ||
129 | * registrations are cleared. | ||
130 | * | ||
131 | * - Memory registrations that refer to a resource which has already | ||
132 | * been bound must return GXIO_ERR_ALREADY_INIT. Thus, it is an | ||
133 | * error to register memory twice without resetting (i.e. closing) the | ||
134 | * resource in between. This convention keeps the Linux driver from | ||
135 | * having to track which particular devices a page is bound to. | ||
136 | * | ||
137 | * - At present, a memory registration is only cleared when the | ||
138 | * service domain is reset. In this case, the Linux driver simply | ||
139 | * closes the HV device file handle and then decrements the reference | ||
140 | * counts of all pages that were previously registered with the | ||
141 | * device. | ||
142 | * | ||
143 | * - In the future, we may add a mechanism for unregistering memory. | ||
144 | * One possible implementation would require that the user specify | ||
145 | * which buffer is currently registered. The HV would then verify | ||
146 | * that that page was actually the one currently mapped and return | ||
147 | * success or failure to Linux, which would then only decrement the | ||
148 | * page reference count if the addresses were mapped. Another scheme | ||
149 | * might allow Linux to pass a token to the HV to be returned when the | ||
150 | * resource is unmapped. | ||
151 | * | ||
152 | * @subsection iorpc_interrupt INTERRUPT | ||
153 | * | ||
154 | * The INTERRUPT .idl datatype allows the client to bind hardware | ||
155 | * interrupts to a particular combination of IPI parameters - CPU, IPI | ||
156 | * PL, and event bit number. This data is passed via a special | ||
157 | * datatype so that the Linux driver can validate the CPU and PL and | ||
158 | * the HV generic iorpc code can translate client CPUs to real CPUs. | ||
159 | * | ||
160 | * @subsection iorpc_pollfd_setup POLLFD_SETUP | ||
161 | * | ||
162 | * The POLLFD_SETUP .idl datatype allows the client to set up hardware | ||
163 | * interrupt bindings which are received by Linux but which are made | ||
164 | * visible to user processes as state transitions on a file descriptor; | ||
165 | * this allows user processes to use Linux primitives, such as poll(), to | ||
166 | * await particular hardware events. This data is passed via a special | ||
167 | * datatype so that the Linux driver may recognize the pollable file | ||
168 | * descriptor and translate it to a set of interrupt target information, | ||
169 | * and so that the HV generic iorpc code can translate client CPUs to real | ||
170 | * CPUs. | ||
171 | * | ||
172 | * @subsection iorpc_pollfd POLLFD | ||
173 | * | ||
174 | * The POLLFD .idl datatype allows manipulation of hardware interrupt | ||
175 | * bindings set up via the POLLFD_SETUP datatype; common operations are | ||
176 | * resetting the state of the requested interrupt events, and unbinding any | ||
177 | * bound interrupts. This data is passed via a special datatype so that | ||
178 | * the Linux driver may recognize the pollable file descriptor and | ||
179 | * translate it to an interrupt identifier previously supplied by the | ||
180 | * hypervisor as the result of an earlier pollfd_setup operation. | ||
181 | * | ||
182 | * @subsection iorpc_blob BLOB | ||
183 | * | ||
184 | * The BLOB .idl datatype allows the client to write an arbitrary | ||
185 | * length string of bytes up to the hypervisor driver. This can be | ||
186 | * useful for passing up large, arbitrarily structured data like | ||
187 | * classifier programs. The iorpc stack takes care of validating the | ||
188 | * buffer VA and CPA as the data passes up to the hypervisor. Unlike | ||
189 | * MEM_BUFFER(), the buffer is not registered - Linux does not bump | ||
190 | * page refcounts and the HV driver should not reuse the buffer once | ||
191 | * the system call is complete. | ||
192 | * | ||
193 | * @section iorpc_translation Translating User Space Calls | ||
194 | * | ||
195 | * The ::iorpc_offset structure describes the formatting of the offset | ||
196 | * that is passed to pread() or pwrite() as part of the generated RPC code. | ||
197 | * When the user calls up to Linux, the rpc code fills in all the fields of | ||
198 | * the offset, including a 16-bit opcode, a 16-bit format indicator, and 32 | ||
199 | * bits of user-specified "sub-offset". The opcode indicates which syscall | ||
200 | * is being requested. The format indicates whether there is a "prefix | ||
201 | * struct" at the start of the memory buffer passed to pwrite(), and if so | ||
202 | * what data is in that prefix struct. These prefix structs are used to | ||
203 | * implement special datatypes like MEM_BUFFER() and INTERRUPT - we arrange | ||
204 | * to put data that needs translation and permission checks at the start of | ||
205 | * the buffer so that the Linux driver and generic portions of the HV iorpc | ||
206 | * code can easily access the data. The 32 bits of user-specified | ||
207 | * "sub-offset" are most useful for pread() calls where the user needs to | ||
208 | * also pass in a few bits indicating which register to read, etc. | ||
209 | * | ||
210 | * The Linux iorpc driver watches for system calls that contain prefix | ||
211 | * structs so that it can translate parameters and bump reference | ||
212 | * counts as appropriate. It does not (currently) have any knowledge | ||
213 | * of the per-device opcodes - it doesn't care what operation you're | ||
214 | * doing to mPIPE, so long as it can do all the generic book-keeping. | ||
215 | * The hv/iorpc.h header file defines all of the generic encoding bits | ||
216 | * needed to translate iorpc calls without knowing which particular | ||
217 | * opcode is being issued. | ||
218 | * | ||
219 | * @section iorpc_globals Global iorpc Calls | ||
220 | * | ||
221 | * Implementing mmap() required adding some special iorpc syscalls | ||
222 | * that are only called by the Linux driver, never by userspace. | ||
223 | * These include get_mmio_base() and check_mmio_offset(). These | ||
224 | * routines are described in globals.idl and must be included in every | ||
225 | * iorpc driver. By providing these routines in every driver, Linux's | ||
226 | * mmap implementation can easily get the PTE bits it needs and | ||
227 | * validate the PA offset without needing to know the per-device | ||
228 | * opcodes to perform those tasks. | ||
229 | * | ||
230 | * @section iorpc_kernel Supporting gxio APIs in the Kernel | ||
231 | * | ||
232 | * The iorpc code generator also supports generation of kernel code | ||
233 | * implementing the gxio APIs. This capability is currently used by | ||
234 | * the mPIPE network driver, and will likely be used by the TRIO root | ||
235 | * complex and endpoint drivers and perhaps an in-kernel crypto | ||
236 | * driver. Each driver that wants to instantiate iorpc calls in the | ||
237 | * kernel needs to generate a kernel version of the generated rpc code | ||
238 | * and (probably) copy any related gxio source files into the kernel. | ||
239 | * The mPIPE driver provides a good example of this pattern. | ||
240 | */ | ||
241 | |||
242 | #ifdef __KERNEL__ | ||
243 | #include <linux/stddef.h> | ||
244 | #else | ||
245 | #include <stddef.h> | ||
246 | #endif | ||
247 | |||
248 | #if defined(__HV__) | ||
249 | #include <hv/hypervisor.h> | ||
250 | #elif defined(__KERNEL__) | ||
251 | #include "hypervisor.h" | ||
252 | #include <linux/types.h> | ||
253 | #else | ||
254 | #include <stdint.h> | ||
255 | #endif | ||
256 | |||
257 | |||
258 | /** Code indicating translation services required within the RPC path. | ||
259 | * These indicate whether there is a translatable struct at the start | ||
260 | * of the RPC buffer and what information that struct contains. | ||
261 | */ | ||
262 | enum iorpc_format_e | ||
263 | { | ||
264 | /** No translation required, no prefix struct. */ | ||
265 | IORPC_FORMAT_NONE, | ||
266 | |||
267 | /** No translation required, no prefix struct, no access to this | ||
268 | * operation from user space. */ | ||
269 | IORPC_FORMAT_NONE_NOUSER, | ||
270 | |||
271 | /** Prefix struct contains user VA and size. */ | ||
272 | IORPC_FORMAT_USER_MEM, | ||
273 | |||
274 | /** Prefix struct contains CPA, size, and homing bits. */ | ||
275 | IORPC_FORMAT_KERNEL_MEM, | ||
276 | |||
277 | /** Prefix struct contains interrupt. */ | ||
278 | IORPC_FORMAT_KERNEL_INTERRUPT, | ||
279 | |||
280 | /** Prefix struct contains user-level interrupt. */ | ||
281 | IORPC_FORMAT_USER_INTERRUPT, | ||
282 | |||
283 | /** Prefix struct contains pollfd_setup (interrupt information). */ | ||
284 | IORPC_FORMAT_KERNEL_POLLFD_SETUP, | ||
285 | |||
286 | /** Prefix struct contains user-level pollfd_setup (file descriptor). */ | ||
287 | IORPC_FORMAT_USER_POLLFD_SETUP, | ||
288 | |||
289 | /** Prefix struct contains pollfd (interrupt cookie). */ | ||
290 | IORPC_FORMAT_KERNEL_POLLFD, | ||
291 | |||
292 | /** Prefix struct contains user-level pollfd (file descriptor). */ | ||
293 | IORPC_FORMAT_USER_POLLFD, | ||
294 | }; | ||
295 | |||
296 | |||
297 | /** Generate an opcode given format and code. */ | ||
298 | #define IORPC_OPCODE(FORMAT, CODE) (((FORMAT) << 16) | (CODE)) | ||
299 | |||
300 | /** The offset passed through the read() and write() system calls | ||
301 | combines an opcode with 32 bits of user-specified offset. */ | ||
302 | union iorpc_offset | ||
303 | { | ||
304 | #ifndef __BIG_ENDIAN__ | ||
305 | uint64_t offset; /**< All bits. */ | ||
306 | |||
307 | struct | ||
308 | { | ||
309 | uint16_t code; /**< RPC code. */ | ||
310 | uint16_t format; /**< iorpc_format_e */ | ||
311 | uint32_t sub_offset; /**< caller-specified offset. */ | ||
312 | }; | ||
313 | |||
314 | uint32_t opcode; /**< Opcode combines code & format. */ | ||
315 | #else | ||
316 | uint64_t offset; /**< All bits. */ | ||
317 | |||
318 | struct | ||
319 | { | ||
320 | uint32_t sub_offset; /**< caller-specified offset. */ | ||
321 | uint16_t format; /**< iorpc_format_e */ | ||
322 | uint16_t code; /**< RPC code. */ | ||
323 | }; | ||
324 | |||
325 | struct | ||
326 | { | ||
327 | uint32_t padding; | ||
328 | uint32_t opcode; /**< Opcode combines code & format. */ | ||
329 | }; | ||
330 | #endif | ||
331 | }; | ||
332 | |||
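A minimal sketch of composing and decomposing the offset word described in the "Translating User Space Calls" section; the RPC code 0x42 and the sub-offset are placeholders for illustration, not real per-device opcodes.

    union iorpc_offset off = { 0 };
    off.opcode = IORPC_OPCODE(IORPC_FORMAT_USER_MEM, 0x42);
    off.sub_offset = 0x10;            /* e.g. a register index for pread() */
    /* off.code is now 0x42, off.format is IORPC_FORMAT_USER_MEM, and        */
    /* off.offset is the 64-bit value passed as the pread()/pwrite() offset. */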
333 | |||
334 | /** Homing and cache hinting bits that can be used by IO devices. */ | ||
335 | struct iorpc_mem_attr | ||
336 | { | ||
337 | unsigned int lotar_x:4; /**< lotar X bits (or Gx page_mask). */ | ||
338 | unsigned int lotar_y:4; /**< lotar Y bits (or Gx page_offset). */ | ||
339 | unsigned int hfh:1; /**< Uses hash-for-home. */ | ||
340 | unsigned int nt_hint:1; /**< Non-temporal hint. */ | ||
341 | unsigned int io_pin:1; /**< Only fill 'IO' cache ways. */ | ||
342 | }; | ||
343 | |||
344 | /** Set the nt_hint bit. */ | ||
345 | #define IORPC_MEM_BUFFER_FLAG_NT_HINT (1 << 0) | ||
346 | |||
347 | /** Set the IO pin bit. */ | ||
348 | #define IORPC_MEM_BUFFER_FLAG_IO_PIN (1 << 1) | ||
349 | |||
350 | |||
351 | /** A structure used to describe memory registration. Different | ||
352 | protection levels describe memory differently, so this union | ||
353 | contains all the different possible descriptions. As a request | ||
354 | moves up the call chain, each layer translates from one | ||
355 | description format to the next. In particular, the Linux iorpc | ||
356 | driver translates user VAs into CPAs and homing parameters. */ | ||
357 | union iorpc_mem_buffer | ||
358 | { | ||
359 | struct | ||
360 | { | ||
361 | uint64_t va; /**< User virtual address. */ | ||
362 | uint64_t size; /**< Buffer size. */ | ||
363 | unsigned int flags; /**< nt_hint, IO pin. */ | ||
364 | } | ||
365 | user; /**< Buffer as described by user apps. */ | ||
366 | |||
367 | struct | ||
368 | { | ||
369 | unsigned long long cpa; /**< Client physical address. */ | ||
370 | #if defined(__KERNEL__) || defined(__HV__) | ||
371 | size_t size; /**< Buffer size. */ | ||
372 | HV_PTE pte; /**< PTE describing memory homing. */ | ||
373 | #else | ||
374 | uint64_t size; | ||
375 | uint64_t pte; | ||
376 | #endif | ||
377 | unsigned int flags; /**< nt_hint, IO pin. */ | ||
378 | } | ||
379 | kernel; /**< Buffer as described by kernel. */ | ||
380 | |||
381 | struct | ||
382 | { | ||
383 | unsigned long long pa; /**< Physical address. */ | ||
384 | size_t size; /**< Buffer size. */ | ||
385 | struct iorpc_mem_attr attr; /**< Homing and locality hint bits. */ | ||
386 | } | ||
387 | hv; /**< Buffer parameters for HV driver. */ | ||
388 | }; | ||
389 | |||
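A sketch of the user-level view of this union, as a user-space library might fill it in before the Linux iorpc driver translates it into the 'kernel' view (CPA plus PTE) and the hypervisor sees the 'hv' view; the buffer address and size are example placeholders.

    union iorpc_mem_buffer buf;
    buf.user.va = (uint64_t)(uintptr_t)ring_base;   /* assumed user mapping */
    buf.user.size = 64 * 1024;                      /* e.g. a 64 kB NotifRing */
    buf.user.flags = IORPC_MEM_BUFFER_FLAG_NT_HINT;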
390 | |||
391 | /** A structure used to describe interrupts. The format differs slightly | ||
392 | * for user and kernel interrupts. As with the mem_buffer_t, translation | ||
393 | * between the formats is done at each level. */ | ||
394 | union iorpc_interrupt | ||
395 | { | ||
396 | struct | ||
397 | { | ||
398 | int cpu; /**< CPU. */ | ||
399 | int event; /**< evt_num */ | ||
400 | } | ||
401 | user; /**< Interrupt as described by user applications. */ | ||
402 | |||
403 | struct | ||
404 | { | ||
405 | int x; /**< X coord. */ | ||
406 | int y; /**< Y coord. */ | ||
407 | int ipi; /**< int_num */ | ||
408 | int event; /**< evt_num */ | ||
409 | } | ||
410 | kernel; /**< Interrupt as described by the kernel. */ | ||
411 | |||
412 | }; | ||
413 | |||
414 | |||
415 | /** A structure used to describe interrupts used with poll(). The format | ||
416 | * differs significantly for requests from user to kernel, and kernel to | ||
417 | * hypervisor. As with the mem_buffer_t, translation between the formats | ||
418 | * is done at each level. */ | ||
419 | union iorpc_pollfd_setup | ||
420 | { | ||
421 | struct | ||
422 | { | ||
423 | int fd; /**< Pollable file descriptor. */ | ||
424 | } | ||
425 | user; /**< pollfd_setup as described by user applications. */ | ||
426 | |||
427 | struct | ||
428 | { | ||
429 | int x; /**< X coord. */ | ||
430 | int y; /**< Y coord. */ | ||
431 | int ipi; /**< int_num */ | ||
432 | int event; /**< evt_num */ | ||
433 | } | ||
434 | kernel; /**< pollfd_setup as described by the kernel. */ | ||
435 | |||
436 | }; | ||
437 | |||
438 | |||
439 | /** A structure used to describe previously set up interrupts used with | ||
440 | * poll(). The format differs significantly for requests from user to | ||
441 | * kernel, and kernel to hypervisor. As with the mem_buffer_t, translation | ||
442 | * between the formats is done at each level. */ | ||
443 | union iorpc_pollfd | ||
444 | { | ||
445 | struct | ||
446 | { | ||
447 | int fd; /**< Pollable file descriptor. */ | ||
448 | } | ||
449 | user; /**< pollfd as described by user applications. */ | ||
450 | |||
451 | struct | ||
452 | { | ||
453 | int cookie; /**< hv cookie returned by the pollfd_setup operation. */ | ||
454 | } | ||
455 | kernel; /**< pollfd as described by the kernel. */ | ||
456 | |||
457 | }; | ||
458 | |||
459 | |||
460 | /** The various iorpc devices use error codes from -1100 to -1299. | ||
461 | * | ||
462 | * This range is distinct from netio (-700 to -799), the hypervisor | ||
463 | * (-800 to -899), tilepci (-900 to -999), ilib (-1000 to -1099), | ||
464 | * gxcr (-1300 to -1399) and gxpci (-1400 to -1499). | ||
465 | */ | ||
466 | enum gxio_err_e { | ||
467 | |||
468 | /** Largest iorpc error number. */ | ||
469 | GXIO_ERR_MAX = -1101, | ||
470 | |||
471 | |||
472 | /********************************************************/ | ||
473 | /* Generic Error Codes */ | ||
474 | /********************************************************/ | ||
475 | |||
476 | /** Bad RPC opcode - possible version incompatibility. */ | ||
477 | GXIO_ERR_OPCODE = -1101, | ||
478 | |||
479 | /** Invalid parameter. */ | ||
480 | GXIO_ERR_INVAL = -1102, | ||
481 | |||
482 | /** Memory buffer did not meet alignment requirements. */ | ||
483 | GXIO_ERR_ALIGNMENT = -1103, | ||
484 | |||
485 | /** Memory buffers must be coherent and cacheable. */ | ||
486 | GXIO_ERR_COHERENCE = -1104, | ||
487 | |||
488 | /** Resource already initialized. */ | ||
489 | GXIO_ERR_ALREADY_INIT = -1105, | ||
490 | |||
491 | /** No service domains available. */ | ||
492 | GXIO_ERR_NO_SVC_DOM = -1106, | ||
493 | |||
494 | /** Illegal service domain number. */ | ||
495 | GXIO_ERR_INVAL_SVC_DOM = -1107, | ||
496 | |||
497 | /** Illegal MMIO address. */ | ||
498 | GXIO_ERR_MMIO_ADDRESS = -1108, | ||
499 | |||
500 | /** Illegal interrupt binding. */ | ||
501 | GXIO_ERR_INTERRUPT = -1109, | ||
502 | |||
503 | /** Unreasonable client memory. */ | ||
504 | GXIO_ERR_CLIENT_MEMORY = -1110, | ||
505 | |||
506 | /** No more IOTLB entries. */ | ||
507 | GXIO_ERR_IOTLB_ENTRY = -1111, | ||
508 | |||
509 | /** Invalid memory size. */ | ||
510 | GXIO_ERR_INVAL_MEMORY_SIZE = -1112, | ||
511 | |||
512 | /** Unsupported operation. */ | ||
513 | GXIO_ERR_UNSUPPORTED_OP = -1113, | ||
514 | |||
515 | /** Insufficient DMA credits. */ | ||
516 | GXIO_ERR_DMA_CREDITS = -1114, | ||
517 | |||
518 | /** Operation timed out. */ | ||
519 | GXIO_ERR_TIMEOUT = -1115, | ||
520 | |||
521 | /** No such device or object. */ | ||
522 | GXIO_ERR_NO_DEVICE = -1116, | ||
523 | |||
524 | /** Device or resource busy. */ | ||
525 | GXIO_ERR_BUSY = -1117, | ||
526 | |||
527 | /** I/O error. */ | ||
528 | GXIO_ERR_IO = -1118, | ||
529 | |||
530 | /** Permissions error. */ | ||
531 | GXIO_ERR_PERM = -1119, | ||
532 | |||
533 | |||
534 | |||
535 | /********************************************************/ | ||
536 | /* Test Device Error Codes */ | ||
537 | /********************************************************/ | ||
538 | |||
539 | /** Illegal register number. */ | ||
540 | GXIO_TEST_ERR_REG_NUMBER = -1120, | ||
541 | |||
542 | /** Illegal buffer slot. */ | ||
543 | GXIO_TEST_ERR_BUFFER_SLOT = -1121, | ||
544 | |||
545 | |||
546 | /********************************************************/ | ||
547 | /* MPIPE Error Codes */ | ||
548 | /********************************************************/ | ||
549 | |||
550 | |||
551 | /** Invalid buffer size. */ | ||
552 | GXIO_MPIPE_ERR_INVAL_BUFFER_SIZE = -1131, | ||
553 | |||
554 | /** Cannot allocate buffer stack. */ | ||
555 | GXIO_MPIPE_ERR_NO_BUFFER_STACK = -1140, | ||
556 | |||
557 | /** Invalid buffer stack number. */ | ||
558 | GXIO_MPIPE_ERR_BAD_BUFFER_STACK = -1141, | ||
559 | |||
560 | /** Cannot allocate NotifRing. */ | ||
561 | GXIO_MPIPE_ERR_NO_NOTIF_RING = -1142, | ||
562 | |||
563 | /** Invalid NotifRing number. */ | ||
564 | GXIO_MPIPE_ERR_BAD_NOTIF_RING = -1143, | ||
565 | |||
566 | /** Cannot allocate NotifGroup. */ | ||
567 | GXIO_MPIPE_ERR_NO_NOTIF_GROUP = -1144, | ||
568 | |||
569 | /** Invalid NotifGroup number. */ | ||
570 | GXIO_MPIPE_ERR_BAD_NOTIF_GROUP = -1145, | ||
571 | |||
572 | /** Cannot allocate bucket. */ | ||
573 | GXIO_MPIPE_ERR_NO_BUCKET = -1146, | ||
574 | |||
575 | /** Invalid bucket number. */ | ||
576 | GXIO_MPIPE_ERR_BAD_BUCKET = -1147, | ||
577 | |||
578 | /** Cannot allocate eDMA ring. */ | ||
579 | GXIO_MPIPE_ERR_NO_EDMA_RING = -1148, | ||
580 | |||
581 | /** Invalid eDMA ring number. */ | ||
582 | GXIO_MPIPE_ERR_BAD_EDMA_RING = -1149, | ||
583 | |||
584 | /** Invalid channel number. */ | ||
585 | GXIO_MPIPE_ERR_BAD_CHANNEL = -1150, | ||
586 | |||
587 | /** Bad configuration. */ | ||
588 | GXIO_MPIPE_ERR_BAD_CONFIG = -1151, | ||
589 | |||
590 | /** Empty iqueue. */ | ||
591 | GXIO_MPIPE_ERR_IQUEUE_EMPTY = -1152, | ||
592 | |||
593 | /** Empty rules. */ | ||
594 | GXIO_MPIPE_ERR_RULES_EMPTY = -1160, | ||
595 | |||
596 | /** Full rules. */ | ||
597 | GXIO_MPIPE_ERR_RULES_FULL = -1161, | ||
598 | |||
599 | /** Corrupt rules. */ | ||
600 | GXIO_MPIPE_ERR_RULES_CORRUPT = -1162, | ||
601 | |||
602 | /** Invalid rules. */ | ||
603 | GXIO_MPIPE_ERR_RULES_INVALID = -1163, | ||
604 | |||
605 | /** Classifier is too big. */ | ||
606 | GXIO_MPIPE_ERR_CLASSIFIER_TOO_BIG = -1170, | ||
607 | |||
608 | /** Classifier is too complex. */ | ||
609 | GXIO_MPIPE_ERR_CLASSIFIER_TOO_COMPLEX = -1171, | ||
610 | |||
611 | /** Classifier has bad header. */ | ||
612 | GXIO_MPIPE_ERR_CLASSIFIER_BAD_HEADER = -1172, | ||
613 | |||
614 | /** Classifier has bad contents. */ | ||
615 | GXIO_MPIPE_ERR_CLASSIFIER_BAD_CONTENTS = -1173, | ||
616 | |||
617 | /** Classifier encountered invalid symbol. */ | ||
618 | GXIO_MPIPE_ERR_CLASSIFIER_INVAL_SYMBOL = -1174, | ||
619 | |||
620 | /** Classifier encountered invalid bounds. */ | ||
621 | GXIO_MPIPE_ERR_CLASSIFIER_INVAL_BOUNDS = -1175, | ||
622 | |||
623 | /** Classifier encountered invalid relocation. */ | ||
624 | GXIO_MPIPE_ERR_CLASSIFIER_INVAL_RELOCATION = -1176, | ||
625 | |||
626 | /** Classifier encountered undefined symbol. */ | ||
627 | GXIO_MPIPE_ERR_CLASSIFIER_UNDEF_SYMBOL = -1177, | ||
628 | |||
629 | |||
630 | /********************************************************/ | ||
631 | /* TRIO Error Codes */ | ||
632 | /********************************************************/ | ||
633 | |||
634 | /** Cannot allocate memory map region. */ | ||
635 | GXIO_TRIO_ERR_NO_MEMORY_MAP = -1180, | ||
636 | |||
637 | /** Invalid memory map region number. */ | ||
638 | GXIO_TRIO_ERR_BAD_MEMORY_MAP = -1181, | ||
639 | |||
640 | /** Cannot allocate scatter queue. */ | ||
641 | GXIO_TRIO_ERR_NO_SCATTER_QUEUE = -1182, | ||
642 | |||
643 | /** Invalid scatter queue number. */ | ||
644 | GXIO_TRIO_ERR_BAD_SCATTER_QUEUE = -1183, | ||
645 | |||
646 | /** Cannot allocate push DMA ring. */ | ||
647 | GXIO_TRIO_ERR_NO_PUSH_DMA_RING = -1184, | ||
648 | |||
649 | /** Invalid push DMA ring index. */ | ||
650 | GXIO_TRIO_ERR_BAD_PUSH_DMA_RING = -1185, | ||
651 | |||
652 | /** Cannot allocate pull DMA ring. */ | ||
653 | GXIO_TRIO_ERR_NO_PULL_DMA_RING = -1186, | ||
654 | |||
655 | /** Invalid pull DMA ring index. */ | ||
656 | GXIO_TRIO_ERR_BAD_PULL_DMA_RING = -1187, | ||
657 | |||
658 | /** Cannot allocate PIO region. */ | ||
659 | GXIO_TRIO_ERR_NO_PIO = -1188, | ||
660 | |||
661 | /** Invalid PIO region index. */ | ||
662 | GXIO_TRIO_ERR_BAD_PIO = -1189, | ||
663 | |||
664 | /** Cannot allocate ASID. */ | ||
665 | GXIO_TRIO_ERR_NO_ASID = -1190, | ||
666 | |||
667 | /** Invalid ASID. */ | ||
668 | GXIO_TRIO_ERR_BAD_ASID = -1191, | ||
669 | |||
670 | |||
671 | /********************************************************/ | ||
672 | /* MICA Error Codes */ | ||
673 | /********************************************************/ | ||
674 | |||
675 | /** No such accelerator type. */ | ||
676 | GXIO_MICA_ERR_BAD_ACCEL_TYPE = -1220, | ||
677 | |||
678 | /** Cannot allocate context. */ | ||
679 | GXIO_MICA_ERR_NO_CONTEXT = -1221, | ||
680 | |||
681 | /** PKA command queue is full, can't add another command. */ | ||
682 | GXIO_MICA_ERR_PKA_CMD_QUEUE_FULL = -1222, | ||
683 | |||
684 | /** PKA result queue is empty, can't get a result from the queue. */ | ||
685 | GXIO_MICA_ERR_PKA_RESULT_QUEUE_EMPTY = -1223, | ||
686 | |||
687 | /********************************************************/ | ||
688 | /* GPIO Error Codes */ | ||
689 | /********************************************************/ | ||
690 | |||
691 | /** Pin not available. Either the physical pin does not exist, or | ||
692 | * it is reserved by the hypervisor for system usage. */ | ||
693 | GXIO_GPIO_ERR_PIN_UNAVAILABLE = -1240, | ||
694 | |||
695 | /** Pin busy. The pin exists, and is available for use via GXIO, but | ||
696 | * it has been attached by some other process or driver. */ | ||
697 | GXIO_GPIO_ERR_PIN_BUSY = -1241, | ||
698 | |||
699 | /** Cannot access unattached pin. One or more of the pins being | ||
700 | * manipulated by this call are not attached to the requesting | ||
701 | * context. */ | ||
702 | GXIO_GPIO_ERR_PIN_UNATTACHED = -1242, | ||
703 | |||
704 | /** Invalid I/O mode for pin. The wiring of the pin in the system | ||
705 | * is such that the I/O mode or electrical control parameters | ||
706 | * requested could cause damage. */ | ||
707 | GXIO_GPIO_ERR_PIN_INVALID_MODE = -1243, | ||
708 | |||
709 | /** Smallest iorpc error number. */ | ||
710 | GXIO_ERR_MIN = -1299 | ||
711 | }; | ||
712 | |||
713 | |||
714 | #endif /* !_HV_IORPC_H_ */ | ||
diff --git a/arch/tile/kernel/Makefile b/arch/tile/kernel/Makefile index 5de99248d8df..5334be8e2538 100644 --- a/arch/tile/kernel/Makefile +++ b/arch/tile/kernel/Makefile | |||
@@ -14,4 +14,9 @@ obj-$(CONFIG_SMP) += smpboot.o smp.o tlb.o | |||
14 | obj-$(CONFIG_MODULES) += module.o | 14 | obj-$(CONFIG_MODULES) += module.o |
15 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o | 15 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o |
16 | obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel_$(BITS).o | 16 | obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel_$(BITS).o |
17 | ifdef CONFIG_TILEGX | ||
18 | obj-$(CONFIG_PCI) += pci_gx.o | ||
19 | else | ||
17 | obj-$(CONFIG_PCI) += pci.o | 20 | obj-$(CONFIG_PCI) += pci.o |
21 | endif | ||
22 | obj-$(CONFIG_TILE_USB) += usb.o | ||
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c index b3ed19f8779c..b9fe80ec1089 100644 --- a/arch/tile/kernel/pci-dma.c +++ b/arch/tile/kernel/pci-dma.c | |||
@@ -14,6 +14,7 @@ | |||
14 | 14 | ||
15 | #include <linux/mm.h> | 15 | #include <linux/mm.h> |
16 | #include <linux/dma-mapping.h> | 16 | #include <linux/dma-mapping.h> |
17 | #include <linux/swiotlb.h> | ||
17 | #include <linux/vmalloc.h> | 18 | #include <linux/vmalloc.h> |
18 | #include <linux/export.h> | 19 | #include <linux/export.h> |
19 | #include <asm/tlbflush.h> | 20 | #include <asm/tlbflush.h> |
@@ -22,13 +23,18 @@ | |||
22 | /* Generic DMA mapping functions: */ | 23 | /* Generic DMA mapping functions: */ |
23 | 24 | ||
24 | /* | 25 | /* |
25 | * Allocate what Linux calls "coherent" memory, which for us just | 26 | * Allocate what Linux calls "coherent" memory. On TILEPro this is |
26 | * means uncached. | 27 | * uncached memory; on TILE-Gx it is hash-for-home memory. |
27 | */ | 28 | */ |
28 | void *dma_alloc_coherent(struct device *dev, | 29 | #ifdef __tilepro__ |
29 | size_t size, | 30 | #define PAGE_HOME_DMA PAGE_HOME_UNCACHED |
30 | dma_addr_t *dma_handle, | 31 | #else |
31 | gfp_t gfp) | 32 | #define PAGE_HOME_DMA PAGE_HOME_HASH |
33 | #endif | ||
34 | |||
35 | static void *tile_dma_alloc_coherent(struct device *dev, size_t size, | ||
36 | dma_addr_t *dma_handle, gfp_t gfp, | ||
37 | struct dma_attrs *attrs) | ||
32 | { | 38 | { |
33 | u64 dma_mask = dev->coherent_dma_mask ?: DMA_BIT_MASK(32); | 39 | u64 dma_mask = dev->coherent_dma_mask ?: DMA_BIT_MASK(32); |
34 | int node = dev_to_node(dev); | 40 | int node = dev_to_node(dev); |
@@ -39,39 +45,42 @@ void *dma_alloc_coherent(struct device *dev, | |||
39 | gfp |= __GFP_ZERO; | 45 | gfp |= __GFP_ZERO; |
40 | 46 | ||
41 | /* | 47 | /* |
42 | * By forcing NUMA node 0 for 32-bit masks we ensure that the | 48 | * If the mask specifies that the memory be in the first 4 GB, then |
43 | * high 32 bits of the resulting PA will be zero. If the mask | 49 | * we force the allocation to come from the DMA zone. We also |
44 | * size is, e.g., 24, we may still not be able to guarantee a | 50 | * force the node to 0 since that's the only node where the DMA |
45 | * suitable memory address, in which case we will return NULL. | 51 | * zone isn't empty. If the mask size is smaller than 32 bits, we |
46 | * But such devices are uncommon. | 52 | * may still not be able to guarantee a suitable memory address, in |
53 | * which case we will return NULL. But such devices are uncommon. | ||
47 | */ | 54 | */ |
48 | if (dma_mask <= DMA_BIT_MASK(32)) | 55 | if (dma_mask <= DMA_BIT_MASK(32)) { |
56 | gfp |= GFP_DMA; | ||
49 | node = 0; | 57 | node = 0; |
58 | } | ||
50 | 59 | ||
51 | pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_UNCACHED); | 60 | pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_DMA); |
52 | if (pg == NULL) | 61 | if (pg == NULL) |
53 | return NULL; | 62 | return NULL; |
54 | 63 | ||
55 | addr = page_to_phys(pg); | 64 | addr = page_to_phys(pg); |
56 | if (addr + size > dma_mask) { | 65 | if (addr + size > dma_mask) { |
57 | homecache_free_pages(addr, order); | 66 | __homecache_free_pages(pg, order); |
58 | return NULL; | 67 | return NULL; |
59 | } | 68 | } |
60 | 69 | ||
61 | *dma_handle = addr; | 70 | *dma_handle = addr; |
71 | |||
62 | return page_address(pg); | 72 | return page_address(pg); |
63 | } | 73 | } |
64 | EXPORT_SYMBOL(dma_alloc_coherent); | ||
65 | 74 | ||
66 | /* | 75 | /* |
67 | * Free memory that was allocated with dma_alloc_coherent. | 76 | * Free memory that was allocated with tile_dma_alloc_coherent. |
68 | */ | 77 | */ |
69 | void dma_free_coherent(struct device *dev, size_t size, | 78 | static void tile_dma_free_coherent(struct device *dev, size_t size, |
70 | void *vaddr, dma_addr_t dma_handle) | 79 | void *vaddr, dma_addr_t dma_handle, |
80 | struct dma_attrs *attrs) | ||
71 | { | 81 | { |
72 | homecache_free_pages((unsigned long)vaddr, get_order(size)); | 82 | homecache_free_pages((unsigned long)vaddr, get_order(size)); |
73 | } | 83 | } |
74 | EXPORT_SYMBOL(dma_free_coherent); | ||
75 | 84 | ||
76 | /* | 85 | /* |
77 | * The map routines "map" the specified address range for DMA | 86 | * The map routines "map" the specified address range for DMA |
@@ -87,52 +96,285 @@ EXPORT_SYMBOL(dma_free_coherent); | |||
87 | * can count on nothing having been touched. | 96 | * can count on nothing having been touched. |
88 | */ | 97 | */ |
89 | 98 | ||
90 | /* Flush a PA range from cache page by page. */ | 99 | /* Set up a single page for DMA access. */ |
91 | static void __dma_map_pa_range(dma_addr_t dma_addr, size_t size) | 100 | static void __dma_prep_page(struct page *page, unsigned long offset, |
101 | size_t size, enum dma_data_direction direction) | ||
102 | { | ||
103 | /* | ||
104 | * Flush the page from cache if necessary. | ||
105 | * On tilegx, data is delivered to hash-for-home L3; on tilepro, | ||
106 | * data is delivered direct to memory. | ||
107 | * | ||
108 | * NOTE: If we were just doing DMA_TO_DEVICE we could optimize | ||
109 | * this to be a "flush" not a "finv" and keep some of the | ||
110 | * state in cache across the DMA operation, but it doesn't seem | ||
111 | * worth creating the necessary flush_buffer_xxx() infrastructure. | ||
112 | */ | ||
113 | int home = page_home(page); | ||
114 | switch (home) { | ||
115 | case PAGE_HOME_HASH: | ||
116 | #ifdef __tilegx__ | ||
117 | return; | ||
118 | #endif | ||
119 | break; | ||
120 | case PAGE_HOME_UNCACHED: | ||
121 | #ifdef __tilepro__ | ||
122 | return; | ||
123 | #endif | ||
124 | break; | ||
125 | case PAGE_HOME_IMMUTABLE: | ||
126 | /* Should be going to the device only. */ | ||
127 | BUG_ON(direction == DMA_FROM_DEVICE || | ||
128 | direction == DMA_BIDIRECTIONAL); | ||
129 | return; | ||
130 | case PAGE_HOME_INCOHERENT: | ||
131 | /* Incoherent anyway, so no need to work hard here. */ | ||
132 | return; | ||
133 | default: | ||
134 | BUG_ON(home < 0 || home >= NR_CPUS); | ||
135 | break; | ||
136 | } | ||
137 | homecache_finv_page(page); | ||
138 | |||
139 | #ifdef DEBUG_ALIGNMENT | ||
140 | /* Warn if the region isn't cacheline aligned. */ | ||
141 | if (offset & (L2_CACHE_BYTES - 1) || (size & (L2_CACHE_BYTES - 1))) | ||
142 | pr_warn("Unaligned DMA to non-hfh memory: PA %#llx/%#lx\n", | ||
143 | PFN_PHYS(page_to_pfn(page)) + offset, size); | ||
144 | #endif | ||
145 | } | ||
146 | |||
147 | /* Make the page ready to be read by the core. */ | ||
148 | static void __dma_complete_page(struct page *page, unsigned long offset, | ||
149 | size_t size, enum dma_data_direction direction) | ||
150 | { | ||
151 | #ifdef __tilegx__ | ||
152 | switch (page_home(page)) { | ||
153 | case PAGE_HOME_HASH: | ||
154 | /* I/O device delivered data the way the cpu wanted it. */ | ||
155 | break; | ||
156 | case PAGE_HOME_INCOHERENT: | ||
157 | /* Incoherent anyway, so no need to work hard here. */ | ||
158 | break; | ||
159 | case PAGE_HOME_IMMUTABLE: | ||
160 | /* Extra read-only copies are not a problem. */ | ||
161 | break; | ||
162 | default: | ||
163 | /* Flush the bogus hash-for-home I/O entries to memory. */ | ||
164 | homecache_finv_map_page(page, PAGE_HOME_HASH); | ||
165 | break; | ||
166 | } | ||
167 | #endif | ||
168 | } | ||
169 | |||
170 | static void __dma_prep_pa_range(dma_addr_t dma_addr, size_t size, | ||
171 | enum dma_data_direction direction) | ||
92 | { | 172 | { |
93 | struct page *page = pfn_to_page(PFN_DOWN(dma_addr)); | 173 | struct page *page = pfn_to_page(PFN_DOWN(dma_addr)); |
94 | size_t bytesleft = PAGE_SIZE - (dma_addr & (PAGE_SIZE - 1)); | 174 | unsigned long offset = dma_addr & (PAGE_SIZE - 1); |
175 | size_t bytes = min(size, (size_t)(PAGE_SIZE - offset)); | ||
176 | |||
177 | while (size != 0) { | ||
178 | __dma_prep_page(page, offset, bytes, direction); | ||
179 | size -= bytes; | ||
180 | ++page; | ||
181 | offset = 0; | ||
182 | bytes = min((size_t)PAGE_SIZE, size); | ||
183 | } | ||
184 | } | ||
95 | 185 | ||
96 | while ((ssize_t)size > 0) { | 186 | static void __dma_complete_pa_range(dma_addr_t dma_addr, size_t size, |
97 | /* Flush the page. */ | 187 | enum dma_data_direction direction) |
98 | homecache_flush_cache(page++, 0); | 188 | { |
189 | struct page *page = pfn_to_page(PFN_DOWN(dma_addr)); | ||
190 | unsigned long offset = dma_addr & (PAGE_SIZE - 1); | ||
191 | size_t bytes = min(size, (size_t)(PAGE_SIZE - offset)); | ||
192 | |||
193 | while (size != 0) { | ||
194 | __dma_complete_page(page, offset, bytes, direction); | ||
195 | size -= bytes; | ||
196 | ++page; | ||
197 | offset = 0; | ||
198 | bytes = min((size_t)PAGE_SIZE, size); | ||
199 | } | ||
200 | } | ||
201 | |||
202 | static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist, | ||
203 | int nents, enum dma_data_direction direction, | ||
204 | struct dma_attrs *attrs) | ||
205 | { | ||
206 | struct scatterlist *sg; | ||
207 | int i; | ||
208 | |||
209 | BUG_ON(!valid_dma_direction(direction)); | ||
210 | |||
211 | WARN_ON(nents == 0 || sglist->length == 0); | ||
99 | 212 | ||
100 | /* Figure out if we need to continue on the next page. */ | 213 | for_each_sg(sglist, sg, nents, i) { |
101 | size -= bytesleft; | 214 | sg->dma_address = sg_phys(sg); |
102 | bytesleft = PAGE_SIZE; | 215 | __dma_prep_pa_range(sg->dma_address, sg->length, direction); |
216 | #ifdef CONFIG_NEED_SG_DMA_LENGTH | ||
217 | sg->dma_length = sg->length; | ||
218 | #endif | ||
103 | } | 219 | } |
220 | |||
221 | return nents; | ||
104 | } | 222 | } |
105 | 223 | ||
106 | /* | 224 | static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, |
107 | * dma_map_single can be passed any memory address, and there appear | 225 | int nents, enum dma_data_direction direction, |
108 | * to be no alignment constraints. | 226 | struct dma_attrs *attrs) |
109 | * | 227 | { |
110 | * There is a chance that the start of the buffer will share a cache | 228 | struct scatterlist *sg; |
111 | * line with some other data that has been touched in the meantime. | 229 | int i; |
112 | */ | 230 | |
113 | dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, | 231 | BUG_ON(!valid_dma_direction(direction)); |
114 | enum dma_data_direction direction) | 232 | for_each_sg(sglist, sg, nents, i) { |
233 | sg->dma_address = sg_phys(sg); | ||
234 | __dma_complete_pa_range(sg->dma_address, sg->length, | ||
235 | direction); | ||
236 | } | ||
237 | } | ||
238 | |||
239 | static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page, | ||
240 | unsigned long offset, size_t size, | ||
241 | enum dma_data_direction direction, | ||
242 | struct dma_attrs *attrs) | ||
115 | { | 243 | { |
116 | dma_addr_t dma_addr = __pa(ptr); | 244 | BUG_ON(!valid_dma_direction(direction)); |
245 | |||
246 | BUG_ON(offset + size > PAGE_SIZE); | ||
247 | __dma_prep_page(page, offset, size, direction); | ||
117 | 248 | ||
249 | return page_to_pa(page) + offset; | ||
250 | } | ||
251 | |||
252 | static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address, | ||
253 | size_t size, enum dma_data_direction direction, | ||
254 | struct dma_attrs *attrs) | ||
255 | { | ||
118 | BUG_ON(!valid_dma_direction(direction)); | 256 | BUG_ON(!valid_dma_direction(direction)); |
119 | WARN_ON(size == 0); | ||
120 | 257 | ||
121 | __dma_map_pa_range(dma_addr, size); | 258 | __dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)), |
259 | dma_address & (PAGE_SIZE - 1), size, direction); | ||
260 | } | ||
122 | 261 | ||
123 | return dma_addr; | 262 | static void tile_dma_sync_single_for_cpu(struct device *dev, |
263 | dma_addr_t dma_handle, | ||
264 | size_t size, | ||
265 | enum dma_data_direction direction) | ||
266 | { | ||
267 | BUG_ON(!valid_dma_direction(direction)); | ||
268 | |||
269 | __dma_complete_pa_range(dma_handle, size, direction); | ||
270 | } | ||
271 | |||
272 | static void tile_dma_sync_single_for_device(struct device *dev, | ||
273 | dma_addr_t dma_handle, size_t size, | ||
274 | enum dma_data_direction direction) | ||
275 | { | ||
276 | __dma_prep_pa_range(dma_handle, size, direction); | ||
124 | } | 277 | } |
125 | EXPORT_SYMBOL(dma_map_single); | ||
126 | 278 | ||
127 | void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | 279 | static void tile_dma_sync_sg_for_cpu(struct device *dev, |
128 | enum dma_data_direction direction) | 280 | struct scatterlist *sglist, int nelems, |
281 | enum dma_data_direction direction) | ||
129 | { | 282 | { |
283 | struct scatterlist *sg; | ||
284 | int i; | ||
285 | |||
286 | BUG_ON(!valid_dma_direction(direction)); | ||
287 | WARN_ON(nelems == 0 || sglist->length == 0); | ||
288 | |||
289 | for_each_sg(sglist, sg, nelems, i) { | ||
290 | dma_sync_single_for_cpu(dev, sg->dma_address, | ||
291 | sg_dma_len(sg), direction); | ||
292 | } | ||
293 | } | ||
294 | |||
295 | static void tile_dma_sync_sg_for_device(struct device *dev, | ||
296 | struct scatterlist *sglist, int nelems, | ||
297 | enum dma_data_direction direction) | ||
298 | { | ||
299 | struct scatterlist *sg; | ||
300 | int i; | ||
301 | |||
130 | BUG_ON(!valid_dma_direction(direction)); | 302 | BUG_ON(!valid_dma_direction(direction)); |
303 | WARN_ON(nelems == 0 || sglist->length == 0); | ||
304 | |||
305 | for_each_sg(sglist, sg, nelems, i) { | ||
306 | dma_sync_single_for_device(dev, sg->dma_address, | ||
307 | sg_dma_len(sg), direction); | ||
308 | } | ||
309 | } | ||
310 | |||
311 | static inline int | ||
312 | tile_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||
313 | { | ||
314 | return 0; | ||
315 | } | ||
316 | |||
317 | static inline int | ||
318 | tile_dma_supported(struct device *dev, u64 mask) | ||
319 | { | ||
320 | return 1; | ||
321 | } | ||
322 | |||
323 | static struct dma_map_ops tile_default_dma_map_ops = { | ||
324 | .alloc = tile_dma_alloc_coherent, | ||
325 | .free = tile_dma_free_coherent, | ||
326 | .map_page = tile_dma_map_page, | ||
327 | .unmap_page = tile_dma_unmap_page, | ||
328 | .map_sg = tile_dma_map_sg, | ||
329 | .unmap_sg = tile_dma_unmap_sg, | ||
330 | .sync_single_for_cpu = tile_dma_sync_single_for_cpu, | ||
331 | .sync_single_for_device = tile_dma_sync_single_for_device, | ||
332 | .sync_sg_for_cpu = tile_dma_sync_sg_for_cpu, | ||
333 | .sync_sg_for_device = tile_dma_sync_sg_for_device, | ||
334 | .mapping_error = tile_dma_mapping_error, | ||
335 | .dma_supported = tile_dma_supported | ||
336 | }; | ||
337 | |||
338 | struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops; | ||
339 | EXPORT_SYMBOL(tile_dma_map_ops); | ||
340 | |||
341 | /* Generic PCI DMA mapping functions */ | ||
342 | |||
343 | static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size, | ||
344 | dma_addr_t *dma_handle, gfp_t gfp, | ||
345 | struct dma_attrs *attrs) | ||
346 | { | ||
347 | int node = dev_to_node(dev); | ||
348 | int order = get_order(size); | ||
349 | struct page *pg; | ||
350 | dma_addr_t addr; | ||
351 | |||
352 | gfp |= __GFP_ZERO; | ||
353 | |||
354 | pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_DMA); | ||
355 | if (pg == NULL) | ||
356 | return NULL; | ||
357 | |||
358 | addr = page_to_phys(pg); | ||
359 | |||
360 | *dma_handle = phys_to_dma(dev, addr); | ||
361 | |||
362 | return page_address(pg); | ||
363 | } | ||
364 | |||
365 | /* | ||
366 | * Free memory that was allocated with tile_pci_dma_alloc_coherent. | ||
367 | */ | ||
368 | static void tile_pci_dma_free_coherent(struct device *dev, size_t size, | ||
369 | void *vaddr, dma_addr_t dma_handle, | ||
370 | struct dma_attrs *attrs) | ||
371 | { | ||
372 | homecache_free_pages((unsigned long)vaddr, get_order(size)); | ||
131 | } | 373 | } |
132 | EXPORT_SYMBOL(dma_unmap_single); | ||
133 | 374 | ||
134 | int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, | 375 | static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist, |
135 | enum dma_data_direction direction) | 376 | int nents, enum dma_data_direction direction, |
377 | struct dma_attrs *attrs) | ||
136 | { | 378 | { |
137 | struct scatterlist *sg; | 379 | struct scatterlist *sg; |
138 | int i; | 380 | int i; |
@@ -143,73 +385,103 @@ int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, | |||
143 | 385 | ||
144 | for_each_sg(sglist, sg, nents, i) { | 386 | for_each_sg(sglist, sg, nents, i) { |
145 | sg->dma_address = sg_phys(sg); | 387 | sg->dma_address = sg_phys(sg); |
146 | __dma_map_pa_range(sg->dma_address, sg->length); | 388 | __dma_prep_pa_range(sg->dma_address, sg->length, direction); |
389 | |||
390 | sg->dma_address = phys_to_dma(dev, sg->dma_address); | ||
391 | #ifdef CONFIG_NEED_SG_DMA_LENGTH | ||
392 | sg->dma_length = sg->length; | ||
393 | #endif | ||
147 | } | 394 | } |
148 | 395 | ||
149 | return nents; | 396 | return nents; |
150 | } | 397 | } |
151 | EXPORT_SYMBOL(dma_map_sg); | ||
152 | 398 | ||
153 | void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | 399 | static void tile_pci_dma_unmap_sg(struct device *dev, |
154 | enum dma_data_direction direction) | 400 | struct scatterlist *sglist, int nents, |
401 | enum dma_data_direction direction, | ||
402 | struct dma_attrs *attrs) | ||
155 | { | 403 | { |
404 | struct scatterlist *sg; | ||
405 | int i; | ||
406 | |||
156 | BUG_ON(!valid_dma_direction(direction)); | 407 | BUG_ON(!valid_dma_direction(direction)); |
408 | for_each_sg(sglist, sg, nents, i) { | ||
409 | sg->dma_address = sg_phys(sg); | ||
410 | __dma_complete_pa_range(sg->dma_address, sg->length, | ||
411 | direction); | ||
412 | } | ||
157 | } | 413 | } |
158 | EXPORT_SYMBOL(dma_unmap_sg); | ||
159 | 414 | ||
160 | dma_addr_t dma_map_page(struct device *dev, struct page *page, | 415 | static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page, |
161 | unsigned long offset, size_t size, | 416 | unsigned long offset, size_t size, |
162 | enum dma_data_direction direction) | 417 | enum dma_data_direction direction, |
418 | struct dma_attrs *attrs) | ||
163 | { | 419 | { |
164 | BUG_ON(!valid_dma_direction(direction)); | 420 | BUG_ON(!valid_dma_direction(direction)); |
165 | 421 | ||
166 | BUG_ON(offset + size > PAGE_SIZE); | 422 | BUG_ON(offset + size > PAGE_SIZE); |
167 | homecache_flush_cache(page, 0); | 423 | __dma_prep_page(page, offset, size, direction); |
168 | 424 | ||
169 | return page_to_pa(page) + offset; | 425 | return phys_to_dma(dev, page_to_pa(page) + offset); |
170 | } | 426 | } |
171 | EXPORT_SYMBOL(dma_map_page); | ||
172 | 427 | ||
173 | void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, | 428 | static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address, |
174 | enum dma_data_direction direction) | 429 | size_t size, |
430 | enum dma_data_direction direction, | ||
431 | struct dma_attrs *attrs) | ||
175 | { | 432 | { |
176 | BUG_ON(!valid_dma_direction(direction)); | 433 | BUG_ON(!valid_dma_direction(direction)); |
434 | |||
435 | dma_address = dma_to_phys(dev, dma_address); | ||
436 | |||
437 | __dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)), | ||
438 | dma_address & (PAGE_SIZE - 1), size, direction); | ||
177 | } | 439 | } |
178 | EXPORT_SYMBOL(dma_unmap_page); | ||
179 | 440 | ||
180 | void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | 441 | static void tile_pci_dma_sync_single_for_cpu(struct device *dev, |
181 | size_t size, enum dma_data_direction direction) | 442 | dma_addr_t dma_handle, |
443 | size_t size, | ||
444 | enum dma_data_direction direction) | ||
182 | { | 445 | { |
183 | BUG_ON(!valid_dma_direction(direction)); | 446 | BUG_ON(!valid_dma_direction(direction)); |
447 | |||
448 | dma_handle = dma_to_phys(dev, dma_handle); | ||
449 | |||
450 | __dma_complete_pa_range(dma_handle, size, direction); | ||
184 | } | 451 | } |
185 | EXPORT_SYMBOL(dma_sync_single_for_cpu); | ||
186 | 452 | ||
187 | void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, | 453 | static void tile_pci_dma_sync_single_for_device(struct device *dev, |
188 | size_t size, enum dma_data_direction direction) | 454 | dma_addr_t dma_handle, |
455 | size_t size, | ||
456 | enum dma_data_direction | ||
457 | direction) | ||
189 | { | 458 | { |
190 | unsigned long start = PFN_DOWN(dma_handle); | 459 | dma_handle = dma_to_phys(dev, dma_handle); |
191 | unsigned long end = PFN_DOWN(dma_handle + size - 1); | ||
192 | unsigned long i; | ||
193 | 460 | ||
194 | BUG_ON(!valid_dma_direction(direction)); | 461 | __dma_prep_pa_range(dma_handle, size, direction); |
195 | for (i = start; i <= end; ++i) | ||
196 | homecache_flush_cache(pfn_to_page(i), 0); | ||
197 | } | 462 | } |
198 | EXPORT_SYMBOL(dma_sync_single_for_device); | ||
199 | 463 | ||
200 | void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | 464 | static void tile_pci_dma_sync_sg_for_cpu(struct device *dev, |
201 | enum dma_data_direction direction) | 465 | struct scatterlist *sglist, |
466 | int nelems, | ||
467 | enum dma_data_direction direction) | ||
202 | { | 468 | { |
469 | struct scatterlist *sg; | ||
470 | int i; | ||
471 | |||
203 | BUG_ON(!valid_dma_direction(direction)); | 472 | BUG_ON(!valid_dma_direction(direction)); |
204 | WARN_ON(nelems == 0 || sg[0].length == 0); | 473 | WARN_ON(nelems == 0 || sglist->length == 0); |
474 | |||
475 | for_each_sg(sglist, sg, nelems, i) { | ||
476 | dma_sync_single_for_cpu(dev, sg->dma_address, | ||
477 | sg_dma_len(sg), direction); | ||
478 | } | ||
205 | } | 479 | } |
206 | EXPORT_SYMBOL(dma_sync_sg_for_cpu); | ||
207 | 480 | ||
208 | /* | 481 | static void tile_pci_dma_sync_sg_for_device(struct device *dev, |
209 | * Flush and invalidate cache for scatterlist. | 482 | struct scatterlist *sglist, |
210 | */ | 483 | int nelems, |
211 | void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, | 484 | enum dma_data_direction direction) |
212 | int nelems, enum dma_data_direction direction) | ||
213 | { | 485 | { |
214 | struct scatterlist *sg; | 486 | struct scatterlist *sg; |
215 | int i; | 487 | int i; |
@@ -222,31 +494,93 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, | |||
222 | sg_dma_len(sg), direction); | 494 | sg_dma_len(sg), direction); |
223 | } | 495 | } |
224 | } | 496 | } |
225 | EXPORT_SYMBOL(dma_sync_sg_for_device); | ||
226 | 497 | ||
227 | void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, | 498 | static inline int |
228 | unsigned long offset, size_t size, | 499 | tile_pci_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
229 | enum dma_data_direction direction) | ||
230 | { | 500 | { |
231 | dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction); | 501 | return 0; |
232 | } | 502 | } |
233 | EXPORT_SYMBOL(dma_sync_single_range_for_cpu); | ||
234 | 503 | ||
235 | void dma_sync_single_range_for_device(struct device *dev, | 504 | static inline int |
236 | dma_addr_t dma_handle, | 505 | tile_pci_dma_supported(struct device *dev, u64 mask) |
237 | unsigned long offset, size_t size, | ||
238 | enum dma_data_direction direction) | ||
239 | { | 506 | { |
240 | dma_sync_single_for_device(dev, dma_handle + offset, size, direction); | 507 | return 1; |
241 | } | 508 | } |
242 | EXPORT_SYMBOL(dma_sync_single_range_for_device); | ||
243 | 509 | ||
244 | /* | 510 | static struct dma_map_ops tile_pci_default_dma_map_ops = { |
245 | * dma_alloc_noncoherent() returns non-cacheable memory, so there's no | 511 | .alloc = tile_pci_dma_alloc_coherent, |
246 | * need to do any flushing here. | 512 | .free = tile_pci_dma_free_coherent, |
247 | */ | 513 | .map_page = tile_pci_dma_map_page, |
248 | void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | 514 | .unmap_page = tile_pci_dma_unmap_page, |
249 | enum dma_data_direction direction) | 515 | .map_sg = tile_pci_dma_map_sg, |
516 | .unmap_sg = tile_pci_dma_unmap_sg, | ||
517 | .sync_single_for_cpu = tile_pci_dma_sync_single_for_cpu, | ||
518 | .sync_single_for_device = tile_pci_dma_sync_single_for_device, | ||
519 | .sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu, | ||
520 | .sync_sg_for_device = tile_pci_dma_sync_sg_for_device, | ||
521 | .mapping_error = tile_pci_dma_mapping_error, | ||
522 | .dma_supported = tile_pci_dma_supported | ||
523 | }; | ||
524 | |||
525 | struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops; | ||
526 | EXPORT_SYMBOL(gx_pci_dma_map_ops); | ||
527 | |||
528 | /* PCI DMA mapping functions for legacy PCI devices */ | ||
529 | |||
530 | #ifdef CONFIG_SWIOTLB | ||
531 | static void *tile_swiotlb_alloc_coherent(struct device *dev, size_t size, | ||
532 | dma_addr_t *dma_handle, gfp_t gfp, | ||
533 | struct dma_attrs *attrs) | ||
250 | { | 534 | { |
535 | gfp |= GFP_DMA; | ||
536 | return swiotlb_alloc_coherent(dev, size, dma_handle, gfp); | ||
537 | } | ||
538 | |||
539 | static void tile_swiotlb_free_coherent(struct device *dev, size_t size, | ||
540 | void *vaddr, dma_addr_t dma_addr, | ||
541 | struct dma_attrs *attrs) | ||
542 | { | ||
543 | swiotlb_free_coherent(dev, size, vaddr, dma_addr); | ||
544 | } | ||
545 | |||
546 | static struct dma_map_ops pci_swiotlb_dma_ops = { | ||
547 | .alloc = tile_swiotlb_alloc_coherent, | ||
548 | .free = tile_swiotlb_free_coherent, | ||
549 | .map_page = swiotlb_map_page, | ||
550 | .unmap_page = swiotlb_unmap_page, | ||
551 | .map_sg = swiotlb_map_sg_attrs, | ||
552 | .unmap_sg = swiotlb_unmap_sg_attrs, | ||
553 | .sync_single_for_cpu = swiotlb_sync_single_for_cpu, | ||
554 | .sync_single_for_device = swiotlb_sync_single_for_device, | ||
555 | .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, | ||
556 | .sync_sg_for_device = swiotlb_sync_sg_for_device, | ||
557 | .dma_supported = swiotlb_dma_supported, | ||
558 | .mapping_error = swiotlb_dma_mapping_error, | ||
559 | }; | ||
560 | |||
561 | struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops; | ||
562 | #else | ||
563 | struct dma_map_ops *gx_legacy_pci_dma_map_ops; | ||
564 | #endif | ||
565 | EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops); | ||
566 | |||
567 | #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK | ||
568 | int dma_set_coherent_mask(struct device *dev, u64 mask) | ||
569 | { | ||
570 | struct dma_map_ops *dma_ops = get_dma_ops(dev); | ||
571 | |||
572 | /* Handle legacy PCI devices with limited memory addressability. */ | ||
573 | if (((dma_ops == gx_pci_dma_map_ops) || | ||
574 | (dma_ops == gx_legacy_pci_dma_map_ops)) && | ||
575 | (mask <= DMA_BIT_MASK(32))) { | ||
576 | if (mask > dev->archdata.max_direct_dma_addr) | ||
577 | mask = dev->archdata.max_direct_dma_addr; | ||
578 | } | ||
579 | |||
580 | if (!dma_supported(dev, mask)) | ||
581 | return -EIO; | ||
582 | dev->coherent_dma_mask = mask; | ||
583 | return 0; | ||
251 | } | 584 | } |
252 | EXPORT_SYMBOL(dma_cache_sync); | 585 | EXPORT_SYMBOL(dma_set_coherent_mask); |
586 | #endif | ||
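As a usage sketch (not taken from this change): with the exported dma_* symbols removed, drivers stay on the generic DMA API, and the appropriate ops table above (tile_dma_map_ops, gx_pci_dma_map_ops, or the swiotlb variant) is reached through the device's dma_map_ops. The device pointer, buffer, and helper name below are hypothetical:

	#include <linux/dma-mapping.h>

	/* Hypothetical driver helper: map a buffer for a device-bound transfer. */
	static int example_map_for_device(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, handle))
			return -ENOMEM;

		/* ... program the hardware with "handle" and wait for completion ... */

		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
		return 0;
	}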
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c new file mode 100644 index 000000000000..fa75264a82ae --- /dev/null +++ b/arch/tile/kernel/pci_gx.c | |||
@@ -0,0 +1,1543 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/mmzone.h> | ||
17 | #include <linux/pci.h> | ||
18 | #include <linux/delay.h> | ||
19 | #include <linux/string.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/capability.h> | ||
22 | #include <linux/sched.h> | ||
23 | #include <linux/errno.h> | ||
24 | #include <linux/irq.h> | ||
25 | #include <linux/msi.h> | ||
26 | #include <linux/io.h> | ||
27 | #include <linux/uaccess.h> | ||
28 | #include <linux/ctype.h> | ||
29 | |||
30 | #include <asm/processor.h> | ||
31 | #include <asm/sections.h> | ||
32 | #include <asm/byteorder.h> | ||
33 | |||
34 | #include <gxio/iorpc_globals.h> | ||
35 | #include <gxio/kiorpc.h> | ||
36 | #include <gxio/trio.h> | ||
37 | #include <gxio/iorpc_trio.h> | ||
38 | #include <hv/drv_trio_intf.h> | ||
39 | |||
40 | #include <arch/sim.h> | ||
41 | |||
42 | /* | ||
43 | * This file contains the routines to search for PCI buses, | ||
44 | * enumerate the buses, and configure any attached devices. | ||
45 | */ | ||
46 | |||
47 | #define DEBUG_PCI_CFG 0 | ||
48 | |||
49 | #if DEBUG_PCI_CFG | ||
50 | #define TRACE_CFG_WR(size, val, bus, dev, func, offset) \ | ||
51 | pr_info("CFG WR %d-byte VAL %#x to bus %d dev %d func %d addr %u\n", \ | ||
52 | size, val, bus, dev, func, offset & 0xFFF); | ||
53 | #define TRACE_CFG_RD(size, val, bus, dev, func, offset) \ | ||
54 | pr_info("CFG RD %d-byte VAL %#x from bus %d dev %d func %d addr %u\n", \ | ||
55 | size, val, bus, dev, func, offset & 0xFFF); | ||
56 | #else | ||
57 | #define TRACE_CFG_WR(...) | ||
58 | #define TRACE_CFG_RD(...) | ||
59 | #endif | ||
60 | |||
61 | static int __devinitdata pci_probe = 1; | ||
62 | |||
63 | /* Information on the PCIe RC ports configuration. */ | ||
64 | static int __devinitdata pcie_rc[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES]; | ||
65 | |||
66 | /* | ||
67 | * On some platforms with one or more Gx endpoint ports, we need to | ||
68 | * delay the PCIe RC port probe for a few seconds to work around | ||
69 | * a HW PCIe link-training bug. The exact delay is specified with | ||
70 | * a kernel boot argument in the form of "pcie_rc_delay=T,P,S", | ||
71 | * where T is the TRIO instance number, P is the port number and S is | ||
72 | * the delay in seconds. If the delay is not provided, the value | ||
73 | * will be DEFAULT_RC_DELAY. | ||
74 | */ | ||
75 | static int __devinitdata rc_delay[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES]; | ||
76 | |||
77 | /* Default number of seconds that the PCIe RC port probe can be delayed. */ | ||
78 | #define DEFAULT_RC_DELAY 10 | ||
79 | |||
80 | /* Max number of seconds that the PCIe RC port probe can be delayed. */ | ||
81 | #define MAX_RC_DELAY 20 | ||
82 | |||
83 | /* Array of the PCIe ports configuration info obtained from the BIB. */ | ||
84 | struct pcie_port_property pcie_ports[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES]; | ||
85 | |||
86 | /* All drivers share the TRIO contexts defined here. */ | ||
87 | gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO]; | ||
88 | |||
89 | /* Pointer to an array of PCIe RC controllers. */ | ||
90 | struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES]; | ||
91 | int num_rc_controllers; | ||
92 | static int num_ep_controllers; | ||
93 | |||
94 | static struct pci_ops tile_cfg_ops; | ||
95 | |||
96 | /* Mask of CPUs that should receive PCIe interrupts. */ | ||
97 | static struct cpumask intr_cpus_map; | ||
98 | |||
99 | /* | ||
100 | * We don't need to worry about the alignment of resources. | ||
101 | */ | ||
102 | resource_size_t pcibios_align_resource(void *data, const struct resource *res, | ||
103 | resource_size_t size, resource_size_t align) | ||
104 | { | ||
105 | return res->start; | ||
106 | } | ||
107 | EXPORT_SYMBOL(pcibios_align_resource); | ||
108 | |||
109 | |||
110 | /* | ||
111 | * Pick a CPU to receive and handle the PCIe interrupts, based on the IRQ #. | ||
112 | * For now, we simply send interrupts to non-dataplane CPUs. | ||
113 | * We may implement methods to allow the user to specify the target CPUs, | ||
114 | * e.g. via boot arguments. | ||
115 | */ | ||
116 | static int tile_irq_cpu(int irq) | ||
117 | { | ||
118 | unsigned int count; | ||
119 | int i = 0; | ||
120 | int cpu; | ||
121 | |||
122 | count = cpumask_weight(&intr_cpus_map); | ||
123 | if (unlikely(count == 0)) { | ||
124 | pr_warning("intr_cpus_map empty, interrupts will be" | ||
125 | " delievered to dataplane tiles\n"); | ||
126 | return irq % (smp_height * smp_width); | ||
127 | } | ||
128 | |||
129 | count = irq % count; | ||
130 | for_each_cpu(cpu, &intr_cpus_map) { | ||
131 | if (i++ == count) | ||
132 | break; | ||
133 | } | ||
134 | return cpu; | ||
135 | } | ||
136 | |||
137 | /* | ||
138 | * Open a file descriptor to the TRIO shim. | ||
139 | */ | ||
140 | static int __devinit tile_pcie_open(int trio_index) | ||
141 | { | ||
142 | gxio_trio_context_t *context = &trio_contexts[trio_index]; | ||
143 | int ret; | ||
144 | |||
145 | /* | ||
146 | * This opens a file descriptor to the TRIO shim. | ||
147 | */ | ||
148 | ret = gxio_trio_init(context, trio_index); | ||
149 | if (ret < 0) | ||
150 | return ret; | ||
151 | |||
152 | /* | ||
153 | * Allocate an ASID for the kernel. | ||
154 | */ | ||
155 | ret = gxio_trio_alloc_asids(context, 1, 0, 0); | ||
156 | if (ret < 0) { | ||
157 | pr_err("PCI: ASID alloc failure on TRIO %d, give up\n", | ||
158 | trio_index); | ||
159 | goto asid_alloc_failure; | ||
160 | } | ||
161 | |||
162 | context->asid = ret; | ||
163 | |||
164 | #ifdef USE_SHARED_PCIE_CONFIG_REGION | ||
165 | /* | ||
166 | * Alloc a PIO region for config access, shared by all MACs per TRIO. | ||
167 | * This shouldn't fail since the kernel is supposed to be the first | ||
168 | * client of the TRIO's PIO regions. | ||
169 | */ | ||
170 | ret = gxio_trio_alloc_pio_regions(context, 1, 0, 0); | ||
171 | if (ret < 0) { | ||
172 | pr_err("PCI: CFG PIO alloc failure on TRIO %d, give up\n", | ||
173 | trio_index); | ||
174 | goto pio_alloc_failure; | ||
175 | } | ||
176 | |||
177 | context->pio_cfg_index = ret; | ||
178 | |||
179 | /* | ||
180 | * For PIO CFG, the bus_address_hi parameter is 0. The mac parameter | ||
181 | * is also 0 because it is specified in PIO_REGION_SETUP_CFG_ADDR. | ||
182 | */ | ||
183 | ret = gxio_trio_init_pio_region_aux(context, context->pio_cfg_index, | ||
184 | 0, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE); | ||
185 | if (ret < 0) { | ||
186 | pr_err("PCI: CFG PIO init failure on TRIO %d, give up\n", | ||
187 | trio_index); | ||
188 | goto pio_alloc_failure; | ||
189 | } | ||
190 | #endif | ||
191 | |||
192 | return ret; | ||
193 | |||
194 | asid_alloc_failure: | ||
195 | #ifdef USE_SHARED_PCIE_CONFIG_REGION | ||
196 | pio_alloc_failure: | ||
197 | #endif | ||
198 | hv_dev_close(context->fd); | ||
199 | |||
200 | return ret; | ||
201 | } | ||
202 | |||
203 | static void | ||
204 | tilegx_legacy_irq_ack(struct irq_data *d) | ||
205 | { | ||
206 | __insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq); | ||
207 | } | ||
208 | |||
209 | static void | ||
210 | tilegx_legacy_irq_mask(struct irq_data *d) | ||
211 | { | ||
212 | __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq); | ||
213 | } | ||
214 | |||
215 | static void | ||
216 | tilegx_legacy_irq_unmask(struct irq_data *d) | ||
217 | { | ||
218 | __insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq); | ||
219 | } | ||
220 | |||
221 | static struct irq_chip tilegx_legacy_irq_chip = { | ||
222 | .name = "tilegx_legacy_irq", | ||
223 | .irq_ack = tilegx_legacy_irq_ack, | ||
224 | .irq_mask = tilegx_legacy_irq_mask, | ||
225 | .irq_unmask = tilegx_legacy_irq_unmask, | ||
226 | |||
227 | /* TBD: support set_affinity. */ | ||
228 | }; | ||
229 | |||
230 | /* | ||
231 | * This is a wrapper function of the kernel level-trigger interrupt | ||
232 | * handler handle_level_irq() for PCI legacy interrupts. The TRIO | ||
233 | * is configured such that only INTx Assert interrupts are proxied | ||
234 | * to Linux which just calls handle_level_irq() after clearing the | ||
235 | * MAC INTx Assert status bit associated with this interrupt. | ||
236 | */ | ||
237 | static void | ||
238 | trio_handle_level_irq(unsigned int irq, struct irq_desc *desc) | ||
239 | { | ||
240 | struct pci_controller *controller = irq_desc_get_handler_data(desc); | ||
241 | gxio_trio_context_t *trio_context = controller->trio; | ||
242 | uint64_t intx = (uint64_t)irq_desc_get_chip_data(desc); | ||
243 | int mac = controller->mac; | ||
244 | unsigned int reg_offset; | ||
245 | uint64_t level_mask; | ||
246 | |||
247 | handle_level_irq(irq, desc); | ||
248 | |||
249 | /* | ||
250 | * Clear the INTx Level status, otherwise future interrupts are | ||
251 | * not sent. | ||
252 | */ | ||
253 | reg_offset = (TRIO_PCIE_INTFC_MAC_INT_STS << | ||
254 | TRIO_CFG_REGION_ADDR__REG_SHIFT) | | ||
255 | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE << | ||
256 | TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | | ||
257 | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); | ||
258 | |||
259 | level_mask = TRIO_PCIE_INTFC_MAC_INT_STS__INT_LEVEL_MASK << intx; | ||
260 | |||
261 | __gxio_mmio_write(trio_context->mmio_base_mac + reg_offset, level_mask); | ||
262 | } | ||
263 | |||
264 | /* | ||
265 | * Create kernel irqs and set up the handlers for the legacy interrupts. | ||
266 | * Also some minimum initialization for the MSI support. | ||
267 | */ | ||
268 | static int __devinit tile_init_irqs(struct pci_controller *controller) | ||
269 | { | ||
270 | int i; | ||
271 | int j; | ||
272 | int irq; | ||
273 | int result; | ||
274 | |||
275 | cpumask_copy(&intr_cpus_map, cpu_online_mask); | ||
276 | |||
277 | |||
278 | for (i = 0; i < 4; i++) { | ||
279 | gxio_trio_context_t *context = controller->trio; | ||
280 | int cpu; | ||
281 | |||
282 | /* Ask the kernel to allocate an IRQ. */ | ||
283 | irq = create_irq(); | ||
284 | if (irq < 0) { | ||
285 | pr_err("PCI: no free irq vectors, failed for %d\n", i); | ||
286 | |||
287 | goto free_irqs; | ||
288 | } | ||
289 | controller->irq_intx_table[i] = irq; | ||
290 | |||
291 | /* Distribute the 4 IRQs to different tiles. */ | ||
292 | cpu = tile_irq_cpu(irq); | ||
293 | |||
294 | /* Configure the TRIO intr binding for this IRQ. */ | ||
295 | result = gxio_trio_config_legacy_intr(context, cpu_x(cpu), | ||
296 | cpu_y(cpu), KERNEL_PL, | ||
297 | irq, controller->mac, i); | ||
298 | if (result < 0) { | ||
299 | pr_err("PCI: MAC intx config failed for %d\n", i); | ||
300 | |||
301 | goto free_irqs; | ||
302 | } | ||
303 | |||
304 | /* | ||
305 | * Register the IRQ handler with the kernel. | ||
306 | */ | ||
307 | irq_set_chip_and_handler(irq, &tilegx_legacy_irq_chip, | ||
308 | trio_handle_level_irq); | ||
309 | irq_set_chip_data(irq, (void *)(uint64_t)i); | ||
310 | irq_set_handler_data(irq, controller); | ||
311 | } | ||
312 | |||
313 | return 0; | ||
314 | |||
315 | free_irqs: | ||
316 | for (j = 0; j < i; j++) | ||
317 | destroy_irq(controller->irq_intx_table[j]); | ||
318 | |||
319 | return -1; | ||
320 | } | ||
321 | |||
322 | /* | ||
323 | * Find valid controllers and fill in pci_controller structs for each | ||
324 | * of them. | ||
325 | * | ||
326 | * Returns the number of controllers discovered. | ||
327 | */ | ||
328 | int __init tile_pci_init(void) | ||
329 | { | ||
330 | int num_trio_shims = 0; | ||
331 | int ctl_index = 0; | ||
332 | int i, j; | ||
333 | |||
334 | if (!pci_probe) { | ||
335 | pr_info("PCI: disabled by boot argument\n"); | ||
336 | return 0; | ||
337 | } | ||
338 | |||
339 | pr_info("PCI: Searching for controllers...\n"); | ||
340 | |||
341 | /* | ||
342 | * We loop over all the TRIO shims. | ||
343 | */ | ||
344 | for (i = 0; i < TILEGX_NUM_TRIO; i++) { | ||
345 | int ret; | ||
346 | |||
347 | ret = tile_pcie_open(i); | ||
348 | if (ret < 0) | ||
349 | continue; | ||
350 | |||
351 | num_trio_shims++; | ||
352 | } | ||
353 | |||
354 | if (num_trio_shims == 0 || sim_is_simulator()) | ||
355 | return 0; | ||
356 | |||
357 | /* | ||
358 | * Now determine which PCIe ports are configured to operate in RC mode. | ||
359 | * We look at the Board Information Block first and then check | ||
360 | * whether the HW strapping pins override that configuration. | ||
361 | */ | ||
362 | for (i = 0; i < TILEGX_NUM_TRIO; i++) { | ||
363 | gxio_trio_context_t *context = &trio_contexts[i]; | ||
364 | int ret; | ||
365 | |||
366 | if (context->fd < 0) | ||
367 | continue; | ||
368 | |||
369 | ret = hv_dev_pread(context->fd, 0, | ||
370 | (HV_VirtAddr)&pcie_ports[i][0], | ||
371 | sizeof(struct pcie_port_property) * TILEGX_TRIO_PCIES, | ||
372 | GXIO_TRIO_OP_GET_PORT_PROPERTY); | ||
373 | if (ret < 0) { | ||
374 | pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d," | ||
375 | " on TRIO %d\n", ret, i); | ||
376 | continue; | ||
377 | } | ||
378 | |||
379 | for (j = 0; j < TILEGX_TRIO_PCIES; j++) { | ||
380 | if (pcie_ports[i][j].allow_rc) { | ||
381 | pcie_rc[i][j] = 1; | ||
382 | num_rc_controllers++; | ||
383 | } | ||
384 | else if (pcie_ports[i][j].allow_ep) { | ||
385 | num_ep_controllers++; | ||
386 | } | ||
387 | } | ||
388 | } | ||
389 | |||
390 | /* | ||
391 | * Return if no PCIe ports are configured to operate in RC mode. | ||
392 | */ | ||
393 | if (num_rc_controllers == 0) | ||
394 | return 0; | ||
395 | |||
396 | /* | ||
397 | * Set the TRIO pointer and MAC index for each PCIe RC port. | ||
398 | */ | ||
399 | for (i = 0; i < TILEGX_NUM_TRIO; i++) { | ||
400 | for (j = 0; j < TILEGX_TRIO_PCIES; j++) { | ||
401 | if (pcie_rc[i][j]) { | ||
402 | pci_controllers[ctl_index].trio = | ||
403 | &trio_contexts[i]; | ||
404 | pci_controllers[ctl_index].mac = j; | ||
405 | pci_controllers[ctl_index].trio_index = i; | ||
406 | ctl_index++; | ||
407 | if (ctl_index == num_rc_controllers) | ||
408 | goto out; | ||
409 | } | ||
410 | } | ||
411 | } | ||
412 | |||
413 | out: | ||
414 | /* | ||
415 | * Configure each PCIe RC port. | ||
416 | */ | ||
417 | for (i = 0; i < num_rc_controllers; i++) { | ||
418 | /* | ||
419 | * Configure the PCIe MAC to run in RC mode. | ||
420 | */ | ||
421 | |||
422 | struct pci_controller *controller = &pci_controllers[i]; | ||
423 | |||
424 | controller->index = i; | ||
425 | controller->ops = &tile_cfg_ops; | ||
426 | |||
427 | /* | ||
428 | * The PCI memory resource is located above the PA space. | ||
429 | * For every host bridge, the BAR window or the MMIO aperture | ||
430 | * is in range [3GB, 4GB - 1] of a 4GB space beyond the | ||
431 | * PA space. | ||
432 | */ | ||
433 | |||
434 | controller->mem_offset = TILE_PCI_MEM_START + | ||
435 | (i * TILE_PCI_BAR_WINDOW_TOP); | ||
436 | controller->mem_space.start = controller->mem_offset + | ||
437 | TILE_PCI_BAR_WINDOW_TOP - TILE_PCI_BAR_WINDOW_SIZE; | ||
438 | controller->mem_space.end = controller->mem_offset + | ||
439 | TILE_PCI_BAR_WINDOW_TOP - 1; | ||
440 | controller->mem_space.flags = IORESOURCE_MEM; | ||
441 | snprintf(controller->mem_space_name, | ||
442 | sizeof(controller->mem_space_name), | ||
443 | "PCI mem domain %d", i); | ||
444 | controller->mem_space.name = controller->mem_space_name; | ||
445 | } | ||
446 | |||
447 | return num_rc_controllers; | ||
448 | } | ||
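Worked example (inferred from the comments above, not restated from pci.h): for controller i the code sets

	mem_offset      = TILE_PCI_MEM_START + i * TILE_PCI_BAR_WINDOW_TOP
	mem_space.start = mem_offset + TILE_PCI_BAR_WINDOW_TOP - TILE_PCI_BAR_WINDOW_SIZE
	mem_space.end   = mem_offset + TILE_PCI_BAR_WINDOW_TOP - 1

so with the 4GB-per-controller slot and the [3GB, 4GB - 1] aperture described above (i.e. a 1GB window), controller 0 gets [TILE_PCI_MEM_START + 3GB, TILE_PCI_MEM_START + 4GB - 1], controller 1 the same range shifted up by another 4GB, and so on.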
449 | |||
450 | /* | ||
451 | * (pin - 1) converts from the PCI standard's [1:4] convention to | ||
452 | * a normal [0:3] range. | ||
453 | */ | ||
454 | static int tile_map_irq(const struct pci_dev *dev, u8 device, u8 pin) | ||
455 | { | ||
456 | struct pci_controller *controller = | ||
457 | (struct pci_controller *)dev->sysdata; | ||
458 | return controller->irq_intx_table[pin - 1]; | ||
459 | } | ||
460 | |||
461 | |||
462 | static void __devinit fixup_read_and_payload_sizes(struct pci_controller * | ||
463 | controller) | ||
464 | { | ||
465 | gxio_trio_context_t *trio_context = controller->trio; | ||
466 | struct pci_bus *root_bus = controller->root_bus; | ||
467 | TRIO_PCIE_RC_DEVICE_CONTROL_t dev_control; | ||
468 | TRIO_PCIE_RC_DEVICE_CAP_t rc_dev_cap; | ||
469 | unsigned int reg_offset; | ||
470 | struct pci_bus *child; | ||
471 | int mac; | ||
472 | int err; | ||
473 | |||
474 | mac = controller->mac; | ||
475 | |||
476 | /* | ||
477 | * Set our max read request size to be 4KB. | ||
478 | */ | ||
479 | reg_offset = | ||
480 | (TRIO_PCIE_RC_DEVICE_CONTROL << | ||
481 | TRIO_CFG_REGION_ADDR__REG_SHIFT) | | ||
482 | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD << | ||
483 | TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | | ||
484 | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); | ||
485 | |||
486 | dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac + | ||
487 | reg_offset); | ||
488 | dev_control.max_read_req_sz = 5; | ||
489 | __gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset, | ||
490 | dev_control.word); | ||
491 | |||
492 | /* | ||
493 | * Set the max payload size supported by this Gx PCIe MAC. | ||
494 | * Though Gx PCIe supports a Max Payload Size of up to 1024 bytes, | ||
495 | * experiments have shown that setting MPS to 256 yields the | ||
496 | * best performance. | ||
497 | */ | ||
498 | reg_offset = | ||
499 | (TRIO_PCIE_RC_DEVICE_CAP << | ||
500 | TRIO_CFG_REGION_ADDR__REG_SHIFT) | | ||
501 | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD << | ||
502 | TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | | ||
503 | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); | ||
504 | |||
505 | rc_dev_cap.word = __gxio_mmio_read32(trio_context->mmio_base_mac + | ||
506 | reg_offset); | ||
507 | rc_dev_cap.mps_sup = 1; | ||
508 | __gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset, | ||
509 | rc_dev_cap.word); | ||
510 | |||
511 | /* Configure PCI Express MPS setting. */ | ||
512 | list_for_each_entry(child, &root_bus->children, node) { | ||
513 | struct pci_dev *self = child->self; | ||
514 | if (!self) | ||
515 | continue; | ||
516 | |||
517 | pcie_bus_configure_settings(child, self->pcie_mpss); | ||
518 | } | ||
519 | |||
520 | /* | ||
521 | * Set the mac_config register in trio based on the MPS/MRS of the link. | ||
522 | */ | ||
523 | reg_offset = | ||
524 | (TRIO_PCIE_RC_DEVICE_CONTROL << | ||
525 | TRIO_CFG_REGION_ADDR__REG_SHIFT) | | ||
526 | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD << | ||
527 | TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | | ||
528 | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); | ||
529 | |||
530 | dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac + | ||
531 | reg_offset); | ||
532 | |||
533 | err = gxio_trio_set_mps_mrs(trio_context, | ||
534 | dev_control.max_payload_size, | ||
535 | dev_control.max_read_req_sz, | ||
536 | mac); | ||
537 | if (err < 0) { | ||
538 | pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, " | ||
539 | "MAC %d on TRIO %d\n", | ||
540 | mac, controller->trio_index); | ||
541 | } | ||
542 | } | ||
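For reference, the raw field values written above follow the standard PCIe encoding of 128 bytes shifted left by the field value, which is how they match the sizes named in the comments:

	max_read_req_sz = 5  ->  128 << 5 = 4096-byte max read request size
	mps_sup         = 1  ->  128 << 1 =  256-byte max payload size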
543 | |||
544 | static int __devinit setup_pcie_rc_delay(char *str) | ||
545 | { | ||
546 | unsigned long delay = 0; | ||
547 | unsigned long trio_index; | ||
548 | unsigned long mac; | ||
549 | |||
550 | if (str == NULL || !isdigit(*str)) | ||
551 | return -EINVAL; | ||
552 | trio_index = simple_strtoul(str, (char **)&str, 10); | ||
553 | if (trio_index >= TILEGX_NUM_TRIO) | ||
554 | return -EINVAL; | ||
555 | |||
556 | if (*str != ',') | ||
557 | return -EINVAL; | ||
558 | |||
559 | str++; | ||
560 | if (!isdigit(*str)) | ||
561 | return -EINVAL; | ||
562 | mac = simple_strtoul(str, (char **)&str, 10); | ||
563 | if (mac >= TILEGX_TRIO_PCIES) | ||
564 | return -EINVAL; | ||
565 | |||
566 | if (*str != '\0') { | ||
567 | if (*str != ',') | ||
568 | return -EINVAL; | ||
569 | |||
570 | str++; | ||
571 | if (!isdigit(*str)) | ||
572 | return -EINVAL; | ||
573 | delay = simple_strtoul(str, (char **)&str, 10); | ||
574 | if (delay > MAX_RC_DELAY) | ||
575 | return -EINVAL; | ||
576 | } | ||
577 | |||
578 | rc_delay[trio_index][mac] = delay ? : DEFAULT_RC_DELAY; | ||
579 | pr_info("Delaying PCIe RC link training for %u sec" | ||
580 | " on MAC %lu on TRIO %lu\n", rc_delay[trio_index][mac], | ||
581 | mac, trio_index); | ||
582 | return 0; | ||
583 | } | ||
584 | early_param("pcie_rc_delay", setup_pcie_rc_delay); | ||
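Usage example for the boot argument parsed above: "pcie_rc_delay=0,2,15" delays RC link training on TRIO 0, MAC 2 by 15 seconds, while "pcie_rc_delay=0,2" omits the delay field and falls back to DEFAULT_RC_DELAY (10 seconds).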
585 | |||
586 | /* | ||
587 | * PCI initialization entry point, called by subsys_initcall. | ||
588 | */ | ||
589 | int __init pcibios_init(void) | ||
590 | { | ||
591 | resource_size_t offset; | ||
592 | LIST_HEAD(resources); | ||
593 | int next_busno; | ||
594 | int i; | ||
595 | |||
596 | tile_pci_init(); | ||
597 | |||
598 | if (num_rc_controllers == 0 && num_ep_controllers == 0) | ||
599 | return 0; | ||
600 | |||
601 | /* | ||
602 | * We loop over all the TRIO shims and set up the MMIO mappings. | ||
603 | */ | ||
604 | for (i = 0; i < TILEGX_NUM_TRIO; i++) { | ||
605 | gxio_trio_context_t *context = &trio_contexts[i]; | ||
606 | |||
607 | if (context->fd < 0) | ||
608 | continue; | ||
609 | |||
610 | /* | ||
611 | * Map in the MMIO space for the MAC. | ||
612 | */ | ||
613 | offset = 0; | ||
614 | context->mmio_base_mac = | ||
615 | iorpc_ioremap(context->fd, offset, | ||
616 | HV_TRIO_CONFIG_IOREMAP_SIZE); | ||
617 | if (context->mmio_base_mac == NULL) { | ||
618 | pr_err("PCI: MAC map failure on TRIO %d\n", i); | ||
619 | |||
620 | hv_dev_close(context->fd); | ||
621 | context->fd = -1; | ||
622 | continue; | ||
623 | } | ||
624 | } | ||
625 | |||
626 | /* | ||
627 | * Delay a bit in case devices aren't ready. Some devices are | ||
628 | * known to require at least 20ms here, but we use a more | ||
629 | * conservative value. | ||
630 | */ | ||
631 | msleep(250); | ||
632 | |||
633 | /* Scan all of the recorded PCI controllers. */ | ||
634 | for (next_busno = 0, i = 0; i < num_rc_controllers; i++) { | ||
635 | struct pci_controller *controller = &pci_controllers[i]; | ||
636 | gxio_trio_context_t *trio_context = controller->trio; | ||
637 | TRIO_PCIE_INTFC_PORT_CONFIG_t port_config; | ||
638 | TRIO_PCIE_INTFC_PORT_STATUS_t port_status; | ||
639 | TRIO_PCIE_INTFC_TX_FIFO_CTL_t tx_fifo_ctl; | ||
640 | struct pci_bus *bus; | ||
641 | unsigned int reg_offset; | ||
642 | unsigned int class_code_revision; | ||
643 | int trio_index; | ||
644 | int mac; | ||
645 | int ret; | ||
646 | |||
647 | if (trio_context->fd < 0) | ||
648 | continue; | ||
649 | |||
650 | trio_index = controller->trio_index; | ||
651 | mac = controller->mac; | ||
652 | |||
653 | /* | ||
654 | * Check the port strap state which will override the BIB | ||
655 | * setting. | ||
656 | */ | ||
657 | |||
658 | reg_offset = | ||
659 | (TRIO_PCIE_INTFC_PORT_CONFIG << | ||
660 | TRIO_CFG_REGION_ADDR__REG_SHIFT) | | ||
661 | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE << | ||
662 | TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | | ||
663 | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); | ||
664 | |||
665 | port_config.word = | ||
666 | __gxio_mmio_read(trio_context->mmio_base_mac + | ||
667 | reg_offset); | ||
668 | |||
669 | if ((port_config.strap_state != | ||
670 | TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC) && | ||
671 | (port_config.strap_state != | ||
672 | TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC_G1)) { | ||
673 | /* | ||
674 | * If this is really intended to be an EP port, | ||
675 | * record it so that the endpoint driver will know about it. | ||
676 | */ | ||
677 | if (port_config.strap_state == | ||
678 | TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT || | ||
679 | port_config.strap_state == | ||
680 | TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT_G1) | ||
681 | pcie_ports[trio_index][mac].allow_ep = 1; | ||
682 | |||
683 | continue; | ||
684 | } | ||
685 | |||
686 | /* | ||
687 | * Delay the RC link training if needed. | ||
688 | */ | ||
689 | if (rc_delay[trio_index][mac]) | ||
690 | msleep(rc_delay[trio_index][mac] * 1000); | ||
691 | |||
692 | ret = gxio_trio_force_rc_link_up(trio_context, mac); | ||
693 | if (ret < 0) | ||
694 | pr_err("PCI: PCIE_FORCE_LINK_UP failure, " | ||
695 | "MAC %d on TRIO %d\n", mac, trio_index); | ||
696 | |||
697 | pr_info("PCI: Found PCI controller #%d on TRIO %d MAC %d\n", i, | ||
698 | trio_index, controller->mac); | ||
699 | |||
700 | /* | ||
701 | * Wait a bit here because some EP devices take longer | ||
702 | * to come up. | ||
703 | */ | ||
704 | msleep(1000); | ||
705 | |||
706 | /* | ||
707 | * Check for PCIe link-up status. | ||
708 | */ | ||
709 | |||
710 | reg_offset = | ||
711 | (TRIO_PCIE_INTFC_PORT_STATUS << | ||
712 | TRIO_CFG_REGION_ADDR__REG_SHIFT) | | ||
713 | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE << | ||
714 | TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | | ||
715 | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); | ||
716 | |||
717 | port_status.word = | ||
718 | __gxio_mmio_read(trio_context->mmio_base_mac + | ||
719 | reg_offset); | ||
720 | if (!port_status.dl_up) { | ||
721 | pr_err("PCI: link is down, MAC %d on TRIO %d\n", | ||
722 | mac, trio_index); | ||
723 | continue; | ||
724 | } | ||
725 | |||
726 | /* | ||
727 | * Ensure that the link can come out of L1 power down state. | ||
728 | * Strictly speaking, this is needed only in the case of | ||
729 | * heavy RC-initiated DMAs. | ||
730 | */ | ||
731 | reg_offset = | ||
732 | (TRIO_PCIE_INTFC_TX_FIFO_CTL << | ||
733 | TRIO_CFG_REGION_ADDR__REG_SHIFT) | | ||
734 | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE << | ||
735 | TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | | ||
736 | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); | ||
737 | tx_fifo_ctl.word = | ||
738 | __gxio_mmio_read(trio_context->mmio_base_mac + | ||
739 | reg_offset); | ||
740 | tx_fifo_ctl.min_p_credits = 0; | ||
741 | __gxio_mmio_write(trio_context->mmio_base_mac + reg_offset, | ||
742 | tx_fifo_ctl.word); | ||
743 | |||
744 | /* | ||
745 | * Change the device ID so that the Linux bus crawl doesn't confuse | ||
746 | * the internal bridge with any Tilera endpoints. | ||
747 | */ | ||
748 | |||
749 | reg_offset = | ||
750 | (TRIO_PCIE_RC_DEVICE_ID_VEN_ID << | ||
751 | TRIO_CFG_REGION_ADDR__REG_SHIFT) | | ||
752 | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD << | ||
753 | TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | | ||
754 | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); | ||
755 | |||
756 | __gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset, | ||
757 | (TILERA_GX36_RC_DEV_ID << | ||
758 | TRIO_PCIE_RC_DEVICE_ID_VEN_ID__DEV_ID_SHIFT) | | ||
759 | TILERA_VENDOR_ID); | ||
760 | |||
761 | /* | ||
762 | * Set the internal P2P bridge class code. | ||
763 | */ | ||
764 | |||
765 | reg_offset = | ||
766 | (TRIO_PCIE_RC_REVISION_ID << | ||
767 | TRIO_CFG_REGION_ADDR__REG_SHIFT) | | ||
768 | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD << | ||
769 | TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | | ||
770 | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); | ||
771 | |||
772 | class_code_revision = | ||
773 | __gxio_mmio_read32(trio_context->mmio_base_mac + | ||
774 | reg_offset); | ||
775 | class_code_revision = (class_code_revision & 0xff ) | | ||
776 | (PCI_CLASS_BRIDGE_PCI << 16); | ||
777 | |||
778 | __gxio_mmio_write32(trio_context->mmio_base_mac + | ||
779 | reg_offset, class_code_revision); | ||
780 | |||
781 | #ifdef USE_SHARED_PCIE_CONFIG_REGION | ||
782 | |||
783 | /* | ||
784 | * Map in the MMIO space for the PIO region. | ||
785 | */ | ||
786 | offset = HV_TRIO_PIO_OFFSET(trio_context->pio_cfg_index) | | ||
787 | (((unsigned long long)mac) << | ||
788 | TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT); | ||
789 | |||
790 | #else | ||
791 | |||
792 | /* | ||
793 | * Alloc a PIO region for PCI config access per MAC. | ||
794 | */ | ||
795 | ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0); | ||
796 | if (ret < 0) { | ||
797 | pr_err("PCI: PCI CFG PIO alloc failure for mac %d " | ||
798 | "on TRIO %d, give up\n", mac, trio_index); | ||
799 | |||
800 | continue; | ||
801 | } | ||
802 | |||
803 | trio_context->pio_cfg_index[mac] = ret; | ||
804 | |||
805 | /* | ||
806 | * For PIO CFG, the bus_address_hi parameter is 0. | ||
807 | */ | ||
808 | ret = gxio_trio_init_pio_region_aux(trio_context, | ||
809 | trio_context->pio_cfg_index[mac], | ||
810 | mac, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE); | ||
811 | if (ret < 0) { | ||
812 | pr_err("PCI: PCI CFG PIO init failure for mac %d " | ||
813 | "on TRIO %d, give up\n", mac, trio_index); | ||
814 | |||
815 | continue; | ||
816 | } | ||
817 | |||
818 | offset = HV_TRIO_PIO_OFFSET(trio_context->pio_cfg_index[mac]) | | ||
819 | (((unsigned long long)mac) << | ||
820 | TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT); | ||
821 | |||
822 | #endif | ||
823 | |||
824 | trio_context->mmio_base_pio_cfg[mac] = | ||
825 | iorpc_ioremap(trio_context->fd, offset, | ||
826 | (1 << TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT)); | ||
827 | if (trio_context->mmio_base_pio_cfg[mac] == NULL) { | ||
828 | pr_err("PCI: PIO map failure for mac %d on TRIO %d\n", | ||
829 | mac, trio_index); | ||
830 | |||
831 | continue; | ||
832 | } | ||
833 | |||
834 | /* | ||
835 | * Initialize the PCIe interrupts. | ||
836 | */ | ||
837 | if (tile_init_irqs(controller)) { | ||
838 | pr_err("PCI: IRQs init failure for mac %d on TRIO %d\n", | ||
839 | mac, trio_index); | ||
840 | |||
841 | continue; | ||
842 | } | ||
843 | |||
844 | /* | ||
845 | * The PCI memory resource is located above the PA space. | ||
846 | * The memory range for the PCI root bus should not overlap | ||
847 | * with the physical RAM. | ||
848 | */ | ||
849 | pci_add_resource_offset(&resources, &controller->mem_space, | ||
850 | controller->mem_offset); | ||
851 | |||
852 | controller->first_busno = next_busno; | ||
853 | bus = pci_scan_root_bus(NULL, next_busno, controller->ops, | ||
854 | controller, &resources); | ||
855 | controller->root_bus = bus; | ||
856 | next_busno = bus->subordinate + 1; | ||
857 | |||
858 | } | ||
859 | |||
860 | /* Do machine dependent PCI interrupt routing */ | ||
861 | pci_fixup_irqs(pci_common_swizzle, tile_map_irq); | ||
862 | |||
863 | /* | ||
864 | * This comes from the generic Linux PCI driver. | ||
865 | * | ||
866 | * It allocates all of the resources (I/O memory, etc) | ||
867 | * associated with the devices read in above. | ||
868 | */ | ||
869 | |||
870 | pci_assign_unassigned_resources(); | ||
871 | |||
872 | /* Record the I/O resources in the PCI controller structure. */ | ||
873 | for (i = 0; i < num_rc_controllers; i++) { | ||
874 | struct pci_controller *controller = &pci_controllers[i]; | ||
875 | gxio_trio_context_t *trio_context = controller->trio; | ||
876 | struct pci_bus *root_bus = pci_controllers[i].root_bus; | ||
877 | struct pci_bus *next_bus; | ||
878 | uint32_t bus_address_hi; | ||
879 | struct pci_dev *dev; | ||
880 | int ret; | ||
881 | int j; | ||
882 | |||
883 | /* | ||
884 | * Skip controllers that are not properly initialized or | ||
885 | * have down links. | ||
886 | */ | ||
887 | if (root_bus == NULL) | ||
888 | continue; | ||
889 | |||
890 | /* Configure the max_payload_size values for this domain. */ | ||
891 | fixup_read_and_payload_sizes(controller); | ||
892 | |||
893 | list_for_each_entry(dev, &root_bus->devices, bus_list) { | ||
894 | /* Find the PCI host controller, i.e. the 1st bridge. */ | ||
895 | if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && | ||
896 | (PCI_SLOT(dev->devfn) == 0)) { | ||
897 | next_bus = dev->subordinate; | ||
898 | pci_controllers[i].mem_resources[0] = | ||
899 | *next_bus->resource[0]; | ||
900 | pci_controllers[i].mem_resources[1] = | ||
901 | *next_bus->resource[1]; | ||
902 | pci_controllers[i].mem_resources[2] = | ||
903 | *next_bus->resource[2]; | ||
904 | |||
905 | break; | ||
906 | } | ||
907 | } | ||
908 | |||
909 | if (pci_controllers[i].mem_resources[1].flags & IORESOURCE_MEM) | ||
910 | bus_address_hi = | ||
911 | pci_controllers[i].mem_resources[1].start >> 32; | ||
912 | else if (pci_controllers[i].mem_resources[2].flags & IORESOURCE_PREFETCH) | ||
913 | bus_address_hi = | ||
914 | pci_controllers[i].mem_resources[2].start >> 32; | ||
915 | else { | ||
916 | /* This is unlikely. */ | ||
917 | pr_err("PCI: no memory resources on TRIO %d mac %d\n", | ||
918 | controller->trio_index, controller->mac); | ||
919 | continue; | ||
920 | } | ||
921 | |||
922 | /* | ||
923 | * Alloc a PIO region for PCI memory access for each RC port. | ||
924 | */ | ||
925 | ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0); | ||
926 | if (ret < 0) { | ||
927 | pr_err("PCI: MEM PIO alloc failure on TRIO %d mac %d, " | ||
928 | "give up\n", controller->trio_index, | ||
929 | controller->mac); | ||
930 | |||
931 | continue; | ||
932 | } | ||
933 | |||
934 | controller->pio_mem_index = ret; | ||
935 | |||
936 | /* | ||
937 | * For PIO MEM, the bus_address_hi parameter is hard-coded to 0 | ||
938 | * because we always assign 32-bit PCI bus BAR ranges. | ||
939 | */ | ||
940 | ret = gxio_trio_init_pio_region_aux(trio_context, | ||
941 | controller->pio_mem_index, | ||
942 | controller->mac, | ||
943 | 0, | ||
944 | 0); | ||
945 | if (ret < 0) { | ||
946 | pr_err("PCI: MEM PIO init failure on TRIO %d mac %d, " | ||
947 | "give up\n", controller->trio_index, | ||
948 | controller->mac); | ||
949 | |||
950 | continue; | ||
951 | } | ||
952 | |||
953 | /* | ||
954 | * Configure a Mem-Map region for each memory controller so | ||
955 | * that Linux can map all of its PA space to the PCI bus. | ||
956 | * Use the IOMMU to handle hash-for-home memory. | ||
957 | */ | ||
958 | for_each_online_node(j) { | ||
959 | unsigned long start_pfn = node_start_pfn[j]; | ||
960 | unsigned long end_pfn = node_end_pfn[j]; | ||
961 | unsigned long nr_pages = end_pfn - start_pfn; | ||
962 | |||
963 | ret = gxio_trio_alloc_memory_maps(trio_context, 1, 0, | ||
964 | 0); | ||
965 | if (ret < 0) { | ||
966 | pr_err("PCI: Mem-Map alloc failure on TRIO %d " | ||
967 | "mac %d for MC %d, give up\n", | ||
968 | controller->trio_index, | ||
969 | controller->mac, j); | ||
970 | |||
971 | goto alloc_mem_map_failed; | ||
972 | } | ||
973 | |||
974 | controller->mem_maps[j] = ret; | ||
975 | |||
976 | /* | ||
977 | * Initialize the Mem-Map and the I/O MMU so that all | ||
978 | * the physical memory can be accessed by the endpoint | ||
979 | * devices. The base bus address is set to the base CPA | ||
980 | * of this memory controller plus an offset (see pci.h). | ||
981 | * The region's base VA is set to the base CPA. The | ||
982 | * I/O MMU table essentially translates the CPA to | ||
983 | * the real PA. Implicitly, for node 0, we create | ||
984 | * a separate Mem-Map region that serves as the inbound | ||
985 | * window for legacy 32-bit devices. This is a direct | ||
986 | * map of the low 4GB CPA space. | ||
987 | */ | ||
988 | ret = gxio_trio_init_memory_map_mmu_aux(trio_context, | ||
989 | controller->mem_maps[j], | ||
990 | start_pfn << PAGE_SHIFT, | ||
991 | nr_pages << PAGE_SHIFT, | ||
992 | trio_context->asid, | ||
993 | controller->mac, | ||
994 | (start_pfn << PAGE_SHIFT) + | ||
995 | TILE_PCI_MEM_MAP_BASE_OFFSET, | ||
996 | j, | ||
997 | GXIO_TRIO_ORDER_MODE_UNORDERED); | ||
998 | if (ret < 0) { | ||
999 | pr_err("PCI: Mem-Map init failure on TRIO %d " | ||
1000 | "mac %d for MC %d, give up\n", | ||
1001 | controller->trio_index, | ||
1002 | controller->mac, j); | ||
1003 | |||
1004 | goto alloc_mem_map_failed; | ||
1005 | } | ||
1006 | continue; | ||
1007 | |||
1008 | alloc_mem_map_failed: | ||
1009 | break; | ||
1010 | } | ||
1011 | |||
1012 | } | ||
1013 | |||
1014 | return 0; | ||
1015 | } | ||
1016 | subsys_initcall(pcibios_init); | ||
1017 | |||
1018 | /* Note: to be deleted after Linux 3.6 merge. */ | ||
1019 | void __devinit pcibios_fixup_bus(struct pci_bus *bus) | ||
1020 | { | ||
1021 | } | ||
1022 | |||
1023 | /* | ||
1024 | * This can be called from the generic PCI layer, but doesn't need to | ||
1025 | * do anything. | ||
1026 | */ | ||
1027 | char __devinit *pcibios_setup(char *str) | ||
1028 | { | ||
1029 | if (!strcmp(str, "off")) { | ||
1030 | pci_probe = 0; | ||
1031 | return NULL; | ||
1032 | } | ||
1033 | return str; | ||
1034 | } | ||
1035 | |||
1036 | /* | ||
1037 | * This is called from the generic Linux layer. | ||
1038 | */ | ||
1039 | void __devinit pcibios_update_irq(struct pci_dev *dev, int irq) | ||
1040 | { | ||
1041 | pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); | ||
1042 | } | ||
1043 | |||
1044 | /* | ||
1045 | * Enable memory address decoding, as appropriate, for the | ||
1046 | * device described by the 'dev' struct. The I/O decoding | ||
1047 | * is disabled, though the TILE-Gx supports I/O addressing. | ||
1048 | * | ||
1049 | * This is called from the generic PCI layer, and can be called | ||
1050 | * for bridges or endpoints. | ||
1051 | */ | ||
1052 | int pcibios_enable_device(struct pci_dev *dev, int mask) | ||
1053 | { | ||
1054 | return pci_enable_resources(dev, mask); | ||
1055 | } | ||
1056 | |||
1057 | /* Called for each device after PCI setup is done. */ | ||
1058 | static void __init | ||
1059 | pcibios_fixup_final(struct pci_dev *pdev) | ||
1060 | { | ||
1061 | set_dma_ops(&pdev->dev, gx_pci_dma_map_ops); | ||
1062 | set_dma_offset(&pdev->dev, TILE_PCI_MEM_MAP_BASE_OFFSET); | ||
1063 | pdev->dev.archdata.max_direct_dma_addr = | ||
1064 | TILE_PCI_MAX_DIRECT_DMA_ADDRESS; | ||
1065 | } | ||
1066 | DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final); | ||
1067 | |||
1068 | /* Map a PCI MMIO bus address into VA space. */ | ||
1069 | void __iomem *ioremap(resource_size_t phys_addr, unsigned long size) | ||
1070 | { | ||
1071 | struct pci_controller *controller = NULL; | ||
1072 | resource_size_t bar_start; | ||
1073 | resource_size_t bar_end; | ||
1074 | resource_size_t offset; | ||
1075 | resource_size_t start; | ||
1076 | resource_size_t end; | ||
1077 | int trio_fd; | ||
1078 | int i, j; | ||
1079 | |||
1080 | start = phys_addr; | ||
1081 | end = phys_addr + size - 1; | ||
1082 | |||
1083 | /* | ||
1084 | * In the following, each PCI controller's mem_resources[1] | ||
1085 | * represents its (non-prefetchable) PCI memory resource and | ||
1086 | * mem_resources[2] refers to its prefetchable PCI memory resource. | ||
1087 | * By searching phys_addr in each controller's mem_resources[], we can | ||
1088 | * determine the controller that should accept the PCI memory access. | ||
1089 | */ | ||
1090 | |||
1091 | for (i = 0; i < num_rc_controllers; i++) { | ||
1092 | /* | ||
1093 | * Skip controllers that are not properly initialized or | ||
1094 | * have down links. | ||
1095 | */ | ||
1096 | if (pci_controllers[i].root_bus == NULL) | ||
1097 | continue; | ||
1098 | |||
1099 | for (j = 1; j < 3; j++) { | ||
1100 | bar_start = | ||
1101 | pci_controllers[i].mem_resources[j].start; | ||
1102 | bar_end = | ||
1103 | pci_controllers[i].mem_resources[j].end; | ||
1104 | |||
1105 | if ((start >= bar_start) && (end <= bar_end)) { | ||
1106 | |||
1107 | controller = &pci_controllers[i]; | ||
1108 | |||
1109 | goto got_it; | ||
1110 | } | ||
1111 | } | ||
1112 | } | ||
1113 | |||
1114 | if (controller == NULL) | ||
1115 | return NULL; | ||
1116 | |||
1117 | got_it: | ||
1118 | trio_fd = controller->trio->fd; | ||
1119 | |||
1120 | /* Convert the resource start to the bus address offset. */ | ||
1121 | start = phys_addr - controller->mem_offset; | ||
1122 | |||
1123 | offset = HV_TRIO_PIO_OFFSET(controller->pio_mem_index) + start; | ||
1124 | |||
1125 | /* | ||
1126 | * We need to keep the PCI bus address's in-page offset in the VA. | ||
1127 | */ | ||
1128 | return iorpc_ioremap(trio_fd, offset, size) + | ||
1129 | (phys_addr & (PAGE_SIZE - 1)); | ||
1130 | } | ||
1131 | EXPORT_SYMBOL(ioremap); | ||
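
With a working ioremap() in place, a PCI driver on tilegx can map a BAR and use the normal MMIO accessors; the mapping is resolved by the controller search above. A minimal, hypothetical driver fragment (example_map_bar0() and the choice of BAR 0 are illustrative, not part of this patch):

#include <linux/pci.h>
#include <linux/io.h>

static int example_map_bar0(struct pci_dev *pdev)
{
	resource_size_t start = pci_resource_start(pdev, 0);
	resource_size_t len = pci_resource_len(pdev, 0);
	void __iomem *regs;

	/* Resolved by the ioremap() above: it finds the owning RC
	 * controller and maps through its PIO MEM region. */
	regs = ioremap(start, len);
	if (!regs)
		return -ENOMEM;

	(void)readl(regs);	/* MMIO read goes out through the TRIO MAC */
	iounmap(regs);
	return 0;
}

Note that pci_iounmap() above simply forwards to iounmap(), so either call works for teardown.
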
1132 | |||
1133 | void pci_iounmap(struct pci_dev *dev, void __iomem *addr) | ||
1134 | { | ||
1135 | iounmap(addr); | ||
1136 | } | ||
1137 | EXPORT_SYMBOL(pci_iounmap); | ||
1138 | |||
1139 | /**************************************************************** | ||
1140 | * | ||
1141 | * Tile PCI config space read/write routines | ||
1142 | * | ||
1143 | ****************************************************************/ | ||
1144 | |||
1145 | /* | ||
1146 | * These are the normal read and write ops. | ||
1147 | * They are expanded with macros from pci_bus_read_config_byte() etc. | ||
1148 | * | ||
1149 | * devfn is the combined PCI device & function. | ||
1150 | * | ||
1151 | * offset is in bytes, from the start of config space for the | ||
1152 | * specified bus & device. | ||
1153 | */ | ||
1154 | |||
1155 | static int __devinit tile_cfg_read(struct pci_bus *bus, | ||
1156 | unsigned int devfn, | ||
1157 | int offset, | ||
1158 | int size, | ||
1159 | u32 *val) | ||
1160 | { | ||
1161 | struct pci_controller *controller = bus->sysdata; | ||
1162 | gxio_trio_context_t *trio_context = controller->trio; | ||
1163 | int busnum = bus->number & 0xff; | ||
1164 | int device = PCI_SLOT(devfn); | ||
1165 | int function = PCI_FUNC(devfn); | ||
1166 | int config_type = 1; | ||
1167 | TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t cfg_addr; | ||
1168 | void *mmio_addr; | ||
1169 | |||
1170 | /* | ||
1171 | * Map all accesses to the local device on the root bus into the | ||
1172 | * MMIO space of the MAC. Accesses to the downstream devices | ||
1173 | * go to the PIO space. | ||
1174 | */ | ||
1175 | if (pci_is_root_bus(bus)) { | ||
1176 | if (device == 0) { | ||
1177 | /* | ||
1178 | * This is the internal downstream P2P bridge, | ||
1179 | * access directly. | ||
1180 | */ | ||
1181 | unsigned int reg_offset; | ||
1182 | |||
1183 | reg_offset = ((offset & 0xFFF) << | ||
1184 | TRIO_CFG_REGION_ADDR__REG_SHIFT) | | ||
1185 | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED | ||
1186 | << TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | | ||
1187 | (controller->mac << | ||
1188 | TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); | ||
1189 | |||
1190 | mmio_addr = trio_context->mmio_base_mac + reg_offset; | ||
1191 | |||
1192 | goto valid_device; | ||
1193 | |||
1194 | } else { | ||
1195 | /* | ||
1196 | * We fake an empty device for (device > 0), | ||
1197 | * since there is only one device on bus 0. | ||
1198 | */ | ||
1199 | goto invalid_device; | ||
1200 | } | ||
1201 | } | ||
1202 | |||
1203 | /* | ||
1204 | * Accesses to the directly attached device have to be | ||
1205 | * sent as type-0 configs. | ||
1206 | */ | ||
1207 | |||
1208 | if (busnum == (controller->first_busno + 1)) { | ||
1209 | /* | ||
1210 | * There is only one device off of our built-in P2P bridge. | ||
1211 | */ | ||
1212 | if (device != 0) | ||
1213 | goto invalid_device; | ||
1214 | |||
1215 | config_type = 0; | ||
1216 | } | ||
1217 | |||
1218 | cfg_addr.word = 0; | ||
1219 | cfg_addr.reg_addr = (offset & 0xFFF); | ||
1220 | cfg_addr.fn = function; | ||
1221 | cfg_addr.dev = device; | ||
1222 | cfg_addr.bus = busnum; | ||
1223 | cfg_addr.type = config_type; | ||
1224 | |||
1225 | /* | ||
1226 | * Note that we don't set the mac field in cfg_addr because the | ||
1227 | * mapping is per port. | ||
1228 | */ | ||
1229 | |||
1230 | mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] + | ||
1231 | cfg_addr.word; | ||
1232 | |||
1233 | valid_device: | ||
1234 | |||
1235 | switch (size) { | ||
1236 | case 4: | ||
1237 | *val = __gxio_mmio_read32(mmio_addr); | ||
1238 | break; | ||
1239 | |||
1240 | case 2: | ||
1241 | *val = __gxio_mmio_read16(mmio_addr); | ||
1242 | break; | ||
1243 | |||
1244 | case 1: | ||
1245 | *val = __gxio_mmio_read8(mmio_addr); | ||
1246 | break; | ||
1247 | |||
1248 | default: | ||
1249 | return PCIBIOS_FUNC_NOT_SUPPORTED; | ||
1250 | } | ||
1251 | |||
1252 | TRACE_CFG_RD(size, *val, busnum, device, function, offset); | ||
1253 | |||
1254 | return 0; | ||
1255 | |||
1256 | invalid_device: | ||
1257 | |||
1258 | switch (size) { | ||
1259 | case 4: | ||
1260 | *val = 0xFFFFFFFF; | ||
1261 | break; | ||
1262 | |||
1263 | case 2: | ||
1264 | *val = 0xFFFF; | ||
1265 | break; | ||
1266 | |||
1267 | case 1: | ||
1268 | *val = 0xFF; | ||
1269 | break; | ||
1270 | |||
1271 | default: | ||
1272 | return PCIBIOS_FUNC_NOT_SUPPORTED; | ||
1273 | } | ||
1274 | |||
1275 | return 0; | ||
1276 | } | ||
1277 | |||
1278 | |||
1279 | /* | ||
1280 | * See tile_cfg_read() for relevant comments. | ||
1281 | * Note that "val" is the value to write, not a pointer to that value. | ||
1282 | */ | ||
1283 | static int __devinit tile_cfg_write(struct pci_bus *bus, | ||
1284 | unsigned int devfn, | ||
1285 | int offset, | ||
1286 | int size, | ||
1287 | u32 val) | ||
1288 | { | ||
1289 | struct pci_controller *controller = bus->sysdata; | ||
1290 | gxio_trio_context_t *trio_context = controller->trio; | ||
1291 | int busnum = bus->number & 0xff; | ||
1292 | int device = PCI_SLOT(devfn); | ||
1293 | int function = PCI_FUNC(devfn); | ||
1294 | int config_type = 1; | ||
1295 | TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t cfg_addr; | ||
1296 | void *mmio_addr; | ||
1297 | u32 val_32 = (u32)val; | ||
1298 | u16 val_16 = (u16)val; | ||
1299 | u8 val_8 = (u8)val; | ||
1300 | |||
1301 | /* | ||
1302 | * Map all accesses to the local device on the root bus into the | ||
1303 | * MMIO space of the MAC. Accesses to the downstream devices | ||
1304 | * go to the PIO space. | ||
1305 | */ | ||
1306 | if (pci_is_root_bus(bus)) { | ||
1307 | if (device == 0) { | ||
1308 | /* | ||
1309 | * This is the internal downstream P2P bridge, | ||
1310 | * access directly. | ||
1311 | */ | ||
1312 | unsigned int reg_offset; | ||
1313 | |||
1314 | reg_offset = ((offset & 0xFFF) << | ||
1315 | TRIO_CFG_REGION_ADDR__REG_SHIFT) | | ||
1316 | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED | ||
1317 | << TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | | ||
1318 | (controller->mac << | ||
1319 | TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); | ||
1320 | |||
1321 | mmio_addr = trio_context->mmio_base_mac + reg_offset; | ||
1322 | |||
1323 | goto valid_device; | ||
1324 | |||
1325 | } else { | ||
1326 | /* | ||
1327 | * We fake an empty device for (device > 0), | ||
1328 | * since there is only one device on bus 0. | ||
1329 | */ | ||
1330 | goto invalid_device; | ||
1331 | } | ||
1332 | } | ||
1333 | |||
1334 | /* | ||
1335 | * Accesses to the directly attached device have to be | ||
1336 | * sent as type-0 configs. | ||
1337 | */ | ||
1338 | |||
1339 | if (busnum == (controller->first_busno + 1)) { | ||
1340 | /* | ||
1341 | * There is only one device off of our built-in P2P bridge. | ||
1342 | */ | ||
1343 | if (device != 0) | ||
1344 | goto invalid_device; | ||
1345 | |||
1346 | config_type = 0; | ||
1347 | } | ||
1348 | |||
1349 | cfg_addr.word = 0; | ||
1350 | cfg_addr.reg_addr = (offset & 0xFFF); | ||
1351 | cfg_addr.fn = function; | ||
1352 | cfg_addr.dev = device; | ||
1353 | cfg_addr.bus = busnum; | ||
1354 | cfg_addr.type = config_type; | ||
1355 | |||
1356 | /* | ||
1357 | * Note that we don't set the mac field in cfg_addr because the | ||
1358 | * mapping is per port. | ||
1359 | */ | ||
1360 | |||
1361 | mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] + | ||
1362 | cfg_addr.word; | ||
1363 | |||
1364 | valid_device: | ||
1365 | |||
1366 | switch (size) { | ||
1367 | case 4: | ||
1368 | __gxio_mmio_write32(mmio_addr, val_32); | ||
1369 | TRACE_CFG_WR(size, val_32, busnum, device, function, offset); | ||
1370 | break; | ||
1371 | |||
1372 | case 2: | ||
1373 | __gxio_mmio_write16(mmio_addr, val_16); | ||
1374 | TRACE_CFG_WR(size, val_16, busnum, device, function, offset); | ||
1375 | break; | ||
1376 | |||
1377 | case 1: | ||
1378 | __gxio_mmio_write8(mmio_addr, val_8); | ||
1379 | TRACE_CFG_WR(size, val_8, busnum, device, function, offset); | ||
1380 | break; | ||
1381 | |||
1382 | default: | ||
1383 | return PCIBIOS_FUNC_NOT_SUPPORTED; | ||
1384 | } | ||
1385 | |||
1386 | invalid_device: | ||
1387 | |||
1388 | return 0; | ||
1389 | } | ||
1390 | |||
1391 | |||
1392 | static struct pci_ops tile_cfg_ops = { | ||
1393 | .read = tile_cfg_read, | ||
1394 | .write = tile_cfg_write, | ||
1395 | }; | ||
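
These ops are what the generic PCI core invokes for every config-space access on these root buses. A hedged sketch of how a driver-level read reaches tile_cfg_read() (example_read_ids() is a made-up name; pci_read_config_dword() and PCI_VENDOR_ID are the standard kernel API):

#include <linux/pci.h>

/* Sketch only: the PCI core expands pci_read_config_dword() into a
 * call to bus->ops->read(), i.e. tile_cfg_read() on these buses. */
static void example_read_ids(struct pci_dev *pdev)
{
	u32 ids;

	pci_read_config_dword(pdev, PCI_VENDOR_ID, &ids);
	dev_info(&pdev->dev, "vendor/device: 0x%08x\n", ids);
}
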
1396 | |||
1397 | |||
1398 | /* | ||
1399 | * MSI support starts here. | ||
1400 | */ | ||
1401 | static unsigned int | ||
1402 | tilegx_msi_startup(struct irq_data *d) | ||
1403 | { | ||
1404 | if (d->msi_desc) | ||
1405 | unmask_msi_irq(d); | ||
1406 | |||
1407 | return 0; | ||
1408 | } | ||
1409 | |||
1410 | static void | ||
1411 | tilegx_msi_ack(struct irq_data *d) | ||
1412 | { | ||
1413 | __insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq); | ||
1414 | } | ||
1415 | |||
1416 | static void | ||
1417 | tilegx_msi_mask(struct irq_data *d) | ||
1418 | { | ||
1419 | mask_msi_irq(d); | ||
1420 | __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq); | ||
1421 | } | ||
1422 | |||
1423 | static void | ||
1424 | tilegx_msi_unmask(struct irq_data *d) | ||
1425 | { | ||
1426 | __insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq); | ||
1427 | unmask_msi_irq(d); | ||
1428 | } | ||
1429 | |||
1430 | static struct irq_chip tilegx_msi_chip = { | ||
1431 | .name = "tilegx_msi", | ||
1432 | .irq_startup = tilegx_msi_startup, | ||
1433 | .irq_ack = tilegx_msi_ack, | ||
1434 | .irq_mask = tilegx_msi_mask, | ||
1435 | .irq_unmask = tilegx_msi_unmask, | ||
1436 | |||
1437 | /* TBD: support set_affinity. */ | ||
1438 | }; | ||
1439 | |||
1440 | int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) | ||
1441 | { | ||
1442 | struct pci_controller *controller; | ||
1443 | gxio_trio_context_t *trio_context; | ||
1444 | struct msi_msg msg; | ||
1445 | int default_irq; | ||
1446 | uint64_t mem_map_base; | ||
1447 | uint64_t mem_map_limit; | ||
1448 | u64 msi_addr; | ||
1449 | int mem_map; | ||
1450 | int cpu; | ||
1451 | int irq; | ||
1452 | int ret; | ||
1453 | |||
1454 | irq = create_irq(); | ||
1455 | if (irq < 0) | ||
1456 | return irq; | ||
1457 | |||
1458 | /* | ||
1459 | * Since we use a 64-bit Mem-Map to accept the MSI write, we fail | ||
1460 | * devices that are not capable of generating a 64-bit message address. | ||
1461 | * These devices will fall back to using the legacy interrupts. | ||
1462 | * Most PCIe endpoint devices do support 64-bit message addressing. | ||
1463 | */ | ||
1464 | if (desc->msi_attrib.is_64 == 0) { | ||
1465 | dev_printk(KERN_INFO, &pdev->dev, | ||
1466 | "64-bit MSI message address not supported, " | ||
1467 | "falling back to legacy interrupts.\n"); | ||
1468 | |||
1469 | ret = -ENOMEM; | ||
1470 | goto is_64_failure; | ||
1471 | } | ||
1472 | |||
1473 | default_irq = desc->msi_attrib.default_irq; | ||
1474 | controller = irq_get_handler_data(default_irq); | ||
1475 | |||
1476 | BUG_ON(!controller); | ||
1477 | |||
1478 | trio_context = controller->trio; | ||
1479 | |||
1480 | /* | ||
1481 | * Allocate the Mem-Map that will accept the MSI write and | ||
1482 | * trigger the TILE-side interrupts. | ||
1483 | */ | ||
1484 | mem_map = gxio_trio_alloc_memory_maps(trio_context, 1, 0, 0); | ||
1485 | if (mem_map < 0) { | ||
1486 | dev_printk(KERN_INFO, &pdev->dev, | ||
1487 | "%s Mem-Map alloc failure. " | ||
1488 | "Failed to initialize MSI interrupts. " | ||
1489 | "Falling back to legacy interrupts.\n", | ||
1490 | desc->msi_attrib.is_msix ? "MSI-X" : "MSI"); | ||
1491 | |||
1492 | ret = -ENOMEM; | ||
1493 | goto msi_mem_map_alloc_failure; | ||
1494 | } | ||
1495 | |||
1496 | /* We try to distribute different IRQs to different tiles. */ | ||
1497 | cpu = tile_irq_cpu(irq); | ||
1498 | |||
1499 | /* | ||
1500 | * Now call up to the HV to configure the Mem-Map interrupt and | ||
1501 | * set up the IPI binding. | ||
1502 | */ | ||
1503 | mem_map_base = MEM_MAP_INTR_REGIONS_BASE + | ||
1504 | mem_map * MEM_MAP_INTR_REGION_SIZE; | ||
1505 | mem_map_limit = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 1; | ||
1506 | |||
1507 | ret = gxio_trio_config_msi_intr(trio_context, cpu_x(cpu), cpu_y(cpu), | ||
1508 | KERNEL_PL, irq, controller->mac, | ||
1509 | mem_map, mem_map_base, mem_map_limit, | ||
1510 | trio_context->asid); | ||
1511 | if (ret < 0) { | ||
1512 | dev_printk(KERN_INFO, &pdev->dev, "HV MSI config failed.\n"); | ||
1513 | |||
1514 | goto hv_msi_config_failure; | ||
1515 | } | ||
1516 | |||
1517 | irq_set_msi_desc(irq, desc); | ||
1518 | |||
1519 | msi_addr = mem_map_base + TRIO_MAP_MEM_REG_INT3 - TRIO_MAP_MEM_REG_INT0; | ||
1520 | |||
1521 | msg.address_hi = msi_addr >> 32; | ||
1522 | msg.address_lo = msi_addr & 0xffffffff; | ||
1523 | |||
1524 | msg.data = mem_map; | ||
1525 | |||
1526 | write_msi_msg(irq, &msg); | ||
1527 | irq_set_chip_and_handler(irq, &tilegx_msi_chip, handle_level_irq); | ||
1528 | irq_set_handler_data(irq, controller); | ||
1529 | |||
1530 | return 0; | ||
1531 | |||
1532 | hv_msi_config_failure: | ||
1533 | /* Free mem-map */ | ||
1534 | msi_mem_map_alloc_failure: | ||
1535 | is_64_failure: | ||
1536 | destroy_irq(irq); | ||
1537 | return ret; | ||
1538 | } | ||
1539 | |||
1540 | void arch_teardown_msi_irq(unsigned int irq) | ||
1541 | { | ||
1542 | destroy_irq(irq); | ||
1543 | } | ||
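
arch_setup_msi_irq() above derives the MSI address/data pair purely arithmetically from the allocated mem-map index: the endpoint's 64-bit write lands in a per-interrupt TRIO mem-map window and raises an IPI on the chosen tile. A self-contained sketch of that derivation follows; MEM_MAP_INTR_REGIONS_BASE, MEM_MAP_INTR_REGION_SIZE, and the INT3-INT0 offset are placeholder values here, not the real constants from the TRIO hardware headers.

#include <stdio.h>
#include <stdint.h>

/* Placeholder constants; the real ones are defined by the TRIO shim. */
#define MEM_MAP_INTR_REGIONS_BASE 0x1000000000ULL
#define MEM_MAP_INTR_REGION_SIZE  0x10000ULL
#define INT3_MINUS_INT0_OFFSET    0x18ULL

int main(void)
{
	int mem_map = 5;  /* index returned by gxio_trio_alloc_memory_maps() */

	uint64_t base  = MEM_MAP_INTR_REGIONS_BASE +
			 (uint64_t)mem_map * MEM_MAP_INTR_REGION_SIZE;
	uint64_t limit = base + MEM_MAP_INTR_REGION_SIZE - 1;
	uint64_t msi_addr = base + INT3_MINUS_INT0_OFFSET;

	/* This is the split written into struct msi_msg above. */
	printf("mem-map %d: window [%#llx, %#llx], MSI addr %#llx, data %d\n",
	       mem_map, (unsigned long long)base, (unsigned long long)limit,
	       (unsigned long long)msi_addr, mem_map);
	return 0;
}
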
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index dd87f3420390..6a649a4462d3 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/irq.h> | 23 | #include <linux/irq.h> |
24 | #include <linux/kexec.h> | 24 | #include <linux/kexec.h> |
25 | #include <linux/pci.h> | 25 | #include <linux/pci.h> |
26 | #include <linux/swiotlb.h> | ||
26 | #include <linux/initrd.h> | 27 | #include <linux/initrd.h> |
27 | #include <linux/io.h> | 28 | #include <linux/io.h> |
28 | #include <linux/highmem.h> | 29 | #include <linux/highmem.h> |
@@ -109,7 +110,7 @@ static unsigned int __initdata maxnodemem_pfn[MAX_NUMNODES] = { | |||
109 | }; | 110 | }; |
110 | static nodemask_t __initdata isolnodes; | 111 | static nodemask_t __initdata isolnodes; |
111 | 112 | ||
112 | #ifdef CONFIG_PCI | 113 | #if defined(CONFIG_PCI) && !defined(__tilegx__) |
113 | enum { DEFAULT_PCI_RESERVE_MB = 64 }; | 114 | enum { DEFAULT_PCI_RESERVE_MB = 64 }; |
114 | static unsigned int __initdata pci_reserve_mb = DEFAULT_PCI_RESERVE_MB; | 115 | static unsigned int __initdata pci_reserve_mb = DEFAULT_PCI_RESERVE_MB; |
115 | unsigned long __initdata pci_reserve_start_pfn = -1U; | 116 | unsigned long __initdata pci_reserve_start_pfn = -1U; |
@@ -160,7 +161,7 @@ static int __init setup_isolnodes(char *str) | |||
160 | } | 161 | } |
161 | early_param("isolnodes", setup_isolnodes); | 162 | early_param("isolnodes", setup_isolnodes); |
162 | 163 | ||
163 | #ifdef CONFIG_PCI | 164 | #if defined(CONFIG_PCI) && !defined(__tilegx__) |
164 | static int __init setup_pci_reserve(char* str) | 165 | static int __init setup_pci_reserve(char* str) |
165 | { | 166 | { |
166 | unsigned long mb; | 167 | unsigned long mb; |
@@ -171,7 +172,7 @@ static int __init setup_pci_reserve(char* str) | |||
171 | 172 | ||
172 | pci_reserve_mb = mb; | 173 | pci_reserve_mb = mb; |
173 | pr_info("Reserving %dMB for PCIE root complex mappings\n", | 174 | pr_info("Reserving %dMB for PCIE root complex mappings\n", |
174 | pci_reserve_mb); | 175 | pci_reserve_mb); |
175 | return 0; | 176 | return 0; |
176 | } | 177 | } |
177 | early_param("pci_reserve", setup_pci_reserve); | 178 | early_param("pci_reserve", setup_pci_reserve); |
@@ -411,7 +412,7 @@ static void __init setup_memory(void) | |||
411 | continue; | 412 | continue; |
412 | } | 413 | } |
413 | #endif | 414 | #endif |
414 | #ifdef CONFIG_PCI | 415 | #if defined(CONFIG_PCI) && !defined(__tilegx__) |
415 | /* | 416 | /* |
416 | * Blocks that overlap the pci reserved region must | 417 | * Blocks that overlap the pci reserved region must |
417 | * have enough space to hold the maximum percpu data | 418 | * have enough space to hold the maximum percpu data |
@@ -604,11 +605,9 @@ static void __init setup_bootmem_allocator_node(int i) | |||
604 | /* Free all the space back into the allocator. */ | 605 | /* Free all the space back into the allocator. */ |
605 | free_bootmem(PFN_PHYS(start), PFN_PHYS(end - start)); | 606 | free_bootmem(PFN_PHYS(start), PFN_PHYS(end - start)); |
606 | 607 | ||
607 | #if defined(CONFIG_PCI) | 608 | #if defined(CONFIG_PCI) && !defined(__tilegx__) |
608 | /* | 609 | /* |
609 | * Throw away any memory aliased by the PCI region. FIXME: this | 610 | * Throw away any memory aliased by the PCI region. |
610 | * is a temporary hack to work around bug 10502, and needs to be | ||
611 | * fixed properly. | ||
612 | */ | 611 | */ |
613 | if (pci_reserve_start_pfn < end && pci_reserve_end_pfn > start) | 612 | if (pci_reserve_start_pfn < end && pci_reserve_end_pfn > start) |
614 | reserve_bootmem(PFN_PHYS(pci_reserve_start_pfn), | 613 | reserve_bootmem(PFN_PHYS(pci_reserve_start_pfn), |
@@ -658,6 +657,8 @@ static void __init zone_sizes_init(void) | |||
658 | unsigned long zones_size[MAX_NR_ZONES] = { 0 }; | 657 | unsigned long zones_size[MAX_NR_ZONES] = { 0 }; |
659 | int size = percpu_size(); | 658 | int size = percpu_size(); |
660 | int num_cpus = smp_height * smp_width; | 659 | int num_cpus = smp_height * smp_width; |
660 | const unsigned long dma_end = (1UL << (32 - PAGE_SHIFT)); | ||
661 | |||
661 | int i; | 662 | int i; |
662 | 663 | ||
663 | for (i = 0; i < num_cpus; ++i) | 664 | for (i = 0; i < num_cpus; ++i) |
@@ -729,6 +730,14 @@ static void __init zone_sizes_init(void) | |||
729 | zones_size[ZONE_NORMAL] = end - start; | 730 | zones_size[ZONE_NORMAL] = end - start; |
730 | #endif | 731 | #endif |
731 | 732 | ||
733 | if (start < dma_end) { | ||
734 | zones_size[ZONE_DMA] = min(zones_size[ZONE_NORMAL], | ||
735 | dma_end - start); | ||
736 | zones_size[ZONE_NORMAL] -= zones_size[ZONE_DMA]; | ||
737 | } else { | ||
738 | zones_size[ZONE_DMA] = 0; | ||
739 | } | ||
740 | |||
732 | /* Take zone metadata from controller 0 if we're isolnode. */ | 741 | /* Take zone metadata from controller 0 if we're isolnode. */ |
733 | if (node_isset(i, isolnodes)) | 742 | if (node_isset(i, isolnodes)) |
734 | NODE_DATA(i)->bdata = &bootmem_node_data[0]; | 743 | NODE_DATA(i)->bdata = &bootmem_node_data[0]; |
@@ -738,7 +747,7 @@ static void __init zone_sizes_init(void) | |||
738 | PFN_UP(node_percpu[i])); | 747 | PFN_UP(node_percpu[i])); |
739 | 748 | ||
740 | /* Track the type of memory on each node */ | 749 | /* Track the type of memory on each node */ |
741 | if (zones_size[ZONE_NORMAL]) | 750 | if (zones_size[ZONE_NORMAL] || zones_size[ZONE_DMA]) |
742 | node_set_state(i, N_NORMAL_MEMORY); | 751 | node_set_state(i, N_NORMAL_MEMORY); |
743 | #ifdef CONFIG_HIGHMEM | 752 | #ifdef CONFIG_HIGHMEM |
744 | if (end != start) | 753 | if (end != start) |
@@ -1343,7 +1352,7 @@ void __init setup_arch(char **cmdline_p) | |||
1343 | setup_cpu_maps(); | 1352 | setup_cpu_maps(); |
1344 | 1353 | ||
1345 | 1354 | ||
1346 | #ifdef CONFIG_PCI | 1355 | #if defined(CONFIG_PCI) && !defined(__tilegx__) |
1347 | /* | 1356 | /* |
1348 | * Initialize the PCI structures. This is done before memory | 1357 | * Initialize the PCI structures. This is done before memory |
1349 | * setup so that we know whether or not a pci_reserve region | 1358 | * setup so that we know whether or not a pci_reserve region |
@@ -1372,6 +1381,10 @@ void __init setup_arch(char **cmdline_p) | |||
1372 | * any memory using the bootmem allocator. | 1381 | * any memory using the bootmem allocator. |
1373 | */ | 1382 | */ |
1374 | 1383 | ||
1384 | #ifdef CONFIG_SWIOTLB | ||
1385 | swiotlb_init(0); | ||
1386 | #endif | ||
1387 | |||
1375 | paging_init(); | 1388 | paging_init(); |
1376 | setup_numa_mapping(); | 1389 | setup_numa_mapping(); |
1377 | zone_sizes_init(); | 1390 | zone_sizes_init(); |
@@ -1522,11 +1535,10 @@ static struct resource code_resource = { | |||
1522 | }; | 1535 | }; |
1523 | 1536 | ||
1524 | /* | 1537 | /* |
1525 | * We reserve all resources above 4GB so that PCI won't try to put | 1538 | * On Pro, we reserve all resources above 4GB so that PCI won't try to put |
1526 | * mappings above 4GB; the standard allows that for some devices but | 1539 | * mappings above 4GB. |
1527 | * the probing code truncates values to 32 bits. | ||
1528 | */ | 1540 | */ |
1529 | #ifdef CONFIG_PCI | 1541 | #if defined(CONFIG_PCI) && !defined(__tilegx__) |
1530 | static struct resource* __init | 1542 | static struct resource* __init |
1531 | insert_non_bus_resource(void) | 1543 | insert_non_bus_resource(void) |
1532 | { | 1544 | { |
@@ -1571,8 +1583,7 @@ static int __init request_standard_resources(void) | |||
1571 | int i; | 1583 | int i; |
1572 | enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET }; | 1584 | enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET }; |
1573 | 1585 | ||
1574 | iomem_resource.end = -1LL; | 1586 | #if defined(CONFIG_PCI) && !defined(__tilegx__) |
1575 | #ifdef CONFIG_PCI | ||
1576 | insert_non_bus_resource(); | 1587 | insert_non_bus_resource(); |
1577 | #endif | 1588 | #endif |
1578 | 1589 | ||
@@ -1580,7 +1591,7 @@ static int __init request_standard_resources(void) | |||
1580 | u64 start_pfn = node_start_pfn[i]; | 1591 | u64 start_pfn = node_start_pfn[i]; |
1581 | u64 end_pfn = node_end_pfn[i]; | 1592 | u64 end_pfn = node_end_pfn[i]; |
1582 | 1593 | ||
1583 | #ifdef CONFIG_PCI | 1594 | #if defined(CONFIG_PCI) && !defined(__tilegx__) |
1584 | if (start_pfn <= pci_reserve_start_pfn && | 1595 | if (start_pfn <= pci_reserve_start_pfn && |
1585 | end_pfn > pci_reserve_start_pfn) { | 1596 | end_pfn > pci_reserve_start_pfn) { |
1586 | if (end_pfn > pci_reserve_end_pfn) | 1597 | if (end_pfn > pci_reserve_end_pfn) |
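
The zone_sizes_init() hunk above splits each node's low memory into ZONE_DMA (below 4 GB) and ZONE_NORMAL, so that swiotlb and 32-bit-capable devices have addressable pages. The split itself is plain pfn arithmetic; a stand-alone sketch with a placeholder PAGE_SHIFT and a made-up node layout:

#include <stdio.h>

#define PAGE_SHIFT 16   /* placeholder; tile uses large base pages */

int main(void)
{
	/* dma_end is the 4 GB boundary expressed in page frames. */
	const unsigned long dma_end = 1UL << (32 - PAGE_SHIFT);

	/* Hypothetical node: starts at 3 GB and spans 2 GB of pages. */
	unsigned long start  = (3UL << 30) >> PAGE_SHIFT;
	unsigned long normal = (2UL << 30) >> PAGE_SHIFT;
	unsigned long dma = 0;

	if (start < dma_end) {
		dma = dma_end - start;       /* pages below the 4 GB line */
		if (dma > normal)
			dma = normal;        /* min(), as in the hunk above */
		normal -= dma;
	}

	printf("ZONE_DMA %lu pages, ZONE_NORMAL %lu pages\n", dma, normal);
	return 0;
}
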
diff --git a/arch/tile/kernel/usb.c b/arch/tile/kernel/usb.c new file mode 100644 index 000000000000..5af8debc6a71 --- /dev/null +++ b/arch/tile/kernel/usb.c | |||
@@ -0,0 +1,69 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Register the Tile-Gx USB interfaces as platform devices. | ||
15 | * | ||
16 | * The actual USB driver is just some glue (in | ||
17 | * drivers/usb/host/[eo]hci-tilegx.c) which makes the registers available | ||
18 | * to the standard kernel EHCI and OHCI drivers. | ||
19 | */ | ||
20 | |||
21 | #include <linux/dma-mapping.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/usb/tilegx.h> | ||
24 | #include <linux/types.h> | ||
25 | |||
26 | static u64 ehci_dmamask = DMA_BIT_MASK(32); | ||
27 | |||
28 | #define USB_HOST_DEF(unit, type, dmamask) \ | ||
29 | static struct \ | ||
30 | tilegx_usb_platform_data tilegx_usb_platform_data_ ## type ## \ | ||
31 | hci ## unit = { \ | ||
32 | .dev_index = unit, \ | ||
33 | }; \ | ||
34 | \ | ||
35 | static struct platform_device tilegx_usb_ ## type ## hci ## unit = { \ | ||
36 | .name = "tilegx-" #type "hci", \ | ||
37 | .id = unit, \ | ||
38 | .dev = { \ | ||
39 | .dma_mask = dmamask, \ | ||
40 | .coherent_dma_mask = DMA_BIT_MASK(32), \ | ||
41 | .platform_data = \ | ||
42 | &tilegx_usb_platform_data_ ## type ## hci ## \ | ||
43 | unit, \ | ||
44 | }, \ | ||
45 | }; | ||
46 | |||
47 | USB_HOST_DEF(0, e, &ehci_dmamask) | ||
48 | USB_HOST_DEF(0, o, NULL) | ||
49 | USB_HOST_DEF(1, e, &ehci_dmamask) | ||
50 | USB_HOST_DEF(1, o, NULL) | ||
51 | |||
52 | #undef USB_HOST_DEF | ||
53 | |||
54 | static struct platform_device *tilegx_usb_devices[] __initdata = { | ||
55 | &tilegx_usb_ehci0, | ||
56 | &tilegx_usb_ehci1, | ||
57 | &tilegx_usb_ohci0, | ||
58 | &tilegx_usb_ohci1, | ||
59 | }; | ||
60 | |||
61 | /** Add our set of possible USB devices. */ | ||
62 | static int __init tilegx_usb_init(void) | ||
63 | { | ||
64 | platform_add_devices(tilegx_usb_devices, | ||
65 | ARRAY_SIZE(tilegx_usb_devices)); | ||
66 | |||
67 | return 0; | ||
68 | } | ||
69 | arch_initcall(tilegx_usb_init); | ||
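
For readability, this is roughly what one invocation of the USB_HOST_DEF() macro expands to, e.g. USB_HOST_DEF(0, e, &ehci_dmamask); only the whitespace differs from the preprocessor output:

static struct tilegx_usb_platform_data tilegx_usb_platform_data_ehci0 = {
	.dev_index = 0,
};

static struct platform_device tilegx_usb_ehci0 = {
	.name = "tilegx-ehci",
	.id = 0,
	.dev = {
		.dma_mask = &ehci_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(32),
		.platform_data = &tilegx_usb_platform_data_ehci0,
	},
};

The "tilegx-ehci" / "tilegx-ohci" names are what the glue drivers in drivers/usb/host/[eo]hci-tilegx.c match against.
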
diff --git a/arch/tile/lib/checksum.c b/arch/tile/lib/checksum.c index e4bab5bd3f31..c3ca3e64d9d9 100644 --- a/arch/tile/lib/checksum.c +++ b/arch/tile/lib/checksum.c | |||
@@ -16,19 +16,6 @@ | |||
16 | #include <net/checksum.h> | 16 | #include <net/checksum.h> |
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | 18 | ||
19 | static inline unsigned int longto16(unsigned long x) | ||
20 | { | ||
21 | unsigned long ret; | ||
22 | #ifdef __tilegx__ | ||
23 | ret = __insn_v2sadu(x, 0); | ||
24 | ret = __insn_v2sadu(ret, 0); | ||
25 | #else | ||
26 | ret = __insn_sadh_u(x, 0); | ||
27 | ret = __insn_sadh_u(ret, 0); | ||
28 | #endif | ||
29 | return ret; | ||
30 | } | ||
31 | |||
32 | __wsum do_csum(const unsigned char *buff, int len) | 19 | __wsum do_csum(const unsigned char *buff, int len) |
33 | { | 20 | { |
34 | int odd, count; | 21 | int odd, count; |
@@ -94,7 +81,7 @@ __wsum do_csum(const unsigned char *buff, int len) | |||
94 | } | 81 | } |
95 | if (len & 1) | 82 | if (len & 1) |
96 | result += *buff; | 83 | result += *buff; |
97 | result = longto16(result); | 84 | result = csum_long(result); |
98 | if (odd) | 85 | if (odd) |
99 | result = swab16(result); | 86 | result = swab16(result); |
100 | out: | 87 | out: |
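
do_csum() now ends with csum_long(), the "csum a long" helper this series breaks out into <asm/checksum.h> (on tile it is built from the same SAD instructions the deleted longto16() used). As a portable illustration only, the usual end-around-carry way to fold a 64-bit running sum down to 16 bits looks like this:

#include <stdio.h>
#include <stdint.h>

/* Fold a 64-bit one's-complement running sum down to 16 bits. */
static uint16_t fold_csum_long(uint64_t sum)
{
	sum = (sum & 0xffffffffULL) + (sum >> 32);   /* 64 -> 33 bits */
	sum = (sum & 0xffffffffULL) + (sum >> 32);   /* absorb carry  */
	sum = (sum & 0xffffULL) + (sum >> 16);       /* 32 -> 17 bits */
	sum = (sum & 0xffffULL) + (sum >> 16);       /* absorb carry  */
	return (uint16_t)sum;
}

int main(void)
{
	printf("%#x\n", fold_csum_long(0x123456789abcdefULL));
	return 0;
}
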
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c index dbcbdf7b8aa8..5f7868dcd6d4 100644 --- a/arch/tile/mm/homecache.c +++ b/arch/tile/mm/homecache.c | |||
@@ -64,10 +64,6 @@ early_param("noallocl2", set_noallocl2); | |||
64 | 64 | ||
65 | #endif | 65 | #endif |
66 | 66 | ||
67 | /* Provide no-op versions of these routines to keep flush_remote() cleaner. */ | ||
68 | #define mark_caches_evicted_start() 0 | ||
69 | #define mark_caches_evicted_finish(mask, timestamp) do {} while (0) | ||
70 | |||
71 | 67 | ||
72 | /* | 68 | /* |
73 | * Update the irq_stat for cpus that we are going to interrupt | 69 | * Update the irq_stat for cpus that we are going to interrupt |
@@ -107,7 +103,6 @@ static void hv_flush_update(const struct cpumask *cache_cpumask, | |||
107 | * there's never any good reason for hv_flush_remote() to fail. | 103 | * there's never any good reason for hv_flush_remote() to fail. |
108 | * - Accepts a 32-bit PFN rather than a 64-bit PA, which generally | 104 | * - Accepts a 32-bit PFN rather than a 64-bit PA, which generally |
109 | * is the type that Linux wants to pass around anyway. | 105 | * is the type that Linux wants to pass around anyway. |
110 | * - Centralizes the mark_caches_evicted() handling. | ||
111 | * - Canonicalizes that lengths of zero make cpumasks NULL. | 106 | * - Canonicalizes that lengths of zero make cpumasks NULL. |
112 | * - Handles deferring TLB flushes for dataplane tiles. | 107 | * - Handles deferring TLB flushes for dataplane tiles. |
113 | * - Tracks remote interrupts in the per-cpu irq_cpustat_t. | 108 | * - Tracks remote interrupts in the per-cpu irq_cpustat_t. |
@@ -126,7 +121,6 @@ void flush_remote(unsigned long cache_pfn, unsigned long cache_control, | |||
126 | HV_Remote_ASID *asids, int asidcount) | 121 | HV_Remote_ASID *asids, int asidcount) |
127 | { | 122 | { |
128 | int rc; | 123 | int rc; |
129 | int timestamp = 0; /* happy compiler */ | ||
130 | struct cpumask cache_cpumask_copy, tlb_cpumask_copy; | 124 | struct cpumask cache_cpumask_copy, tlb_cpumask_copy; |
131 | struct cpumask *cache_cpumask, *tlb_cpumask; | 125 | struct cpumask *cache_cpumask, *tlb_cpumask; |
132 | HV_PhysAddr cache_pa; | 126 | HV_PhysAddr cache_pa; |
@@ -157,15 +151,11 @@ void flush_remote(unsigned long cache_pfn, unsigned long cache_control, | |||
157 | hv_flush_update(cache_cpumask, tlb_cpumask, tlb_va, tlb_length, | 151 | hv_flush_update(cache_cpumask, tlb_cpumask, tlb_va, tlb_length, |
158 | asids, asidcount); | 152 | asids, asidcount); |
159 | cache_pa = (HV_PhysAddr)cache_pfn << PAGE_SHIFT; | 153 | cache_pa = (HV_PhysAddr)cache_pfn << PAGE_SHIFT; |
160 | if (cache_control & HV_FLUSH_EVICT_L2) | ||
161 | timestamp = mark_caches_evicted_start(); | ||
162 | rc = hv_flush_remote(cache_pa, cache_control, | 154 | rc = hv_flush_remote(cache_pa, cache_control, |
163 | cpumask_bits(cache_cpumask), | 155 | cpumask_bits(cache_cpumask), |
164 | tlb_va, tlb_length, tlb_pgsize, | 156 | tlb_va, tlb_length, tlb_pgsize, |
165 | cpumask_bits(tlb_cpumask), | 157 | cpumask_bits(tlb_cpumask), |
166 | asids, asidcount); | 158 | asids, asidcount); |
167 | if (cache_control & HV_FLUSH_EVICT_L2) | ||
168 | mark_caches_evicted_finish(cache_cpumask, timestamp); | ||
169 | if (rc == 0) | 159 | if (rc == 0) |
170 | return; | 160 | return; |
171 | cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy); | 161 | cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy); |
@@ -180,85 +170,86 @@ void flush_remote(unsigned long cache_pfn, unsigned long cache_control, | |||
180 | panic("Unsafe to continue."); | 170 | panic("Unsafe to continue."); |
181 | } | 171 | } |
182 | 172 | ||
183 | void flush_remote_page(struct page *page, int order) | 173 | static void homecache_finv_page_va(void* va, int home) |
184 | { | 174 | { |
185 | int i, pages = (1 << order); | 175 | if (home == smp_processor_id()) { |
186 | for (i = 0; i < pages; ++i, ++page) { | 176 | finv_buffer_local(va, PAGE_SIZE); |
187 | void *p = kmap_atomic(page); | 177 | } else if (home == PAGE_HOME_HASH) { |
188 | int hfh = 0; | 178 | finv_buffer_remote(va, PAGE_SIZE, 1); |
189 | int home = page_home(page); | 179 | } else { |
190 | #if CHIP_HAS_CBOX_HOME_MAP() | 180 | BUG_ON(home < 0 || home >= NR_CPUS); |
191 | if (home == PAGE_HOME_HASH) | 181 | finv_buffer_remote(va, PAGE_SIZE, 0); |
192 | hfh = 1; | ||
193 | else | ||
194 | #endif | ||
195 | BUG_ON(home < 0 || home >= NR_CPUS); | ||
196 | finv_buffer_remote(p, PAGE_SIZE, hfh); | ||
197 | kunmap_atomic(p); | ||
198 | } | 182 | } |
199 | } | 183 | } |
200 | 184 | ||
201 | void homecache_evict(const struct cpumask *mask) | 185 | void homecache_finv_map_page(struct page *page, int home) |
202 | { | 186 | { |
203 | flush_remote(0, HV_FLUSH_EVICT_L2, mask, 0, 0, 0, NULL, NULL, 0); | 187 | unsigned long flags; |
188 | unsigned long va; | ||
189 | pte_t *ptep; | ||
190 | pte_t pte; | ||
191 | |||
192 | if (home == PAGE_HOME_UNCACHED) | ||
193 | return; | ||
194 | local_irq_save(flags); | ||
195 | #ifdef CONFIG_HIGHMEM | ||
196 | va = __fix_to_virt(FIX_KMAP_BEGIN + kmap_atomic_idx_push() + | ||
197 | (KM_TYPE_NR * smp_processor_id())); | ||
198 | #else | ||
199 | va = __fix_to_virt(FIX_HOMECACHE_BEGIN + smp_processor_id()); | ||
200 | #endif | ||
201 | ptep = virt_to_pte(NULL, (unsigned long)va); | ||
202 | pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL); | ||
203 | __set_pte(ptep, pte_set_home(pte, home)); | ||
204 | homecache_finv_page_va((void *)va, home); | ||
205 | __pte_clear(ptep); | ||
206 | hv_flush_page(va, PAGE_SIZE); | ||
207 | #ifdef CONFIG_HIGHMEM | ||
208 | kmap_atomic_idx_pop(); | ||
209 | #endif | ||
210 | local_irq_restore(flags); | ||
204 | } | 211 | } |
205 | 212 | ||
206 | /* | 213 | static void homecache_finv_page_home(struct page *page, int home) |
207 | * Return a mask of the cpus whose caches currently own these pages. | ||
208 | * The return value is whether the pages are all coherently cached | ||
209 | * (i.e. none are immutable, incoherent, or uncached). | ||
210 | */ | ||
211 | static int homecache_mask(struct page *page, int pages, | ||
212 | struct cpumask *home_mask) | ||
213 | { | 214 | { |
214 | int i; | 215 | if (!PageHighMem(page) && home == page_home(page)) |
215 | int cached_coherently = 1; | 216 | homecache_finv_page_va(page_address(page), home); |
216 | cpumask_clear(home_mask); | 217 | else |
217 | for (i = 0; i < pages; ++i) { | 218 | homecache_finv_map_page(page, home); |
218 | int home = page_home(&page[i]); | ||
219 | if (home == PAGE_HOME_IMMUTABLE || | ||
220 | home == PAGE_HOME_INCOHERENT) { | ||
221 | cpumask_copy(home_mask, cpu_possible_mask); | ||
222 | return 0; | ||
223 | } | ||
224 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
225 | if (home == PAGE_HOME_HASH) { | ||
226 | cpumask_or(home_mask, home_mask, &hash_for_home_map); | ||
227 | continue; | ||
228 | } | ||
229 | #endif | ||
230 | if (home == PAGE_HOME_UNCACHED) { | ||
231 | cached_coherently = 0; | ||
232 | continue; | ||
233 | } | ||
234 | BUG_ON(home < 0 || home >= NR_CPUS); | ||
235 | cpumask_set_cpu(home, home_mask); | ||
236 | } | ||
237 | return cached_coherently; | ||
238 | } | 219 | } |
239 | 220 | ||
240 | /* | 221 | static inline bool incoherent_home(int home) |
241 | * Return the passed length, or zero if it's long enough that we | ||
242 | * believe we should evict the whole L2 cache. | ||
243 | */ | ||
244 | static unsigned long cache_flush_length(unsigned long length) | ||
245 | { | 222 | { |
246 | return (length >= CHIP_L2_CACHE_SIZE()) ? HV_FLUSH_EVICT_L2 : length; | 223 | return home == PAGE_HOME_IMMUTABLE || home == PAGE_HOME_INCOHERENT; |
247 | } | 224 | } |
248 | 225 | ||
249 | /* Flush a page out of whatever cache(s) it is in. */ | 226 | static void homecache_finv_page_internal(struct page *page, int force_map) |
250 | void homecache_flush_cache(struct page *page, int order) | ||
251 | { | 227 | { |
252 | int pages = 1 << order; | 228 | int home = page_home(page); |
253 | int length = cache_flush_length(pages * PAGE_SIZE); | 229 | if (home == PAGE_HOME_UNCACHED) |
254 | unsigned long pfn = page_to_pfn(page); | 230 | return; |
255 | struct cpumask home_mask; | 231 | if (incoherent_home(home)) { |
256 | 232 | int cpu; | |
257 | homecache_mask(page, pages, &home_mask); | 233 | for_each_cpu(cpu, &cpu_cacheable_map) |
258 | flush_remote(pfn, length, &home_mask, 0, 0, 0, NULL, NULL, 0); | 234 | homecache_finv_map_page(page, cpu); |
259 | sim_validate_lines_evicted(PFN_PHYS(pfn), pages * PAGE_SIZE); | 235 | } else if (force_map) { |
236 | /* Force if, e.g., the normal mapping is migrating. */ | ||
237 | homecache_finv_map_page(page, home); | ||
238 | } else { | ||
239 | homecache_finv_page_home(page, home); | ||
240 | } | ||
241 | sim_validate_lines_evicted(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE); | ||
260 | } | 242 | } |
261 | 243 | ||
244 | void homecache_finv_page(struct page *page) | ||
245 | { | ||
246 | homecache_finv_page_internal(page, 0); | ||
247 | } | ||
248 | |||
249 | void homecache_evict(const struct cpumask *mask) | ||
250 | { | ||
251 | flush_remote(0, HV_FLUSH_EVICT_L2, mask, 0, 0, 0, NULL, NULL, 0); | ||
252 | } | ||
262 | 253 | ||
263 | /* Report the home corresponding to a given PTE. */ | 254 | /* Report the home corresponding to a given PTE. */ |
264 | static int pte_to_home(pte_t pte) | 255 | static int pte_to_home(pte_t pte) |
@@ -441,15 +432,8 @@ struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask, | |||
441 | return page; | 432 | return page; |
442 | } | 433 | } |
443 | 434 | ||
444 | void homecache_free_pages(unsigned long addr, unsigned int order) | 435 | void __homecache_free_pages(struct page *page, unsigned int order) |
445 | { | 436 | { |
446 | struct page *page; | ||
447 | |||
448 | if (addr == 0) | ||
449 | return; | ||
450 | |||
451 | VM_BUG_ON(!virt_addr_valid((void *)addr)); | ||
452 | page = virt_to_page((void *)addr); | ||
453 | if (put_page_testzero(page)) { | 437 | if (put_page_testzero(page)) { |
454 | homecache_change_page_home(page, order, initial_page_home()); | 438 | homecache_change_page_home(page, order, initial_page_home()); |
455 | if (order == 0) { | 439 | if (order == 0) { |
@@ -460,3 +444,13 @@ void homecache_free_pages(unsigned long addr, unsigned int order) | |||
460 | } | 444 | } |
461 | } | 445 | } |
462 | } | 446 | } |
447 | EXPORT_SYMBOL(__homecache_free_pages); | ||
448 | |||
449 | void homecache_free_pages(unsigned long addr, unsigned int order) | ||
450 | { | ||
451 | if (addr != 0) { | ||
452 | VM_BUG_ON(!virt_addr_valid((void *)addr)); | ||
453 | __homecache_free_pages(virt_to_page((void *)addr), order); | ||
454 | } | ||
455 | } | ||
456 | EXPORT_SYMBOL(homecache_free_pages); | ||
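
__homecache_free_pages() lets callers that hold a struct page free it without first recovering a kernel VA; homecache_free_pages() is now a thin wrapper around it. A hypothetical usage pair, assuming the existing allocation-side helper homecache_alloc_pages(gfp, order, home) declared in <asm/homecache.h>:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <asm/homecache.h>

static struct page *example_page;

static int example_alloc(void)
{
	/* Hypothetical: allocate one hash-for-home page, keeping only
	 * the struct page (no kernel VA needed). */
	example_page = homecache_alloc_pages(GFP_KERNEL, 0, PAGE_HOME_HASH);
	return example_page ? 0 : -ENOMEM;
}

static void example_free(void)
{
	/* New in this patch: free directly by struct page. */
	__homecache_free_pages(example_page, 0);
}
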
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c index 630dd2ce2afe..ef29d6c5e10e 100644 --- a/arch/tile/mm/init.c +++ b/arch/tile/mm/init.c | |||
@@ -150,7 +150,21 @@ void __init shatter_pmd(pmd_t *pmd) | |||
150 | assign_pte(pmd, pte); | 150 | assign_pte(pmd, pte); |
151 | } | 151 | } |
152 | 152 | ||
153 | #ifdef CONFIG_HIGHMEM | 153 | #ifdef __tilegx__ |
154 | static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va) | ||
155 | { | ||
156 | pud_t *pud = pud_offset(&pgtables[pgd_index(va)], va); | ||
157 | if (pud_none(*pud)) | ||
158 | assign_pmd(pud, alloc_pmd()); | ||
159 | return pmd_offset(pud, va); | ||
160 | } | ||
161 | #else | ||
162 | static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va) | ||
163 | { | ||
164 | return pmd_offset(pud_offset(&pgtables[pgd_index(va)], va), va); | ||
165 | } | ||
166 | #endif | ||
167 | |||
154 | /* | 168 | /* |
155 | * This function initializes a certain range of kernel virtual memory | 169 | * This function initializes a certain range of kernel virtual memory |
156 | * with new bootmem page tables, everywhere page tables are missing in | 170 | * with new bootmem page tables, everywhere page tables are missing in |
@@ -163,24 +177,17 @@ void __init shatter_pmd(pmd_t *pmd) | |||
163 | * checking the pgd every time. | 177 | * checking the pgd every time. |
164 | */ | 178 | */ |
165 | static void __init page_table_range_init(unsigned long start, | 179 | static void __init page_table_range_init(unsigned long start, |
166 | unsigned long end, pgd_t *pgd_base) | 180 | unsigned long end, pgd_t *pgd) |
167 | { | 181 | { |
168 | pgd_t *pgd; | ||
169 | int pgd_idx; | ||
170 | unsigned long vaddr; | 182 | unsigned long vaddr; |
171 | 183 | start = round_down(start, PMD_SIZE); | |
172 | vaddr = start; | 184 | end = round_up(end, PMD_SIZE); |
173 | pgd_idx = pgd_index(vaddr); | 185 | for (vaddr = start; vaddr < end; vaddr += PMD_SIZE) { |
174 | pgd = pgd_base + pgd_idx; | 186 | pmd_t *pmd = get_pmd(pgd, vaddr); |
175 | |||
176 | for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) { | ||
177 | pmd_t *pmd = pmd_offset(pud_offset(pgd, vaddr), vaddr); | ||
178 | if (pmd_none(*pmd)) | 187 | if (pmd_none(*pmd)) |
179 | assign_pte(pmd, alloc_pte()); | 188 | assign_pte(pmd, alloc_pte()); |
180 | vaddr += PMD_SIZE; | ||
181 | } | 189 | } |
182 | } | 190 | } |
183 | #endif /* CONFIG_HIGHMEM */ | ||
184 | 191 | ||
185 | 192 | ||
186 | #if CHIP_HAS_CBOX_HOME_MAP() | 193 | #if CHIP_HAS_CBOX_HOME_MAP() |
@@ -404,21 +411,6 @@ static inline pgprot_t ktext_set_nocache(pgprot_t prot) | |||
404 | return prot; | 411 | return prot; |
405 | } | 412 | } |
406 | 413 | ||
407 | #ifndef __tilegx__ | ||
408 | static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va) | ||
409 | { | ||
410 | return pmd_offset(pud_offset(&pgtables[pgd_index(va)], va), va); | ||
411 | } | ||
412 | #else | ||
413 | static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va) | ||
414 | { | ||
415 | pud_t *pud = pud_offset(&pgtables[pgd_index(va)], va); | ||
416 | if (pud_none(*pud)) | ||
417 | assign_pmd(pud, alloc_pmd()); | ||
418 | return pmd_offset(pud, va); | ||
419 | } | ||
420 | #endif | ||
421 | |||
422 | /* Temporary page table we use for staging. */ | 414 | /* Temporary page table we use for staging. */ |
423 | static pgd_t pgtables[PTRS_PER_PGD] | 415 | static pgd_t pgtables[PTRS_PER_PGD] |
424 | __attribute__((aligned(HV_PAGE_TABLE_ALIGN))); | 416 | __attribute__((aligned(HV_PAGE_TABLE_ALIGN))); |
@@ -741,16 +733,15 @@ static void __init set_non_bootmem_pages_init(void) | |||
741 | for_each_zone(z) { | 733 | for_each_zone(z) { |
742 | unsigned long start, end; | 734 | unsigned long start, end; |
743 | int nid = z->zone_pgdat->node_id; | 735 | int nid = z->zone_pgdat->node_id; |
736 | #ifdef CONFIG_HIGHMEM | ||
744 | int idx = zone_idx(z); | 737 | int idx = zone_idx(z); |
738 | #endif | ||
745 | 739 | ||
746 | start = z->zone_start_pfn; | 740 | start = z->zone_start_pfn; |
747 | if (start == 0) | ||
748 | continue; /* bootmem */ | ||
749 | end = start + z->spanned_pages; | 741 | end = start + z->spanned_pages; |
750 | if (idx == ZONE_NORMAL) { | 742 | start = max(start, node_free_pfn[nid]); |
751 | BUG_ON(start != node_start_pfn[nid]); | 743 | start = max(start, max_low_pfn); |
752 | start = node_free_pfn[nid]; | 744 | |
753 | } | ||
754 | #ifdef CONFIG_HIGHMEM | 745 | #ifdef CONFIG_HIGHMEM |
755 | if (idx == ZONE_HIGHMEM) | 746 | if (idx == ZONE_HIGHMEM) |
756 | totalhigh_pages += z->spanned_pages; | 747 | totalhigh_pages += z->spanned_pages; |
@@ -779,9 +770,6 @@ static void __init set_non_bootmem_pages_init(void) | |||
779 | */ | 770 | */ |
780 | void __init paging_init(void) | 771 | void __init paging_init(void) |
781 | { | 772 | { |
782 | #ifdef CONFIG_HIGHMEM | ||
783 | unsigned long vaddr, end; | ||
784 | #endif | ||
785 | #ifdef __tilegx__ | 773 | #ifdef __tilegx__ |
786 | pud_t *pud; | 774 | pud_t *pud; |
787 | #endif | 775 | #endif |
@@ -789,14 +777,14 @@ void __init paging_init(void) | |||
789 | 777 | ||
790 | kernel_physical_mapping_init(pgd_base); | 778 | kernel_physical_mapping_init(pgd_base); |
791 | 779 | ||
792 | #ifdef CONFIG_HIGHMEM | ||
793 | /* | 780 | /* |
794 | * Fixed mappings, only the page table structure has to be | 781 | * Fixed mappings, only the page table structure has to be |
795 | * created - mappings will be set by set_fixmap(): | 782 | * created - mappings will be set by set_fixmap(): |
796 | */ | 783 | */ |
797 | vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; | 784 | page_table_range_init(fix_to_virt(__end_of_fixed_addresses - 1), |
798 | end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK; | 785 | FIXADDR_TOP, pgd_base); |
799 | page_table_range_init(vaddr, end, pgd_base); | 786 | |
787 | #ifdef CONFIG_HIGHMEM | ||
800 | permanent_kmaps_init(pgd_base); | 788 | permanent_kmaps_init(pgd_base); |
801 | #endif | 789 | #endif |
802 | 790 | ||
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c index 345edfed9fcd..de0de0c0e8a1 100644 --- a/arch/tile/mm/pgtable.c +++ b/arch/tile/mm/pgtable.c | |||
@@ -575,13 +575,6 @@ void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size, | |||
575 | } | 575 | } |
576 | EXPORT_SYMBOL(ioremap_prot); | 576 | EXPORT_SYMBOL(ioremap_prot); |
577 | 577 | ||
578 | /* Map a PCI MMIO bus address into VA space. */ | ||
579 | void __iomem *ioremap(resource_size_t phys_addr, unsigned long size) | ||
580 | { | ||
581 | panic("ioremap for PCI MMIO is not supported"); | ||
582 | } | ||
583 | EXPORT_SYMBOL(ioremap); | ||
584 | |||
585 | /* Unmap an MMIO VA mapping. */ | 578 | /* Unmap an MMIO VA mapping. */ |
586 | void iounmap(volatile void __iomem *addr_in) | 579 | void iounmap(volatile void __iomem *addr_in) |
587 | { | 580 | { |