author      Chris Metcalf <cmetcalf@tilera.com>    2012-04-04 16:58:27 -0400
committer   Chris Metcalf <cmetcalf@tilera.com>    2012-07-11 16:04:54 -0400
commit      6369798037c0e915fc3e3844083f2aeecb924c9d (patch)
tree        35e01293dc57d820fc0c89f110e52d4fad3a703d /arch/tile
parent      44e56967100f22a21abade38821018ba03d0a39f (diff)
arch/tile: common DMA code for the GXIO IORPC subsystem
The dma_queue support is used by both the mPipe (networking)
and Trio (PCI) hardware shims on tilegx. This common code is
selected when either of those drivers is built.
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Diffstat (limited to 'arch/tile')
-rw-r--r--   arch/tile/gxio/Kconfig               |   6
-rw-r--r--   arch/tile/gxio/Makefile              |   1
-rw-r--r--   arch/tile/gxio/dma_queue.c           | 176
-rw-r--r--   arch/tile/include/gxio/dma_queue.h   | 161
4 files changed, 344 insertions(+), 0 deletions(-)
diff --git a/arch/tile/gxio/Kconfig b/arch/tile/gxio/Kconfig
index 8eff47fe1236..ecd076c8cfd5 100644
--- a/arch/tile/gxio/Kconfig
+++ b/arch/tile/gxio/Kconfig
@@ -3,3 +3,9 @@
 config TILE_GXIO
 	bool
 	depends on TILEGX
+
+# Support direct access to the common I/O DMA facility within the
+# TILE-Gx mPIPE and Trio hardware from kernel space.
+config TILE_GXIO_DMA
+	bool
+	select TILE_GXIO
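
As the commit message notes, TILE_GXIO_DMA is not user-visible; a driver that needs the shared DMA code pulls it in with "select", and TILE_GXIO comes along transitively. A minimal sketch of what such a consumer's Kconfig entry could look like (the TILE_NET_MPIPE_EXAMPLE symbol and its prompt text are illustrative placeholders, not part of this commit):

# Hypothetical consumer entry, for illustration only.
config TILE_NET_MPIPE_EXAMPLE
	tristate "Example mPIPE-based network driver"
	depends on TILEGX
	select TILE_GXIO_DMA	# also selects TILE_GXIO and builds dma_queue.o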
diff --git a/arch/tile/gxio/Makefile b/arch/tile/gxio/Makefile
index db1ee2863d8e..97ab468fb8c5 100644
--- a/arch/tile/gxio/Makefile
+++ b/arch/tile/gxio/Makefile
@@ -3,3 +3,4 @@
 #
 
 obj-$(CONFIG_TILE_GXIO) += iorpc_globals.o kiorpc.o
+obj-$(CONFIG_TILE_GXIO_DMA) += dma_queue.o
diff --git a/arch/tile/gxio/dma_queue.c b/arch/tile/gxio/dma_queue.c
new file mode 100644
index 000000000000..baa60357f8ba
--- /dev/null
+++ b/arch/tile/gxio/dma_queue.c
@@ -0,0 +1,176 @@
/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/io.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <gxio/dma_queue.h>

/* Wait for a memory read to complete. */
#define wait_for_value(val) \
	__asm__ __volatile__("move %0, %0" :: "r"(val))

/* The index is in the low 16. */
#define DMA_QUEUE_INDEX_MASK ((1 << 16) - 1)

/*
 * The hardware descriptor-ring type.
 * This matches the types used by mpipe (MPIPE_EDMA_POST_REGION_VAL_t)
 * and trio (TRIO_PUSH_DMA_REGION_VAL_t or TRIO_PULL_DMA_REGION_VAL_t).
 * See those types for more documentation on the individual fields.
 */
typedef union {
	struct {
#ifndef __BIG_ENDIAN__
		uint64_t ring_idx:16;
		uint64_t count:16;
		uint64_t gen:1;
		uint64_t __reserved:31;
#else
		uint64_t __reserved:31;
		uint64_t gen:1;
		uint64_t count:16;
		uint64_t ring_idx:16;
#endif
	};
	uint64_t word;
} __gxio_ring_t;

void __gxio_dma_queue_init(__gxio_dma_queue_t *dma_queue,
			   void *post_region_addr, unsigned int num_entries)
{
	/*
	 * Limit 65536 entry rings to 65535 credits because we only have a
	 * 16 bit completion counter.
	 */
	int64_t credits = (num_entries < 65536) ? num_entries : 65535;

	memset(dma_queue, 0, sizeof(*dma_queue));

	dma_queue->post_region_addr = post_region_addr;
	dma_queue->hw_complete_count = 0;
	dma_queue->credits_and_next_index = credits << DMA_QUEUE_CREDIT_SHIFT;
}

EXPORT_SYMBOL_GPL(__gxio_dma_queue_init);

void __gxio_dma_queue_update_credits(__gxio_dma_queue_t *dma_queue)
{
	__gxio_ring_t val;
	uint64_t count;
	uint64_t delta;
	uint64_t new_count;

	/*
	 * Read the 64-bit completion count without touching the cache, so
	 * we later avoid having to evict any sharers of this cache line
	 * when we update it below.
	 */
	uint64_t orig_hw_complete_count =
		cmpxchg(&dma_queue->hw_complete_count,
			-1, -1);

	/* Make sure the load completes before we access the hardware. */
	wait_for_value(orig_hw_complete_count);

	/* Read the 16-bit count of how many packets it has completed. */
	val.word = __gxio_mmio_read(dma_queue->post_region_addr);
	count = val.count;

	/*
	 * Calculate the number of completions since we last updated the
	 * 64-bit counter.  It's safe to ignore the high bits because the
	 * maximum credit value is 65535.
	 */
	delta = (count - orig_hw_complete_count) & 0xffff;
	if (delta == 0)
		return;

	/*
	 * Try to write back the count, advanced by delta.  If we race with
	 * another thread, this might fail, in which case we return
	 * immediately on the assumption that some credits are (or at least
	 * were) available.
	 */
	new_count = orig_hw_complete_count + delta;
	if (cmpxchg(&dma_queue->hw_complete_count,
		    orig_hw_complete_count,
		    new_count) != orig_hw_complete_count)
		return;

	/*
	 * We succeeded in advancing the completion count; add back the
	 * corresponding number of egress credits.
	 */
	__insn_fetchadd(&dma_queue->credits_and_next_index,
			(delta << DMA_QUEUE_CREDIT_SHIFT));
}

EXPORT_SYMBOL_GPL(__gxio_dma_queue_update_credits);

/*
 * A separate 'blocked' method for put() so that backtraces and
 * profiles will clearly indicate that we're wasting time spinning on
 * egress availability rather than actually posting commands.
 */
int64_t __gxio_dma_queue_wait_for_credits(__gxio_dma_queue_t *dma_queue,
					  int64_t modifier)
{
	int backoff = 16;
	int64_t old;

	do {
		int i;
		/* Back off to avoid spamming memory networks. */
		for (i = backoff; i > 0; i--)
			__insn_mfspr(SPR_PASS);

		/* Check credits again. */
		__gxio_dma_queue_update_credits(dma_queue);
		old = __insn_fetchaddgez(&dma_queue->credits_and_next_index,
					 modifier);

		/* Calculate bounded exponential backoff for next iteration. */
		if (backoff < 256)
			backoff *= 2;
	} while (old + modifier < 0);

	return old;
}

EXPORT_SYMBOL_GPL(__gxio_dma_queue_wait_for_credits);

int64_t __gxio_dma_queue_reserve_aux(__gxio_dma_queue_t *dma_queue,
				     unsigned int num, int wait)
{
	return __gxio_dma_queue_reserve(dma_queue, num, wait != 0, true);
}

EXPORT_SYMBOL_GPL(__gxio_dma_queue_reserve_aux);

int __gxio_dma_queue_is_complete(__gxio_dma_queue_t *dma_queue,
				 int64_t completion_slot, int update)
{
	if (update) {
		if (ACCESS_ONCE(dma_queue->hw_complete_count) >
		    completion_slot)
			return 1;

		__gxio_dma_queue_update_credits(dma_queue);
	}

	return ACCESS_ONCE(dma_queue->hw_complete_count) > completion_slot;
}

EXPORT_SYMBOL_GPL(__gxio_dma_queue_is_complete);
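
A side note on the arithmetic in __gxio_dma_queue_update_credits() above: the hardware exposes only a 16-bit completion count, so the code recovers the number of new completions by subtracting the 64-bit software count and masking to 16 bits. The standalone sketch below (not from the commit; the sample values are invented) shows why that is safe as long as fewer than 65536 completions can accumulate between updates, which the 65535-credit cap in __gxio_dma_queue_init() guarantees.

#include <stdint.h>
#include <stdio.h>

/* Advance a 64-bit software count from a 16-bit hardware counter reading. */
static uint64_t advance_count(uint64_t sw_count, uint16_t hw_count)
{
	/* Completions since the last update, computed modulo 2^16. */
	uint64_t delta = (hw_count - sw_count) & 0xffff;
	return sw_count + delta;
}

int main(void)
{
	/* The 16-bit counter wraps from 65530 to 9 (15 completions),
	 * but the 64-bit software count keeps increasing monotonically. */
	uint64_t sw = 65530;
	sw = advance_count(sw, 9);
	printf("%llu\n", (unsigned long long)sw);	/* prints 65545 */
	return 0;
}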
diff --git a/arch/tile/include/gxio/dma_queue.h b/arch/tile/include/gxio/dma_queue.h
new file mode 100644
index 000000000000..00654feb7db0
--- /dev/null
+++ b/arch/tile/include/gxio/dma_queue.h
@@ -0,0 +1,161 @@
/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _GXIO_DMA_QUEUE_H_
#define _GXIO_DMA_QUEUE_H_

/*
 * DMA queue management APIs shared between TRIO and mPIPE.
 */

#include "common.h"

/* The credit counter lives in the high 32 bits. */
#define DMA_QUEUE_CREDIT_SHIFT 32

/*
 * State object that tracks a DMA queue's head and tail indices, as
 * well as the number of commands posted and completed.  The
 * structure is accessed via a thread-safe, lock-free algorithm.
 */
typedef struct {
	/*
	 * Address of a MPIPE_EDMA_POST_REGION_VAL_t,
	 * TRIO_PUSH_DMA_REGION_VAL_t, or TRIO_PULL_DMA_REGION_VAL_t
	 * register.  These registers have identical encodings and provide
	 * information about how many commands have been processed.
	 */
	void *post_region_addr;

	/*
	 * A lazily-updated count of how many edescs the hardware has
	 * completed.
	 */
	uint64_t hw_complete_count __attribute__ ((aligned(64)));

	/*
	 * High 32 bits are a count of available egress command credits,
	 * low 24 bits are the next egress "slot".
	 */
	int64_t credits_and_next_index;

} __gxio_dma_queue_t;

/* Initialize a dma queue. */
extern void __gxio_dma_queue_init(__gxio_dma_queue_t *dma_queue,
				  void *post_region_addr,
				  unsigned int num_entries);

/*
 * Update the "credits_and_next_index" and "hw_complete_count" fields
 * based on pending hardware completions.  Note that some other thread
 * may have already done this and, importantly, may still be in the
 * process of updating "credits_and_next_index".
 */
extern void __gxio_dma_queue_update_credits(__gxio_dma_queue_t *dma_queue);

/* Wait for credits to become available. */
extern int64_t __gxio_dma_queue_wait_for_credits(__gxio_dma_queue_t *dma_queue,
						 int64_t modifier);

/* Reserve slots in the queue, optionally waiting for slots to become
 * available, and optionally returning a "completion_slot" suitable for
 * direct comparison to "hw_complete_count".
 */
static inline int64_t __gxio_dma_queue_reserve(__gxio_dma_queue_t *dma_queue,
					       unsigned int num, bool wait,
					       bool completion)
{
	uint64_t slot;

	/*
	 * Try to reserve 'num' egress command slots.  We do this by
	 * constructing a constant that subtracts N credits and adds N to
	 * the index, and using fetchaddgez to only apply it if the credits
	 * count doesn't go negative.
	 */
	int64_t modifier = (((int64_t)(-num)) << DMA_QUEUE_CREDIT_SHIFT) | num;
	int64_t old =
		__insn_fetchaddgez(&dma_queue->credits_and_next_index,
				   modifier);

	if (unlikely(old + modifier < 0)) {
		/*
		 * We're out of credits.  Try once to get more by checking for
		 * completed egress commands.  If that fails, wait or fail.
		 */
		__gxio_dma_queue_update_credits(dma_queue);
		old = __insn_fetchaddgez(&dma_queue->credits_and_next_index,
					 modifier);
		if (old + modifier < 0) {
			if (wait)
				old = __gxio_dma_queue_wait_for_credits
					(dma_queue, modifier);
			else
				return GXIO_ERR_DMA_CREDITS;
		}
	}

	/* The bottom 24 bits of old encode the "slot". */
	slot = (old & 0xffffff);

	if (completion) {
		/*
		 * A "completion_slot" is a "slot" which can be compared to
		 * "hw_complete_count" at any time in the future.  To convert
		 * "slot" into a "completion_slot", we access "hw_complete_count"
		 * once (knowing that we have reserved a slot, and thus, it will
		 * be "basically" accurate), and combine its high 40 bits with
		 * the 24 bit "slot", and handle "wrapping" by adding "1 << 24"
		 * if the result is LESS than "hw_complete_count".
		 */
		uint64_t complete;
		complete = ACCESS_ONCE(dma_queue->hw_complete_count);
		slot |= (complete & 0xffffffffff000000);
		if (slot < complete)
			slot += 0x1000000;
	}

	/*
	 * If any of our slots mod 256 were equivalent to 0, go ahead and
	 * collect some egress credits, and update "hw_complete_count", and
	 * make sure the index doesn't overflow into the credits.
	 */
	if (unlikely(((old + num) & 0xff) < num)) {
		__gxio_dma_queue_update_credits(dma_queue);

		/* Make sure the index doesn't overflow into the credits. */
#ifdef __BIG_ENDIAN__
		*(((uint8_t *)&dma_queue->credits_and_next_index) + 4) = 0;
#else
		*(((uint8_t *)&dma_queue->credits_and_next_index) + 3) = 0;
#endif
	}

	return slot;
}

/* Non-inlinable "__gxio_dma_queue_reserve(..., true)". */
extern int64_t __gxio_dma_queue_reserve_aux(__gxio_dma_queue_t *dma_queue,
					    unsigned int num, int wait);

/* Check whether a particular "completion slot" has completed.
 *
 * Note that this function requires a "completion slot", and thus
 * cannot be used with the result of any "reserve_fast" function.
 */
extern int __gxio_dma_queue_is_complete(__gxio_dma_queue_t *dma_queue,
					int64_t completion_slot, int update);

#endif /* !_GXIO_DMA_QUEUE_H_ */
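
For orientation, a rough sketch of how a hypothetical mPIPE- or Trio-style consumer might drive this API: initialize the queue over the hardware's post region, reserve a slot with a completion handle, post the descriptor, and poll for completion. The example_* names, the ring size, and the commented-out hw_post_descriptor() step are invented placeholders for driver-specific code; only the __gxio_dma_queue_* calls come from this commit.

/* Hypothetical driver-side usage, for illustration only. */
#include <gxio/dma_queue.h>

static __gxio_dma_queue_t example_equeue;

void example_setup(void *post_region_addr)
{
	/* 512-entry ring; rings of up to 65536 entries are supported. */
	__gxio_dma_queue_init(&example_equeue, post_region_addr, 512);
}

int example_post_and_wait(void)
{
	/* Reserve one slot, waiting for credits, and get a completion slot. */
	int64_t slot = __gxio_dma_queue_reserve_aux(&example_equeue, 1, 1);
	if (slot < 0)
		return (int)slot;	/* e.g. GXIO_ERR_DMA_CREDITS if not waiting */

	/* Driver-specific: build the descriptor for "slot" and post it, e.g.
	 * hw_post_descriptor(&example_equeue, slot);
	 */

	/* Poll until the hardware reports the slot complete; update=1 lets
	 * the helper refresh hw_complete_count from the post region. */
	while (!__gxio_dma_queue_is_complete(&example_equeue, slot, 1))
		;	/* a real driver would back off or sleep here */

	return 0;
}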