author	Chris Metcalf <cmetcalf@tilera.com>	2012-04-06 16:38:03 -0400
committer	Chris Metcalf <cmetcalf@tilera.com>	2012-07-11 16:04:55 -0400
commit	4875f69fecab08654972d6fb0d71ee2109d2538c
tree	a4e04bfe6c47e34a0a5309fc1c2735cd625b9ee9
parent	6369798037c0e915fc3e3844083f2aeecb924c9d
arch/tile: provide kernel support for the tilegx mPIPE shim
The TILE-Gx chip includes a packet-processing network engine called
mPIPE ("Multicore Programmable Intelligent Packet Engine").  This
change adds support for using the mPIPE engine from within the
kernel.  The engine has more functionality than is exposed here, but
to keep the kernel code and binary simpler, this is a subset of the
full API designed to enable standard Linux networking only.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
-rw-r--r--	arch/tile/gxio/Kconfig	6
-rw-r--r--	arch/tile/gxio/Makefile	1
-rw-r--r--	arch/tile/gxio/iorpc_mpipe.c	529
-rw-r--r--	arch/tile/gxio/iorpc_mpipe_info.c	85
-rw-r--r--	arch/tile/gxio/mpipe.c	545
-rw-r--r--	arch/tile/include/arch/mpipe.h	359
-rw-r--r--	arch/tile/include/arch/mpipe_constants.h	42
-rw-r--r--	arch/tile/include/arch/mpipe_def.h	39
-rw-r--r--	arch/tile/include/arch/mpipe_shm.h	509
-rw-r--r--	arch/tile/include/arch/mpipe_shm_def.h	23
-rw-r--r--	arch/tile/include/gxio/iorpc_mpipe.h	136
-rw-r--r--	arch/tile/include/gxio/iorpc_mpipe_info.h	46
-rw-r--r--	arch/tile/include/gxio/mpipe.h	1736
-rw-r--r--	arch/tile/include/hv/drv_mpipe_intf.h	602
14 files changed, 4658 insertions(+), 0 deletions(-)
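
As an orientation aid before the diffs themselves: the sketch below shows roughly how a kernel driver is expected to drive the API exported by this change. It is a minimal, hypothetical example and not part of the patch; the shim number, ring index, memory arguments, and the "xgbe0" link name are illustrative placeholders, and error handling is abbreviated.

/* Hypothetical bring-up sketch; not part of this patch. */
static gxio_mpipe_context_t ctx;
static gxio_mpipe_iqueue_t iqueue;

static int example_mpipe_open(void *notif_mem, size_t notif_mem_size)
{
	gxio_mpipe_link_t link;
	int ring, rc;

	rc = gxio_mpipe_init(&ctx, 0);	/* attach to mPIPE shim 0 */
	if (rc != 0)
		return rc;

	/* Reserve one NotifRing; a non-negative return is taken here to
	 * be the first allocated ring index. */
	ring = gxio_mpipe_alloc_notif_rings(&ctx, 1, 0, 0);
	if (ring < 0)
		return ring;

	/* Wrap the ring in an iqueue for ingress packet delivery. */
	rc = gxio_mpipe_iqueue_init(&iqueue, &ctx, ring,
				    notif_mem, notif_mem_size, 0);
	if (rc != 0)
		return rc;

	/* "xgbe0" is a placeholder; real names come from
	 * gxio_mpipe_link_enumerate_mac(). */
	return gxio_mpipe_link_open(&link, &ctx, "xgbe0", 0);
}
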
diff --git a/arch/tile/gxio/Kconfig b/arch/tile/gxio/Kconfig
index ecd076c8cfd5..8aeebb70a3df 100644
--- a/arch/tile/gxio/Kconfig
+++ b/arch/tile/gxio/Kconfig
@@ -9,3 +9,9 @@ config TILE_GXIO
 config TILE_GXIO_DMA
 	bool
 	select TILE_GXIO
+
+# Support direct access to the TILE-Gx mPIPE hardware from kernel space.
+config TILE_GXIO_MPIPE
+	bool
+	select TILE_GXIO
+	select TILE_GXIO_DMA
diff --git a/arch/tile/gxio/Makefile b/arch/tile/gxio/Makefile
index 97ab468fb8c5..130eec48c152 100644
--- a/arch/tile/gxio/Makefile
+++ b/arch/tile/gxio/Makefile
@@ -4,3 +4,4 @@
 
 obj-$(CONFIG_TILE_GXIO) += iorpc_globals.o kiorpc.o
 obj-$(CONFIG_TILE_GXIO_DMA) += dma_queue.o
+obj-$(CONFIG_TILE_GXIO_MPIPE) += mpipe.o iorpc_mpipe.o iorpc_mpipe_info.o
diff --git a/arch/tile/gxio/iorpc_mpipe.c b/arch/tile/gxio/iorpc_mpipe.c
new file mode 100644
index 000000000000..31b87bf8c027
--- /dev/null
+++ b/arch/tile/gxio/iorpc_mpipe.c
@@ -0,0 +1,529 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/* This file is machine-generated; DO NOT EDIT! */
+#include "gxio/iorpc_mpipe.h"
+
+struct alloc_buffer_stacks_param {
+	unsigned int count;
+	unsigned int first;
+	unsigned int flags;
+};
+
+int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t * context,
+				   unsigned int count, unsigned int first,
+				   unsigned int flags)
+{
+	struct alloc_buffer_stacks_param temp;
+	struct alloc_buffer_stacks_param *params = &temp;
+
+	params->count = count;
+	params->first = first;
+	params->flags = flags;
+
+	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+			     sizeof(*params),
+			     GXIO_MPIPE_OP_ALLOC_BUFFER_STACKS);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_alloc_buffer_stacks);
+
+struct init_buffer_stack_aux_param {
+	union iorpc_mem_buffer buffer;
+	unsigned int stack;
+	unsigned int buffer_size_enum;
+};
+
+int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t * context,
+				     void *mem_va, size_t mem_size,
+				     unsigned int mem_flags, unsigned int stack,
+				     unsigned int buffer_size_enum)
+{
+	int __result;
+	unsigned long long __cpa;
+	pte_t __pte;
+	struct init_buffer_stack_aux_param temp;
+	struct init_buffer_stack_aux_param *params = &temp;
+
+	__result = va_to_cpa_and_pte(mem_va, &__cpa, &__pte);
+	if (__result != 0)
+		return __result;
+	params->buffer.kernel.cpa = __cpa;
+	params->buffer.kernel.size = mem_size;
+	params->buffer.kernel.pte = __pte;
+	params->buffer.kernel.flags = mem_flags;
+	params->stack = stack;
+	params->buffer_size_enum = buffer_size_enum;
+
+	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+			     sizeof(*params),
+			     GXIO_MPIPE_OP_INIT_BUFFER_STACK_AUX);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_init_buffer_stack_aux);
+
+
+struct alloc_notif_rings_param {
+	unsigned int count;
+	unsigned int first;
+	unsigned int flags;
+};
+
+int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t * context,
+				 unsigned int count, unsigned int first,
+				 unsigned int flags)
+{
+	struct alloc_notif_rings_param temp;
+	struct alloc_notif_rings_param *params = &temp;
+
+	params->count = count;
+	params->first = first;
+	params->flags = flags;
+
+	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+			     sizeof(*params), GXIO_MPIPE_OP_ALLOC_NOTIF_RINGS);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_alloc_notif_rings);
+
+struct init_notif_ring_aux_param {
+	union iorpc_mem_buffer buffer;
+	unsigned int ring;
+};
+
+int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t * context, void *mem_va,
+				   size_t mem_size, unsigned int mem_flags,
+				   unsigned int ring)
+{
+	int __result;
+	unsigned long long __cpa;
+	pte_t __pte;
+	struct init_notif_ring_aux_param temp;
+	struct init_notif_ring_aux_param *params = &temp;
+
+	__result = va_to_cpa_and_pte(mem_va, &__cpa, &__pte);
+	if (__result != 0)
+		return __result;
+	params->buffer.kernel.cpa = __cpa;
+	params->buffer.kernel.size = mem_size;
+	params->buffer.kernel.pte = __pte;
+	params->buffer.kernel.flags = mem_flags;
+	params->ring = ring;
+
+	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+			     sizeof(*params),
+			     GXIO_MPIPE_OP_INIT_NOTIF_RING_AUX);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_init_notif_ring_aux);
+
+struct request_notif_ring_interrupt_param {
+	union iorpc_interrupt interrupt;
+	unsigned int ring;
+};
+
+int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t * context,
+					    int inter_x, int inter_y,
+					    int inter_ipi, int inter_event,
+					    unsigned int ring)
+{
+	struct request_notif_ring_interrupt_param temp;
+	struct request_notif_ring_interrupt_param *params = &temp;
+
+	params->interrupt.kernel.x = inter_x;
+	params->interrupt.kernel.y = inter_y;
+	params->interrupt.kernel.ipi = inter_ipi;
+	params->interrupt.kernel.event = inter_event;
+	params->ring = ring;
+
+	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+			     sizeof(*params),
+			     GXIO_MPIPE_OP_REQUEST_NOTIF_RING_INTERRUPT);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_request_notif_ring_interrupt);
+
+struct enable_notif_ring_interrupt_param {
+	unsigned int ring;
+};
+
+int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t * context,
+					   unsigned int ring)
+{
+	struct enable_notif_ring_interrupt_param temp;
+	struct enable_notif_ring_interrupt_param *params = &temp;
+
+	params->ring = ring;
+
+	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+			     sizeof(*params),
+			     GXIO_MPIPE_OP_ENABLE_NOTIF_RING_INTERRUPT);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_enable_notif_ring_interrupt);
+
+struct alloc_notif_groups_param {
+	unsigned int count;
+	unsigned int first;
+	unsigned int flags;
+};
+
+int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t * context,
+				  unsigned int count, unsigned int first,
+				  unsigned int flags)
+{
+	struct alloc_notif_groups_param temp;
+	struct alloc_notif_groups_param *params = &temp;
+
+	params->count = count;
+	params->first = first;
+	params->flags = flags;
+
+	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+			     sizeof(*params), GXIO_MPIPE_OP_ALLOC_NOTIF_GROUPS);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_alloc_notif_groups);
+
+struct init_notif_group_param {
+	unsigned int group;
+	gxio_mpipe_notif_group_bits_t bits;
+};
+
+int gxio_mpipe_init_notif_group(gxio_mpipe_context_t * context,
+				unsigned int group,
+				gxio_mpipe_notif_group_bits_t bits)
+{
+	struct init_notif_group_param temp;
+	struct init_notif_group_param *params = &temp;
+
+	params->group = group;
+	params->bits = bits;
+
+	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+			     sizeof(*params), GXIO_MPIPE_OP_INIT_NOTIF_GROUP);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_init_notif_group);
+
+struct alloc_buckets_param {
+	unsigned int count;
+	unsigned int first;
+	unsigned int flags;
+};
+
+int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t * context, unsigned int count,
+			     unsigned int first, unsigned int flags)
+{
+	struct alloc_buckets_param temp;
+	struct alloc_buckets_param *params = &temp;
+
+	params->count = count;
+	params->first = first;
+	params->flags = flags;
+
+	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+			     sizeof(*params), GXIO_MPIPE_OP_ALLOC_BUCKETS);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_alloc_buckets);
+
+struct init_bucket_param {
+	unsigned int bucket;
+	MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info;
+};
+
+int gxio_mpipe_init_bucket(gxio_mpipe_context_t * context, unsigned int bucket,
+			   MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info)
+{
+	struct init_bucket_param temp;
+	struct init_bucket_param *params = &temp;
+
+	params->bucket = bucket;
+	params->bucket_info = bucket_info;
+
+	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+			     sizeof(*params), GXIO_MPIPE_OP_INIT_BUCKET);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_init_bucket);
+
+struct alloc_edma_rings_param {
+	unsigned int count;
+	unsigned int first;
+	unsigned int flags;
+};
+
+int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t * context,
+				unsigned int count, unsigned int first,
+				unsigned int flags)
+{
+	struct alloc_edma_rings_param temp;
+	struct alloc_edma_rings_param *params = &temp;
+
+	params->count = count;
+	params->first = first;
+	params->flags = flags;
+
+	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+			     sizeof(*params), GXIO_MPIPE_OP_ALLOC_EDMA_RINGS);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_alloc_edma_rings);
+
+struct init_edma_ring_aux_param {
+	union iorpc_mem_buffer buffer;
+	unsigned int ring;
+	unsigned int channel;
+};
+
+int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t * context, void *mem_va,
+				  size_t mem_size, unsigned int mem_flags,
+				  unsigned int ring, unsigned int channel)
+{
+	int __result;
+	unsigned long long __cpa;
+	pte_t __pte;
+	struct init_edma_ring_aux_param temp;
+	struct init_edma_ring_aux_param *params = &temp;
+
+	__result = va_to_cpa_and_pte(mem_va, &__cpa, &__pte);
+	if (__result != 0)
+		return __result;
+	params->buffer.kernel.cpa = __cpa;
+	params->buffer.kernel.size = mem_size;
+	params->buffer.kernel.pte = __pte;
+	params->buffer.kernel.flags = mem_flags;
+	params->ring = ring;
+	params->channel = channel;
+
+	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+			     sizeof(*params), GXIO_MPIPE_OP_INIT_EDMA_RING_AUX);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_init_edma_ring_aux);
+
+
+int gxio_mpipe_commit_rules(gxio_mpipe_context_t * context, const void *blob,
+			    size_t blob_size)
+{
+	const void *params = blob;
+
+	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, blob_size,
+			     GXIO_MPIPE_OP_COMMIT_RULES);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_commit_rules);
+
+struct register_client_memory_param {
+	unsigned int iotlb;
+	HV_PTE pte;
+	unsigned int flags;
+};
+
+int gxio_mpipe_register_client_memory(gxio_mpipe_context_t * context,
+				      unsigned int iotlb, HV_PTE pte,
+				      unsigned int flags)
+{
+	struct register_client_memory_param temp;
+	struct register_client_memory_param *params = &temp;
+
+	params->iotlb = iotlb;
+	params->pte = pte;
+	params->flags = flags;
+
+	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+			     sizeof(*params),
+			     GXIO_MPIPE_OP_REGISTER_CLIENT_MEMORY);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_register_client_memory);
+
+struct link_open_aux_param {
+	_gxio_mpipe_link_name_t name;
+	unsigned int flags;
+};
+
+int gxio_mpipe_link_open_aux(gxio_mpipe_context_t * context,
+			     _gxio_mpipe_link_name_t name, unsigned int flags)
+{
+	struct link_open_aux_param temp;
+	struct link_open_aux_param *params = &temp;
+
+	params->name = name;
+	params->flags = flags;
+
+	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+			     sizeof(*params), GXIO_MPIPE_OP_LINK_OPEN_AUX);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_link_open_aux);
+
+struct link_close_aux_param {
+	int mac;
+};
+
+int gxio_mpipe_link_close_aux(gxio_mpipe_context_t * context, int mac)
+{
+	struct link_close_aux_param temp;
+	struct link_close_aux_param *params = &temp;
+
+	params->mac = mac;
+
+	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+			     sizeof(*params), GXIO_MPIPE_OP_LINK_CLOSE_AUX);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_link_close_aux);
+
+
+struct get_timestamp_aux_param {
+	uint64_t sec;
+	uint64_t nsec;
+	uint64_t cycles;
+};
+
+int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t * context, uint64_t * sec,
+				 uint64_t * nsec, uint64_t * cycles)
+{
+	int __result;
+	struct get_timestamp_aux_param temp;
+	struct get_timestamp_aux_param *params = &temp;
+
+	__result =
+	    hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
+			 GXIO_MPIPE_OP_GET_TIMESTAMP_AUX);
+	*sec = params->sec;
+	*nsec = params->nsec;
+	*cycles = params->cycles;
+
+	return __result;
+}
+
+EXPORT_SYMBOL(gxio_mpipe_get_timestamp_aux);
+
+struct set_timestamp_aux_param {
+	uint64_t sec;
+	uint64_t nsec;
+	uint64_t cycles;
+};
+
+int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t * context, uint64_t sec,
+				 uint64_t nsec, uint64_t cycles)
+{
+	struct set_timestamp_aux_param temp;
+	struct set_timestamp_aux_param *params = &temp;
+
+	params->sec = sec;
+	params->nsec = nsec;
+	params->cycles = cycles;
+
+	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+			     sizeof(*params), GXIO_MPIPE_OP_SET_TIMESTAMP_AUX);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_set_timestamp_aux);
+
+struct adjust_timestamp_aux_param {
+	int64_t nsec;
+};
+
+int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context,
+				    int64_t nsec)
+{
+	struct adjust_timestamp_aux_param temp;
+	struct adjust_timestamp_aux_param *params = &temp;
+
+	params->nsec = nsec;
+
+	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+			     sizeof(*params),
+			     GXIO_MPIPE_OP_ADJUST_TIMESTAMP_AUX);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_aux);
+
+struct arm_pollfd_param {
+	union iorpc_pollfd pollfd;
+};
+
+int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie)
+{
+	struct arm_pollfd_param temp;
+	struct arm_pollfd_param *params = &temp;
+
+	params->pollfd.kernel.cookie = pollfd_cookie;
+
+	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+			     sizeof(*params), GXIO_MPIPE_OP_ARM_POLLFD);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_arm_pollfd);
+
+struct close_pollfd_param {
+	union iorpc_pollfd pollfd;
+};
+
+int gxio_mpipe_close_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie)
+{
+	struct close_pollfd_param temp;
+	struct close_pollfd_param *params = &temp;
+
+	params->pollfd.kernel.cookie = pollfd_cookie;
+
+	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+			     sizeof(*params), GXIO_MPIPE_OP_CLOSE_POLLFD);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_close_pollfd);
+
+struct get_mmio_base_param {
+	HV_PTE base;
+};
+
+int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t * context, HV_PTE *base)
+{
+	int __result;
+	struct get_mmio_base_param temp;
+	struct get_mmio_base_param *params = &temp;
+
+	__result =
+	    hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
+			 GXIO_MPIPE_OP_GET_MMIO_BASE);
+	*base = params->base;
+
+	return __result;
+}
+
+EXPORT_SYMBOL(gxio_mpipe_get_mmio_base);
+
+struct check_mmio_offset_param {
+	unsigned long offset;
+	unsigned long size;
+};
+
+int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t * context,
+				 unsigned long offset, unsigned long size)
+{
+	struct check_mmio_offset_param temp;
+	struct check_mmio_offset_param *params = &temp;
+
+	params->offset = offset;
+	params->size = size;
+
+	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+			     sizeof(*params), GXIO_MPIPE_OP_CHECK_MMIO_OFFSET);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_check_mmio_offset);
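
Every stub above has the same machine-generated shape: marshal the arguments into a params struct on the stack, then forward it to the hypervisor with hv_dev_pwrite() (or hv_dev_pread() when results are returned), passing the IORPC opcode as the offset argument. As a sketch of that pattern only, here is what a stub for an invented GXIO_MPIPE_OP_EXAMPLE opcode would look like; the opcode and its single argument are hypothetical.

/* Hypothetical stub; GXIO_MPIPE_OP_EXAMPLE is not a real opcode. */
struct example_param {
	unsigned int value;
};

int gxio_mpipe_example(gxio_mpipe_context_t *context, unsigned int value)
{
	struct example_param temp;
	struct example_param *params = &temp;

	params->value = value;

	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
			     sizeof(*params), GXIO_MPIPE_OP_EXAMPLE);
}
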
diff --git a/arch/tile/gxio/iorpc_mpipe_info.c b/arch/tile/gxio/iorpc_mpipe_info.c
new file mode 100644
index 000000000000..d0254aa60cba
--- /dev/null
+++ b/arch/tile/gxio/iorpc_mpipe_info.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/* This file is machine-generated; DO NOT EDIT! */
+#include "gxio/iorpc_mpipe_info.h"
+
+
+struct enumerate_aux_param {
+	_gxio_mpipe_link_name_t name;
+	_gxio_mpipe_link_mac_t mac;
+};
+
+int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t * context,
+				  unsigned int idx,
+				  _gxio_mpipe_link_name_t * name,
+				  _gxio_mpipe_link_mac_t * mac)
+{
+	int __result;
+	struct enumerate_aux_param temp;
+	struct enumerate_aux_param *params = &temp;
+
+	__result =
+	    hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
+			 (((uint64_t) idx << 32) |
+			  GXIO_MPIPE_INFO_OP_ENUMERATE_AUX));
+	*name = params->name;
+	*mac = params->mac;
+
+	return __result;
+}
+
+EXPORT_SYMBOL(gxio_mpipe_info_enumerate_aux);
+
+struct get_mmio_base_param {
+	HV_PTE base;
+};
+
+int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t * context,
+				  HV_PTE *base)
+{
+	int __result;
+	struct get_mmio_base_param temp;
+	struct get_mmio_base_param *params = &temp;
+
+	__result =
+	    hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
+			 GXIO_MPIPE_INFO_OP_GET_MMIO_BASE);
+	*base = params->base;
+
+	return __result;
+}
+
+EXPORT_SYMBOL(gxio_mpipe_info_get_mmio_base);
+
+struct check_mmio_offset_param {
+	unsigned long offset;
+	unsigned long size;
+};
+
+int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t * context,
+				      unsigned long offset, unsigned long size)
+{
+	struct check_mmio_offset_param temp;
+	struct check_mmio_offset_param *params = &temp;
+
+	params->offset = offset;
+	params->size = size;
+
+	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+			     sizeof(*params),
+			     GXIO_MPIPE_INFO_OP_CHECK_MMIO_OFFSET);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_info_check_mmio_offset);
diff --git a/arch/tile/gxio/mpipe.c b/arch/tile/gxio/mpipe.c
new file mode 100644
index 000000000000..e71c63390acc
--- /dev/null
+++ b/arch/tile/gxio/mpipe.c
@@ -0,0 +1,545 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/*
+ * Implementation of mpipe gxio calls.
+ */
+
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/module.h>
+
+#include <gxio/iorpc_globals.h>
+#include <gxio/iorpc_mpipe.h>
+#include <gxio/iorpc_mpipe_info.h>
+#include <gxio/kiorpc.h>
+#include <gxio/mpipe.h>
+
+/* HACK: Avoid pointless "shadow" warnings. */
+#define link link_shadow
+
+int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
+{
+	char file[32];
+
+	int fd;
+	int i;
+
+	snprintf(file, sizeof(file), "mpipe/%d/iorpc", mpipe_index);
+	fd = hv_dev_open((HV_VirtAddr) file, 0);
+	if (fd < 0) {
+		if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
+			return fd;
+		else
+			return -ENODEV;
+	}
+
+	context->fd = fd;
+
+	/* Map in the MMIO space. */
+	context->mmio_cfg_base = (void __force *)
+		iorpc_ioremap(fd, HV_MPIPE_CONFIG_MMIO_OFFSET,
+			      HV_MPIPE_CONFIG_MMIO_SIZE);
+	if (context->mmio_cfg_base == NULL)
+		goto cfg_failed;
+
+	context->mmio_fast_base = (void __force *)
+		iorpc_ioremap(fd, HV_MPIPE_FAST_MMIO_OFFSET,
+			      HV_MPIPE_FAST_MMIO_SIZE);
+	if (context->mmio_fast_base == NULL)
+		goto fast_failed;
+
+	/* Initialize the stacks. */
+	for (i = 0; i < 8; i++)
+		context->__stacks.stacks[i] = 255;
+
+	return 0;
+
+ fast_failed:
+	iounmap((void __force __iomem *)(context->mmio_cfg_base));
+ cfg_failed:
+	hv_dev_close(context->fd);
+	return -ENODEV;
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_init);
+
+int gxio_mpipe_destroy(gxio_mpipe_context_t *context)
+{
+	iounmap((void __force __iomem *)(context->mmio_cfg_base));
+	iounmap((void __force __iomem *)(context->mmio_fast_base));
+	return hv_dev_close(context->fd);
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_destroy);
+
+static int16_t gxio_mpipe_buffer_sizes[8] =
+	{ 128, 256, 512, 1024, 1664, 4096, 10368, 16384 };
+
+gxio_mpipe_buffer_size_enum_t gxio_mpipe_buffer_size_to_buffer_size_enum(size_t
+									 size)
+{
+	int i;
+	for (i = 0; i < 7; i++)
+		if (size <= gxio_mpipe_buffer_sizes[i])
+			break;
+	return i;
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_buffer_size_to_buffer_size_enum);
+
+size_t gxio_mpipe_buffer_size_enum_to_buffer_size(gxio_mpipe_buffer_size_enum_t
+						  buffer_size_enum)
+{
+	if (buffer_size_enum > 7)
+		buffer_size_enum = 7;
+
+	return gxio_mpipe_buffer_sizes[buffer_size_enum];
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_buffer_size_enum_to_buffer_size);
+
+size_t gxio_mpipe_calc_buffer_stack_bytes(unsigned long buffers)
+{
+	const int BUFFERS_PER_LINE = 12;
+
+	/* Count the number of cachelines. */
+	unsigned long lines =
+	    (buffers + BUFFERS_PER_LINE - 1) / BUFFERS_PER_LINE;
+
+	/* Convert to bytes. */
+	return lines * CHIP_L2_LINE_SIZE();
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_calc_buffer_stack_bytes);
+
+int gxio_mpipe_init_buffer_stack(gxio_mpipe_context_t *context,
+				 unsigned int stack,
+				 gxio_mpipe_buffer_size_enum_t
+				 buffer_size_enum, void *mem, size_t mem_size,
+				 unsigned int mem_flags)
+{
+	int result;
+
+	memset(mem, 0, mem_size);
+
+	result = gxio_mpipe_init_buffer_stack_aux(context, mem, mem_size,
+						  mem_flags, stack,
+						  buffer_size_enum);
+	if (result < 0)
+		return result;
+
+	/* Save the stack. */
+	context->__stacks.stacks[buffer_size_enum] = stack;
+
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_init_buffer_stack);
+
+int gxio_mpipe_init_notif_ring(gxio_mpipe_context_t *context,
+			       unsigned int ring,
+			       void *mem, size_t mem_size,
+			       unsigned int mem_flags)
+{
+	return gxio_mpipe_init_notif_ring_aux(context, mem, mem_size,
+					      mem_flags, ring);
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_init_notif_ring);
+
+int gxio_mpipe_init_notif_group_and_buckets(gxio_mpipe_context_t *context,
+					    unsigned int group,
+					    unsigned int ring,
+					    unsigned int num_rings,
+					    unsigned int bucket,
+					    unsigned int num_buckets,
+					    gxio_mpipe_bucket_mode_t mode)
+{
+	int i;
+	int result;
+
+	gxio_mpipe_bucket_info_t bucket_info = { {
+		.group = group,
+		.mode = mode,
+		}
+	};
+
+	gxio_mpipe_notif_group_bits_t bits = { {0} };
+
+	for (i = 0; i < num_rings; i++)
+		gxio_mpipe_notif_group_add_ring(&bits, ring + i);
+
+	result = gxio_mpipe_init_notif_group(context, group, bits);
+	if (result != 0)
+		return result;
+
+	for (i = 0; i < num_buckets; i++) {
+		bucket_info.notifring = ring + (i % num_rings);
+
+		result = gxio_mpipe_init_bucket(context, bucket + i,
+						bucket_info);
+		if (result != 0)
+			return result;
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_init_notif_group_and_buckets);
+
+int gxio_mpipe_init_edma_ring(gxio_mpipe_context_t *context,
+			      unsigned int ring, unsigned int channel,
+			      void *mem, size_t mem_size,
+			      unsigned int mem_flags)
+{
+	memset(mem, 0, mem_size);
+
+	return gxio_mpipe_init_edma_ring_aux(context, mem, mem_size, mem_flags,
+					     ring, channel);
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_init_edma_ring);
+
+void gxio_mpipe_rules_init(gxio_mpipe_rules_t *rules,
+			   gxio_mpipe_context_t *context)
+{
+	rules->context = context;
+	memset(&rules->list, 0, sizeof(rules->list));
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_rules_init);
+
+int gxio_mpipe_rules_begin(gxio_mpipe_rules_t *rules,
+			   unsigned int bucket, unsigned int num_buckets,
+			   gxio_mpipe_rules_stacks_t *stacks)
+{
+	int i;
+	int stack = 255;
+
+	gxio_mpipe_rules_list_t *list = &rules->list;
+
+	/* Current rule. */
+	gxio_mpipe_rules_rule_t *rule =
+	    (gxio_mpipe_rules_rule_t *) (list->rules + list->head);
+
+	unsigned int head = list->tail;
+
+	/*
+	 * Align next rule properly.
+	 * Note that "dmacs_and_vlans" will also be aligned.
+	 */
+	unsigned int pad = 0;
+	while (((head + pad) % __alignof__(gxio_mpipe_rules_rule_t)) != 0)
+		pad++;
+
+	/*
+	 * Verify room.
+	 * ISSUE: Mark rules as broken on error?
+	 */
+	if (head + pad + sizeof(*rule) >= sizeof(list->rules))
+		return GXIO_MPIPE_ERR_RULES_FULL;
+
+	/* Verify num_buckets is a power of 2. */
+	if (__builtin_popcount(num_buckets) != 1)
+		return GXIO_MPIPE_ERR_RULES_INVALID;
+
+	/* Add padding to previous rule. */
+	rule->size += pad;
+
+	/* Start a new rule. */
+	list->head = head + pad;
+
+	rule = (gxio_mpipe_rules_rule_t *) (list->rules + list->head);
+
+	/* Default some values. */
+	rule->headroom = 2;
+	rule->tailroom = 0;
+	rule->capacity = 16384;
+
+	/* Save the bucket info. */
+	rule->bucket_mask = num_buckets - 1;
+	rule->bucket_first = bucket;
+
+	for (i = 8 - 1; i >= 0; i--) {
+		int maybe =
+		    stacks ? stacks->stacks[i] : rules->context->__stacks.
+		    stacks[i];
+		if (maybe != 255)
+			stack = maybe;
+		rule->stacks.stacks[i] = stack;
+	}
+
+	if (stack == 255)
+		return GXIO_MPIPE_ERR_RULES_INVALID;
+
+	/* NOTE: Only entries at the end of the array can be 255. */
+	for (i = 8 - 1; i > 0; i--) {
+		if (rule->stacks.stacks[i] == 255) {
+			rule->stacks.stacks[i] = stack;
+			rule->capacity =
+			    gxio_mpipe_buffer_size_enum_to_buffer_size(i -
+								       1);
+		}
+	}
+
+	rule->size = sizeof(*rule);
+	list->tail = list->head + rule->size;
+
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_rules_begin);
+
+int gxio_mpipe_rules_add_channel(gxio_mpipe_rules_t *rules,
+				 unsigned int channel)
+{
+	gxio_mpipe_rules_list_t *list = &rules->list;
+
+	gxio_mpipe_rules_rule_t *rule =
+	    (gxio_mpipe_rules_rule_t *) (list->rules + list->head);
+
+	/* Verify channel. */
+	if (channel >= 32)
+		return GXIO_MPIPE_ERR_RULES_INVALID;
+
+	/* Verify begun. */
+	if (list->tail == 0)
+		return GXIO_MPIPE_ERR_RULES_EMPTY;
+
+	rule->channel_bits |= (1UL << channel);
+
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_rules_add_channel);
+
+int gxio_mpipe_rules_set_headroom(gxio_mpipe_rules_t *rules, uint8_t headroom)
+{
+	gxio_mpipe_rules_list_t *list = &rules->list;
+
+	gxio_mpipe_rules_rule_t *rule =
+	    (gxio_mpipe_rules_rule_t *) (list->rules + list->head);
+
+	/* Verify begun. */
+	if (list->tail == 0)
+		return GXIO_MPIPE_ERR_RULES_EMPTY;
+
+	rule->headroom = headroom;
+
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_rules_set_headroom);
+
+int gxio_mpipe_rules_commit(gxio_mpipe_rules_t *rules)
+{
+	gxio_mpipe_rules_list_t *list = &rules->list;
+	unsigned int size =
+	    offsetof(gxio_mpipe_rules_list_t, rules) + list->tail;
+	return gxio_mpipe_commit_rules(rules->context, list, size);
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_rules_commit);
+
+int gxio_mpipe_iqueue_init(gxio_mpipe_iqueue_t *iqueue,
+			   gxio_mpipe_context_t *context,
+			   unsigned int ring,
+			   void *mem, size_t mem_size, unsigned int mem_flags)
+{
+	/* The init call below will verify that "mem_size" is legal. */
+	unsigned int num_entries = mem_size / sizeof(gxio_mpipe_idesc_t);
+
+	iqueue->context = context;
+	iqueue->idescs = (gxio_mpipe_idesc_t *)mem;
+	iqueue->ring = ring;
+	iqueue->num_entries = num_entries;
+	iqueue->mask_num_entries = num_entries - 1;
+	iqueue->log2_num_entries = __builtin_ctz(num_entries);
+	iqueue->head = 1;
+#ifdef __BIG_ENDIAN__
+	iqueue->swapped = 0;
+#endif
+
+	/* Initialize the "tail". */
+	__gxio_mmio_write(mem, iqueue->head);
+
+	return gxio_mpipe_init_notif_ring(context, ring, mem, mem_size,
+					  mem_flags);
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_iqueue_init);
+
+int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
+			   gxio_mpipe_context_t *context,
+			   unsigned int edma_ring_id,
+			   unsigned int channel,
+			   void *mem, unsigned int mem_size,
+			   unsigned int mem_flags)
+{
+	/* The init call below will verify that "mem_size" is legal. */
+	unsigned int num_entries = mem_size / sizeof(gxio_mpipe_edesc_t);
+
+	/* Offset used to read number of completed commands. */
+	MPIPE_EDMA_POST_REGION_ADDR_t offset;
+
+	int result = gxio_mpipe_init_edma_ring(context, edma_ring_id, channel,
+					       mem, mem_size, mem_flags);
+	if (result < 0)
+		return result;
+
+	memset(equeue, 0, sizeof(*equeue));
+
+	offset.word = 0;
+	offset.region =
+	    MPIPE_MMIO_ADDR__REGION_VAL_EDMA -
+	    MPIPE_MMIO_ADDR__REGION_VAL_IDMA;
+	offset.ring = edma_ring_id;
+
+	__gxio_dma_queue_init(&equeue->dma_queue,
+			      context->mmio_fast_base + offset.word,
+			      num_entries);
+	equeue->edescs = mem;
+	equeue->mask_num_entries = num_entries - 1;
+	equeue->log2_num_entries = __builtin_ctz(num_entries);
+
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_equeue_init);
+
+int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context,
+			     const struct timespec *ts)
+{
+	cycles_t cycles = get_cycles();
+	return gxio_mpipe_set_timestamp_aux(context, (uint64_t)ts->tv_sec,
+					    (uint64_t)ts->tv_nsec,
+					    (uint64_t)cycles);
+}
+
+int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context,
+			     struct timespec *ts)
+{
+	int ret;
+	cycles_t cycles_prev, cycles_now, clock_rate;
+	cycles_prev = get_cycles();
+	ret = gxio_mpipe_get_timestamp_aux(context, (uint64_t *)&ts->tv_sec,
+					   (uint64_t *)&ts->tv_nsec,
+					   (uint64_t *)&cycles_now);
+	if (ret < 0) {
+		return ret;
+	}
+
+	clock_rate = get_clock_rate();
+	ts->tv_nsec -= (cycles_now - cycles_prev) * 1000000000LL / clock_rate;
+	if (ts->tv_nsec < 0) {
+		ts->tv_nsec += 1000000000LL;
+		ts->tv_sec -= 1;
+	}
+	return ret;
+}
+
+int gxio_mpipe_adjust_timestamp(gxio_mpipe_context_t *context, int64_t delta)
+{
+	return gxio_mpipe_adjust_timestamp_aux(context, delta);
+}
+
+/* Get our internal context used for link name access. This context is
+ * special in that it is not associated with an mPIPE service domain.
+ */
+static gxio_mpipe_context_t *_gxio_get_link_context(void)
+{
+	static gxio_mpipe_context_t context;
+	static gxio_mpipe_context_t *contextp;
+	static int tried_open = 0;
+	static DEFINE_MUTEX(mutex);
+
+	mutex_lock(&mutex);
+
+	if (!tried_open) {
+		int i = 0;
+		tried_open = 1;
+
+		/*
+		 * "4" here is the maximum possible number of mPIPE shims; it's
+		 * an exaggeration but we shouldn't ever go beyond 2 anyway.
+		 */
+		for (i = 0; i < 4; i++) {
+			char file[80];
+
+			snprintf(file, sizeof(file), "mpipe/%d/iorpc_info", i);
+			context.fd = hv_dev_open((HV_VirtAddr) file, 0);
+			if (context.fd < 0)
+				continue;
+
+			contextp = &context;
+			break;
+		}
+	}
+
+	mutex_unlock(&mutex);
+
+	return contextp;
+}
+
+int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac)
+{
+	int rv;
+	_gxio_mpipe_link_name_t name;
+	_gxio_mpipe_link_mac_t mac;
+
+	gxio_mpipe_context_t *context = _gxio_get_link_context();
+	if (!context)
+		return GXIO_ERR_NO_DEVICE;
+
+	rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac);
+	if (rv >= 0) {
+		strncpy(link_name, name.name, sizeof(name.name));
+		memcpy(link_mac, mac.mac, sizeof(mac.mac));
+	}
+
+	return rv;
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_link_enumerate_mac);
+
+int gxio_mpipe_link_open(gxio_mpipe_link_t *link,
+			 gxio_mpipe_context_t *context, const char *link_name,
+			 unsigned int flags)
+{
+	_gxio_mpipe_link_name_t name;
+	int rv;
+
+	strncpy(name.name, link_name, sizeof(name.name));
+	name.name[GXIO_MPIPE_LINK_NAME_LEN - 1] = '\0';
+
+	rv = gxio_mpipe_link_open_aux(context, name, flags);
+	if (rv < 0)
+		return rv;
+
+	link->context = context;
+	link->channel = rv >> 8;
+	link->mac = rv & 0xFF;
+
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_link_open);
+
+int gxio_mpipe_link_close(gxio_mpipe_link_t *link)
+{
+	return gxio_mpipe_link_close_aux(link->context, link->mac);
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_link_close);
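
The rules helpers above build a rule list in context-local memory and hand the finished blob to the hypervisor through gxio_mpipe_commit_rules(). A minimal, hypothetical use of that API is sketched below. It assumes a buffer stack was registered earlier via gxio_mpipe_init_buffer_stack(), since gxio_mpipe_rules_begin() with a NULL stacks argument falls back to the stacks recorded in the context; the bucket count of 16 is an arbitrary power of 2.

/* Hypothetical sketch: steer one channel's packets at 16 buckets. */
static int example_commit_rules(gxio_mpipe_context_t *context,
				unsigned int bucket, unsigned int channel)
{
	gxio_mpipe_rules_t rules;
	int rc;

	gxio_mpipe_rules_init(&rules, context);

	/* num_buckets must be a power of 2; rules_begin() checks this. */
	rc = gxio_mpipe_rules_begin(&rules, bucket, 16, NULL);
	if (rc != 0)
		return rc;

	rc = gxio_mpipe_rules_add_channel(&rules, channel);
	if (rc != 0)
		return rc;

	return gxio_mpipe_rules_commit(&rules);
}
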
diff --git a/arch/tile/include/arch/mpipe.h b/arch/tile/include/arch/mpipe.h
new file mode 100644
index 000000000000..8a33912fd6cc
--- /dev/null
+++ b/arch/tile/include/arch/mpipe.h
@@ -0,0 +1,359 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/* Machine-generated file; do not edit. */
+
+#ifndef __ARCH_MPIPE_H__
+#define __ARCH_MPIPE_H__
+
+#include <arch/abi.h>
+#include <arch/mpipe_def.h>
+
+#ifndef __ASSEMBLER__
+
+/*
+ * MMIO Ingress DMA Release Region Address.
+ * This is a description of the physical addresses used to manipulate ingress
+ * credit counters. Accesses to this address space should use an address of
+ * this form and a value like that specified in IDMA_RELEASE_REGION_VAL.
+ */
+
+__extension__
+typedef union
+{
+  struct
+  {
+#ifndef __BIG_ENDIAN__
+    /* Reserved. */
+    uint_reg_t __reserved_0  : 3;
+    /* NotifRing to be released */
+    uint_reg_t ring          : 8;
+    /* Bucket to be released */
+    uint_reg_t bucket        : 13;
+    /* Enable NotifRing release */
+    uint_reg_t ring_enable   : 1;
+    /* Enable Bucket release */
+    uint_reg_t bucket_enable : 1;
+    /*
+     * This field of the address selects the region (address space) to be
+     * accessed. For the iDMA release region, this field must be 4.
+     */
+    uint_reg_t region        : 3;
+    /* Reserved. */
+    uint_reg_t __reserved_1  : 6;
+    /* This field of the address indexes the 32 entry service domain table. */
+    uint_reg_t svc_dom       : 5;
+    /* Reserved. */
+    uint_reg_t __reserved_2  : 24;
+#else /* __BIG_ENDIAN__ */
+    uint_reg_t __reserved_2  : 24;
+    uint_reg_t svc_dom       : 5;
+    uint_reg_t __reserved_1  : 6;
+    uint_reg_t region        : 3;
+    uint_reg_t bucket_enable : 1;
+    uint_reg_t ring_enable   : 1;
+    uint_reg_t bucket        : 13;
+    uint_reg_t ring          : 8;
+    uint_reg_t __reserved_0  : 3;
+#endif
+  };
+
+  uint_reg_t word;
+} MPIPE_IDMA_RELEASE_REGION_ADDR_t;
+
+/*
+ * MMIO Ingress DMA Release Region Value - Release NotifRing and/or Bucket.
+ * Provides release of the associated NotifRing. The address of the MMIO
+ * operation is described in IDMA_RELEASE_REGION_ADDR.
+ */
+
+__extension__
+typedef union
+{
+  struct
+  {
+#ifndef __BIG_ENDIAN__
+    /*
+     * Number of packets being released. The load balancer's count of
+     * inflight packets will be decremented by this amount for the associated
+     * Bucket and/or NotifRing.
+     */
+    uint_reg_t count      : 16;
+    /* Reserved. */
+    uint_reg_t __reserved : 48;
+#else /* __BIG_ENDIAN__ */
+    uint_reg_t __reserved : 48;
+    uint_reg_t count      : 16;
+#endif
+  };
+
+  uint_reg_t word;
+} MPIPE_IDMA_RELEASE_REGION_VAL_t;
+
+/*
+ * MMIO Buffer Stack Manager Region Address.
+ * This MMIO region is used for posting or fetching buffers to/from the
+ * buffer stack manager. On an MMIO load, this pops a buffer descriptor from
+ * the top of stack if one is available. On an MMIO store, this pushes a
+ * buffer to the stack. The value read or written is described in
+ * BSM_REGION_VAL.
+ */
+
+__extension__
+typedef union
+{
+  struct
+  {
+#ifndef __BIG_ENDIAN__
+    /* Reserved. */
+    uint_reg_t __reserved_0 : 3;
+    /* BufferStack being accessed. */
+    uint_reg_t stack        : 5;
+    /* Reserved. */
+    uint_reg_t __reserved_1 : 18;
+    /*
+     * This field of the address selects the region (address space) to be
+     * accessed. For the buffer stack manager region, this field must be 6.
+     */
+    uint_reg_t region       : 3;
+    /* Reserved. */
+    uint_reg_t __reserved_2 : 6;
+    /* This field of the address indexes the 32 entry service domain table. */
+    uint_reg_t svc_dom      : 5;
+    /* Reserved. */
+    uint_reg_t __reserved_3 : 24;
+#else /* __BIG_ENDIAN__ */
+    uint_reg_t __reserved_3 : 24;
+    uint_reg_t svc_dom      : 5;
+    uint_reg_t __reserved_2 : 6;
+    uint_reg_t region       : 3;
+    uint_reg_t __reserved_1 : 18;
+    uint_reg_t stack        : 5;
+    uint_reg_t __reserved_0 : 3;
+#endif
+  };
+
+  uint_reg_t word;
+} MPIPE_BSM_REGION_ADDR_t;
+
+/*
+ * MMIO Buffer Stack Manager Region Value.
+ * This MMIO region is used for posting or fetching buffers to/from the
+ * buffer stack manager. On an MMIO load, this pops a buffer descriptor from
+ * the top of stack if one is available. On an MMIO store, this pushes a
+ * buffer to the stack. The address of the MMIO operation is described in
+ * BSM_REGION_ADDR.
+ */
+
+__extension__
+typedef union
+{
+  struct
+  {
+#ifndef __BIG_ENDIAN__
+    /* Reserved. */
+    uint_reg_t __reserved_0 : 7;
+    /*
+     * Base virtual address of the buffer. Must be sign extended by consumer.
+     */
+    int_reg_t va            : 35;
+    /* Reserved. */
+    uint_reg_t __reserved_1 : 6;
+    /*
+     * Index of the buffer stack to which this buffer belongs. Ignored on
+     * writes since the offset bits specify the stack being accessed.
+     */
+    uint_reg_t stack_idx    : 5;
+    /* Reserved. */
+    uint_reg_t __reserved_2 : 5;
+    /*
+     * Reads as one to indicate that this is a hardware managed buffer.
+     * Ignored on writes since all buffers on a given stack are the same size.
+     */
+    uint_reg_t hwb          : 1;
+    /*
+     * Encoded size of buffer (ignored on writes):
+     * 0 = 128 bytes
+     * 1 = 256 bytes
+     * 2 = 512 bytes
+     * 3 = 1024 bytes
+     * 4 = 1664 bytes
+     * 5 = 4096 bytes
+     * 6 = 10368 bytes
+     * 7 = 16384 bytes
+     */
+    uint_reg_t size         : 3;
+    /*
+     * Valid indication for the buffer. Ignored on writes.
+     * 0 : Valid buffer descriptor popped from stack.
+     * 3 : Could not pop a buffer from the stack. Either the stack is empty,
+     * or the hardware's prefetch buffer is empty for this stack.
+     */
+    uint_reg_t c            : 2;
+#else /* __BIG_ENDIAN__ */
+    uint_reg_t c            : 2;
+    uint_reg_t size         : 3;
+    uint_reg_t hwb          : 1;
+    uint_reg_t __reserved_2 : 5;
+    uint_reg_t stack_idx    : 5;
+    uint_reg_t __reserved_1 : 6;
+    int_reg_t va            : 35;
+    uint_reg_t __reserved_0 : 7;
+#endif
+  };
+
+  uint_reg_t word;
+} MPIPE_BSM_REGION_VAL_t;
+
+/*
+ * MMIO Egress DMA Post Region Address.
+ * Used to post descriptor locations to the eDMA descriptor engine. The
+ * value to be written is described in EDMA_POST_REGION_VAL.
+ */
+
+__extension__
+typedef union
+{
+  struct
+  {
+#ifndef __BIG_ENDIAN__
+    /* Reserved. */
+    uint_reg_t __reserved_0 : 3;
+    /* eDMA ring being accessed */
+    uint_reg_t ring         : 5;
+    /* Reserved. */
+    uint_reg_t __reserved_1 : 18;
+    /*
+     * This field of the address selects the region (address space) to be
+     * accessed. For the egress DMA post region, this field must be 5.
+     */
+    uint_reg_t region       : 3;
+    /* Reserved. */
+    uint_reg_t __reserved_2 : 6;
+    /* This field of the address indexes the 32 entry service domain table. */
+    uint_reg_t svc_dom      : 5;
+    /* Reserved. */
+    uint_reg_t __reserved_3 : 24;
+#else /* __BIG_ENDIAN__ */
+    uint_reg_t __reserved_3 : 24;
+    uint_reg_t svc_dom      : 5;
+    uint_reg_t __reserved_2 : 6;
+    uint_reg_t region       : 3;
+    uint_reg_t __reserved_1 : 18;
+    uint_reg_t ring         : 5;
+    uint_reg_t __reserved_0 : 3;
+#endif
+  };
+
+  uint_reg_t word;
+} MPIPE_EDMA_POST_REGION_ADDR_t;
+
+/*
+ * MMIO Egress DMA Post Region Value.
+ * Used to post descriptor locations to the eDMA descriptor engine. The
+ * address is described in EDMA_POST_REGION_ADDR.
+ */
+
+__extension__
+typedef union
+{
+  struct
+  {
+#ifndef __BIG_ENDIAN__
+    /*
+     * For writes, this specifies the current ring tail pointer prior to any
+     * post. For example, to post 1 or more descriptors starting at location
+     * 23, this would contain 23 (not 24). On writes, this index must be
+     * masked based on the ring size. The new tail pointer after this post
+     * is COUNT+RING_IDX (masked by the ring size).
+     *
+     * For reads, this provides the hardware descriptor fetcher's head
+     * pointer. The descriptors prior to the head pointer, however, may not
+     * yet have been processed so this indicator is only used to determine
+     * how full the ring is and if software may post more descriptors.
+     */
+    uint_reg_t ring_idx   : 16;
+    /*
+     * For writes, this specifies the number of contiguous descriptors that
+     * are being posted. Software may post up to RingSize descriptors with a
+     * single MMIO store. A zero in this field on a write will "wake up" an
+     * eDMA ring and cause it to fetch descriptors regardless of the
+     * hardware's current view of the state of the tail pointer.
+     *
+     * For reads, this field provides a rolling count of the number of
+     * descriptors that have been completely processed. This may be used by
+     * software to determine when buffers associated with a descriptor may be
+     * returned or reused. When the ring's flush bit is cleared by software
+     * (after having been set by HW or SW), the COUNT will be cleared.
+     */
+    uint_reg_t count      : 16;
+    /*
+     * For writes, this specifies the generation number of the tail being
+     * posted. Note that if tail+cnt wraps to the beginning of the ring, the
+     * eDMA hardware assumes that the descriptors posted at the beginning of
+     * the ring are also valid so it is okay to post around the wrap point.
+     *
+     * For reads, this is the current generation number. Valid descriptors
+     * will have the inverse of this generation number.
+     */
+    uint_reg_t gen        : 1;
+    /* Reserved. */
+    uint_reg_t __reserved : 31;
+#else /* __BIG_ENDIAN__ */
+    uint_reg_t __reserved : 31;
+    uint_reg_t gen        : 1;
+    uint_reg_t count      : 16;
+    uint_reg_t ring_idx   : 16;
+#endif
+  };
+
+  uint_reg_t word;
+} MPIPE_EDMA_POST_REGION_VAL_t;
+
+/*
+ * Load Balancer Bucket Status Data.
+ * Read/Write data for load balancer Bucket-Status Table. 4160 entries
+ * indexed by LBL_INIT_CTL.IDX when LBL_INIT_CTL.STRUCT_SEL is BSTS_TBL.
+ */
+
+__extension__
+typedef union
+{
+  struct
+  {
+#ifndef __BIG_ENDIAN__
+    /* NotifRing currently assigned to this bucket. */
+    uint_reg_t notifring  : 8;
+    /* Current reference count. */
+    uint_reg_t count      : 16;
+    /* Group associated with this bucket. */
+    uint_reg_t group      : 5;
+    /* Mode select for this bucket. */
+    uint_reg_t mode       : 3;
+    /* Reserved. */
+    uint_reg_t __reserved : 32;
+#else /* __BIG_ENDIAN__ */
+    uint_reg_t __reserved : 32;
+    uint_reg_t mode       : 3;
+    uint_reg_t group      : 5;
+    uint_reg_t count      : 16;
+    uint_reg_t notifring  : 8;
+#endif
+  };
+
+  uint_reg_t word;
+} MPIPE_LBL_INIT_DAT_BSTS_TBL_t;
+#endif /* !defined(__ASSEMBLER__) */
+
+#endif /* !defined(__ARCH_MPIPE_H__) */
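
These register unions are intended to be composed field by field and then read or written as a single word, as gxio_mpipe_equeue_init() does with MPIPE_EDMA_POST_REGION_ADDR_t. The fragment below illustrates the same idiom for posting eDMA descriptors; the ring number, index, count, and generation bit are hypothetical values, and the region is expressed relative to the fast MMIO base to mirror the equeue code.

/* Hypothetical fragment: post 2 descriptors at tail index 10 of eDMA
 * ring 3. "context" is assumed to be a gxio_mpipe_context_t that was
 * set up by gxio_mpipe_init(), so mmio_fast_base is already mapped. */
MPIPE_EDMA_POST_REGION_ADDR_t offset;
MPIPE_EDMA_POST_REGION_VAL_t val;

offset.word = 0;
offset.region = MPIPE_MMIO_ADDR__REGION_VAL_EDMA -
		MPIPE_MMIO_ADDR__REGION_VAL_IDMA;
offset.ring = 3;

val.word = 0;
val.ring_idx = 10;	/* ring tail prior to this post */
val.count = 2;		/* number of descriptors being posted */
val.gen = 1;		/* generation number of the posted tail */

__gxio_mmio_write(context->mmio_fast_base + offset.word, val.word);
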
diff --git a/arch/tile/include/arch/mpipe_constants.h b/arch/tile/include/arch/mpipe_constants.h
new file mode 100644
index 000000000000..410a0400e055
--- /dev/null
+++ b/arch/tile/include/arch/mpipe_constants.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+
+#ifndef __ARCH_MPIPE_CONSTANTS_H__
+#define __ARCH_MPIPE_CONSTANTS_H__
+
+#define MPIPE_NUM_CLASSIFIERS 10
+#define MPIPE_CLS_MHZ 1200
+
+#define MPIPE_NUM_EDMA_RINGS 32
+
+#define MPIPE_NUM_SGMII_MACS 16
+#define MPIPE_NUM_XAUI_MACS 4
+#define MPIPE_NUM_LOOPBACK_CHANNELS 4
+#define MPIPE_NUM_NON_LB_CHANNELS 28
+
+#define MPIPE_NUM_IPKT_BLOCKS 1536
+
+#define MPIPE_NUM_BUCKETS 4160
+
+#define MPIPE_NUM_NOTIF_RINGS 256
+
+#define MPIPE_NUM_NOTIF_GROUPS 32
+
+#define MPIPE_NUM_TLBS_PER_ASID 16
+#define MPIPE_TLB_IDX_WIDTH 4
+
+#define MPIPE_MMIO_NUM_SVC_DOM 32
+
+#endif /* __ARCH_MPIPE_CONSTANTS_H__ */
diff --git a/arch/tile/include/arch/mpipe_def.h b/arch/tile/include/arch/mpipe_def.h
new file mode 100644
index 000000000000..c3d30217fc66
--- /dev/null
+++ b/arch/tile/include/arch/mpipe_def.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/* Machine-generated file; do not edit. */
+
+#ifndef __ARCH_MPIPE_DEF_H__
+#define __ARCH_MPIPE_DEF_H__
+#define MPIPE_MMIO_ADDR__REGION_SHIFT 26
+#define MPIPE_MMIO_ADDR__REGION_VAL_CFG 0x0
+#define MPIPE_MMIO_ADDR__REGION_VAL_IDMA 0x4
+#define MPIPE_MMIO_ADDR__REGION_VAL_EDMA 0x5
+#define MPIPE_MMIO_ADDR__REGION_VAL_BSM 0x6
+#define MPIPE_BSM_REGION_VAL__VA_SHIFT 7
+#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_128 0x0
+#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_256 0x1
+#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_512 0x2
+#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_1024 0x3
+#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_1664 0x4
+#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_4096 0x5
+#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_10368 0x6
+#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_16384 0x7
+#define MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_DFA 0x0
+#define MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_FIXED 0x1
+#define MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_ALWAYS_PICK 0x2
+#define MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_STICKY 0x3
+#define MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_STICKY_RAND 0x7
+#define MPIPE_LBL_NR_STATE__FIRST_WORD 0x2138
+#endif /* !defined(__ARCH_MPIPE_DEF_H__) */
diff --git a/arch/tile/include/arch/mpipe_shm.h b/arch/tile/include/arch/mpipe_shm.h
new file mode 100644
index 000000000000..f2e9e122818d
--- /dev/null
+++ b/arch/tile/include/arch/mpipe_shm.h
@@ -0,0 +1,509 @@
1/*
2 * Copyright 2012 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15/* Machine-generated file; do not edit. */
16
17
18#ifndef __ARCH_MPIPE_SHM_H__
19#define __ARCH_MPIPE_SHM_H__
20
21#include <arch/abi.h>
22#include <arch/mpipe_shm_def.h>
23
24#ifndef __ASSEMBLER__
25/**
26 * MPIPE eDMA Descriptor.
27 * The eDMA descriptor is written by software and consumed by hardware. It
28 * is used to specify the location of egress packet data to be sent out of
29 * the chip via one of the packet interfaces.
30 */
31
32__extension__
33typedef union
34{
35 struct
36 {
37 /* Word 0 */
38
39#ifndef __BIG_ENDIAN__
40 /**
41 * Generation number. Used to indicate a valid descriptor in ring. When
42 * a new descriptor is written into the ring, software must toggle this
43 * bit. The net effect is that the GEN bit being written into new
44 * descriptors toggles each time the ring tail pointer wraps.
45 */
46 uint_reg_t gen : 1;
47 /** Reserved. Must be zero. */
48 uint_reg_t r0 : 7;
49 /** Checksum generation enabled for this transfer. */
50 uint_reg_t csum : 1;
51 /**
52 * Nothing to be sent. Used, for example, when software has dropped a
53 * packet but still wishes to return all of the associated buffers.
54 */
55 uint_reg_t ns : 1;
56 /**
57 * Notification interrupt will be delivered when packet has been egressed.
58 */
59 uint_reg_t notif : 1;
60 /**
61 * Boundary indicator. When 1, this transfer includes the EOP for this
62 * command. Must be clear on all but the last descriptor for an egress
63 * packet.
64 */
65 uint_reg_t bound : 1;
66 /** Reserved. Must be zero. */
67 uint_reg_t r1 : 4;
68 /**
69 * Number of bytes to be sent for this descriptor. When zero, no data
70 * will be moved and the buffer descriptor will be ignored. If the
71 * buffer descriptor indicates that it is chained, the low 7 bits of the
72 * VA indicate the offset within the first buffer (e.g. 127 bytes is the
73 * maximum offset into the first buffer). If the size exceeds a single
74 * buffer, subsequent buffer descriptors will be fetched prior to
75 * processing the next eDMA descriptor in the ring.
76 */
77 uint_reg_t xfer_size : 14;
78 /** Reserved. Must be zero. */
79 uint_reg_t r2 : 2;
80 /**
81 * Destination of checksum relative to CSUM_START relative to the first
82 * byte moved by this descriptor. Must be zero if CSUM=0 in this
83 * descriptor. Must be less than XFER_SIZE (e.g. the first byte of the
84 * CSUM_DEST must be within the span of this descriptor).
85 */
86 uint_reg_t csum_dest : 8;
87 /**
88 * Start byte of checksum relative to the first byte moved by this
89 * descriptor. If this is not the first descriptor for the egress
90 * packet, CSUM_START is still relative to the first byte in this
91 * descriptor. Must be zero if CSUM=0 in this descriptor.
92 */
93 uint_reg_t csum_start : 8;
94 /**
95 * Initial value for 16-bit 1's compliment checksum if enabled via CSUM.
96 * Specified in network order. That is, bits[7:0] will be added to the
97 * byte pointed to by CSUM_START and bits[15:8] will be added to the byte
98 * pointed to by CSUM_START+1 (with appropriate 1's compliment carries).
99 * Must be zero if CSUM=0 in this descriptor.
100 */
101 uint_reg_t csum_seed : 16;
102#else /* __BIG_ENDIAN__ */
103 uint_reg_t csum_seed : 16;
104 uint_reg_t csum_start : 8;
105 uint_reg_t csum_dest : 8;
106 uint_reg_t r2 : 2;
107 uint_reg_t xfer_size : 14;
108 uint_reg_t r1 : 4;
109 uint_reg_t bound : 1;
110 uint_reg_t notif : 1;
111 uint_reg_t ns : 1;
112 uint_reg_t csum : 1;
113 uint_reg_t r0 : 7;
114 uint_reg_t gen : 1;
115#endif
116
117 /* Word 1 */
118
119#ifndef __BIG_ENDIAN__
120 /** Virtual address. Must be sign extended by consumer. */
121 int_reg_t va : 42;
122 /** Reserved. */
123 uint_reg_t __reserved_0 : 6;
124 /** Index of the buffer stack to which this buffer belongs. */
125 uint_reg_t stack_idx : 5;
126 /** Reserved. */
127 uint_reg_t __reserved_1 : 3;
128 /**
129 * Instance ID. For devices that support more than one mPIPE instance,
130 * this field indicates the buffer owner. If the INST field does not
131 * match the mPIPE's instance number when a packet is egressed, buffers
132 * with HWB set will be returned to the other mPIPE instance.
133 */
134 uint_reg_t inst : 1;
135 /** Reserved. */
136 uint_reg_t __reserved_2 : 1;
137 /**
138 * Always set to one by hardware in iDMA packet descriptors. For eDMA,
139 * indicates whether the buffer will be released to the buffer stack
140 * manager. When 0, software is responsible for releasing the buffer.
141 */
142 uint_reg_t hwb : 1;
143 /**
144 * Encoded size of buffer. Set by the ingress hardware for iDMA packet
145 * descriptors. For eDMA descriptors, indicates the buffer size if .c
146 * indicates a chained packet. If an eDMA descriptor is not chained and
147 * the .hwb bit is not set, this field is ignored and the size is
148 * specified by the .xfer_size field.
149 * 0 = 128 bytes
150 * 1 = 256 bytes
151 * 2 = 512 bytes
152 * 3 = 1024 bytes
153 * 4 = 1664 bytes
154 * 5 = 4096 bytes
155 * 6 = 10368 bytes
156 * 7 = 16384 bytes
157 */
158 uint_reg_t size : 3;
159 /**
160 * Chaining configuration for the buffer. Indicates that an ingress
161 * packet or egress command is chained across multiple buffers, with each
162 * buffer's size indicated by the .size field.
163 */
164 uint_reg_t c : 2;
165#else /* __BIG_ENDIAN__ */
166 uint_reg_t c : 2;
167 uint_reg_t size : 3;
168 uint_reg_t hwb : 1;
169 uint_reg_t __reserved_2 : 1;
170 uint_reg_t inst : 1;
171 uint_reg_t __reserved_1 : 3;
172 uint_reg_t stack_idx : 5;
173 uint_reg_t __reserved_0 : 6;
174 int_reg_t va : 42;
175#endif
176
177 };
178
179 /** Word access */
180 uint_reg_t words[2];
181} MPIPE_EDMA_DESC_t;
182
183/**
184 * MPIPE Packet Descriptor.
185 * The packet descriptor is filled by the mPIPE's classification,
186 * load-balancing, and buffer management services. Some fields are consumed
187 * by mPIPE hardware, and others are consumed by Tile software.
188 */
189
190__extension__
191typedef union
192{
193 struct
194 {
195 /* Word 0 */
196
197#ifndef __BIG_ENDIAN__
198 /**
199 * Notification ring into which this packet descriptor is written.
200 * Typically written by load balancer, but can be overridden by
201 * classification program if NR is asserted.
202 */
203 uint_reg_t notif_ring : 8;
204 /** Source channel for this packet. Written by mPIPE DMA hardware. */
205 uint_reg_t channel : 5;
206 /** Reserved. */
207 uint_reg_t __reserved_0 : 1;
208 /**
209 * MAC Error.
210 * Generated by the MAC interface. Asserted if there was an overrun of
211 * the MAC's receive FIFO. This condition generally only occurs if the
212 * mPIPE clock is running too slowly.
213 */
214 uint_reg_t me : 1;
215 /**
216 * Truncation Error.
217 * Written by the iDMA hardware. Asserted if packet was truncated due to
218 * insufficient space in iPkt buffer
219 */
220 uint_reg_t tr : 1;
221 /**
222 * Written by the iDMA hardware. Indicates the number of bytes written
223 * to Tile memory. In general, this is the actual size of the packet as
224 * received from the MAC. But if the packet is truncated due to running
225 * out of buffers or due to the iPkt buffer filling up, then the L2_SIZE
226 * will be reduced to reflect the actual number of valid bytes written to
227 * Tile memory.
228 */
229 uint_reg_t l2_size : 14;
230 /**
231 * CRC Error.
232 * Generated by the MAC. Asserted if MAC indicated an L2 CRC error or
233 * other L2 error (bad length etc.) on the packet.
234 */
235 uint_reg_t ce : 1;
236 /**
237 * Cut Through.
238 * Written by the iDMA hardware.  Asserted if the packet was not completely
239 * received before being sent to the classifier.  L2_Size indicates the
240 * number of bytes received so far.
241 */
242 uint_reg_t ct : 1;
243 /**
244 * Written by the classification program. Used by the load balancer to
245 * select the ring into which this packet descriptor is written.
246 */
247 uint_reg_t bucket_id : 13;
248 /** Reserved. */
249 uint_reg_t __reserved_1 : 3;
250 /**
251 * Checksum.
252 * Written by classification program. When 1, the checksum engine will
253 * perform checksum based on the CSUM_SEED, CSUM_START, and CSUM_BYTES
254 * fields. The result will be placed in CSUM_VAL.
255 */
256 uint_reg_t cs : 1;
257 /**
258 * Notification Ring Select.
259 * Written by the classification program. When 1, the NotifRingIDX is
260 * set by classification program rather than being set by load balancer.
261 */
262 uint_reg_t nr : 1;
263 /**
264 * Written by classification program. Indicates whether packet and
265 * descriptor should both be dropped, both be delivered, or only the
266 * descriptor should be delivered.
267 */
268 uint_reg_t dest : 2;
269 /**
270 * General Purpose Sequence Number Enable.
271 * Written by the classification program. When 1, the GP_SQN_SEL field
272 * contains the sequence number selector and the GP_SQN field will be
273 * replaced with the associated sequence number. When clear, the GP_SQN
274 * field is left intact and may be used as "Custom" bytes.
275 */
276 uint_reg_t sq : 1;
277 /**
278 * TimeStamp Enable.
279 * Enable TimeStamp insertion. When clear, timestamp field may be filled
280 * with custom data by classifier. When set, hardware inserts the
281 * timestamp when the start of packet is received from the MAC.
282 */
283 uint_reg_t ts : 1;
284 /**
285 * Packet Sequence Number Enable.
286 * Enable PacketSQN insertion. When clear, PacketSQN field may be filled
287 * with custom data by classifier. When set, hardware inserts the packet
288 * sequence number when the packet descriptor is written to a
289 * notification ring.
290 */
291 uint_reg_t ps : 1;
292 /**
293 * Buffer Error.
294 * Written by the iDMA hardware. Asserted if iDMA ran out of buffers
295 * while writing the packet. Software must still return any buffer
296 * descriptors whose C field indicates a valid descriptor was consumed.
297 */
298 uint_reg_t be : 1;
299 /**
300 * Written by the classification program. The associated counter is
301 * incremented when the packet is sent.
302 */
303 uint_reg_t ctr0 : 5;
304 /** Reserved. */
305 uint_reg_t __reserved_2 : 3;
306#else /* __BIG_ENDIAN__ */
307 uint_reg_t __reserved_2 : 3;
308 uint_reg_t ctr0 : 5;
309 uint_reg_t be : 1;
310 uint_reg_t ps : 1;
311 uint_reg_t ts : 1;
312 uint_reg_t sq : 1;
313 uint_reg_t dest : 2;
314 uint_reg_t nr : 1;
315 uint_reg_t cs : 1;
316 uint_reg_t __reserved_1 : 3;
317 uint_reg_t bucket_id : 13;
318 uint_reg_t ct : 1;
319 uint_reg_t ce : 1;
320 uint_reg_t l2_size : 14;
321 uint_reg_t tr : 1;
322 uint_reg_t me : 1;
323 uint_reg_t __reserved_0 : 1;
324 uint_reg_t channel : 5;
325 uint_reg_t notif_ring : 8;
326#endif
327
328 /* Word 1 */
329
330#ifndef __BIG_ENDIAN__
331 /**
332 * Written by the classification program. The associated counter is
333 * incremented when the packet is sent.
334 */
335 uint_reg_t ctr1 : 5;
336 /** Reserved. */
337 uint_reg_t __reserved_3 : 3;
338 /**
339 * Written by classification program. Indicates the start byte for
340 * checksum. Relative to 1st byte received from MAC.
341 */
342 uint_reg_t csum_start : 8;
343 /**
344 * Checksum seed written by classification program. Overwritten with
345 * resultant checksum if CS bit is asserted.  The endianness of the CSUM
346 * value bits, when viewed by Tile software, matches the packet byte order.
347 * That is, bits[7:0] of the resulting checksum value correspond to
348 * earlier (more significant) bytes in the packet.  To spare classifier
349 * software from having to byte swap the CSUM_SEED, the iDMA checksum
350 * engine byte swaps the classifier's result before seeding the checksum
351 * calculation. Thus, the CSUM_START byte of packet data is added to
352 * bits[15:8] of the CSUM_SEED field generated by the classifier. This
353 * byte swap will be visible to Tile software if the CS bit is clear.
354 */
355 uint_reg_t csum_seed_val : 16;
356 /**
357 * Written by the classification program. Not interpreted by mPIPE
358 * hardware.
359 */
360 uint_reg_t custom0 : 32;
361#else /* __BIG_ENDIAN__ */
362 uint_reg_t custom0 : 32;
363 uint_reg_t csum_seed_val : 16;
364 uint_reg_t csum_start : 8;
365 uint_reg_t __reserved_3 : 3;
366 uint_reg_t ctr1 : 5;
367#endif
368
369 /* Word 2 */
370
371#ifndef __BIG_ENDIAN__
372 /**
373 * Written by the classification program. Not interpreted by mPIPE
374 * hardware.
375 */
376 uint_reg_t custom1 : 64;
377#else /* __BIG_ENDIAN__ */
378 uint_reg_t custom1 : 64;
379#endif
380
381 /* Word 3 */
382
383#ifndef __BIG_ENDIAN__
384 /**
385 * Written by the classification program. Not interpreted by mPIPE
386 * hardware.
387 */
388 uint_reg_t custom2 : 64;
389#else /* __BIG_ENDIAN__ */
390 uint_reg_t custom2 : 64;
391#endif
392
393 /* Word 4 */
394
395#ifndef __BIG_ENDIAN__
396 /**
397 * Written by the classification program. Not interpreted by mPIPE
398 * hardware.
399 */
400 uint_reg_t custom3 : 64;
401#else /* __BIG_ENDIAN__ */
402 uint_reg_t custom3 : 64;
403#endif
404
405 /* Word 5 */
406
407#ifndef __BIG_ENDIAN__
408 /**
409 * Sequence number applied when the packet is distributed.  The classifier
410 * selects which sequence number is to be applied by writing the 13-bit
411 * SQN-selector into this field.
412 */
413 uint_reg_t gp_sqn : 16;
414 /**
415 * Written by notification hardware. The packet sequence number is
416 * incremented for each packet that wasn't dropped.
417 */
418 uint_reg_t packet_sqn : 48;
419#else /* __BIG_ENDIAN__ */
420 uint_reg_t packet_sqn : 48;
421 uint_reg_t gp_sqn : 16;
422#endif
423
424 /* Word 6 */
425
426#ifndef __BIG_ENDIAN__
427 /**
428 * Written by hardware when the start-of-packet is received by the mPIPE
429 * from the MAC. This is the nanoseconds part of the packet timestamp.
430 */
431 uint_reg_t time_stamp_ns : 32;
432 /**
433 * Written by hardware when the start-of-packet is received by the mPIPE
434 * from the MAC. This is the seconds part of the packet timestamp.
435 */
436 uint_reg_t time_stamp_sec : 32;
437#else /* __BIG_ENDIAN__ */
438 uint_reg_t time_stamp_sec : 32;
439 uint_reg_t time_stamp_ns : 32;
440#endif
441
442 /* Word 7 */
443
444#ifndef __BIG_ENDIAN__
445 /** Virtual address. Must be sign extended by consumer. */
446 int_reg_t va : 42;
447 /** Reserved. */
448 uint_reg_t __reserved_4 : 6;
449 /** Index of the buffer stack to which this buffer belongs. */
450 uint_reg_t stack_idx : 5;
451 /** Reserved. */
452 uint_reg_t __reserved_5 : 3;
453 /**
454 * Instance ID. For devices that support more than one mPIPE instance,
455 * this field indicates the buffer owner. If the INST field does not
456 * match the mPIPE's instance number when a packet is egressed, buffers
457 * with HWB set will be returned to the other mPIPE instance.
458 */
459 uint_reg_t inst : 1;
460 /** Reserved. */
461 uint_reg_t __reserved_6 : 1;
462 /**
463 * Always set to one by hardware in iDMA packet descriptors. For eDMA,
464 * indicates whether the buffer will be released to the buffer stack
465 * manager. When 0, software is responsible for releasing the buffer.
466 */
467 uint_reg_t hwb : 1;
468 /**
469 * Encoded size of buffer. Set by the ingress hardware for iDMA packet
470 * descriptors. For eDMA descriptors, indicates the buffer size if .c
471 * indicates a chained packet. If an eDMA descriptor is not chained and
472 * the .hwb bit is not set, this field is ignored and the size is
473 * specified by the .xfer_size field.
474 * 0 = 128 bytes
475 * 1 = 256 bytes
476 * 2 = 512 bytes
477 * 3 = 1024 bytes
478 * 4 = 1664 bytes
479 * 5 = 4096 bytes
480 * 6 = 10368 bytes
481 * 7 = 16384 bytes
482 */
483 uint_reg_t size : 3;
484 /**
485 * Chaining configuration for the buffer.  Indicates whether an ingress
486 * packet or egress command is chained across multiple buffers, with each
487 * buffer's size indicated by the .size field.
488 */
489 uint_reg_t c : 2;
490#else /* __BIG_ENDIAN__ */
491 uint_reg_t c : 2;
492 uint_reg_t size : 3;
493 uint_reg_t hwb : 1;
494 uint_reg_t __reserved_6 : 1;
495 uint_reg_t inst : 1;
496 uint_reg_t __reserved_5 : 3;
497 uint_reg_t stack_idx : 5;
498 uint_reg_t __reserved_4 : 6;
499 int_reg_t va : 42;
500#endif
501
502 };
503
504 /** Word access */
505 uint_reg_t words[8];
506} MPIPE_PDESC_t;
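/* Example (an illustrative sketch, not part of the generated
 * definitions): a consumer might check the error bits and the payload
 * size of a received descriptor like this, where "handle_error" and
 * "consume" are hypothetical:
 *
 *   void examine(const MPIPE_PDESC_t *desc)
 *   {
 *       if (desc->ce || desc->me || desc->tr || desc->be)
 *           handle_error(desc);
 *       else
 *           consume(desc->l2_size);
 *   }
 */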
507#endif /* !defined(__ASSEMBLER__) */
508
509#endif /* !defined(__ARCH_MPIPE_SHM_H__) */
diff --git a/arch/tile/include/arch/mpipe_shm_def.h b/arch/tile/include/arch/mpipe_shm_def.h
new file mode 100644
index 000000000000..6124d39c8318
--- /dev/null
+++ b/arch/tile/include/arch/mpipe_shm_def.h
@@ -0,0 +1,23 @@
1/*
2 * Copyright 2012 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15/* Machine-generated file; do not edit. */
16
17#ifndef __ARCH_MPIPE_SHM_DEF_H__
18#define __ARCH_MPIPE_SHM_DEF_H__
19#define MPIPE_EDMA_DESC_WORD1__C_VAL_UNCHAINED 0x0
20#define MPIPE_EDMA_DESC_WORD1__C_VAL_CHAINED 0x1
21#define MPIPE_EDMA_DESC_WORD1__C_VAL_NOT_RDY 0x2
22#define MPIPE_EDMA_DESC_WORD1__C_VAL_INVALID 0x3
23#endif /* !defined(__ARCH_MPIPE_SHM_DEF_H__) */
diff --git a/arch/tile/include/gxio/iorpc_mpipe.h b/arch/tile/include/gxio/iorpc_mpipe.h
new file mode 100644
index 000000000000..9d50fce1b1a7
--- /dev/null
+++ b/arch/tile/include/gxio/iorpc_mpipe.h
@@ -0,0 +1,136 @@
1/*
2 * Copyright 2012 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15/* This file is machine-generated; DO NOT EDIT! */
16#ifndef __GXIO_MPIPE_LINUX_RPC_H__
17#define __GXIO_MPIPE_LINUX_RPC_H__
18
19#include <hv/iorpc.h>
20
21#include <hv/drv_mpipe_intf.h>
22#include <asm/page.h>
23#include <gxio/kiorpc.h>
24#include <gxio/mpipe.h>
25#include <linux/string.h>
26#include <linux/module.h>
27#include <asm/pgtable.h>
28
29#define GXIO_MPIPE_OP_ALLOC_BUFFER_STACKS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1200)
30#define GXIO_MPIPE_OP_INIT_BUFFER_STACK_AUX IORPC_OPCODE(IORPC_FORMAT_KERNEL_MEM, 0x1201)
31
32#define GXIO_MPIPE_OP_ALLOC_NOTIF_RINGS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1203)
33#define GXIO_MPIPE_OP_INIT_NOTIF_RING_AUX IORPC_OPCODE(IORPC_FORMAT_KERNEL_MEM, 0x1204)
34#define GXIO_MPIPE_OP_REQUEST_NOTIF_RING_INTERRUPT IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1205)
35#define GXIO_MPIPE_OP_ENABLE_NOTIF_RING_INTERRUPT IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1206)
36#define GXIO_MPIPE_OP_ALLOC_NOTIF_GROUPS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1207)
37#define GXIO_MPIPE_OP_INIT_NOTIF_GROUP IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1208)
38#define GXIO_MPIPE_OP_ALLOC_BUCKETS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1209)
39#define GXIO_MPIPE_OP_INIT_BUCKET IORPC_OPCODE(IORPC_FORMAT_NONE, 0x120a)
40#define GXIO_MPIPE_OP_ALLOC_EDMA_RINGS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x120b)
41#define GXIO_MPIPE_OP_INIT_EDMA_RING_AUX IORPC_OPCODE(IORPC_FORMAT_KERNEL_MEM, 0x120c)
42
43#define GXIO_MPIPE_OP_COMMIT_RULES IORPC_OPCODE(IORPC_FORMAT_NONE, 0x120f)
44#define GXIO_MPIPE_OP_REGISTER_CLIENT_MEMORY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1210)
45#define GXIO_MPIPE_OP_LINK_OPEN_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1211)
46#define GXIO_MPIPE_OP_LINK_CLOSE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1212)
47
48#define GXIO_MPIPE_OP_GET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x121e)
49#define GXIO_MPIPE_OP_SET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x121f)
50#define GXIO_MPIPE_OP_ADJUST_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1220)
51#define GXIO_MPIPE_OP_ARM_POLLFD IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9000)
52#define GXIO_MPIPE_OP_CLOSE_POLLFD IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9001)
53#define GXIO_MPIPE_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
54#define GXIO_MPIPE_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
55
56int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t * context,
57 unsigned int count, unsigned int first,
58 unsigned int flags);
59
60int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t * context,
61 void *mem_va, size_t mem_size,
62 unsigned int mem_flags, unsigned int stack,
63 unsigned int buffer_size_enum);
64
65
66int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t * context,
67 unsigned int count, unsigned int first,
68 unsigned int flags);
69
70int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t * context, void *mem_va,
71 size_t mem_size, unsigned int mem_flags,
72 unsigned int ring);
73
74int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t * context,
75 int inter_x, int inter_y,
76 int inter_ipi, int inter_event,
77 unsigned int ring);
78
79int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t * context,
80 unsigned int ring);
81
82int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t * context,
83 unsigned int count, unsigned int first,
84 unsigned int flags);
85
86int gxio_mpipe_init_notif_group(gxio_mpipe_context_t * context,
87 unsigned int group,
88 gxio_mpipe_notif_group_bits_t bits);
89
90int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t * context, unsigned int count,
91 unsigned int first, unsigned int flags);
92
93int gxio_mpipe_init_bucket(gxio_mpipe_context_t * context, unsigned int bucket,
94 MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info);
95
96int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t * context,
97 unsigned int count, unsigned int first,
98 unsigned int flags);
99
100int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t * context, void *mem_va,
101 size_t mem_size, unsigned int mem_flags,
102 unsigned int ring, unsigned int channel);
103
104
105int gxio_mpipe_commit_rules(gxio_mpipe_context_t * context, const void *blob,
106 size_t blob_size);
107
108int gxio_mpipe_register_client_memory(gxio_mpipe_context_t * context,
109 unsigned int iotlb, HV_PTE pte,
110 unsigned int flags);
111
112int gxio_mpipe_link_open_aux(gxio_mpipe_context_t * context,
113 _gxio_mpipe_link_name_t name, unsigned int flags);
114
115int gxio_mpipe_link_close_aux(gxio_mpipe_context_t * context, int mac);
116
117
118int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t * context, uint64_t * sec,
119 uint64_t * nsec, uint64_t * cycles);
120
121int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t * context, uint64_t sec,
122 uint64_t nsec, uint64_t cycles);
123
124int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context,
125 int64_t nsec);
126
127int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie);
128
129int gxio_mpipe_close_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie);
130
131int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t * context, HV_PTE *base);
132
133int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t * context,
134 unsigned long offset, unsigned long size);
135
136#endif /* !__GXIO_MPIPE_LINUX_RPC_H__ */
diff --git a/arch/tile/include/gxio/iorpc_mpipe_info.h b/arch/tile/include/gxio/iorpc_mpipe_info.h
new file mode 100644
index 000000000000..0bcf3f71ce8b
--- /dev/null
+++ b/arch/tile/include/gxio/iorpc_mpipe_info.h
@@ -0,0 +1,46 @@
1/*
2 * Copyright 2012 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15/* This file is machine-generated; DO NOT EDIT! */
16#ifndef __GXIO_MPIPE_INFO_LINUX_RPC_H__
17#define __GXIO_MPIPE_INFO_LINUX_RPC_H__
18
19#include <hv/iorpc.h>
20
21#include <hv/drv_mpipe_intf.h>
22#include <asm/page.h>
23#include <gxio/kiorpc.h>
24#include <gxio/mpipe.h>
25#include <linux/string.h>
26#include <linux/module.h>
27#include <asm/pgtable.h>
28
29
30#define GXIO_MPIPE_INFO_OP_ENUMERATE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1251)
31#define GXIO_MPIPE_INFO_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
32#define GXIO_MPIPE_INFO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
33
34
35int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t * context,
36 unsigned int idx,
37 _gxio_mpipe_link_name_t * name,
38 _gxio_mpipe_link_mac_t * mac);
39
40int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t * context,
41 HV_PTE *base);
42
43int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t * context,
44 unsigned long offset, unsigned long size);
45
46#endif /* !__GXIO_MPIPE_INFO_LINUX_RPC_H__ */
diff --git a/arch/tile/include/gxio/mpipe.h b/arch/tile/include/gxio/mpipe.h
new file mode 100644
index 000000000000..78c598618c97
--- /dev/null
+++ b/arch/tile/include/gxio/mpipe.h
@@ -0,0 +1,1736 @@
1/*
2 * Copyright 2012 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#ifndef _GXIO_MPIPE_H_
16#define _GXIO_MPIPE_H_
17
18/*
19 *
20 * An API for allocating, configuring, and manipulating mPIPE hardware
21 * resources.
22 */
23
24#include "common.h"
25#include "dma_queue.h"
26
27#include <linux/time.h>
28
29#include <arch/mpipe_def.h>
30#include <arch/mpipe_shm.h>
31
32#include <hv/drv_mpipe_intf.h>
33#include <hv/iorpc.h>
34
35/*
36 *
37 * The TILE-Gx mPIPE&tm; shim provides Ethernet connectivity, packet
38 * classification, and packet load balancing services. The
39 * gxio_mpipe_ API, declared in <gxio/mpipe.h>, allows applications to
40 * allocate mPIPE IO channels, configure packet distribution
41 * parameters, and send and receive Ethernet packets. The API is
42 * designed to be a minimal wrapper around the mPIPE hardware, making
43 * system calls only where necessary to preserve inter-process
44 * protection guarantees.
45 *
46 * The APIs described below allow the programmer to allocate and
47 * configure mPIPE resources. As described below, the mPIPE is a
48 * single shared hardware device that provides partitionable resources
49 * that are shared between all applications in the system. The
50 * gxio_mpipe_ API allows userspace code to make resource request
51 * calls to the hypervisor, which in turns keeps track of the
52 * resources in use by all applications, maintains protection
53 * guarantees, and resets resources upon application shutdown.
54 *
55 * We strongly recommend reading the mPIPE section of the IO Device
56 * Guide (UG404) before working with this API. Most functions in the
57 * gxio_mpipe_ API are directly analogous to hardware interfaces and
58 * the documentation assumes that the reader understands those
59 * hardware interfaces.
60 *
61 * @section mpipe__ingress mPIPE Ingress Hardware Resources
62 *
63 * The mPIPE ingress hardware provides extensive hardware offload for
64 * tasks like packet header parsing, load balancing, and memory
65 * management. This section provides a brief introduction to the
66 * hardware components and the gxio_mpipe_ calls used to manage them;
67 * see the IO Device Guide for a much more detailed description of the
68 * mPIPE's capabilities.
69 *
70 * When a packet arrives at one of the mPIPE's Ethernet MACs, it is
71 * assigned a channel number indicating which MAC received it. It
72 * then proceeds through the following hardware pipeline:
73 *
74 * @subsection mpipe__classification Classification
75 *
76 * A set of classification processors run header parsing code on each
77 * incoming packet, extracting information including the destination
78 * MAC address, VLAN, Ethernet type, and five-tuple hash. Some of
79 * this information is then used to choose which buffer stack will be
80 * used to hold the packet, and which bucket will be used by the load
81 * balancer to determine which application will receive the packet.
82 *
83 * The rules by which the buffer stack and bucket are chosen can be
84 * configured via the @ref gxio_mpipe_classifier API. A given app can
85 * specify multiple rules, each one specifying a bucket range, and a
86 * set of buffer stacks, to be used for packets matching the rule.
87 * Each rule can optionally specify a restricted set of channels,
88 * VLANs, and/or dMACs, in which it is interested. By default, a
89 * given rule starts out matching all channels associated with the
90 * mPIPE context's set of open links; all VLANs; and all dMACs.
91 * Subsequent restrictions can then be added.
92 *
93 * @subsection mpipe__load_balancing Load Balancing
94 *
95 * The mPIPE load balancer is responsible for choosing the NotifRing
96 * to which the packet will be delivered. This decision is based on
97 * the bucket number indicated by the classification program. In
98 * general, the bucket number is based on some number of low bits of
99 * the packet's flow hash (applications that aren't interested in flow
100 * hashing use a single bucket). Each load balancer bucket keeps a
101 * record of the NotifRing to which packets directed to that bucket
102 * are currently being delivered. Based on the bucket's load
103 * balancing mode (@ref gxio_mpipe_bucket_mode_t), the load balancer
104 * either forwards the packet to the previously assigned NotifRing or
105 * decides to choose a new NotifRing. If a new NotifRing is required,
106 * the load balancer chooses the least loaded ring in the NotifGroup
107 * associated with the bucket.
108 *
109 * The load balancer is a shared resource. Each application needs to
110 * explicitly allocate NotifRings, NotifGroups, and buckets, using
111 * gxio_mpipe_alloc_notif_rings(), gxio_mpipe_alloc_notif_groups(),
112 * and gxio_mpipe_alloc_buckets(). Then the application needs to
113 * configure them using gxio_mpipe_init_notif_ring() and
114 * gxio_mpipe_init_notif_group_and_buckets().
115 *
116 * @subsection mpipe__buffers Buffer Selection and Packet Delivery
117 *
118 * Once the load balancer has chosen the destination NotifRing, the
119 * mPIPE DMA engine pops at least one buffer off of the 'buffer stack'
120 * chosen by the classification program and DMAs the packet data into
121 * that buffer. Each buffer stack provides a hardware-accelerated
122 * stack of data buffers with the same size. If the packet data is
123 * larger than the buffers provided by the chosen buffer stack, the
124 * mPIPE hardware pops off multiple buffers and chains the packet data
125 * through a multi-buffer linked list. Once the packet data is
126 * delivered to the buffer(s), the mPIPE hardware writes the
127 * ::gxio_mpipe_idesc_t metadata object (calculated by the classifier)
128 * into the NotifRing and increments the number of packets delivered
129 * to that ring.
130 *
131 * Applications can push buffers onto a buffer stack by calling
132 * gxio_mpipe_push_buffer() or by egressing a packet with the
133 * ::gxio_mpipe_edesc_t::hwb bit set, indicating that the egressed
134 * buffers should be returned to the stack.
135 *
136 * Applications can allocate and initialize buffer stacks with the
137 * gxio_mpipe_alloc_buffer_stacks() and gxio_mpipe_init_buffer_stack()
138 * APIs.
139 *
140 * The application must also register the memory pages that will hold
141 * packets. This requires calling gxio_mpipe_register_page() for each
142 * memory page that will hold packets allocated by the application for
143 * a given buffer stack. Since each buffer stack is limited to 16
144 * registered pages, it may be necessary to use huge pages, or even
145 * extremely huge pages, to hold all the buffers.
146 *
147 * @subsection mpipe__iqueue NotifRings
148 *
149 * Each NotifRing is a region of shared memory, allocated by the
150 * application, to which the mPIPE delivers packet descriptors
151 * (::gxio_mpipe_idesc_t). The application can allocate them via
152 * gxio_mpipe_alloc_notif_rings(). The application can then either
153 * explicitly initialize them with gxio_mpipe_init_notif_ring() and
154 * then read from them manually, or can make use of the convenience
155 * wrappers provided by @ref gxio_mpipe_wrappers.
156 *
157 * @section mpipe__egress mPIPE Egress Hardware
158 *
159 * Applications use eDMA rings to queue packets for egress. The
160 * application can allocate them via gxio_mpipe_alloc_edma_rings().
161 * The application can then either explicitly initialize them with
162 * gxio_mpipe_init_edma_ring() and then write to them manually, or
163 * can make use of the convenience wrappers provided by
164 * @ref gxio_mpipe_wrappers.
165 *
166 * @section gxio__shortcomings Plans for Future API Revisions
167 *
168 * The API defined here is only an initial version of the mPIPE API.
169 * Future plans include:
170 *
171 * - Higher level wrapper functions to provide common initialization
172 * patterns. This should help users start writing mPIPE programs
173 * without having to learn the details of the hardware.
174 *
175 * - Support for reset and deallocation of resources, including
176 * cleanup upon application shutdown.
177 *
178 * - Support for calling these APIs in the BME.
179 *
180 * - Support for IO interrupts.
181 *
182 * - Clearer definitions of thread safety guarantees.
183 *
184 * @section gxio__mpipe_examples Examples
185 *
186 * See the following mPIPE example programs for more information about
187 * allocating mPIPE resources and using them in real applications:
188 *
189 * - @ref mpipe/ingress/app.c : Receiving packets.
190 *
191 * - @ref mpipe/forward/app.c : Forwarding packets.
192 *
193 * Note that there are several more examples.
194 */
195
196/* Flags that can be passed to resource allocation functions. */
197enum gxio_mpipe_alloc_flags_e {
198 /* Require an allocation to start at a specified resource index. */
199 GXIO_MPIPE_ALLOC_FIXED = HV_MPIPE_ALLOC_FIXED,
200};
201
202/* Flags that can be passed to memory registration functions. */
203enum gxio_mpipe_mem_flags_e {
204 /* Do not fill L3 when writing, and invalidate lines upon egress. */
205 GXIO_MPIPE_MEM_FLAG_NT_HINT = IORPC_MEM_BUFFER_FLAG_NT_HINT,
206
207 /* L3 cache fills should only populate IO cache ways. */
208 GXIO_MPIPE_MEM_FLAG_IO_PIN = IORPC_MEM_BUFFER_FLAG_IO_PIN,
209};
210
211/* An ingress packet descriptor. When a packet arrives, the mPIPE
212 * hardware generates this structure and writes it into a NotifRing.
213 */
214typedef MPIPE_PDESC_t gxio_mpipe_idesc_t;
215
216/* An egress command descriptor. Applications write this structure
217 * into eDMA rings and the hardware performs the indicated operation
218 * (normally involving egressing some bytes). Note that egressing a
219 * single packet may involve multiple egress command descriptors.
220 */
221typedef MPIPE_EDMA_DESC_t gxio_mpipe_edesc_t;
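/* A minimal sketch of filling an egress descriptor for a single,
 * unchained buffer that the hardware should return to its stack after
 * egress.  Illustrative only; "packet_data", "packet_len", and "stack"
 * are hypothetical, and the .xfer_size field is assumed from the
 * descriptor documentation above:
 *
 *   gxio_mpipe_edesc_t edesc = { { 0 } };
 *   edesc.va = (unsigned long)packet_data;
 *   edesc.xfer_size = packet_len;
 *   edesc.hwb = 1;
 *   edesc.stack_idx = stack;
 *   edesc.c = MPIPE_EDMA_DESC_WORD1__C_VAL_UNCHAINED;
 */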
222
223/* Get the "va" field from an "idesc".
224 *
225 * This is the address at which the ingress hardware copied the first
226 * byte of the packet.
227 *
228 * If the classifier detected a custom header, then this will point to
229 * the custom header, and gxio_mpipe_idesc_get_l2_start() will point
230 * to the actual L2 header.
231 *
232 * Note that this value may be misleading if "idesc->be" is set.
233 *
234 * @param idesc An ingress packet descriptor.
235 */
236static inline unsigned char *gxio_mpipe_idesc_get_va(gxio_mpipe_idesc_t *idesc)
237{
238 return (unsigned char *)(long)idesc->va;
239}
240
241/* Get the "xfer_size" from an "idesc".
242 *
243 * This is the actual number of packet bytes transferred into memory
244 * by the hardware.
245 *
246 * Note that this value may be misleading if "idesc->be" is set.
247 *
248 * @param idesc An ingress packet descriptor.
249 *
250 * ISSUE: Is this the best name for this?
251 * FIXME: Add more docs about chaining, clipping, etc.
252 */
253static inline unsigned int gxio_mpipe_idesc_get_xfer_size(gxio_mpipe_idesc_t
254 *idesc)
255{
256 return idesc->l2_size;
257}
258
259/* Get the "l2_offset" from an "idesc".
260 *
261 * Extremely customized classifiers might not support this function.
262 *
263 * This is the number of bytes between the "va" and the L2 header.
264 *
265 * The L2 header consists of a destination MAC address, a source MAC
266 * address, and an initial ethertype.  Various initial ethertypes
267 * allow encoding extra information in the L2 header, often including
268 * a VLAN, and/or a new ethertype.
269 *
270 * Note that the "l2_offset" will be non-zero if (and only if) the
271 * classifier processed a custom header for the packet.
272 *
273 * @param idesc An ingress packet descriptor.
274 */
275static inline uint8_t gxio_mpipe_idesc_get_l2_offset(gxio_mpipe_idesc_t *idesc)
276{
277 return (idesc->custom1 >> 32) & 0xFF;
278}
279
280/* Get the "l2_start" from an "idesc".
281 *
282 * This is simply gxio_mpipe_idesc_get_va() plus
283 * gxio_mpipe_idesc_get_l2_offset().
284 *
285 * @param idesc An ingress packet descriptor.
286 */
287static inline unsigned char *gxio_mpipe_idesc_get_l2_start(gxio_mpipe_idesc_t
288 *idesc)
289{
290 unsigned char *va = gxio_mpipe_idesc_get_va(idesc);
291 return va + gxio_mpipe_idesc_get_l2_offset(idesc);
292}
293
294/* Get the "l2_length" from an "idesc".
295 *
296 * This is simply gxio_mpipe_idesc_get_xfer_size() minus
297 * gxio_mpipe_idesc_get_l2_offset().
298 *
299 * @param idesc An ingress packet descriptor.
300 */
301static inline unsigned int gxio_mpipe_idesc_get_l2_length(gxio_mpipe_idesc_t
302 *idesc)
303{
304 unsigned int xfer_size = idesc->l2_size;
305 return xfer_size - gxio_mpipe_idesc_get_l2_offset(idesc);
306}
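/* Example (an illustrative sketch): locating the L2 header of a
 * received packet with the accessors above, where "idesc" is a
 * descriptor freshly read from a NotifRing:
 *
 *   unsigned char *l2 = gxio_mpipe_idesc_get_l2_start(idesc);
 *   unsigned int len = gxio_mpipe_idesc_get_l2_length(idesc);
 *   // l2[0..5] holds the dMAC, l2[6..11] the sMAC, and l2[12..13]
 *   // the initial ethertype; len counts bytes written to memory.
 */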
307
308/* A context object used to manage mPIPE hardware resources. */
309typedef struct {
310
311 /* File descriptor for calling up to Linux (and thus the HV). */
312 int fd;
313
314 /* The VA at which configuration registers are mapped. */
315 char *mmio_cfg_base;
316
317 /* The VA at which IDMA, EDMA, and buffer manager are mapped. */
318 char *mmio_fast_base;
319
320 /* The "initialized" buffer stacks. */
321 gxio_mpipe_rules_stacks_t __stacks;
322
323} gxio_mpipe_context_t;
324
325/* This is only used internally, but it's most easily made visible here. */
326typedef gxio_mpipe_context_t gxio_mpipe_info_context_t;
327
328/* Initialize an mPIPE context.
329 *
330 * This function allocates an mPIPE "service domain" and maps the MMIO
331 * registers into the caller's VA space.
332 *
333 * @param context Context object to be initialized.
334 * @param mpipe_instance Instance number of mPIPE shim to be controlled via
335 * context.
336 */
337extern int gxio_mpipe_init(gxio_mpipe_context_t *context,
338 unsigned int mpipe_instance);
339
340/* Destroy an mPIPE context.
341 *
342 * This function frees the mPIPE "service domain" and unmaps the MMIO
343 * registers from the caller's VA space.
344 *
345 * If a user process exits without calling this routine, the kernel
346 * will destroy the mPIPE context as part of process teardown.
347 *
348 * @param context Context object to be destroyed.
349 */
350extern int gxio_mpipe_destroy(gxio_mpipe_context_t *context);
351
352/*****************************************************************
353 * Buffer Stacks *
354 ******************************************************************/
355
356/* Allocate a set of buffer stacks.
357 *
358 * The return value is unspecified if count is zero.
359 *
360 * @param context An initialized mPIPE context.
361 * @param count Number of stacks required.
362 * @param first Index of first stack if ::GXIO_MPIPE_ALLOC_FIXED flag is set,
363 * otherwise ignored.
364 * @param flags Flag bits from ::gxio_mpipe_alloc_flags_e.
365 * @return Index of first allocated buffer stack, or
366 * ::GXIO_MPIPE_ERR_NO_BUFFER_STACK if allocation failed.
367 */
368extern int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t *context,
369 unsigned int count,
370 unsigned int first,
371 unsigned int flags);
372
373/* Enum codes for buffer sizes supported by mPIPE. */
374typedef enum {
375 /* 128 byte packet data buffer. */
376 GXIO_MPIPE_BUFFER_SIZE_128 = MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_128,
377 /* 256 byte packet data buffer. */
378 GXIO_MPIPE_BUFFER_SIZE_256 = MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_256,
379 /* 512 byte packet data buffer. */
380 GXIO_MPIPE_BUFFER_SIZE_512 = MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_512,
381 /* 1024 byte packet data buffer. */
382 GXIO_MPIPE_BUFFER_SIZE_1024 = MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_1024,
383 /* 1664 byte packet data buffer. */
384 GXIO_MPIPE_BUFFER_SIZE_1664 = MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_1664,
385 /* 4096 byte packet data buffer. */
386 GXIO_MPIPE_BUFFER_SIZE_4096 = MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_4096,
387 /* 10368 byte packet data buffer. */
388 GXIO_MPIPE_BUFFER_SIZE_10368 =
389 MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_10368,
390 /* 16384 byte packet data buffer. */
391 GXIO_MPIPE_BUFFER_SIZE_16384 = MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_16384
392} gxio_mpipe_buffer_size_enum_t;
393
394/* Convert a buffer size in bytes into a buffer size enum. */
395extern gxio_mpipe_buffer_size_enum_t
396gxio_mpipe_buffer_size_to_buffer_size_enum(size_t size);
397
398/* Convert a buffer size enum into a buffer size in bytes. */
399extern size_t
400gxio_mpipe_buffer_size_enum_to_buffer_size(gxio_mpipe_buffer_size_enum_t
401 buffer_size_enum);
402
403/* Calculate the number of bytes required to store a given number of
404 * buffers in the memory registered with a buffer stack via
405 * gxio_mpipe_init_buffer_stack().
406 */
407extern size_t gxio_mpipe_calc_buffer_stack_bytes(unsigned long buffers);
408
409/* Initialize a buffer stack. This function binds a region of memory
410 * to be used by the hardware for storing buffer addresses pushed via
411 * gxio_mpipe_push_buffer() or as the result of sending a buffer out
412 * the egress with the 'push to stack when done' bit set. Once this
413 * function returns, the memory region's contents may be arbitrarily
414 * modified by the hardware at any time and software should not access
415 * the memory region again.
416 *
417 * @param context An initialized mPIPE context.
418 * @param stack The buffer stack index.
419 * @param buffer_size_enum The size of each buffer in the buffer stack,
420 * as an enum.
421 * @param mem The address of the buffer stack. This memory must be
422 * physically contiguous and aligned to a 64kB boundary.
423 * @param mem_size The size of the buffer stack, in bytes.
424 * @param mem_flags ::gxio_mpipe_mem_flags_e memory flags.
425 * @return Zero on success, ::GXIO_MPIPE_ERR_INVAL_BUFFER_SIZE if
426 * buffer_size_enum is invalid, ::GXIO_MPIPE_ERR_BAD_BUFFER_STACK if
427 * stack has not been allocated.
428 */
429extern int gxio_mpipe_init_buffer_stack(gxio_mpipe_context_t *context,
430 unsigned int stack,
431 gxio_mpipe_buffer_size_enum_t
432 buffer_size_enum, void *mem,
433 size_t mem_size,
434 unsigned int mem_flags);
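/* Putting the buffer stack calls together (a minimal sketch; error
 * handling is omitted and "mem" is assumed to be a physically
 * contiguous, 64kB-aligned region obtained elsewhere):
 *
 *   int stack = gxio_mpipe_alloc_buffer_stacks(&ctx, 1, 0, 0);
 *   gxio_mpipe_buffer_size_enum_t bse =
 *           gxio_mpipe_buffer_size_to_buffer_size_enum(1664);
 *   size_t bytes = gxio_mpipe_calc_buffer_stack_bytes(1000);
 *   gxio_mpipe_init_buffer_stack(&ctx, stack, bse, mem, bytes, 0);
 */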
435
436/* Push a buffer onto a previously initialized buffer stack.
437 *
438 * The size of the buffer being pushed must match the size that was
439 * registered with gxio_mpipe_init_buffer_stack(). All packet buffer
440 * addresses are 128-byte aligned; the low 7 bits of the specified
441 * buffer address will be ignored.
442 *
443 * @param context An initialized mPIPE context.
444 * @param stack The buffer stack index.
445 * @param buffer The buffer (the low seven bits are ignored).
446 */
447static inline void gxio_mpipe_push_buffer(gxio_mpipe_context_t *context,
448 unsigned int stack, void *buffer)
449{
450 MPIPE_BSM_REGION_ADDR_t offset = { {0} };
451 MPIPE_BSM_REGION_VAL_t val = { {0} };
452
453 /*
454 * The mmio_fast_base region starts at the IDMA region, so subtract
455 * off that initial offset.
456 */
457 offset.region =
458 MPIPE_MMIO_ADDR__REGION_VAL_BSM -
459 MPIPE_MMIO_ADDR__REGION_VAL_IDMA;
460 offset.stack = stack;
461
462#if __SIZEOF_POINTER__ == 4
463 val.va = ((ulong) buffer) >> MPIPE_BSM_REGION_VAL__VA_SHIFT;
464#else
465 val.va = ((long)buffer) >> MPIPE_BSM_REGION_VAL__VA_SHIFT;
466#endif
467
468 __gxio_mmio_write(context->mmio_fast_base + offset.word, val.word);
469}
470
471/* Pop a buffer off of a previously initialized buffer stack.
472 *
473 * @param context An initialized mPIPE context.
474 * @param stack The buffer stack index.
475 * @return The buffer, or NULL if the stack is empty.
476 */
477static inline void *gxio_mpipe_pop_buffer(gxio_mpipe_context_t *context,
478 unsigned int stack)
479{
480 MPIPE_BSM_REGION_ADDR_t offset = { {0} };
481
482 /*
483 * The mmio_fast_base region starts at the IDMA region, so subtract
484 * off that initial offset.
485 */
486 offset.region =
487 MPIPE_MMIO_ADDR__REGION_VAL_BSM -
488 MPIPE_MMIO_ADDR__REGION_VAL_IDMA;
489 offset.stack = stack;
490
491 while (1) {
492 /*
493 * Case 1: val.c == ..._UNCHAINED, va is non-zero.
494 * Case 2: val.c == ..._INVALID, va is zero.
495 * Case 3: val.c == ..._NOT_RDY, va is zero.
496 */
497 MPIPE_BSM_REGION_VAL_t val;
498 val.word =
499 __gxio_mmio_read(context->mmio_fast_base +
500 offset.word);
501
502 /*
503 * Handle case 1 and 2 by returning the buffer (or NULL).
504 * Handle case 3 by waiting for the prefetch buffer to refill.
505 */
506 if (val.c != MPIPE_EDMA_DESC_WORD1__C_VAL_NOT_RDY)
507 return (void *)((unsigned long)val.
508 va << MPIPE_BSM_REGION_VAL__VA_SHIFT);
509 }
510}
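/* Example (an illustrative sketch): recycling a buffer by hand.  The
 * popped buffer has the size registered with the stack; pushing it
 * back makes it available to the hardware again:
 *
 *   void *buf = gxio_mpipe_pop_buffer(&ctx, stack);
 *   if (buf != NULL) {
 *       // ... use the buffer for packet data ...
 *       gxio_mpipe_push_buffer(&ctx, stack, buf);
 *   }
 */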
511
512/*****************************************************************
513 * NotifRings *
514 ******************************************************************/
515
516/* Allocate a set of NotifRings.
517 *
518 * The return value is unspecified if count is zero.
519 *
520 * Note that NotifRings are allocated in chunks, so allocating one at
521 * a time is much less efficient than allocating several at once.
522 *
523 * @param context An initialized mPIPE context.
524 * @param count Number of NotifRings required.
525 * @param first Index of first NotifRing if ::GXIO_MPIPE_ALLOC_FIXED flag
526 * is set, otherwise ignored.
527 * @param flags Flag bits from ::gxio_mpipe_alloc_flags_e.
528 * @return Index of first allocated NotifRing, or
529 * ::GXIO_MPIPE_ERR_NO_NOTIF_RING if allocation failed.
530 */
531extern int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t *context,
532 unsigned int count, unsigned int first,
533 unsigned int flags);
534
535/* Initialize a NotifRing, using the given memory and size.
536 *
537 * @param context An initialized mPIPE context.
538 * @param ring The NotifRing index.
539 * @param mem A physically contiguous region of memory to be filled
540 * with a ring of ::gxio_mpipe_idesc_t structures.
541 * @param mem_size Number of bytes in the ring.  Must be 128, 512,
542 * 2048, or 65536, times sizeof(gxio_mpipe_idesc_t).
543 * @param mem_flags ::gxio_mpipe_mem_flags_e memory flags.
544 *
545 * @return 0 on success, ::GXIO_MPIPE_ERR_BAD_NOTIF_RING or
546 * ::GXIO_ERR_INVAL_MEMORY_SIZE on failure.
547 */
548extern int gxio_mpipe_init_notif_ring(gxio_mpipe_context_t *context,
549 unsigned int ring,
550 void *mem, size_t mem_size,
551 unsigned int mem_flags);
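/* Example (a minimal sketch): a 512-entry NotifRing needs
 * 512 * sizeof(gxio_mpipe_idesc_t) bytes of ring memory, where "mem"
 * is a suitable contiguous region obtained elsewhere:
 *
 *   size_t bytes = 512 * sizeof(gxio_mpipe_idesc_t);
 *   gxio_mpipe_init_notif_ring(&ctx, ring, mem, bytes, 0);
 */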
552
553/* Configure an interrupt to be sent to a tile on incoming NotifRing
554 * traffic. Once an interrupt is sent for a particular ring, no more
555 * will be sent until gxio_mpipe_enable_notif_ring_interrupt() is called.
556 *
557 * @param context An initialized mPIPE context.
558 * @param x X coordinate of interrupt target tile.
559 * @param y Y coordinate of interrupt target tile.
560 * @param i Index of the IPI register which will receive the interrupt.
561 * @param e Specific event which will be set in the target IPI register when
562 * the interrupt occurs.
563 * @param ring The NotifRing index.
564 * @return Zero on success, GXIO_ERR_INVAL if params are out of range.
565 */
566extern int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t
567 *context, int x, int y,
568 int i, int e,
569 unsigned int ring);
570
571/* Enable an interrupt on incoming NotifRing traffic.
572 *
573 * @param context An initialized mPIPE context.
574 * @param ring The NotifRing index.
575 * @return Zero on success, GXIO_ERR_INVAL if params are out of range.
576 */
577extern int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t
578 *context, unsigned int ring);
579
580/* Map all of a client's memory via the given IOTLB.
581 * @param context An initialized mPIPE context.
582 * @param iotlb IOTLB index.
583 * @param pte Page table entry.
584 * @param flags Flags.
585 * @return Zero on success, or a negative error code.
586 */
587extern int gxio_mpipe_register_client_memory(gxio_mpipe_context_t *context,
588 unsigned int iotlb, HV_PTE pte,
589 unsigned int flags);
590
591/*****************************************************************
592 * Notif Groups *
593 ******************************************************************/
594
595/* Allocate a set of NotifGroups.
596 *
597 * The return value is unspecified if count is zero.
598 *
599 * @param context An initialized mPIPE context.
600 * @param count Number of NotifGroups required.
601 * @param first Index of first NotifGroup if ::GXIO_MPIPE_ALLOC_FIXED flag
602 * is set, otherwise ignored.
603 * @param flags Flag bits from ::gxio_mpipe_alloc_flags_e.
604 * @return Index of first allocated NotifGroup, or
605 * ::GXIO_MPIPE_ERR_NO_NOTIF_GROUP if allocation failed.
606 */
607extern int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t *context,
608 unsigned int count,
609 unsigned int first,
610 unsigned int flags);
611
612/* Add a NotifRing to a NotifGroup. This only sets a bit in the
613 * application's 'group' object; the hardware NotifGroup can be
614 * initialized by passing 'group' to gxio_mpipe_init_notif_group() or
615 * gxio_mpipe_init_notif_group_and_buckets().
616 */
617static inline void
618gxio_mpipe_notif_group_add_ring(gxio_mpipe_notif_group_bits_t *bits, int ring)
619{
620 bits->ring_mask[ring / 64] |= (1ull << (ring % 64));
621}
622
623/* Set a particular NotifGroup bitmask. Since the load balancer
624 * makes decisions based on both bucket and NotifGroup state, most
625 * applications should use gxio_mpipe_init_notif_group_and_buckets()
626 * rather than using this function to configure just a NotifGroup.
627 */
628extern int gxio_mpipe_init_notif_group(gxio_mpipe_context_t *context,
629 unsigned int group,
630 gxio_mpipe_notif_group_bits_t bits);
631
632/*****************************************************************
633 * Load Balancer *
634 ******************************************************************/
635
636/* Allocate a set of load balancer buckets.
637 *
638 * The return value is unspecified if count is zero.
639 *
640 * Note that buckets are allocated in chunks, so allocating one at
641 * a time is much less efficient than allocating several at once.
642 *
643 * Note that the buckets are actually divided into two sub-ranges, with
644 * different sizes and different chunk sizes; the range you get by
645 * default is determined by the size of the request.  Allocations
646 * cannot span the two sub-ranges.
647 *
648 * @param context An initialized mPIPE context.
649 * @param count Number of buckets required.
650 * @param first Index of first bucket if ::GXIO_MPIPE_ALLOC_FIXED flag is set,
651 * otherwise ignored.
652 * @param flags Flag bits from ::gxio_mpipe_alloc_flags_e.
653 * @return Index of first allocated bucket, or
654 * ::GXIO_MPIPE_ERR_NO_BUCKET if allocation failed.
655 */
656extern int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t *context,
657 unsigned int count, unsigned int first,
658 unsigned int flags);
659
660/* The legal modes for gxio_mpipe_bucket_info_t and
661 * gxio_mpipe_init_notif_group_and_buckets().
662 *
663 * All modes except ::GXIO_MPIPE_BUCKET_ROUND_ROBIN expect that the user
664 * will allocate a power-of-two number of buckets and initialize them
665 * to the same mode. The classifier program then uses the appropriate
666 * number of low bits from the incoming packet's flow hash to choose a
667 * load balancer bucket. Based on that bucket's load balancing mode,
668 * reference count, and currently active NotifRing, the load balancer
669 * chooses the NotifRing to which the packet will be delivered.
670 */
671typedef enum {
672 /* All packets for a bucket go to the same NotifRing unless the
673 * NotifRing gets full, in which case packets will be dropped. If
674 * the bucket reference count ever reaches zero, a new NotifRing may
675 * be chosen.
676 */
677 GXIO_MPIPE_BUCKET_DYNAMIC_FLOW_AFFINITY =
678 MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_DFA,
679
680 /* All packets for a bucket always go to the same NotifRing.
681 */
682 GXIO_MPIPE_BUCKET_STATIC_FLOW_AFFINITY =
683 MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_FIXED,
684
685 /* All packets for a bucket go to the least full NotifRing in the
686 * group, providing round-robin load balancing behavior.
687 */
688 GXIO_MPIPE_BUCKET_ROUND_ROBIN =
689 MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_ALWAYS_PICK,
690
691 /* All packets for a bucket go to the same NotifRing unless the
692 * NotifRing gets full, at which point the bucket starts using the
693 * least full NotifRing in the group. If all NotifRings in the
694 * group are full, packets will be dropped.
695 */
696 GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY =
697 MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_STICKY,
698
699 /* All packets for a bucket go to the same NotifRing unless the
700 * NotifRing gets full, or a random timer fires, at which point the
701 * bucket starts using the least full NotifRing in the group. If
702 * all NotifRings in the group are full, packets will be dropped.
703 * WARNING: This mode is BROKEN on chips with fewer than 64 tiles.
704 */
705 GXIO_MPIPE_BUCKET_PREFER_FLOW_LOCALITY =
706 MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_STICKY_RAND,
707
708} gxio_mpipe_bucket_mode_t;
709
710/* Copy a set of bucket initialization values into the mPIPE
711 * hardware. Since the load balancer makes decisions based on both
712 * bucket and NotifGroup state, most applications should use
713 * gxio_mpipe_init_notif_group_and_buckets() rather than using this
714 * function to configure a single bucket.
715 *
716 * @param context An initialized mPIPE context.
717 * @param bucket Bucket index to be initialized.
718 * @param bucket_info Initial reference count, NotifRing index, and mode.
719 * @return 0 on success, ::GXIO_MPIPE_ERR_BAD_BUCKET on failure.
720 */
721extern int gxio_mpipe_init_bucket(gxio_mpipe_context_t *context,
722 unsigned int bucket,
723 gxio_mpipe_bucket_info_t bucket_info);
724
725/* Initialize a group, a range of buckets, and a range of rings such
726 * that the load balancer runs a particular load balancing function.
727 *
728 * First, the group is initialized with the given rings.
729 *
730 * Second, each bucket is initialized with the mode and group, and a
731 * ring chosen round-robin from the given rings.
732 *
733 * Normally, the classifier picks a bucket, and then the load balancer
734 * picks a ring, based on the bucket's mode, group, and current ring,
735 * possibly updating the bucket's ring.
736 *
737 * @param context An initialized mPIPE context.
738 * @param group The group.
739 * @param ring The first ring.
740 * @param num_rings The number of rings.
741 * @param bucket The first bucket.
742 * @param num_buckets The number of buckets.
743 * @param mode The load balancing mode.
744 *
745 * @return 0 on success, ::GXIO_MPIPE_ERR_BAD_BUCKET,
746 * ::GXIO_MPIPE_ERR_BAD_NOTIF_GROUP, or
747 * ::GXIO_MPIPE_ERR_BAD_NOTIF_RING on failure.
748 */
749extern int gxio_mpipe_init_notif_group_and_buckets(gxio_mpipe_context_t
750 *context,
751 unsigned int group,
752 unsigned int ring,
753 unsigned int num_rings,
754 unsigned int bucket,
755 unsigned int num_buckets,
756 gxio_mpipe_bucket_mode_t
757 mode);
758
759/* Return credits to a NotifRing and/or bucket.
760 *
761 * @param context An initialized mPIPE context.
762 * @param ring The NotifRing index, or -1.
763 * @param bucket The bucket, or -1.
764 * @param count The number of credits to return.
765 */
766static inline void gxio_mpipe_credit(gxio_mpipe_context_t *context,
767 int ring, int bucket, unsigned int count)
768{
769 /* NOTE: Fancy struct initialization would break "C89" header test. */
770
771 MPIPE_IDMA_RELEASE_REGION_ADDR_t offset = { {0} };
772 MPIPE_IDMA_RELEASE_REGION_VAL_t val = { {0} };
773
774 /*
775 * The mmio_fast_base region starts at the IDMA region, so subtract
776 * off that initial offset.
777 */
778 offset.region =
779 MPIPE_MMIO_ADDR__REGION_VAL_IDMA -
780 MPIPE_MMIO_ADDR__REGION_VAL_IDMA;
781 offset.ring = ring;
782 offset.bucket = bucket;
783 offset.ring_enable = (ring >= 0);
784 offset.bucket_enable = (bucket >= 0);
785 val.count = count;
786
787 __gxio_mmio_write(context->mmio_fast_base + offset.word, val.word);
788}
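/* Example (an illustrative sketch): after fully processing a packet
 * delivered to "ring" via "bucket", return one credit to each so the
 * hardware can deliver more.  Passing -1 for either index credits
 * only the other:
 *
 *   gxio_mpipe_credit(&ctx, ring, bucket, 1);
 */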
789
790/*****************************************************************
791 * Egress Rings *
792 ******************************************************************/
793
794/* Allocate a set of eDMA rings.
795 *
796 * The return value is unspecified if count is zero.
797 *
798 * @param context An initialized mPIPE context.
799 * @param count Number of eDMA rings required.
800 * @param first Index of first eDMA ring if ::GXIO_MPIPE_ALLOC_FIXED flag
801 * is set, otherwise ignored.
802 * @param flags Flag bits from ::gxio_mpipe_alloc_flags_e.
803 * @return Index of first allocated eDMA ring, or
804 * ::GXIO_MPIPE_ERR_NO_EDMA_RING if allocation failed.
805 */
806extern int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context,
807 unsigned int count, unsigned int first,
808 unsigned int flags);
809
810/* Initialize an eDMA ring, using the given memory and size.
811 *
812 * @param context An initialized mPIPE context.
813 * @param ring The eDMA ring index.
814 * @param channel The channel to use. This must be one of the channels
815 * associated with the context's set of open links.
816 * @param mem A physically contiguous region of memory to be filled
817 * with a ring of ::gxio_mpipe_edesc_t structures.
818 * @param mem_size Number of bytes in the ring. Must be 512, 2048,
819 * 8192 or 65536, times 16 (i.e. sizeof(gxio_mpipe_edesc_t)).
820 * @param mem_flags ::gxio_mpipe_mem_flags_e memory flags.
821 *
822 * @return 0 on success, ::GXIO_MPIPE_ERR_BAD_EDMA_RING or
823 * ::GXIO_ERR_INVAL_MEMORY_SIZE on failure.
824 */
825extern int gxio_mpipe_init_edma_ring(gxio_mpipe_context_t *context,
826 unsigned int ring, unsigned int channel,
827 void *mem, size_t mem_size,
828 unsigned int mem_flags);
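/* Example (a minimal sketch): a 512-entry eDMA ring bound to an open
 * link's channel, with "emem" a suitable contiguous region and "ering"
 * an index from gxio_mpipe_alloc_edma_rings():
 *
 *   size_t ebytes = 512 * sizeof(gxio_mpipe_edesc_t);
 *   gxio_mpipe_init_edma_ring(&ctx, ering, channel, emem, ebytes, 0);
 */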
829
830/*****************************************************************
831 * Classifier Program *
832 ******************************************************************/
833
834/*
835 *
836 * Functions for loading or configuring the mPIPE classifier program.
837 *
838 * The mPIPE classification processors all run a special "classifier"
839 * program which, for each incoming packet, parses the packet headers
840 * and encodes some packet metadata in the "idesc".  It then either
841 * drops the packet, or picks a notif ring to handle the packet and a
842 * buffer stack to contain it, usually based on the channel, VLAN,
843 * dMAC, flow hash, and packet size, under the guidance of the "rules"
844 * API described below.
845 *
846 * @section gxio_mpipe_classifier_default Default Classifier
847 *
848 * The MDE provides a simple "default" classifier program. It is
849 * shipped as source in "$TILERA_ROOT/src/sys/mpipe/classifier.c",
850 * which serves as its official documentation. It is shipped as a
851 * binary program in "$TILERA_ROOT/tile/boot/classifier", which is
852 * automatically included in bootroms created by "tile-monitor", and
853 * is automatically loaded by the hypervisor at boot time.
854 *
855 * The L2 analysis handles LLC packets, SNAP packets, and "VLAN
856 * wrappers" (keeping the outer VLAN).
857 *
858 * The L3 analysis handles IPv4 and IPv6, dropping packets with bad
859 * IPv4 header checksums, requesting computation of a TCP/UDP checksum
860 * if appropriate, and hashing the dest and src IP addresses, plus the
861 * ports for TCP/UDP packets, into the flow hash. No special analysis
862 * is done for "fragmented" packets or "tunneling" protocols. Thus,
863 * the first fragment of a fragmented TCP/UDP packet is hashed using
864 * src/dest IP address and ports and all subsequent fragments are only
865 * hashed according to src/dest IP address.
866 *
867 * The L3 analysis handles other packets too, hashing the dMAC and
868 * sMAC into a flow hash.
869 *
870 * The channel, VLAN, and dMAC are used to pick a "rule" (see the
871 * "rules" APIs below), which in turn is used to pick a buffer stack
872 * (based on the packet size) and a bucket (based on the flow hash).
873 *
874 * To receive traffic matching a particular channel/VLAN/dMAC
875 * pattern, an application should allocate its own buffer stacks and
876 * load balancer buckets, and map traffic to those stacks and buckets,
877 * as described by the "rules" API below.
878 *
879 * Various packet metadata is encoded in the idesc. The flow hash is
880 * four bytes at 0x0C. The VLAN is two bytes at 0x10. The ethtype is
881 * two bytes at 0x12. The l3 start is one byte at 0x14. The l4 start
882 * is one byte at 0x15 for IPv4 and IPv6 packets, and otherwise zero.
883 * The protocol is one byte at 0x16 for IPv4 and IPv6 packets, and
884 * otherwise zero.
885 *
886 * @section gxio_mpipe_classifier_custom Custom Classifiers.
887 *
888 * A custom classifier may be created using "tile-mpipe-cc" with a
889 * customized version of the default classifier sources.
890 *
891 * The custom classifier may be included in bootroms using the
892 * "--classifier" option to "tile-monitor", or loaded dynamically
893 * using gxio_mpipe_classifier_load_from_file().
894 *
895 * Be aware that "extreme" customizations may break the assumptions of
896 * the "rules" APIs described below, but simple customizations, such
897 * as adding new packet metadata, should be fine.
898 */
899
900/* A set of classifier rules, plus a context. */
901typedef struct {
902
903 /* The context. */
904 gxio_mpipe_context_t *context;
905
906 /* The actual rules. */
907 gxio_mpipe_rules_list_t list;
908
909} gxio_mpipe_rules_t;
910
911/* Initialize a classifier program rules list.
912 *
913 * This function can be called on a previously initialized rules list
914 * to discard any previously added rules.
915 *
916 * @param rules Rules list to initialize.
917 * @param context An initialized mPIPE context.
918 */
919extern void gxio_mpipe_rules_init(gxio_mpipe_rules_t *rules,
920 gxio_mpipe_context_t *context);
921
922/* Begin a new rule on the indicated rules list.
923 *
924 * Note that an empty rule matches all packets, but an empty rule list
925 * matches no packets.
926 *
927 * @param rules Rules list to which new rule is appended.
928 * @param bucket First load balancer bucket to which packets will be
929 * delivered.
930 * @param num_buckets Number of buckets (must be a power of two) across
931 * which packets will be distributed based on the "flow hash".
932 * @param stacks Either NULL, to assign each packet to the smallest
933 * initialized buffer stack which does not induce chaining (and to
934 * drop packets which exceed the largest initialized buffer stack
935 * buffer size), or an array, with each entry indicating which buffer
936 * stack should be used for packets up to that size (with 255
937 * indicating that those packets should be dropped).
938 * @return 0 on success, or a negative error code on failure.
939 */
940extern int gxio_mpipe_rules_begin(gxio_mpipe_rules_t *rules,
941 unsigned int bucket,
942 unsigned int num_buckets,
943 gxio_mpipe_rules_stacks_t *stacks);
944
945/* Set the headroom of the current rule.
946 *
947 * @param rules Rules list whose current rule will be modified.
948 * @param headroom The headroom.
949 * @return 0 on success, or a negative error code on failure.
950 */
951extern int gxio_mpipe_rules_set_headroom(gxio_mpipe_rules_t *rules,
952 uint8_t headroom);
953
954/* Indicate that packets from a particular channel can be delivered
955 * to the buckets and buffer stacks associated with the current rule.
956 *
957 * Channels added must be associated with links opened by the mPIPE context
958 * used in gxio_mpipe_rules_init(). A rule with no channels is equivalent
959 * to a rule naming all such associated channels.
960 *
961 * @param rules Rules list whose current rule will be modified.
962 * @param channel The channel to add.
963 * @return 0 on success, or a negative error code on failure.
964 */
965extern int gxio_mpipe_rules_add_channel(gxio_mpipe_rules_t *rules,
966 unsigned int channel);
967
968/* Commit rules.
969 *
970 * The rules are sent to the hypervisor, where they are combined with
971 * the rules from other apps, and used to program the hardware classifier.
972 *
973 * Note that if this function returns an error, then the rules will NOT
974 * have been committed, even if the error is due to interactions with
975 * rules from another app.
976 *
977 * @param rules Rules list to commit.
978 * @return 0 on success, or a negative error code on failure.
979 */
980extern int gxio_mpipe_rules_commit(gxio_mpipe_rules_t *rules);
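
/*
 * Example (a minimal sketch; error handling elided): the typical life
 * cycle of a rules list, using only the calls declared above.  The
 * "bucket", "num_buckets", and "channel" values are assumed to come
 * from earlier allocation and link-open calls.
 */
static void example_rules_setup(gxio_mpipe_context_t *context,
				unsigned int bucket,
				unsigned int num_buckets,
				unsigned int channel)
{
	gxio_mpipe_rules_t rules;

	gxio_mpipe_rules_init(&rules, context);
	gxio_mpipe_rules_begin(&rules, bucket, num_buckets, NULL);
	gxio_mpipe_rules_add_channel(&rules, channel);
	gxio_mpipe_rules_commit(&rules);
}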
981
982/*****************************************************************
983 * Ingress Queue Wrapper *
984 ******************************************************************/
985
986/*
987 *
988 * Convenience functions for receiving packets from a NotifRing and
989 * sending packets via an eDMA ring.
990 *
991 * The mpipe ingress and egress hardware uses shared memory packet
992 * descriptors to describe packets that have arrived on ingress or
993 * are destined for egress. These descriptors are stored in shared
994 * memory ring buffers and written or read by hardware as necessary.
995 * The gxio library provides wrapper functions that manage the head and
996 * tail pointers for these rings, allowing the user to easily read or
997 * write packet descriptors.
998 *
999 * The initialization interface for ingress and egress rings is quite
1000 * similar. For example, to create an ingress queue, the user passes
1001 * a ::gxio_mpipe_iqueue_t state object, a ring number from
1002 * gxio_mpipe_alloc_notif_rings(), and the address of memory to hold a
1003 * ring buffer to the gxio_mpipe_iqueue_init() function. The function
1004 * returns success when the state object has been initialized and the
1005 * hardware configured to deliver packets to the specified ring
1006 * buffer. Similarly, gxio_mpipe_equeue_init() takes a
1007 * ::gxio_mpipe_equeue_t state object, a ring number from
1008 * gxio_mpipe_alloc_edma_rings(), and a shared memory buffer.
1009 *
1010 * @section gxio_mpipe_iqueue Working with Ingress Queues
1011 *
1012 * Once initialized, the gxio_mpipe_iqueue_t API provides two flows
1013 * for getting the ::gxio_mpipe_idesc_t packet descriptor associated
1014 * with incoming packets. The simplest is to call
1015 * gxio_mpipe_iqueue_get() or gxio_mpipe_iqueue_try_get(). These
1016 * functions copy the oldest packet descriptor out of the NotifRing and
1017 * into a descriptor provided by the caller. They also immediately
1018 * inform the hardware that a descriptor has been processed.
1019 *
1020 * For applications with stringent performance requirements, higher
1021 * efficiency can be achieved by avoiding the packet descriptor copy
1022 * and processing multiple descriptors at once. The
1023 * gxio_mpipe_iqueue_peek() and gxio_mpipe_iqueue_try_peek() functions
1024 * allow such optimizations. These functions provide a pointer to the
1025 * next valid ingress descriptor in the NotifRing's shared memory ring
1026 * buffer, and a count of how many contiguous descriptors are ready to
1027 * be processed. The application can then process any number of those
1028 * descriptors in place, calling gxio_mpipe_iqueue_consume() to inform
1029 * the hardware after each one has been processed.
1030 *
1031 * @section gxio_mpipe_equeue Working with Egress Queues
1032 *
1033 * Similarly, the egress queue API provides a high-performance
1034 * interface plus a simple wrapper for use in posting
1035 * ::gxio_mpipe_edesc_t egress packet descriptors. The simple
1036 * version, gxio_mpipe_equeue_put(), allows the programmer to wait for
1037 * an eDMA ring slot to become available and write a single descriptor
1038 * into the ring.
1039 *
1040 * Alternatively, you can reserve slots in the eDMA ring using
1041 * gxio_mpipe_equeue_reserve() or gxio_mpipe_equeue_try_reserve(), and
1042 * then fill in each slot using gxio_mpipe_equeue_put_at(). This
1043 * capability can be used to amortize the cost of reserving slots
1044 * across several packets. It also allows gather operations to be
1045 * performed on a shared equeue, by ensuring that the edescs for all
1046 * the fragments are all contiguous in the eDMA ring.
1047 *
1048 * The gxio_mpipe_equeue_reserve() and gxio_mpipe_equeue_try_reserve()
1049 * functions return a 63-bit "completion slot", which is actually a
1050 * sequence number, the low bits of which indicate the ring buffer
1051 * index and the high bits the number of times the application has
1052 * gone around the egress ring buffer. The extra bits allow an
1053 * application to check for egress completion by calling
1054 * gxio_mpipe_equeue_is_complete() to see whether a particular 'slot'
1055 * number has finished. Given the maximum packet rates of the Gx
1056 * processor, the 63-bit slot number will never wrap.
1057 *
1058 * In practice, most applications use the ::gxio_mpipe_edesc_t::hwb
1059 * bit to indicate that the buffers containing egress packet data
1060 * should be pushed onto a buffer stack when egress is complete. Such
1061 * applications generally do not need to know when an egress operation
1062 * completes (since there is no need to free a buffer post-egress),
1063 * and thus can use the optimized gxio_mpipe_equeue_reserve_fast() or
1064 * gxio_mpipe_equeue_try_reserve_fast() functions, which return a
1065 * 24-bit "slot" instead of a 63-bit "completion slot".
1066 *
1067 * Once a slot has been "reserved", it MUST be filled. If the
1068 * application reserves a slot and then decides that it does not
1069 * actually need it, it can set the ::gxio_mpipe_edesc_t::ns (no send)
1070 * bit on the descriptor passed to gxio_mpipe_equeue_put_at() to
1071 * indicate that no data should be sent. This technique can also be
1072 * used to drop an incoming packet, instead of forwarding it, since
1073 * any buffer will still be pushed onto the buffer stack when the
1074 * egress descriptor is processed.
1075 */
1076
1077/* A convenient interface to a NotifRing, for use by a single thread.
1078 */
1079typedef struct {
1080
1081 /* The context. */
1082 gxio_mpipe_context_t *context;
1083
1084 /* The actual NotifRing. */
1085 gxio_mpipe_idesc_t *idescs;
1086
1087 /* The number of entries. */
1088 unsigned long num_entries;
1089
1090 /* The number of entries minus one. */
1091 unsigned long mask_num_entries;
1092
1093 /* The log2() of the number of entries. */
1094 unsigned long log2_num_entries;
1095
1096 /* The next entry. */
1097 unsigned int head;
1098
1099 /* The NotifRing id. */
1100 unsigned int ring;
1101
1102#ifdef __BIG_ENDIAN__
1103 /* The number of byteswapped entries. */
1104 unsigned int swapped;
1105#endif
1106
1107} gxio_mpipe_iqueue_t;
1108
1109/* Initialize an "iqueue".
1110 *
1111 * Takes the iqueue plus the same args as gxio_mpipe_init_notif_ring().
1112 */
1113extern int gxio_mpipe_iqueue_init(gxio_mpipe_iqueue_t *iqueue,
1114 gxio_mpipe_context_t *context,
1115 unsigned int ring,
1116 void *mem, size_t mem_size,
1117 unsigned int mem_flags);
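
/*
 * Example (a sketch under assumptions): one way to set up an iqueue,
 * assuming gxio_mpipe_alloc_notif_rings() takes (context, count,
 * first, flags) like the other allocation routines and returns the
 * first allocated ring, and that "mem" is suitably sized and aligned
 * memory for which a mem_flags value of 0 is acceptable.
 */
static int example_iqueue_setup(gxio_mpipe_context_t *context,
				gxio_mpipe_iqueue_t *iqueue,
				void *mem, unsigned int num_entries)
{
	int ring = gxio_mpipe_alloc_notif_rings(context, 1, 0, 0);
	if (ring < 0)
		return ring;
	return gxio_mpipe_iqueue_init(iqueue, context, ring, mem,
				      num_entries *
				      sizeof(gxio_mpipe_idesc_t), 0);
}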
1118
1119/* Advance over some old entries in an iqueue.
1120 *
1121 * Please see the documentation for gxio_mpipe_iqueue_consume().
1122 *
1123 * @param iqueue An ingress queue initialized via gxio_mpipe_iqueue_init().
1124 * @param count The number of entries to advance over.
1125 */
1126static inline void gxio_mpipe_iqueue_advance(gxio_mpipe_iqueue_t *iqueue,
1127 int count)
1128{
1129 /* Advance with proper wrap. */
1130 int head = iqueue->head + count;
1131 iqueue->head =
1132 (head & iqueue->mask_num_entries) +
1133 (head >> iqueue->log2_num_entries);
1134
1135#ifdef __BIG_ENDIAN__
1136 /* HACK: Track swapped entries. */
1137 iqueue->swapped -= count;
1138#endif
1139}
1140
1141/* Release the ring and bucket for an old entry in an iqueue.
1142 *
1143 * Releasing the ring allows more packets to be delivered to the ring.
1144 *
1145 * Releasing the bucket allows flows using the bucket to be moved to a
1146 * new ring when using GXIO_MPIPE_BUCKET_DYNAMIC_FLOW_AFFINITY.
1147 *
1148 * This function is shorthand for "gxio_mpipe_credit(iqueue->context,
1149 * iqueue->ring, idesc->bucket_id, 1)", and it may be more convenient
1150 * to make that underlying call, using those values, instead of
1151 * tracking the entire "idesc".
1152 *
1153 * If packet processing is deferred, optimal performance requires that
1154 * the releasing be deferred as well.
1155 *
1156 * Please see the documentation for gxio_mpipe_iqueue_consume().
1157 *
1158 * @param iqueue An ingress queue initialized via gxio_mpipe_iqueue_init().
1159 * @param idesc The descriptor which was processed.
1160 */
1161static inline void gxio_mpipe_iqueue_release(gxio_mpipe_iqueue_t *iqueue,
1162 gxio_mpipe_idesc_t *idesc)
1163{
1164 gxio_mpipe_credit(iqueue->context, iqueue->ring, idesc->bucket_id, 1);
1165}
1166
1167/* Consume a packet from an "iqueue".
1168 *
1169 * After processing packets peeked at via gxio_mpipe_iqueue_peek()
1170 * or gxio_mpipe_iqueue_try_peek(), you must call this function, or
1171 * gxio_mpipe_iqueue_advance() plus gxio_mpipe_iqueue_release(), to
1172 * advance over those entries, and release their rings and buckets.
1173 *
1174 * You may call this function as each packet is processed, or you can
1175 * wait until several packets have been processed.
1176 *
1177 * Note that if you are using a single bucket, and you are handling
1178 * batches of N packets, then you can replace several calls to this
1179 * function with calls to "gxio_mpipe_iqueue_advance(iqueue, N)" and
1180 * "gxio_mpipe_credit(iqueue->context, iqueue->ring, bucket, N)".
1181 *
1182 * Note that if your classifier sets "idesc->nr", then you should
1183 * explicitly call "gxio_mpipe_iqueue_advance(iqueue, 1)" plus
1184 * "gxio_mpipe_credit(iqueue->context, iqueue->ring, -1, 1)", to
1185 * avoid incorrectly crediting the (unused) bucket.
1186 *
1187 * @param iqueue An ingress queue initialized via gxio_mpipe_iqueue_init().
1188 * @param idesc The descriptor which was processed.
1189 */
1190static inline void gxio_mpipe_iqueue_consume(gxio_mpipe_iqueue_t *iqueue,
1191 gxio_mpipe_idesc_t *idesc)
1192{
1193 gxio_mpipe_iqueue_advance(iqueue, 1);
1194 gxio_mpipe_iqueue_release(iqueue, idesc);
1195}
1196
1197/* Peek at the next packet(s) in an "iqueue", without waiting.
1198 *
1199 * If no packets are available, fills idesc_ref with NULL, and then
1200 * returns ::GXIO_MPIPE_ERR_IQUEUE_EMPTY. Otherwise, fills idesc_ref
1201 * with the address of the next valid packet descriptor, and returns
1202 * the maximum number of valid descriptors which can be processed.
1203 * You may process fewer descriptors if desired.
1204 *
1205 * Call gxio_mpipe_iqueue_consume() on each packet once it has been
1206 * processed (or dropped), to allow more packets to be delivered.
1207 *
1208 * @param iqueue An ingress queue initialized via gxio_mpipe_iqueue_init().
1209 * @param idesc_ref A pointer to a packet descriptor pointer.
1210 * @return The (positive) number of packets which can be processed,
1211 * or ::GXIO_MPIPE_ERR_IQUEUE_EMPTY if no packets are available.
1212 */
1213static inline int gxio_mpipe_iqueue_try_peek(gxio_mpipe_iqueue_t *iqueue,
1214 gxio_mpipe_idesc_t **idesc_ref)
1215{
1216 gxio_mpipe_idesc_t *next;
1217
1218 uint64_t head = iqueue->head;
1219 uint64_t tail = __gxio_mmio_read(iqueue->idescs);
1220
1221 /* Available entries. */
1222 uint64_t avail =
1223 (tail >= head) ? (tail - head) : (iqueue->num_entries - head);
1224
1225 if (avail == 0) {
1226 *idesc_ref = NULL;
1227 return GXIO_MPIPE_ERR_IQUEUE_EMPTY;
1228 }
1229
1230 next = &iqueue->idescs[head];
1231
1232 /* ISSUE: Is this helpful? */
1233 __insn_prefetch(next);
1234
1235#ifdef __BIG_ENDIAN__
1236 /* HACK: Swap new entries directly in memory. */
1237 {
1238 int i, j;
1239 for (i = iqueue->swapped; i < avail; i++) {
1240 for (j = 0; j < 8; j++)
1241 next[i].words[j] =
1242 __builtin_bswap64(next[i].words[j]);
1243 }
1244 iqueue->swapped = avail;
1245 }
1246#endif
1247
1248 *idesc_ref = next;
1249
1250 return avail;
1251}
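
/*
 * Example (a minimal sketch): the "peek" receive flow described
 * above.  "process" stands in for application packet handling; a
 * negative return from gxio_mpipe_iqueue_try_peek() simply skips the
 * loop.
 */
static void example_poll(gxio_mpipe_iqueue_t *iqueue,
			 void (*process)(gxio_mpipe_idesc_t *))
{
	gxio_mpipe_idesc_t *idesc;
	int i, n = gxio_mpipe_iqueue_try_peek(iqueue, &idesc);

	/* Process descriptors in place, crediting after each one. */
	for (i = 0; i < n; i++) {
		process(&idesc[i]);
		gxio_mpipe_iqueue_consume(iqueue, &idesc[i]);
	}
}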
1252
1253/* Drop a packet by pushing its buffer (if appropriate).
1254 *
1255 * NOTE: The caller must still call gxio_mpipe_iqueue_consume() if idesc
1256 * came from gxio_mpipe_iqueue_try_peek() or gxio_mpipe_iqueue_peek().
1257 *
1258 * @param iqueue An ingress queue initialized via gxio_mpipe_iqueue_init().
1259 * @param idesc A packet descriptor.
1260 */
1261static inline void gxio_mpipe_iqueue_drop(gxio_mpipe_iqueue_t *iqueue,
1262 gxio_mpipe_idesc_t *idesc)
1263{
1264 /* FIXME: Handle "chaining" properly. */
1265
1266 if (!idesc->be) {
1267 unsigned char *va = gxio_mpipe_idesc_get_va(idesc);
1268 gxio_mpipe_push_buffer(iqueue->context, idesc->stack_idx, va);
1269 }
1270}
1271
1272/*****************************************************************
1273 * Egress Queue Wrapper *
1274 ******************************************************************/
1275
1276/* A convenient, thread-safe interface to an eDMA ring. */
1277typedef struct {
1278
1279 /* State object for tracking head and tail pointers. */
1280 __gxio_dma_queue_t dma_queue;
1281
1282 /* The ring entries. */
1283 gxio_mpipe_edesc_t *edescs;
1284
1285 /* The number of entries minus one. */
1286 unsigned long mask_num_entries;
1287
1288 /* The log2() of the number of entries. */
1289 unsigned long log2_num_entries;
1290
1291} gxio_mpipe_equeue_t;
1292
1293/* Initialize an "equeue".
1294 *
1295 * Takes the equeue plus the same args as gxio_mpipe_init_edma_ring().
1296 */
1297extern int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
1298 gxio_mpipe_context_t *context,
1299 unsigned int edma_ring_id,
1300 unsigned int channel,
1301 void *mem, unsigned int mem_size,
1302 unsigned int mem_flags);
1303
1304/* Reserve completion slots for edescs.
1305 *
1306 * Use gxio_mpipe_equeue_put_at() to actually populate the slots.
1307 *
1308 * This function is slower than gxio_mpipe_equeue_reserve_fast(), but
1309 * returns a full 63-bit completion slot, which can be used with
1310 * gxio_mpipe_equeue_is_complete().
1311 *
1312 * @param equeue An egress queue initialized via gxio_mpipe_equeue_init().
1313 * @param num Number of slots to reserve (must be non-zero).
1314 * @return The first reserved completion slot, or a negative error code.
1315 */
1316static inline int64_t gxio_mpipe_equeue_reserve(gxio_mpipe_equeue_t *equeue,
1317 unsigned int num)
1318{
1319 return __gxio_dma_queue_reserve_aux(&equeue->dma_queue, num, true);
1320}
1321
1322/* Reserve completion slots for edescs, if possible.
1323 *
1324 * Use gxio_mpipe_equeue_put_at() to actually populate the slots.
1325 *
1326 * This function is slower than gxio_mpipe_equeue_try_reserve_fast(),
1327 * but returns a full 63-bit completion slot, which can be used with
1328 * gxio_mpipe_equeue_is_complete().
1329 *
1330 * @param equeue An egress queue initialized via gxio_mpipe_equeue_init().
1331 * @param num Number of slots to reserve (must be non-zero).
1332 * @return The first reserved completion slot, or a negative error code.
1333 */
1334static inline int64_t gxio_mpipe_equeue_try_reserve(gxio_mpipe_equeue_t
1335 *equeue, unsigned int num)
1336{
1337 return __gxio_dma_queue_reserve_aux(&equeue->dma_queue, num, false);
1338}
1339
1340/* Reserve slots for edescs.
1341 *
1342 * Use gxio_mpipe_equeue_put_at() to actually populate the slots.
1343 *
1344 * This function is faster than gxio_mpipe_equeue_reserve(), but
1345 * returns a 24-bit slot (instead of a 63-bit completion slot), which
1346 * thus cannot be used with gxio_mpipe_equeue_is_complete().
1347 *
1348 * @param equeue An egress queue initialized via gxio_mpipe_equeue_init().
1349 * @param num Number of slots to reserve (should be non-zero).
1350 * @return The first reserved slot, or a negative error code.
1351 */
1352static inline int64_t gxio_mpipe_equeue_reserve_fast(gxio_mpipe_equeue_t
1353 *equeue, unsigned int num)
1354{
1355 return __gxio_dma_queue_reserve(&equeue->dma_queue, num, true, false);
1356}
1357
1358/* Reserve slots for edescs, if possible.
1359 *
1360 * Use gxio_mpipe_equeue_put_at() to actually populate the slots.
1361 *
1362 * This function is faster than gxio_mpipe_equeue_try_reserve(), but
1363 * returns a 24-bit slot (instead of a 63-bit completion slot), which
1364 * thus cannot be used with gxio_mpipe_equeue_is_complete().
1365 *
1366 * @param equeue An egress queue initialized via gxio_mpipe_equeue_init().
1367 * @param num Number of slots to reserve (should be non-zero).
1368 * @return The first reserved slot, or a negative error code.
1369 */
1370static inline int64_t gxio_mpipe_equeue_try_reserve_fast(gxio_mpipe_equeue_t
1371 *equeue,
1372 unsigned int num)
1373{
1374 return __gxio_dma_queue_reserve(&equeue->dma_queue, num, false, false);
1375}
1376
1377/*
1378 * HACK: This helper function tricks gcc 4.6 into avoiding saving
1379 * a copy of "edesc->words[0]" on the stack for no obvious reason.
1380 */
1381
1382static inline void gxio_mpipe_equeue_put_at_aux(gxio_mpipe_equeue_t *equeue,
1383 uint_reg_t ew[2],
1384 unsigned long slot)
1385{
1386 unsigned long edma_slot = slot & equeue->mask_num_entries;
1387 gxio_mpipe_edesc_t *edesc_p = &equeue->edescs[edma_slot];
1388
1389 /*
1390 * ISSUE: Could set eDMA ring to be on generation 1 at start, which
1391 * would avoid the negation here, perhaps allowing "__insn_bfins()".
1392 */
1393 ew[0] |= !((slot >> equeue->log2_num_entries) & 1);
1394
1395 /*
1396 * NOTE: We use "__gxio_mmio_write64()", plus the fact that the eDMA
1397 * queue alignment restrictions ensure that these two words are on
1398 * the same cacheline, to force proper ordering between the stores.
1399 */
1400 __gxio_mmio_write64(&edesc_p->words[1], ew[1]);
1401 __gxio_mmio_write64(&edesc_p->words[0], ew[0]);
1402}
1403
1404/* Post an edesc to a given slot in an equeue.
1405 *
1406 * This function copies the supplied edesc into entry "slot mod N" in
1407 * the underlying ring, setting the "gen" bit to the appropriate value
1408 * based on "(slot mod N*2)", where "N" is the size of the ring. Note
1409 * that the higher bits of slot are unused, and thus, this function
1410 * can handle "slots" as well as "completion slots".
1411 *
1412 * Normally this function is used to fill in slots reserved by
1413 * gxio_mpipe_equeue_try_reserve(), gxio_mpipe_equeue_reserve(),
1414 * gxio_mpipe_equeue_try_reserve_fast(), or
1415 * gxio_mpipe_equeue_reserve_fast().
1416 *
1417 * This function can also be used without "reserving" slots, if the
1418 * application KNOWS that the ring can never overflow, for example, by
1419 * pushing fewer buffers into the buffer stacks than there are total
1420 * slots in the equeue, but this is NOT recommended.
1421 *
1422 * @param equeue An egress queue initialized via gxio_mpipe_equeue_init().
1423 * @param edesc The egress descriptor to be posted.
1424 * @param slot An egress slot (only the low bits are actually used).
1425 */
1426static inline void gxio_mpipe_equeue_put_at(gxio_mpipe_equeue_t *equeue,
1427 gxio_mpipe_edesc_t edesc,
1428 unsigned long slot)
1429{
1430 gxio_mpipe_equeue_put_at_aux(equeue, edesc.words, slot);
1431}
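
/*
 * Example (a sketch; error handling minimal): posting a two-fragment
 * gather by reserving two contiguous slots and filling each, as
 * discussed above.  The edescs are assumed to be fully initialized by
 * the caller.
 */
static int example_send_pair(gxio_mpipe_equeue_t *equeue,
			     gxio_mpipe_edesc_t edesc0,
			     gxio_mpipe_edesc_t edesc1)
{
	int64_t slot = gxio_mpipe_equeue_reserve_fast(equeue, 2);
	if (slot < 0)
		return (int)slot;
	gxio_mpipe_equeue_put_at(equeue, edesc0, slot);
	gxio_mpipe_equeue_put_at(equeue, edesc1, slot + 1);
	return 0;
}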
1432
1433/* Post an edesc to the next slot in an equeue.
1434 *
1435 * This is a convenience wrapper around
1436 * gxio_mpipe_equeue_reserve_fast() and gxio_mpipe_equeue_put_at().
1437 *
1438 * @param equeue An egress queue initialized via gxio_mpipe_equeue_init().
1439 * @param edesc The egress descriptor to be posted.
1440 * @return 0 on success.
1441 */
1442static inline int gxio_mpipe_equeue_put(gxio_mpipe_equeue_t *equeue,
1443 gxio_mpipe_edesc_t edesc)
1444{
1445 int64_t slot = gxio_mpipe_equeue_reserve_fast(equeue, 1);
1446 if (slot < 0)
1447 return (int)slot;
1448
1449 gxio_mpipe_equeue_put_at(equeue, edesc, slot);
1450
1451 return 0;
1452}
1453
1454/* Ask the mPIPE hardware to egress outstanding packets immediately.
1455 *
1456 * This call is not necessary, but may slightly reduce overall latency.
1457 *
1458 * Technically, you should flush all gxio_mpipe_equeue_put_at() writes
1459 * to memory before calling this function, to ensure the descriptors
1460 * are visible in memory before the mPIPE hardware actually looks for
1461 * them. But this should be very rare, and the only side effect would
1462 * be increased latency, so it is up to the caller to decide whether
1463 * or not to flush memory.
1464 *
1465 * @param equeue An egress queue initialized via gxio_mpipe_equeue_init().
1466 */
1467static inline void gxio_mpipe_equeue_flush(gxio_mpipe_equeue_t *equeue)
1468{
1469 /* Use "ring_idx = 0" and "count = 0" to "wake up" the eDMA ring. */
1470 MPIPE_EDMA_POST_REGION_VAL_t val = { {0} };
1471 /* Flush the write buffers. */
1472 __insn_flushwb();
1473 __gxio_mmio_write(equeue->dma_queue.post_region_addr, val.word);
1474}
1475
1476/* Determine if a given edesc has been completed.
1477 *
1478 * Note that this function requires a "completion slot", and thus may
1479 * NOT be used with a "slot" from gxio_mpipe_equeue_reserve_fast() or
1480 * gxio_mpipe_equeue_try_reserve_fast().
1481 *
1482 * @param equeue An egress queue initialized via gxio_mpipe_equeue_init().
1483 * @param completion_slot The completion slot used by the edesc.
1484 * @param update If true, and the desc does not appear to have completed
1485 * yet, then update any software cache of the hardware completion counter,
1486 * and check again. This should normally be true.
1487 * @return True iff the given edesc has been completed.
1488 */
1489static inline int gxio_mpipe_equeue_is_complete(gxio_mpipe_equeue_t *equeue,
1490 int64_t completion_slot,
1491 int update)
1492{
1493 return __gxio_dma_queue_is_complete(&equeue->dma_queue,
1494 completion_slot, update);
1495}
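
/*
 * Example (a sketch): using a "completion slot" to wait for egress to
 * finish, e.g. before reusing a buffer sent without the "hwb" bit.
 * Busy-waiting here is for illustration only.
 */
static int example_send_and_wait(gxio_mpipe_equeue_t *equeue,
				 gxio_mpipe_edesc_t edesc)
{
	int64_t cslot = gxio_mpipe_equeue_reserve(equeue, 1);
	if (cslot < 0)
		return (int)cslot;
	gxio_mpipe_equeue_put_at(equeue, edesc, cslot);
	while (!gxio_mpipe_equeue_is_complete(equeue, cslot, 1))
		;
	return 0;
}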
1496
1497/*****************************************************************
1498 * Link Management *
1499 ******************************************************************/
1500
1501/*
1502 *
1503 * Functions for manipulating and sensing the state and configuration
1504 * of physical network links.
1505 *
1506 * @section gxio_mpipe_link_perm Link Permissions
1507 *
1508 * Opening a link (with gxio_mpipe_link_open()) requests a set of link
1509 * permissions, which control what may be done with the link, and potentially
1510 * what permissions may be granted to other processes.
1511 *
1512 * Data permission allows the process to receive packets from the link by
1513 * specifying the link's channel number in mPIPE packet distribution rules,
1514 * and to send packets to the link by using the link's channel number as
1515 * the target for an eDMA ring.
1516 *
1517 * Stats permission allows the process to retrieve link attributes (such as
1518 * the speeds it is capable of running at, or whether it is currently up), and
1519 * to read and write certain statistics-related registers in the link's MAC.
1520 *
1521 * Control permission allows the process to retrieve and modify link attributes
1522 * (so that it may, for example, bring the link up and take it down), and
1523 * read and write many registers in the link's MAC and PHY.
1524 *
1525 * Any permission may be requested as shared, which allows other processes
1526 * to also request shared permission, or exclusive, which prevents other
1527 * processes from requesting it. In keeping with GXIO's typical usage in
1528 * an embedded environment, the defaults for all permissions are shared.
1529 *
1530 * Permissions are granted on a first-come, first-served basis, so if two
1531 * applications request an exclusive permission on the same link, the one
1532 * to run first will win. Note, however, that some system components, like
1533 * the kernel Ethernet driver, may get an opportunity to open links before
1534 * any applications run.
1535 *
1536 * @section gxio_mpipe_link_names Link Names
1537 *
1538 * Link names are of the form gbe<em>number</em> (for Gigabit Ethernet),
1539 * xgbe<em>number</em> (for 10 Gigabit Ethernet), loop<em>number</em> (for
1540 * internal mPIPE loopback), or ilk<em>number</em>/<em>channel</em>
1541 * (for Interlaken links); for instance, gbe0, xgbe1, loop3, and
1542 * ilk0/12 are all possible link names. The correspondence between
1543 * the link name and an mPIPE instance number or mPIPE channel number is
1544 * system-dependent; not all links will exist on all systems, and the set
1545 * of numbers used for a particular link type may not start at zero and may
1546 * not be contiguous. Use gxio_mpipe_link_enumerate() to retrieve the set of
1547 * links which exist on a system, and always use gxio_mpipe_link_instance()
1548 * to determine which mPIPE controls a particular link.
1549 *
1550 * Note that in some cases, links may share hardware, such as PHYs, or
1551 * internal mPIPE buffers; in these cases, only one of the links may be
1552 * opened at a time. This is especially common with xgbe and gbe ports,
1553 * since each xgbe port uses 4 SERDES lanes, each of which may also be
1554 * configured as one gbe port.
1555 *
1556 * @section gxio_mpipe_link_states Link States
1557 *
1558 * The mPIPE link management model revolves around three different states,
1559 * which are maintained for each link:
1560 *
1561 * 1. The <em>current</em> link state: is the link up now, and if so, at
1562 * what speed?
1563 *
1564 * 2. The <em>desired</em> link state: what do we want the link state to be?
1565 * The system is always working to make this state the current state;
1566 * thus, if the desired state is up, and the link is down, we'll be
1567 * constantly trying to bring it up, automatically.
1568 *
1569 * 3. The <em>possible</em> link state: what speeds are valid for this
1570 * particular link? Or, in other words, what are the capabilities of
1571 * the link hardware?
1572 *
1573 * These link states are not, strictly speaking, related to application
1574 * state; they may be manipulated at any time, whether or not the link
1575 * is currently being used for data transfer. However, for convenience,
1576 * gxio_mpipe_link_open() and gxio_mpipe_link_close() (or application exit)
1577 * can affect the link state. These implicit link management operations
1578 * may be modified or disabled by the use of link open flags.
1579 *
1580 * From an application, you can use gxio_mpipe_link_get_attr()
1581 * and gxio_mpipe_link_set_attr() to manipulate the link states.
1582 * gxio_mpipe_link_get_attr() with ::GXIO_MPIPE_LINK_POSSIBLE_STATE
1583 * gets you the possible link state. gxio_mpipe_link_get_attr() with
1584 * ::GXIO_MPIPE_LINK_CURRENT_STATE gets you the current link state.
1585 * Finally, gxio_mpipe_link_set_attr() and gxio_mpipe_link_get_attr()
1586 * with ::GXIO_MPIPE_LINK_DESIRED_STATE allow you to modify or retrieve
1587 * the desired link state.
1588 *
1589 * If you want to manage a link from a part of your application which isn't
1590 * involved in packet processing, you can use the ::GXIO_MPIPE_LINK_NO_DATA
1591 * flag on a gxio_mpipe_link_open() call. This opens the link, but does
1592 * not request data permission, so it does not conflict with any exclusive
1593 * permissions which may be held by other processes. You can then use
1594 * gxio_mpipe_link_get_attr() and gxio_mpipe_link_set_attr() on this link
1595 * object to bring up or take down the link.
1596 *
1597 * Some links support link state bits which support various loopback
1598 * modes. ::GXIO_MPIPE_LINK_LOOP_MAC tests datapaths within the Tile
1599 * Processor itself; ::GXIO_MPIPE_LINK_LOOP_PHY tests the datapath between
1600 * the Tile Processor and the external physical layer interface chip; and
1601 * ::GXIO_MPIPE_LINK_LOOP_EXT tests the entire network datapath with the
1602 * aid of an external loopback connector. In addition to enabling hardware
1603 * testing, such configuration can be useful for software testing, as well.
1604 *
1605 * When LOOP_MAC or LOOP_PHY is enabled, packets transmitted on a channel
1606 * will be received by that channel, instead of being emitted on the
1607 * physical link, and packets received on the physical link will be ignored.
1608 * Other than that, all standard GXIO operations work as you might expect.
1609 * Note that loopback operation requires that the link be brought up using
1610 * one or more of the link speed state bits (see ::GXIO_MPIPE_LINK_SPEED_MASK).
1611 *
1612 * Those familiar with previous versions of the MDE on TILEPro hardware
1613 * will notice significant similarities between the NetIO link management
1614 * model and the mPIPE link management model. However, the NetIO model
1615 * was developed in stages, and some of its features -- for instance,
1616 * the default setting of certain flags -- were shaped by the need to be
1617 * compatible with previous versions of NetIO. Since the features provided
1618 * by the mPIPE hardware and the mPIPE GXIO library are significantly
1619 * different than those provided by NetIO, in some cases, we have made
1620 * different choices in the mPIPE link management API. Thus, please read
1621 * this documentation carefully before assuming that mPIPE link management
1622 * operations are exactly equivalent to their NetIO counterparts.
1623 */
1624
1625/* An object used to manage mPIPE link state and resources. */
1626typedef struct {
1627 /* The overall mPIPE context. */
1628 gxio_mpipe_context_t *context;
1629
1630 /* The channel number used by this link. */
1631 uint8_t channel;
1632
1633 /* The MAC index used by this link. */
1634 uint8_t mac;
1635} gxio_mpipe_link_t;
1636
1637/* Retrieve one of this system's legal link names, and its MAC address.
1638 *
1639 * @param index Link name index. If a system supports N legal link names,
1640 * then indices between 0 and N - 1, inclusive, each correspond to one of
1641 * those names. Thus, to retrieve all of a system's legal link names,
1642 * call this function in a loop, starting with an index of zero, and
1643 * incrementing it once per iteration until -1 is returned.
1644 * @param link_name Pointer to the buffer which will receive the retrieved
1645 * link name. The buffer should contain space for at least
1646 * ::GXIO_MPIPE_LINK_NAME_LEN bytes; the returned name, including the
1647 * terminating null byte, will be no longer than that.
1648 * @param mac_addr Pointer to the buffer which will receive the retrieved
1649 * MAC address. The buffer should contain space for at least 6 bytes.
1650 * @return Zero if a link name was successfully retrieved; -1 if one was
1651 * not.
1652 */
1653extern int gxio_mpipe_link_enumerate_mac(int index, char *link_name,
1654 uint8_t *mac_addr);
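
/*
 * Example (a sketch): enumerating every legal link name on the
 * system, per the loop described above.  pr_info() is assumed to be
 * available to the caller.
 */
static void example_list_links(void)
{
	char name[GXIO_MPIPE_LINK_NAME_LEN];
	uint8_t mac[6];
	int i;

	for (i = 0; gxio_mpipe_link_enumerate_mac(i, name, mac) == 0; i++)
		pr_info("link %d: %s\n", i, name);
}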
1655
1656/* Open an mPIPE link.
1657 *
1658 * A link must be opened before it may be used to send or receive packets,
1659 * and before its state may be examined or changed. Depending upon the
1660 * link's intended use, one or more link permissions may be requested via
1661 * the flags parameter; see @ref gxio_mpipe_link_perm. In addition, flags
1662 * may request that the link's state be modified at open time. See @ref
1663 * gxio_mpipe_link_states and @ref gxio_mpipe_link_open_flags for more detail.
1664 *
1665 * @param link A link state object, which will be initialized if this
1666 * function completes successfully.
1667 * @param context An initialized mPIPE context.
1668 * @param link_name Name of the link.
1669 * @param flags Zero or more @ref gxio_mpipe_link_open_flags, ORed together.
1670 * @return 0 if the link was successfully opened, or a negative error code.
1671 *
1672 */
1673extern int gxio_mpipe_link_open(gxio_mpipe_link_t *link,
1674 gxio_mpipe_context_t *context,
1675 const char *link_name, unsigned int flags);
1676
1677/* Close an mPIPE link.
1678 *
1679 * Closing a link makes it available for use by other processes. Once
1680 * a link has been closed, packets may no longer be sent on or received
1681 * from the link, and its state may not be examined or changed.
1682 *
1683 * @param link A link state object, which will no longer be initialized
1684 * if this function completes successfully.
1685 * @return 0 if the link was successfully closed, or a negative error code.
1686 *
1687 */
1688extern int gxio_mpipe_link_close(gxio_mpipe_link_t *link);
1689
1690/* Return a link's channel number.
1691 *
1692 * @param link A properly initialized link state object.
1693 * @return The channel number for the link.
1694 */
1695static inline int gxio_mpipe_link_channel(gxio_mpipe_link_t *link)
1696{
1697 return link->channel;
1698}
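
/*
 * Example (a sketch): opening a link with default (shared, data)
 * permission, asking for it to be brought up at open time, and
 * retrieving its channel for use in classifier rules or an eDMA ring.
 * "gbe0" is an assumed link name; real code would enumerate or
 * configure it.  The link is left open for the life of the process.
 */
static int example_open_link(gxio_mpipe_context_t *context)
{
	gxio_mpipe_link_t link;
	int rc = gxio_mpipe_link_open(&link, context, "gbe0",
				      GXIO_MPIPE_LINK_AUTO_UP);
	if (rc < 0)
		return rc;
	return gxio_mpipe_link_channel(&link);
}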
1699
1700///////////////////////////////////////////////////////////////////
1701// Timestamp //
1702///////////////////////////////////////////////////////////////////
1703
1704/* Get the timestamp of mPIPE when this routine is called.
1705 *
1706 * @param context An initialized mPIPE context.
1707 * @param ts A timespec structure to store the current clock.
1708 * @return If the call was successful, zero; otherwise, a negative error
1709 * code.
1710 */
1711extern int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context,
1712 struct timespec *ts);
1713
1714/* Set the timestamp of mPIPE.
1715 *
1716 * @param context An initialized mPIPE context.
1717 * @param ts A timespec structure containing the requested clock setting.
1718 * @return If the call was successful, zero; otherwise, a negative error
1719 * code.
1720 */
1721extern int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context,
1722 const struct timespec *ts);
1723
1724/* Adjust the timestamp of mPIPE.
1725 *
1726 * @param context An initialized mPIPE context.
1727 * @param delta A signed time offset to adjust, in nanoseconds.
1728 * The absolute value of this parameter must be less than or
1729 * equal to 1000000000.
1730 * @return If the call was successful, zero; otherwise, a negative error
1731 * code.
1732 */
1733extern int gxio_mpipe_adjust_timestamp(gxio_mpipe_context_t *context,
1734 int64_t delta);
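
/*
 * Example (a sketch): a PTP-style correction, nudging the mPIPE clock
 * by a signed offset within the documented +/- 1000000000 ns bound.
 * The -500 here is an arbitrary illustrative value.
 */
static void example_nudge_clock(gxio_mpipe_context_t *context)
{
	struct timespec ts;

	if (gxio_mpipe_get_timestamp(context, &ts) == 0)
		gxio_mpipe_adjust_timestamp(context, -500);
}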
1735
1736#endif /* !_GXIO_MPIPE_H_ */
diff --git a/arch/tile/include/hv/drv_mpipe_intf.h b/arch/tile/include/hv/drv_mpipe_intf.h
new file mode 100644
index 000000000000..6cdae3bf046e
--- /dev/null
+++ b/arch/tile/include/hv/drv_mpipe_intf.h
@@ -0,0 +1,602 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15/**
16 * Interface definitions for the mpipe driver.
17 */
18
19#ifndef _SYS_HV_DRV_MPIPE_INTF_H
20#define _SYS_HV_DRV_MPIPE_INTF_H
21
22#include <arch/mpipe.h>
23#include <arch/mpipe_constants.h>
24
25
26/** Number of buffer stacks (32). */
27#define HV_MPIPE_NUM_BUFFER_STACKS \
28 (MPIPE_MMIO_INIT_DAT_GX36_1__BUFFER_STACK_MASK_WIDTH)
29
30/** Number of NotifRings (256). */
31#define HV_MPIPE_NUM_NOTIF_RINGS (MPIPE_NUM_NOTIF_RINGS)
32
33/** Number of NotifGroups (32). */
34#define HV_MPIPE_NUM_NOTIF_GROUPS (MPIPE_NUM_NOTIF_GROUPS)
35
36/** Number of buckets (4160). */
37#define HV_MPIPE_NUM_BUCKETS (MPIPE_NUM_BUCKETS)
38
39/** Number of "lo" buckets (4096). */
40#define HV_MPIPE_NUM_LO_BUCKETS 4096
41
42/** Number of "hi" buckets (64). */
43#define HV_MPIPE_NUM_HI_BUCKETS \
44 (HV_MPIPE_NUM_BUCKETS - HV_MPIPE_NUM_LO_BUCKETS)
45
46/** Number of edma rings (24). */
47#define HV_MPIPE_NUM_EDMA_RINGS \
48 (MPIPE_MMIO_INIT_DAT_GX36_1__EDMA_POST_MASK_WIDTH)
49
50
51
52
53/** A flag bit indicating a fixed resource allocation. */
54#define HV_MPIPE_ALLOC_FIXED 0x01
55
56/** Offset for the config register MMIO region. */
57#define HV_MPIPE_CONFIG_MMIO_OFFSET \
58 (MPIPE_MMIO_ADDR__REGION_VAL_CFG << MPIPE_MMIO_ADDR__REGION_SHIFT)
59
60/** Size of the config register MMIO region. */
61#define HV_MPIPE_CONFIG_MMIO_SIZE (64 * 1024)
62
63/** Offset for the config register MMIO region. */
64#define HV_MPIPE_FAST_MMIO_OFFSET \
65 (MPIPE_MMIO_ADDR__REGION_VAL_IDMA << MPIPE_MMIO_ADDR__REGION_SHIFT)
66
67/** Size of the fast register MMIO region (IDMA, EDMA, buffer stack). */
68#define HV_MPIPE_FAST_MMIO_SIZE \
69 ((MPIPE_MMIO_ADDR__REGION_VAL_BSM + 1 - MPIPE_MMIO_ADDR__REGION_VAL_IDMA) \
70 << MPIPE_MMIO_ADDR__REGION_SHIFT)
71
72
73/*
74 * Each type of resource allocation comes in quantized chunks, where
75 * XXX_BITS is the number of chunks, and XXX_RES_PER_BIT is the number
76 * of resources in each chunk.
77 */
78
79/** Number of buffer stack chunks available (32). */
80#define HV_MPIPE_ALLOC_BUFFER_STACKS_BITS \
81 MPIPE_MMIO_INIT_DAT_GX36_1__BUFFER_STACK_MASK_WIDTH
82
83/** Granularity of buffer stack allocation (1). */
84#define HV_MPIPE_ALLOC_BUFFER_STACKS_RES_PER_BIT \
85 (HV_MPIPE_NUM_BUFFER_STACKS / HV_MPIPE_ALLOC_BUFFER_STACKS_BITS)
86
87/** Number of NotifRing chunks available (32). */
88#define HV_MPIPE_ALLOC_NOTIF_RINGS_BITS \
89 MPIPE_MMIO_INIT_DAT_GX36_0__NOTIF_RING_MASK_WIDTH
90
91/** Granularity of NotifRing allocation (8). */
92#define HV_MPIPE_ALLOC_NOTIF_RINGS_RES_PER_BIT \
93 (HV_MPIPE_NUM_NOTIF_RINGS / HV_MPIPE_ALLOC_NOTIF_RINGS_BITS)
94
95/** Number of NotifGroup chunks available (32). */
96#define HV_MPIPE_ALLOC_NOTIF_GROUPS_BITS \
97 HV_MPIPE_NUM_NOTIF_GROUPS
98
99/** Granularity of NotifGroup allocation (1). */
100#define HV_MPIPE_ALLOC_NOTIF_GROUPS_RES_PER_BIT \
101 (HV_MPIPE_NUM_NOTIF_GROUPS / HV_MPIPE_ALLOC_NOTIF_GROUPS_BITS)
102
103/** Number of lo bucket chunks available (16). */
104#define HV_MPIPE_ALLOC_LO_BUCKETS_BITS \
105 MPIPE_MMIO_INIT_DAT_GX36_0__BUCKET_RELEASE_MASK_LO_WIDTH
106
107/** Granularity of lo bucket allocation (256). */
108#define HV_MPIPE_ALLOC_LO_BUCKETS_RES_PER_BIT \
109 (HV_MPIPE_NUM_LO_BUCKETS / HV_MPIPE_ALLOC_LO_BUCKETS_BITS)
110
111/** Number of hi bucket chunks available (16). */
112#define HV_MPIPE_ALLOC_HI_BUCKETS_BITS \
113 MPIPE_MMIO_INIT_DAT_GX36_0__BUCKET_RELEASE_MASK_HI_WIDTH
114
115/** Granularity of hi bucket allocation (4). */
116#define HV_MPIPE_ALLOC_HI_BUCKETS_RES_PER_BIT \
117 (HV_MPIPE_NUM_HI_BUCKETS / HV_MPIPE_ALLOC_HI_BUCKETS_BITS)
118
119/** Number of eDMA ring chunks available (24). */
120#define HV_MPIPE_ALLOC_EDMA_RINGS_BITS \
121 MPIPE_MMIO_INIT_DAT_GX36_1__EDMA_POST_MASK_WIDTH
122
123/** Granularity of eDMA ring allocation (1). */
124#define HV_MPIPE_ALLOC_EDMA_RINGS_RES_PER_BIT \
125 (HV_MPIPE_NUM_EDMA_RINGS / HV_MPIPE_ALLOC_EDMA_RINGS_BITS)
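
/*
 * Example (a hypothetical helper, for illustration only): the
 * quantization arithmetic above means a request for "n" NotifRings
 * consumes whole chunks of HV_MPIPE_ALLOC_NOTIF_RINGS_RES_PER_BIT
 * (256 / 32 = 8) rings each.
 */
static inline unsigned int example_notif_ring_chunks(unsigned int n)
{
	unsigned int per = HV_MPIPE_ALLOC_NOTIF_RINGS_RES_PER_BIT;

	return (n + per - 1) / per;	/* round up to whole chunks */
}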
126
127
128
129
130/** Bit vector encoding which NotifRings are in a NotifGroup. */
131typedef struct
132{
133 /** The actual bits. */
134 uint64_t ring_mask[4];
135
136} gxio_mpipe_notif_group_bits_t;
137
138
139/** Another name for MPIPE_LBL_INIT_DAT_BSTS_TBL_t. */
140typedef MPIPE_LBL_INIT_DAT_BSTS_TBL_t gxio_mpipe_bucket_info_t;
141
142
143
144/** Eight buffer stack ids. */
145typedef struct
146{
147 /** The stacks. */
148 uint8_t stacks[8];
149
150} gxio_mpipe_rules_stacks_t;
151
152
153/** A destination mac address. */
154typedef struct
155{
156 /** The octets. */
157 uint8_t octets[6];
158
159} gxio_mpipe_rules_dmac_t;
160
161
162/** A vlan. */
163typedef uint16_t gxio_mpipe_rules_vlan_t;
164
165
166
167/** Maximum number of characters in a link name. */
168#define GXIO_MPIPE_LINK_NAME_LEN 32
169
170
171/** Structure holding a link name. Only needed, and only typedef'ed,
172 * because the IORPC stub generator only handles types which are single
173 * words coming before the parameter name. */
174typedef struct
175{
176 /** The name itself. */
177 char name[GXIO_MPIPE_LINK_NAME_LEN];
178}
179_gxio_mpipe_link_name_t;
180
181/** Maximum number of characters in a symbol name. */
182#define GXIO_MPIPE_SYMBOL_NAME_LEN 128
183
184
185/** Structure holding a symbol name. Only needed, and only typedef'ed,
186 * because the IORPC stub generator only handles types which are single
187 * words coming before the parameter name. */
188typedef struct
189{
190 /** The name itself. */
191 char name[GXIO_MPIPE_SYMBOL_NAME_LEN];
192}
193_gxio_mpipe_symbol_name_t;
194
195
196/** Structure holding a MAC address. */
197typedef struct
198{
199 /** The address. */
200 uint8_t mac[6];
201}
202_gxio_mpipe_link_mac_t;
203
204
205
206/** Request shared data permission -- that is, the ability to send and
207 * receive packets -- on the specified link. Other processes may also
208 * request shared data permission on the same link.
209 *
210 * No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA,
211 * or ::GXIO_MPIPE_LINK_EXCL_DATA may be specified in a gxio_mpipe_link_open()
212 * call. If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed.
213 */
214#define GXIO_MPIPE_LINK_DATA 0x00000001UL
215
216/** Do not request data permission on the specified link.
217 *
218 * No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA,
219 * or ::GXIO_MPIPE_LINK_EXCL_DATA may be specified in a gxio_mpipe_link_open()
220 * call. If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed.
221 */
222#define GXIO_MPIPE_LINK_NO_DATA 0x00000002UL
223
224/** Request exclusive data permission -- that is, the ability to send and
225 * receive packets -- on the specified link. No other processes may
226 * request data permission on this link, and if any process already has
227 * data permission on it, this open will fail.
228 *
229 * No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA,
230 * or ::GXIO_MPIPE_LINK_EXCL_DATA may be specified in a gxio_mpipe_link_open()
231 * call. If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed.
232 */
233#define GXIO_MPIPE_LINK_EXCL_DATA 0x00000004UL
234
235/** Request shared stats permission -- that is, the ability to read and write
236 * registers which contain link statistics, and to get link attributes --
237 * on the specified link. Other processes may also request shared stats
238 * permission on the same link.
239 *
240 * No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS,
241 * or ::GXIO_MPIPE_LINK_EXCL_STATS may be specified in a gxio_mpipe_link_open()
242 * call. If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed.
243 */
244#define GXIO_MPIPE_LINK_STATS 0x00000008UL
245
246/** Do not request stats permission on the specified link.
247 *
248 * No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS,
249 * or ::GXIO_MPIPE_LINK_EXCL_STATS may be specified in a gxio_mpipe_link_open()
250 * call. If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed.
251 */
252#define GXIO_MPIPE_LINK_NO_STATS 0x00000010UL
253
254/** Request exclusive stats permission -- that is, the ability to read and
255 * write registers which contain link statistics, and to get link
256 * attributes -- on the specified link. No other processes may request
257 * stats permission on this link, and if any process already
258 * has stats permission on it, this open will fail.
259 *
260 * Requesting exclusive stats permission is normally a very bad idea, since
261 * it prevents programs like mpipe-stat from providing information on this
262 * link. Applications should only do this if they use MAC statistics
263 * registers, and cannot tolerate any of the clear-on-read registers being
264 * reset by other statistics programs.
265 *
266 * No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS,
267 * or ::GXIO_MPIPE_LINK_EXCL_STATS may be specified in a gxio_mpipe_link_open()
268 * call. If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed.
269 */
270#define GXIO_MPIPE_LINK_EXCL_STATS 0x00000020UL
271
272/** Request shared control permission -- that is, the ability to modify link
273 * attributes, and read and write MAC and MDIO registers -- on the
274 * specified link. Other processes may also request shared control
275 * permission on the same link.
276 *
277 * No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL,
278 * or ::GXIO_MPIPE_LINK_EXCL_CTL may be specified in a gxio_mpipe_link_open()
279 * call. If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed.
280 */
281#define GXIO_MPIPE_LINK_CTL 0x00000040UL
282
283/** Do not request control permission on the specified link.
284 *
285 * No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL,
286 * or ::GXIO_MPIPE_LINK_EXCL_CTL may be specified in a gxio_mpipe_link_open()
287 * call. If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed.
288 */
289#define GXIO_MPIPE_LINK_NO_CTL 0x00000080UL
290
291/** Request exclusive control permission -- that is, the ability to modify
292 * link attributes, and read and write MAC and MDIO registers -- on the
293 * specified link. No other processes may request control permission on
294 * this link, and if any process already has control permission on it,
295 * this open will fail.
296 *
297 * Requesting exclusive control permission is not always a good idea, since
298 * it prevents programs like mpipe-link from configuring the link.
299 *
300 * No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL,
301 * or ::GXIO_MPIPE_LINK_EXCL_CTL may be specified in a gxio_mpipe_link_open()
302 * call. If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed.
303 */
304#define GXIO_MPIPE_LINK_EXCL_CTL 0x00000100UL
305
306/** Set the desired state of the link to up, allowing any speeds which are
307 * supported by the link hardware, as part of this open operation; do not
308 * change the desired state of the link when it is closed or the process
309 * exits. No more than one of ::GXIO_MPIPE_LINK_AUTO_UP,
310 * ::GXIO_MPIPE_LINK_AUTO_UPDOWN, ::GXIO_MPIPE_LINK_AUTO_DOWN, or
311 * ::GXIO_MPIPE_LINK_AUTO_NONE may be specified in a gxio_mpipe_link_open()
312 * call. If none are specified, ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed.
313 */
314#define GXIO_MPIPE_LINK_AUTO_UP 0x00000200UL
315
316/** Set the desired state of the link to up, allowing any speeds which are
317 * supported by the link hardware, as part of this open operation; when the
318 * link is closed or this process exits, if no other process has the link
319 * open, set the desired state of the link to down. No more than one of
320 * ::GXIO_MPIPE_LINK_AUTO_UP, ::GXIO_MPIPE_LINK_AUTO_UPDOWN,
321 * ::GXIO_MPIPE_LINK_AUTO_DOWN, or ::GXIO_MPIPE_LINK_AUTO_NONE may be
322 * specified in a gxio_mpipe_link_open() call. If none are specified,
323 * ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed.
324 */
325#define GXIO_MPIPE_LINK_AUTO_UPDOWN 0x00000400UL
326
327/** Do not change the desired state of the link as part of the open
328 * operation; when the link is closed or this process exits, if no other
329 * process has the link open, set the desired state of the link to down.
330 * No more than one of ::GXIO_MPIPE_LINK_AUTO_UP,
331 * ::GXIO_MPIPE_LINK_AUTO_UPDOWN, ::GXIO_MPIPE_LINK_AUTO_DOWN, or
332 * ::GXIO_MPIPE_LINK_AUTO_NONE may be specified in a gxio_mpipe_link_open()
333 * call. If none are specified, ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed.
334 */
335#define GXIO_MPIPE_LINK_AUTO_DOWN 0x00000800UL
336
337/** Do not change the desired state of the link as part of the open
338 * operation; do not change the desired state of the link when it is
339 * closed or the process exits. No more than one of
340 * ::GXIO_MPIPE_LINK_AUTO_UP, ::GXIO_MPIPE_LINK_AUTO_UPDOWN,
341 * ::GXIO_MPIPE_LINK_AUTO_DOWN, or ::GXIO_MPIPE_LINK_AUTO_NONE may be
342 * specified in a gxio_mpipe_link_open() call. If none are specified,
343 * ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed.
344 */
345#define GXIO_MPIPE_LINK_AUTO_NONE 0x00001000UL
346
347/** Request that this open call not complete until the network link is up.
348 * The process will wait as long as necessary for this to happen;
349 * applications which wish to abandon waiting for the link after a
350 * specific time period should not specify this flag when opening a link,
351 * but should instead call gxio_mpipe_link_wait() afterward. The link
352 * must be opened with stats permission. Note that this flag by itself
353 * does not change the desired link state; if other open flags or previous
354 * link state changes have not requested a desired state of up, the open
355 * call will never complete. This flag is not available to kernel
356 * clients.
357 */
358#define GXIO_MPIPE_LINK_WAIT 0x00002000UL
359
360
361/*
362 * Note: link attributes must fit in 24 bits, since we use the top 8 bits
363 * of the IORPC offset word for the channel number.
364 */
365
366/** Determine whether jumbo frames may be received. If this attribute's
367 * value is nonzero, the MAC will accept frames of up to 10240 bytes.
368 * If the value is zero, the MAC will only accept frames of up to 1544
369 * bytes. The default value is zero. */
370#define GXIO_MPIPE_LINK_RECEIVE_JUMBO 0x010000
371
372/** Determine whether to send pause frames on this link if the mPIPE packet
373 * FIFO is nearly full. If the value is zero, pause frames are not sent.
374 * If the value is nonzero, it is the delay value which will be sent in any
375 * pause frames which are output, in units of 512 bit times.
376 *
377 * Bear in mind that in almost all circumstances, the mPIPE packet FIFO
378 * will never fill up, since mPIPE will empty it as fast as or faster than
379 * the incoming data rate, by either delivering or dropping packets. The
380 * only situation in which this is not true is if the memory and cache
381 * subsystem is extremely heavily loaded, and mPIPE cannot perform DMA of
382 * packet data to memory in a timely fashion. In particular, pause frames
383 * will <em>not</em> be sent if packets cannot be delivered because
384 * NotifRings are full, buckets are full, or buffers are not available in
385 * a buffer stack. */
386#define GXIO_MPIPE_LINK_SEND_PAUSE 0x020000
387
388/** Determine whether to suspend output on the receipt of pause frames.
389 * If the value is nonzero, the mPIPE shim will suspend output on the link's
390 * channel when a pause frame is received. If the value is zero, pause
391 * frames will be ignored. The default value is zero. */
392#define GXIO_MPIPE_LINK_RECEIVE_PAUSE 0x030000
393
394/** Interface MAC address. The value is a 6-byte MAC address, in the least
395 * significant 48 bits of the value; in other words, an address which would
396 * be printed as '12:34:56:78:90:AB' in IEEE 802 canonical format would
397 * be returned as 0x1234567890ab.
398 *
399 * Depending upon the overall system design, a MAC address may or may not
400 * be available for each interface. Note that the interface's MAC address
401 * does not limit the packets received on its channel, although the
402 * classifier's rules could be configured to do that. Similarly, the MAC
403 * address is not used when transmitting packets, although applications
404 * could certainly decide to use the assigned address as a source MAC
405 * address when doing so. This attribute may only be retrieved with
406 * gxio_mpipe_link_get_attr(); it may not be modified.
407 */
408#define GXIO_MPIPE_LINK_MAC 0x040000
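
/*
 * Example (a sketch): unpacking the 48-bit attribute value described
 * above into canonical octet order; "val" would be the value
 * retrieved for ::GXIO_MPIPE_LINK_MAC.
 */
static void example_unpack_mac(uint64_t val, uint8_t mac[6])
{
	int i;

	/* mac[0] is the most significant octet of the 48-bit value. */
	for (i = 0; i < 6; i++)
		mac[i] = (val >> (8 * (5 - i))) & 0xff;
}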
409
410/** Determine whether to discard egress packets on link down. If this value
411 * is nonzero, packets sent on this link while the link is down will be
412 * discarded. If this value is zero, no packets will be sent on this link
413 * while it is down. The default value is one. */
414#define GXIO_MPIPE_LINK_DISCARD_IF_DOWN 0x050000
415
416/** Possible link state. The value is a combination of link state flags,
417 * ORed together, that indicate link modes which are actually supported by
418 * the hardware. This attribute may only be retrieved with
419 * gxio_mpipe_link_get_attr(); it may not be modified. */
420#define GXIO_MPIPE_LINK_POSSIBLE_STATE 0x060000
421
422/** Current link state. The value is a combination of link state flags,
423 * ORed together, that indicate the current state of the hardware. If the
424 * link is down, the value ANDed with ::GXIO_MPIPE_LINK_SPEED_MASK will be
425 * zero; if the link is up, the value ANDed with ::GXIO_MPIPE_LINK_SPEED_MASK
426 * will result in exactly one of the speed values, indicating the current speed.
427 * This attribute may only be retrieved with gxio_mpipe_link_get_attr(); it
428 * may not be modified. */
429#define GXIO_MPIPE_LINK_CURRENT_STATE 0x070000
430
431/** Desired link state. The value is a combination of flags, which specify
432 * the desired state for the link. With gxio_mpipe_link_set_attr(), this
433 * will, in the background, attempt to bring up the link using whichever of
434 * the requested flags are reasonable, or take down the link if the flags
435 * are zero. The actual link up or down operation may happen after this
436 * call completes. If the link state changes in the future, the system
437 * will continue to try to get back to the desired link state; for
438 * instance, if the link is brought up successfully, and then the network
439 * cable is disconnected, the link will go down. However, the desired
440 * state of the link is still up, so if the cable is reconnected, the link
441 * will be brought up again.
442 *
443 * With gxio_mpipe_link_get_attr(), this will indicate the desired state
444 * for the link, as set with a previous gxio_mpipe_link_set_attr() call,
445 * or implicitly by a gxio_mpipe_link_open() or link close operation.
446 * This may not reflect the current state of the link; to get that, use
447 * ::GXIO_MPIPE_LINK_CURRENT_STATE.
448 */
449#define GXIO_MPIPE_LINK_DESIRED_STATE 0x080000
450
451
452
453/** Link can run, should run, or is running at 10 Mbps. */
454#define GXIO_MPIPE_LINK_10M 0x0000000000000001UL
455
456/** Link can run, should run, or is running at 100 Mbps. */
457#define GXIO_MPIPE_LINK_100M 0x0000000000000002UL
458
459/** Link can run, should run, or is running at 1 Gbps. */
460#define GXIO_MPIPE_LINK_1G 0x0000000000000004UL
461
462/** Link can run, should run, or is running at 10 Gbps. */
463#define GXIO_MPIPE_LINK_10G 0x0000000000000008UL
464
465/** Link can run, should run, or is running at 20 Gbps. */
466#define GXIO_MPIPE_LINK_20G 0x0000000000000010UL
467
468/** Link can run, should run, or is running at 25 Gbps. */
469#define GXIO_MPIPE_LINK_25G 0x0000000000000020UL
470
471/** Link can run, should run, or is running at 50 Gbps. */
472#define GXIO_MPIPE_LINK_50G 0x0000000000000040UL
473
474/** Link should run at the highest speed supported by the link and by
475 * the device connected to the link. Only usable as a value for
476 * the link's desired state; never returned as a value for the current
477 * or possible states. */
478#define GXIO_MPIPE_LINK_ANYSPEED 0x0000000000000800UL
479
480/** All legal link speeds. This value is provided for use in extracting
481 * the speed-related subset of the link state flags; it is not intended
482 * to be set directly as a value for one of the GXIO_MPIPE_LINK_xxx_STATE
483 * attributes. A link is up or is requested to be up if its current or
484 * desired state, respectively, ANDED with this value, is nonzero. */
485#define GXIO_MPIPE_LINK_SPEED_MASK 0x0000000000000FFFUL
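
/*
 * Example (a sketch): testing "link up" as described above, where
 * "state" is a value retrieved for ::GXIO_MPIPE_LINK_CURRENT_STATE or
 * ::GXIO_MPIPE_LINK_DESIRED_STATE.
 */
static inline int example_link_is_up(uint64_t state)
{
	return (state & GXIO_MPIPE_LINK_SPEED_MASK) != 0;
}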
486
487/** Link can run, should run, or is running in MAC loopback mode. This
488 * loops transmitted packets back to the receiver, inside the Tile
489 * Processor. */
490#define GXIO_MPIPE_LINK_LOOP_MAC 0x0000000000001000UL
491
492/** Link can run, should run, or is running in PHY loopback mode. This
493 * loops transmitted packets back to the receiver, inside the external
494 * PHY chip. */
495#define GXIO_MPIPE_LINK_LOOP_PHY 0x0000000000002000UL
496
497/** Link can run, should run, or is running in external loopback mode.
498 * This requires that an external loopback plug be installed on the
499 * Ethernet port. Note that only some links require that this be
500 * configured via the gxio_mpipe_link routines; other links can do
501 * external loopback with the plug and no special configuration. */
502#define GXIO_MPIPE_LINK_LOOP_EXT 0x0000000000004000UL
503
504/** All legal loopback types. */
505#define GXIO_MPIPE_LINK_LOOP_MASK 0x000000000000F000UL
506
507/** Link can run, should run, or is running in full-duplex mode.
508 * If neither ::GXIO_MPIPE_LINK_FDX nor ::GXIO_MPIPE_LINK_HDX are
509 * specified in a set of desired state flags, both are assumed. */
510#define GXIO_MPIPE_LINK_FDX 0x0000000000010000UL
511
512/** Link can run, should run, or is running in half-duplex mode.
513 * If neither ::GXIO_MPIPE_LINK_FDX nor ::GXIO_MPIPE_LINK_HDX are
514 * specified in a set of desired state flags, both are assumed. */
515#define GXIO_MPIPE_LINK_HDX 0x0000000000020000UL
516
517
518/** An individual rule. */
519typedef struct
520{
521 /** The total size. */
522 uint16_t size;
523
524 /** The priority. */
525 int16_t priority;
526
527 /** The "headroom" in each buffer. */
528 uint8_t headroom;
529
530 /** The "tailroom" in each buffer. */
531 uint8_t tailroom;
532
533 /** The "capacity" of the largest buffer. */
534 uint16_t capacity;
535
536 /** The mask for converting a flow hash into a bucket. */
537 uint16_t bucket_mask;
538
539 /** The offset for converting a flow hash into a bucket. */
540 uint16_t bucket_first;
541
542 /** The buffer stack ids. */
543 gxio_mpipe_rules_stacks_t stacks;
544
545 /** The actual channels. */
546 uint32_t channel_bits;
547
548 /** The number of dmacs. */
549 uint16_t num_dmacs;
550
551 /** The number of vlans. */
552 uint16_t num_vlans;
553
554 /** The actual dmacs and vlans. */
555 uint8_t dmacs_and_vlans[];
556
557} gxio_mpipe_rules_rule_t;
558
559
560/** A list of classifier rules. */
561typedef struct
562{
563 /** The offset to the end of the current rule. */
564 uint16_t tail;
565
566 /** The offset to the start of the current rule. */
567 uint16_t head;
568
569 /** The actual rules. */
570 uint8_t rules[4096 - 4];
571
572} gxio_mpipe_rules_list_t;
573
574
575
576
577/** mPIPE statistics structure. These counters include all relevant
578 * events occurring on all links within the mPIPE shim. */
579typedef struct
580{
581 /** Number of ingress packets dropped for any reason. */
582 uint64_t ingress_drops;
583 /** Number of ingress packets dropped because a buffer stack was empty. */
584 uint64_t ingress_drops_no_buf;
585 /** Number of ingress packets dropped or truncated due to lack of space in
586 * the iPkt buffer. */
587 uint64_t ingress_drops_ipkt;
588 /** Number of ingress packets dropped by the classifier or load balancer */
589 uint64_t ingress_drops_cls_lb;
590 /** Total number of ingress packets. */
591 uint64_t ingress_packets;
592 /** Total number of egress packets. */
593 uint64_t egress_packets;
594 /** Total number of ingress bytes. */
595 uint64_t ingress_bytes;
596 /** Total number of egress bytes. */
597 uint64_t egress_bytes;
598}
599gxio_mpipe_stats_t;
600
601
602#endif /* _SYS_HV_DRV_MPIPE_INTF_H */