author     Michael Chan <mchan@broadcom.com>    2009-06-08 21:14:43 -0400
committer  James Bottomley <James.Bottomley@HansenPartnership.com>    2009-06-09 11:22:42 -0400
commit     a463696039f7097ce87c21db3cf5c16cdcb3850d (patch)
tree       3308681e117008282fd73a224215e0aab173262e /drivers/net/cnic_defs.h
parent     4edd473f208cff77ce1f7ef26d5a41f31fa198e0 (diff)
[SCSI] cnic: Add new Broadcom CNIC driver.
The CNIC driver controls BNX2 hardware rings and resources used by
iSCSI. Most hardware resources for iSCSI are separate from those used
for Ethernet networking. iSCSI uses a separate MAC address and IP
address. The CNIC driver creates a UIO interface to handle the
non-offloaded packets, such as ARP, in userspace.

Signed-off-by: Michael Chan <mchan@broadcom.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Diffstat (limited to 'drivers/net/cnic_defs.h')
-rw-r--r--    drivers/net/cnic_defs.h    580
1 file changed, 580 insertions, 0 deletions
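The UIO interface mentioned in the commit message is consumed from userspace through a character device. A minimal sketch of how a userspace helper could wait for the device's interrupt notifications follows; the node name /dev/uio0 and the processing loop are illustrative assumptions, not part of this patch:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t event_count;
	/* Hypothetical node; the actual uioX index depends on probe order. */
	int fd = open("/dev/uio0", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* A blocking read() on a UIO device returns a 4-byte interrupt count. */
	while (read(fd, &event_count, sizeof(event_count)) == sizeof(event_count)) {
		/* Drain and process the non-offloaded frames (e.g. ARP) here. */
		printf("interrupts so far: %u\n", event_count);
	}
	close(fd);
	return 0;
}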
diff --git a/drivers/net/cnic_defs.h b/drivers/net/cnic_defs.h
new file mode 100644
index 00000000000..cee80f69445
--- /dev/null
+++ b/drivers/net/cnic_defs.h
@@ -0,0 +1,580 @@

/* cnic_defs.h: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 */

#ifndef CNIC_DEFS_H
#define CNIC_DEFS_H

/* KWQ (kernel work queue) request op codes */
#define L2_KWQE_OPCODE_VALUE_FLUSH (4)

#define L4_KWQE_OPCODE_VALUE_CONNECT1 (50)
#define L4_KWQE_OPCODE_VALUE_CONNECT2 (51)
#define L4_KWQE_OPCODE_VALUE_CONNECT3 (52)
#define L4_KWQE_OPCODE_VALUE_RESET (53)
#define L4_KWQE_OPCODE_VALUE_CLOSE (54)
#define L4_KWQE_OPCODE_VALUE_UPDATE_SECRET (60)
#define L4_KWQE_OPCODE_VALUE_INIT_ULP (61)

#define L4_KWQE_OPCODE_VALUE_OFFLOAD_PG (1)
#define L4_KWQE_OPCODE_VALUE_UPDATE_PG (9)
#define L4_KWQE_OPCODE_VALUE_UPLOAD_PG (14)

#define L5CM_RAMROD_CMD_ID_BASE (0x80)
#define L5CM_RAMROD_CMD_ID_TCP_CONNECT (L5CM_RAMROD_CMD_ID_BASE + 3)
#define L5CM_RAMROD_CMD_ID_CLOSE (L5CM_RAMROD_CMD_ID_BASE + 12)
#define L5CM_RAMROD_CMD_ID_ABORT (L5CM_RAMROD_CMD_ID_BASE + 13)
#define L5CM_RAMROD_CMD_ID_SEARCHER_DELETE (L5CM_RAMROD_CMD_ID_BASE + 14)
#define L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD (L5CM_RAMROD_CMD_ID_BASE + 15)

/* KCQ (kernel completion queue) response op codes */
#define L4_KCQE_OPCODE_VALUE_CLOSE_COMP (53)
#define L4_KCQE_OPCODE_VALUE_RESET_COMP (54)
#define L4_KCQE_OPCODE_VALUE_FW_TCP_UPDATE (55)
#define L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE (56)
#define L4_KCQE_OPCODE_VALUE_RESET_RECEIVED (57)
#define L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED (58)
#define L4_KCQE_OPCODE_VALUE_INIT_ULP (61)

#define L4_KCQE_OPCODE_VALUE_OFFLOAD_PG (1)
#define L4_KCQE_OPCODE_VALUE_UPDATE_PG (9)
#define L4_KCQE_OPCODE_VALUE_UPLOAD_PG (14)

/* KCQ (kernel completion queue) completion status */
#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0)
#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93)

#define L4_LAYER_CODE (4)
#define L2_LAYER_CODE (2)

/*
 * L4 KCQ CQE
 */
struct l4_kcq {
	u32 cid;
	u32 pg_cid;
	u32 conn_id;
	u32 pg_host_opaque;
#if defined(__BIG_ENDIAN)
	u16 status;
	u16 reserved1;
#elif defined(__LITTLE_ENDIAN)
	u16 reserved1;
	u16 status;
#endif
	u32 reserved2[2];
#if defined(__BIG_ENDIAN)
	u8 flags;
#define L4_KCQ_RESERVED3 (0x7<<0)
#define L4_KCQ_RESERVED3_SHIFT 0
#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
#define L4_KCQ_LAYER_CODE (0x7<<4)
#define L4_KCQ_LAYER_CODE_SHIFT 4
#define L4_KCQ_RESERVED4 (0x1<<7)
#define L4_KCQ_RESERVED4_SHIFT 7
	u8 op_code;
	u16 qe_self_seq;
#elif defined(__LITTLE_ENDIAN)
	u16 qe_self_seq;
	u8 op_code;
	u8 flags;
#define L4_KCQ_RESERVED3 (0x7<<0)
#define L4_KCQ_RESERVED3_SHIFT 0
#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
#define L4_KCQ_LAYER_CODE (0x7<<4)
#define L4_KCQ_LAYER_CODE_SHIFT 4
#define L4_KCQ_RESERVED4 (0x1<<7)
#define L4_KCQ_RESERVED4_SHIFT 7
#endif
};


/*
 * L4 KCQ CQE PG upload
 */
struct l4_kcq_upload_pg {
	u32 pg_cid;
#if defined(__BIG_ENDIAN)
	u16 pg_status;
	u16 pg_ipid_count;
#elif defined(__LITTLE_ENDIAN)
	u16 pg_ipid_count;
	u16 pg_status;
#endif
	u32 reserved1[5];
#if defined(__BIG_ENDIAN)
	u8 flags;
#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
	u8 op_code;
	u16 qe_self_seq;
#elif defined(__LITTLE_ENDIAN)
	u16 qe_self_seq;
	u8 op_code;
	u8 flags;
#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
#endif
};


/*
 * Gracefully close the connection request
 */
struct l4_kwq_close_req {
#if defined(__BIG_ENDIAN)
	u8 flags;
#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
	u8 op_code;
	u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
	u16 reserved0;
	u8 op_code;
	u8 flags;
#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
#endif
	u32 cid;
	u32 reserved2[6];
};


/*
 * The first request to be passed in order to establish connection in option2
 */
struct l4_kwq_connect_req1 {
#if defined(__BIG_ENDIAN)
	u8 flags;
#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
	u8 op_code;
	u8 reserved0;
	u8 conn_flags;
#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
#elif defined(__LITTLE_ENDIAN)
	u8 conn_flags;
#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
	u8 reserved0;
	u8 op_code;
	u8 flags;
#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
#endif
	u32 cid;
	u32 pg_cid;
	u32 src_ip;
	u32 dst_ip;
#if defined(__BIG_ENDIAN)
	u16 dst_port;
	u16 src_port;
#elif defined(__LITTLE_ENDIAN)
	u16 src_port;
	u16 dst_port;
#endif
#if defined(__BIG_ENDIAN)
	u8 rsrv1[3];
	u8 tcp_flags;
#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
#elif defined(__LITTLE_ENDIAN)
	u8 tcp_flags;
#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
	u8 rsrv1[3];
#endif
	u32 rsrv2;
};


/*
 * The second (optional) request to be passed in order to establish
 * connection in option2 - for IPv6 only
 */
struct l4_kwq_connect_req2 {
#if defined(__BIG_ENDIAN)
	u8 flags;
#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
	u8 op_code;
	u8 reserved0;
	u8 rsrv;
#elif defined(__LITTLE_ENDIAN)
	u8 rsrv;
	u8 reserved0;
	u8 op_code;
	u8 flags;
#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
#endif
	u32 reserved2;
	u32 src_ip_v6_2;
	u32 src_ip_v6_3;
	u32 src_ip_v6_4;
	u32 dst_ip_v6_2;
	u32 dst_ip_v6_3;
	u32 dst_ip_v6_4;
};


/*
 * The third (and last) request to be passed in order to establish
 * connection in option2
 */
struct l4_kwq_connect_req3 {
#if defined(__BIG_ENDIAN)
	u8 flags;
#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
	u8 op_code;
	u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
	u16 reserved0;
	u8 op_code;
	u8 flags;
#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
#endif
	u32 ka_timeout;
	u32 ka_interval;
#if defined(__BIG_ENDIAN)
	u8 snd_seq_scale;
	u8 ttl;
	u8 tos;
	u8 ka_max_probe_count;
#elif defined(__LITTLE_ENDIAN)
	u8 ka_max_probe_count;
	u8 tos;
	u8 ttl;
	u8 snd_seq_scale;
#endif
#if defined(__BIG_ENDIAN)
	u16 pmtu;
	u16 mss;
#elif defined(__LITTLE_ENDIAN)
	u16 mss;
	u16 pmtu;
#endif
	u32 rcv_buf;
	u32 snd_buf;
	u32 seed;
};


/*
 * a KWQE request to offload a PG connection
 */
struct l4_kwq_offload_pg {
#if defined(__BIG_ENDIAN)
	u8 flags;
#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
	u8 op_code;
	u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
	u16 reserved0;
	u8 op_code;
	u8 flags;
#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
#endif
#if defined(__BIG_ENDIAN)
	u8 l2hdr_nbytes;
	u8 pg_flags;
#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
	u8 da0;
	u8 da1;
#elif defined(__LITTLE_ENDIAN)
	u8 da1;
	u8 da0;
	u8 pg_flags;
#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
	u8 l2hdr_nbytes;
#endif
#if defined(__BIG_ENDIAN)
	u8 da2;
	u8 da3;
	u8 da4;
	u8 da5;
#elif defined(__LITTLE_ENDIAN)
	u8 da5;
	u8 da4;
	u8 da3;
	u8 da2;
#endif
#if defined(__BIG_ENDIAN)
	u8 sa0;
	u8 sa1;
	u8 sa2;
	u8 sa3;
#elif defined(__LITTLE_ENDIAN)
	u8 sa3;
	u8 sa2;
	u8 sa1;
	u8 sa0;
#endif
#if defined(__BIG_ENDIAN)
	u8 sa4;
	u8 sa5;
	u16 etype;
#elif defined(__LITTLE_ENDIAN)
	u16 etype;
	u8 sa5;
	u8 sa4;
#endif
#if defined(__BIG_ENDIAN)
	u16 vlan_tag;
	u16 ipid_start;
#elif defined(__LITTLE_ENDIAN)
	u16 ipid_start;
	u16 vlan_tag;
#endif
#if defined(__BIG_ENDIAN)
	u16 ipid_count;
	u16 reserved3;
#elif defined(__LITTLE_ENDIAN)
	u16 reserved3;
	u16 ipid_count;
#endif
	u32 host_opaque;
};


/*
 * Abortively close the connection request
 */
struct l4_kwq_reset_req {
#if defined(__BIG_ENDIAN)
	u8 flags;
#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
	u8 op_code;
	u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
	u16 reserved0;
	u8 op_code;
	u8 flags;
#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
#endif
	u32 cid;
	u32 reserved2[6];
};


/*
 * a KWQE request to update a PG connection
 */
struct l4_kwq_update_pg {
#if defined(__BIG_ENDIAN)
	u8 flags;
#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
	u8 opcode;
	u16 oper16;
#elif defined(__LITTLE_ENDIAN)
	u16 oper16;
	u8 opcode;
	u8 flags;
#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
#endif
	u32 pg_cid;
	u32 pg_host_opaque;
#if defined(__BIG_ENDIAN)
	u8 pg_valids;
#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
	u8 pg_unused_a;
	u16 pg_ipid_count;
#elif defined(__LITTLE_ENDIAN)
	u16 pg_ipid_count;
	u8 pg_unused_a;
	u8 pg_valids;
#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
#endif
#if defined(__BIG_ENDIAN)
	u16 reserverd3;
	u8 da0;
	u8 da1;
#elif defined(__LITTLE_ENDIAN)
	u8 da1;
	u8 da0;
	u16 reserverd3;
#endif
#if defined(__BIG_ENDIAN)
	u8 da2;
	u8 da3;
	u8 da4;
	u8 da5;
#elif defined(__LITTLE_ENDIAN)
	u8 da5;
	u8 da4;
	u8 da3;
	u8 da2;
#endif
	u32 reserved4;
	u32 reserved5;
};


/*
 * a KWQE request to upload a PG or L4 context
 */
struct l4_kwq_upload {
#if defined(__BIG_ENDIAN)
	u8 flags;
#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
	u8 opcode;
	u16 oper16;
#elif defined(__LITTLE_ENDIAN)
	u16 oper16;
	u8 opcode;
	u8 flags;
#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
#endif
	u32 cid;
	u32 reserved2[6];
};

#endif /* CNIC_DEFS_H */
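
Each flag byte in this header pairs a bit-field mask with a matching _SHIFT constant. A minimal sketch of how a consumer of these definitions might decode a completed KCQ entry follows; the helper name and the layer-code check are illustrative assumptions, not code from the driver itself:

#include <linux/types.h>	/* u8, u16, u32 used by cnic_defs.h */
#include "cnic_defs.h"

/* Hypothetical helper: extract fields from a KCQ entry using the
 * MASK/SHIFT pairs defined above.  Returns the opcode if the entry
 * belongs to the L4 layer, or -1 otherwise.
 */
static int l4_kcq_opcode(const struct l4_kcq *kcqe)
{
	u8 layer = (kcqe->flags & L4_KCQ_LAYER_CODE) >> L4_KCQ_LAYER_CODE_SHIFT;

	if (layer != L4_LAYER_CODE)
		return -1;
	return kcqe->op_code;
}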