author		Jan-Bernd Themann <ossthema@de.ibm.com>	2006-09-13 11:44:31 -0400
committer	Jeff Garzik <jeff@garzik.org>	2006-09-13 13:23:52 -0400
commit		7a291083225af6e22ffaa46b3d91cfc1a1ccaab4 (patch)
tree		c87a93ee7d5c1c63ce98dc90a62cd0b4dfc4318f /drivers/net/ehea/ehea_qmr.h
parent		7de745e56244156233e5cdd62b462e52e638d408 (diff)
[PATCH] ehea: IBM eHEA Ethernet Device Driver
Hi Jeff,
I fixed the __iomem issue and tested the driver with sparse. Looks good so far.
Thanks for your effort.
Jan-Bernd Themann
Signed-off-by: Jan-Bernd Themann <themann@de.ibm.com>
drivers/net/Kconfig | 9
drivers/net/Makefile | 1
drivers/net/ehea/Makefile | 6
drivers/net/ehea/ehea.h | 447 ++++++
drivers/net/ehea/ehea_ethtool.c | 294 ++++
drivers/net/ehea/ehea_hcall.h | 51
drivers/net/ehea/ehea_hw.h | 287 ++++
drivers/net/ehea/ehea_main.c | 2654 ++++++++++++++++++++++++++++++++++++++++
drivers/net/ehea/ehea_phyp.c | 705 ++++++++++
drivers/net/ehea/ehea_phyp.h | 455 ++++++
drivers/net/ehea/ehea_qmr.c | 582 ++++++++
drivers/net/ehea/ehea_qmr.h | 358 +++++
12 files changed, 5849 insertions(+)
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/ehea/ehea_qmr.h')
-rw-r--r-- | drivers/net/ehea/ehea_qmr.h | 358
1 files changed, 358 insertions, 0 deletions
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
new file mode 100644
index 000000000000..7efdc96919ca
--- /dev/null
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -0,0 +1,358 @@
/*
 *  linux/drivers/net/ehea/ehea_qmr.h
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef __EHEA_QMR_H__
#define __EHEA_QMR_H__

#include "ehea.h"
#include "ehea_hw.h"

/*
 * page size of ehea hardware queues
 */

#define EHEA_PAGESHIFT	12
#define EHEA_PAGESIZE	4096UL

/* Some abbreviations used here:
 *
 * WQE  - Work Queue Entry
 * SWQE - Send Work Queue Entry
 * RWQE - Receive Work Queue Entry
 * CQE  - Completion Queue Entry
 * EQE  - Event Queue Entry
 * MR   - Memory Region
 */

/* Use of WR_ID field for EHEA */
#define EHEA_WR_ID_COUNT	EHEA_BMASK_IBM(0, 19)
#define EHEA_WR_ID_TYPE		EHEA_BMASK_IBM(20, 23)
#define EHEA_SWQE2_TYPE		0x1
#define EHEA_SWQE3_TYPE		0x2
#define EHEA_RWQE2_TYPE		0x3
#define EHEA_RWQE3_TYPE		0x4
#define EHEA_WR_ID_INDEX	EHEA_BMASK_IBM(24, 47)
#define EHEA_WR_ID_REFILL	EHEA_BMASK_IBM(48, 63)

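/*
 * Note: the 64-bit wr_id stored in each WQE and echoed back in the
 * corresponding CQE is packed from the fields above (presumably via the
 * EHEA_BMASK_SET()/EHEA_BMASK_GET() helpers from ehea.h); the TYPE field
 * tells the completion handler which kind of WQE a completion refers to.
 */
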
struct ehea_vsgentry {
	u64 vaddr;
	u32 l_key;
	u32 len;
};

/* maximum number of sg entries allowed in a WQE */
#define EHEA_MAX_WQE_SG_ENTRIES		252
#define SWQE2_MAX_IMM			(0xD0 - 0x30)
#define SWQE3_MAX_IMM			224

/* tx control flags for swqe */
#define EHEA_SWQE_CRC			0x8000
#define EHEA_SWQE_IP_CHECKSUM		0x4000
#define EHEA_SWQE_TCP_CHECKSUM		0x2000
#define EHEA_SWQE_TSO			0x1000
#define EHEA_SWQE_SIGNALLED_COMPLETION	0x0800
#define EHEA_SWQE_VLAN_INSERT		0x0400
#define EHEA_SWQE_IMM_DATA_PRESENT	0x0200
#define EHEA_SWQE_DESCRIPTORS_PRESENT	0x0100
#define EHEA_SWQE_WRAP_CTL_REC		0x0080
#define EHEA_SWQE_WRAP_CTL_FORCE	0x0040
#define EHEA_SWQE_BIND			0x0020
#define EHEA_SWQE_PURGE			0x0010

/* sizeof(struct ehea_swqe) less the union */
#define SWQE_HEADER_SIZE		32

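/*
 * Send WQE: the fields before the union are common to all three send
 * formats; the union selects between scatter-gather descriptors only
 * (format 1), immediate data plus descriptors (format 2) and immediate
 * data only (format 3).
 */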
struct ehea_swqe {
	u64 wr_id;
	u16 tx_control;
	u16 vlan_tag;
	u8 reserved1;
	u8 ip_start;
	u8 ip_end;
	u8 immediate_data_length;
	u8 tcp_offset;
	u8 reserved2;
	u16 tcp_end;
	u8 wrap_tag;
	u8 descriptors;		/* number of valid descriptors in WQE */
	u16 reserved3;
	u16 reserved4;
	u16 mss;
	u32 reserved5;
	union {
		/*  Send WQE Format 1 */
		struct {
			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
		} no_immediate_data;

		/*  Send WQE Format 2 */
		struct {
			struct ehea_vsgentry sg_entry;
			/* 0x30 */
			u8 immediate_data[SWQE2_MAX_IMM];
			/* 0xd0 */
			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];
		} immdata_desc __attribute__ ((packed));

		/*  Send WQE Format 3 */
		struct {
			u8 immediate_data[SWQE3_MAX_IMM];
		} immdata_nodesc;
	} u;
};

struct ehea_rwqe {
	u64 wr_id;		/* work request ID */
	u8 reserved1[5];
	u8 data_segments;
	u16 reserved2;
	u64 reserved3;
	u64 reserved4;
	struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
};

#define EHEA_CQE_VLAN_TAG_XTRACT	0x0400

#define EHEA_CQE_TYPE_RQ		0x60
#define EHEA_CQE_STAT_ERR_MASK		0x721F
#define EHEA_CQE_STAT_FAT_ERR_MASK	0x1F
#define EHEA_CQE_STAT_ERR_TCP		0x4000

struct ehea_cqe {
	u64 wr_id;		/* work request ID from WQE */
	u8 type;
	u8 valid;
	u16 status;
	u16 reserved1;
	u16 num_bytes_transfered;
	u16 vlan_tag;
	u16 inet_checksum_value;
	u8 reserved2;
	u8 header_length;
	u16 reserved3;
	u16 page_offset;
	u16 wqe_count;
	u32 qp_token;
	u32 timestamp;
	u32 reserved4;
	u64 reserved5[3];
};

#define EHEA_EQE_VALID		EHEA_BMASK_IBM(0, 0)
#define EHEA_EQE_IS_CQE		EHEA_BMASK_IBM(1, 1)
#define EHEA_EQE_IDENTIFIER	EHEA_BMASK_IBM(2, 7)
#define EHEA_EQE_QP_CQ_NUMBER	EHEA_BMASK_IBM(8, 31)
#define EHEA_EQE_QP_TOKEN	EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_CQ_TOKEN	EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_KEY		EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_PORT_NUMBER	EHEA_BMASK_IBM(56, 63)
#define EHEA_EQE_EQ_NUMBER	EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_ID		EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_MECH_NUMBER	EHEA_BMASK_IBM(48, 55)
#define EHEA_EQE_SM_PORT_NUMBER	EHEA_BMASK_IBM(56, 63)

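/* An EQE is a single 64-bit word; the EHEA_EQE_* masks above decode it. */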
struct ehea_eqe {
	u64 entry;
};

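/*
 * Translate a byte offset into a hardware queue (which is built from
 * EHEA_PAGESIZE pages) into the virtual address of the corresponding
 * queue entry, wrapping around at the end of the queue.
 */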
static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
{
	struct ehea_page *current_page;

	if (q_offset >= queue->queue_length)
		q_offset -= queue->queue_length;
	current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];
	return &current_page->entries[q_offset & (EHEA_PAGESIZE - 1)];
}

static inline void *hw_qeit_get(struct hw_queue *queue)
{
	return hw_qeit_calc(queue, queue->current_q_offset);
}

static inline void hw_qeit_inc(struct hw_queue *queue)
{
	queue->current_q_offset += queue->qe_size;
	if (queue->current_q_offset >= queue->queue_length) {
		queue->current_q_offset = 0;
		/* toggle the valid flag */
		queue->toggle_state = (~queue->toggle_state) & 1;
	}
}

static inline void *hw_qeit_get_inc(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);
	hw_qeit_inc(queue);
	return retvalue;
}

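/*
 * Return the CQE at the current position and advance, but only if its
 * valid bit matches the queue's toggle state (i.e. the entry was written
 * by hardware during the current pass over the ring); the following
 * entry is prefetched.  Returns NULL when no new CQE is available.
 */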
static inline void *hw_qeit_get_inc_valid(struct hw_queue *queue)
{
	struct ehea_cqe *retvalue = hw_qeit_get(queue);
	u8 valid = retvalue->valid;
	void *pref;

	if ((valid >> 7) == (queue->toggle_state & 1)) {
		/* this is a good one */
		hw_qeit_inc(queue);
		pref = hw_qeit_calc(queue, queue->current_q_offset);
		prefetch(pref);
		prefetch(pref + 128);
	} else
		retvalue = NULL;
	return retvalue;
}

static inline void *hw_qeit_get_valid(struct hw_queue *queue)
{
	struct ehea_cqe *retvalue = hw_qeit_get(queue);
	void *pref;
	u8 valid;

	pref = hw_qeit_calc(queue, queue->current_q_offset);
	prefetch(pref);
	prefetch(pref + 128);
	prefetch(pref + 256);
	valid = retvalue->valid;
	if (!((valid >> 7) == (queue->toggle_state & 1)))
		retvalue = NULL;
	return retvalue;
}

static inline void *hw_qeit_reset(struct hw_queue *queue)
{
	queue->current_q_offset = 0;
	return hw_qeit_get(queue);
}

static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue)
{
	u64 last_entry_in_q = queue->queue_length - queue->qe_size;
	void *retvalue;

	retvalue = hw_qeit_get(queue);
	queue->current_q_offset += queue->qe_size;
	if (queue->current_q_offset > last_entry_in_q) {
		queue->current_q_offset = 0;
		queue->toggle_state = (~queue->toggle_state) & 1;
	}
	return retvalue;
}

static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);
	u32 qe = *(u8*)retvalue;
	if ((qe >> 7) == (queue->toggle_state & 1))
		hw_qeit_eq_get_inc(queue);
	else
		retvalue = NULL;
	return retvalue;
}

static inline struct ehea_rwqe *ehea_get_next_rwqe(struct ehea_qp *qp,
						   int rq_nr)
{
	struct hw_queue *queue;

	if (rq_nr == 1)
		queue = &qp->hw_rqueue1;
	else if (rq_nr == 2)
		queue = &qp->hw_rqueue2;
	else
		queue = &qp->hw_rqueue3;

	return hw_qeit_get_inc(queue);
}

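/*
 * The shift below converts the byte offset into the send queue into a
 * WQE index; a send WQE appears to occupy 2^(7 + EHEA_SG_SQ) bytes
 * (EHEA_SG_SQ is presumably defined in ehea.h).
 */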
static inline struct ehea_swqe *ehea_get_swqe(struct ehea_qp *my_qp,
					      int *wqe_index)
{
	struct hw_queue *queue = &my_qp->hw_squeue;
	struct ehea_swqe *wqe_p;

	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ);
	wqe_p = hw_qeit_get_inc(&my_qp->hw_squeue);

	return wqe_p;
}

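/*
 * iosync() orders the stores that filled in the SWQE before
 * ehea_update_sqa() (presumably a doorbell register write from ehea_hw.h)
 * tells the adapter that one more send WQE is available.
 */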
static inline void ehea_post_swqe(struct ehea_qp *my_qp, struct ehea_swqe *swqe)
{
	iosync();
	ehea_update_sqa(my_qp, 1);
}

static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index)
{
	struct hw_queue *queue = &qp->hw_rqueue1;

	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1);
	return hw_qeit_get_valid(queue);
}

static inline void ehea_inc_rq1(struct ehea_qp *qp)
{
	hw_qeit_inc(&qp->hw_rqueue1);
}

static inline struct ehea_cqe *ehea_poll_cq(struct ehea_cq *my_cq)
{
	return hw_qeit_get_inc_valid(&my_cq->hw_queue);
}

#define EHEA_CQ_REGISTER_ORIG 0
#define EHEA_EQ_REGISTER_ORIG 0

enum ehea_eq_type {
	EHEA_EQ = 0,		/* event queue */
	EHEA_NEQ		/* notification event queue */
};

struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
			       enum ehea_eq_type type,
			       const u32 length, const u8 eqe_gen);

int ehea_destroy_eq(struct ehea_eq *eq);

struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq);

struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe,
			       u64 eq_handle, u32 cq_token);

int ehea_destroy_cq(struct ehea_cq *cq);

struct ehea_qp *ehea_create_qp(struct ehea_adapter * adapter, u32 pd,
			       struct ehea_qp_init_attr *init_attr);

int ehea_destroy_qp(struct ehea_qp *qp);

int ehea_reg_mr_adapter(struct ehea_adapter *adapter);

#endif	/* __EHEA_QMR_H__ */
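
As a usage note (not part of the patch): a minimal sketch of how a caller such as ehea_main.c might drain completions with the ehea_poll_cq() helper declared above. The function example_drain_cq() and its error-handling stub are illustrative only.

#include "ehea.h"
#include "ehea_qmr.h"

/* Illustrative sketch: drain all currently valid CQEs from a completion
 * queue.  ehea_poll_cq() returns NULL once the next entry's valid bit no
 * longer matches the queue's toggle state.
 */
static int example_drain_cq(struct ehea_cq *cq)
{
	struct ehea_cqe *cqe;
	int processed = 0;

	while ((cqe = ehea_poll_cq(cq)) != NULL) {
		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
			/* a real driver would inspect the error bits here */
		}
		processed++;
	}

	return processed;
}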