aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ehea/ehea_hw.h
diff options
context:
space:
mode:
authorJan-Bernd Themann <ossthema@de.ibm.com>2006-09-13 11:44:31 -0400
committerJeff Garzik <jeff@garzik.org>2006-09-13 13:23:52 -0400
commit7a291083225af6e22ffaa46b3d91cfc1a1ccaab4 (patch)
treec87a93ee7d5c1c63ce98dc90a62cd0b4dfc4318f /drivers/net/ehea/ehea_hw.h
parent7de745e56244156233e5cdd62b462e52e638d408 (diff)
[PATCH] ehea: IBM eHEA Ethernet Device Driver
Hi Jeff,

I fixed the __iomem issue and tested the driver with sparse. Looks good
so far. Thanks for your effort.

Jan-Bernd Themann

Signed-off-by: Jan-Bernd Themann <themann@de.ibm.com>

 drivers/net/Kconfig             |    9
 drivers/net/Makefile            |    1
 drivers/net/ehea/Makefile       |    6
 drivers/net/ehea/ehea.h         |  447 ++++++
 drivers/net/ehea/ehea_ethtool.c |  294 ++++
 drivers/net/ehea/ehea_hcall.h   |   51
 drivers/net/ehea/ehea_hw.h      |  287 ++++
 drivers/net/ehea/ehea_main.c    | 2654 ++++++++++++++++++++++++++++++++++++++++
 drivers/net/ehea/ehea_phyp.c    |  705 ++++++++++
 drivers/net/ehea/ehea_phyp.h    |  455 ++++++
 drivers/net/ehea/ehea_qmr.c     |  582 ++++++++
 drivers/net/ehea/ehea_qmr.h     |  358 +++++
 12 files changed, 5849 insertions(+)

Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/ehea/ehea_hw.h')
-rw-r--r--  drivers/net/ehea/ehea_hw.h  287
1 files changed, 287 insertions, 0 deletions
diff --git a/drivers/net/ehea/ehea_hw.h b/drivers/net/ehea/ehea_hw.h
new file mode 100644
index 000000000000..e3a7d07f88cc
--- /dev/null
+++ b/drivers/net/ehea/ehea_hw.h
@@ -0,0 +1,287 @@
1/*
2 * linux/drivers/net/ehea/ehea_hw.h
3 *
4 * eHEA ethernet device driver for IBM eServer System p
5 *
6 * (C) Copyright IBM Corp. 2006
7 *
8 * Authors:
9 * Christoph Raisch <raisch@de.ibm.com>
10 * Jan-Bernd Themann <themann@de.ibm.com>
11 * Thomas Klein <tklein@de.ibm.com>
12 *
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29#ifndef __EHEA_HW_H__
30#define __EHEA_HW_H__
31
/*
 * Doorbell value masks: the count of newly posted WQEs is placed in
 * bits 48..63 (IBM bit numbering) of the qpx_sqa/qpx_rq[123]a
 * registers by the ehea_update_*() helpers below.  All four masks
 * cover the same bit range.
 */
#define QPX_SQA_VALUE   EHEA_BMASK_IBM(48,63)
#define QPX_RQ1A_VALUE  EHEA_BMASK_IBM(48,63)
#define QPX_RQ2A_VALUE  EHEA_BMASK_IBM(48,63)
#define QPX_RQ3A_VALUE  EHEA_BMASK_IBM(48,63)

/* Byte offset of a QP register within the QP register page */
#define QPTEMM_OFFSET(x) offsetof(struct ehea_qptemm, x)

/*
 * Queue-pair register map.  This struct mirrors the layout of a
 * memory-mapped register area; registers are accessed through
 * epa_load()/epa_store() at offsetof() positions (see QPTEMM_OFFSET).
 * The reservedN arrays pad the following field out to its hardware
 * byte offset (given as the first hex constant in the array bound);
 * total size is 0x1000 bytes (one 4 KB page).
 */
struct ehea_qptemm {
	u64 qpx_hcr;
	u64 qpx_c;
	u64 qpx_herr;
	u64 qpx_aer;
	u64 qpx_sqa;		/* written by ehea_update_sqa() */
	u64 qpx_sqc;
	u64 qpx_rq1a;		/* written by ehea_update_rq1a() */
	u64 qpx_rq1c;
	u64 qpx_st;
	u64 qpx_aerr;
	u64 qpx_tenure;
	u64 qpx_reserved1[(0x098 - 0x058) / 8];	/* pad to 0x098 */
	u64 qpx_portp;
	u64 qpx_reserved2[(0x100 - 0x0A0) / 8];	/* pad to 0x100 */
	u64 qpx_t;
	u64 qpx_sqhp;
	u64 qpx_sqptp;
	u64 qpx_reserved3[(0x140 - 0x118) / 8];	/* pad to 0x140 */
	u64 qpx_sqwsize;
	u64 qpx_reserved4[(0x170 - 0x148) / 8];	/* pad to 0x170 */
	u64 qpx_sqsize;
	u64 qpx_reserved5[(0x1B0 - 0x178) / 8];	/* pad to 0x1B0 */
	u64 qpx_sigt;
	u64 qpx_wqecnt;
	u64 qpx_rq1hp;
	u64 qpx_rq1ptp;
	u64 qpx_rq1size;
	u64 qpx_reserved6[(0x220 - 0x1D8) / 8];	/* pad to 0x220 */
	u64 qpx_rq1wsize;
	u64 qpx_reserved7[(0x240 - 0x228) / 8];	/* pad to 0x240 */
	u64 qpx_pd;
	u64 qpx_scqn;
	u64 qpx_rcqn;
	u64 qpx_aeqn;
	u64 reserved49;
	u64 qpx_ram;
	u64 qpx_reserved8[(0x300 - 0x270) / 8];	/* pad to 0x300 */
	u64 qpx_rq2a;		/* written by ehea_update_rq2a() */
	u64 qpx_rq2c;
	u64 qpx_rq2hp;
	u64 qpx_rq2ptp;
	u64 qpx_rq2size;
	u64 qpx_rq2wsize;
	u64 qpx_rq2th;
	u64 qpx_rq3a;		/* written by ehea_update_rq3a() */
	u64 qpx_rq3c;
	u64 qpx_rq3hp;
	u64 qpx_rq3ptp;
	u64 qpx_rq3size;
	u64 qpx_rq3wsize;
	u64 qpx_rq3th;
	u64 qpx_lpn;
	u64 qpx_reserved9[(0x400 - 0x378) / 8];	/* pad to 0x400 */
	u64 reserved_ext[(0x500 - 0x400) / 8];	/* pad to 0x500 */
	u64 reserved2[(0x1000 - 0x500) / 8];	/* pad to 0x1000 */
};
96
#define MRx_HCR_LPARID_VALID EHEA_BMASK_IBM(0, 0)

/* Byte offset of a register within the MR/MW register page */
#define MRMWMM_OFFSET(x) offsetof(struct ehea_mrmwmm, x)

/*
 * Register map accessed via epa_load_mrmw()/epa_store_mrmw().
 * NOTE(review): the "mrmw" name suggests memory-region/memory-window
 * registers — not verifiable from this header alone.
 */
struct ehea_mrmwmm {
	u64 mrx_hcr;
	u64 mrx_c;
	u64 mrx_herr;
	u64 mrx_aer;
	u64 mrx_pp;
	u64 reserved1;
	u64 reserved2;
	u64 reserved3;
	u64 reserved4[(0x200 - 0x40) / 8];	/* pad to 0x200 */
	u64 mrx_ctl[64];
};
113
/* Byte offset of a register within the extended QP register page */
#define QPEDMM_OFFSET(x) offsetof(struct ehea_qpedmm, x)

/*
 * Register map accessed via epa_load_qped()/epa_store_qped().
 * The first 0x400 bytes are reserved; qpedx_rrl/rrrkey/rrva repeat
 * as four numbered groups of three registers (plus padding).
 */
struct ehea_qpedmm {

	u64 reserved0[(0x400) / 8];	/* registers start at 0x400 */
	u64 qpedx_phh;
	u64 qpedx_ppsgp;
	u64 qpedx_ppsgu;
	u64 qpedx_ppdgp;
	u64 qpedx_ppdgu;
	u64 qpedx_aph;
	u64 qpedx_apsgp;
	u64 qpedx_apsgu;
	u64 qpedx_apdgp;
	u64 qpedx_apdgu;
	u64 qpedx_apav;
	u64 qpedx_apsav;
	u64 qpedx_hcr;
	u64 reserved1[4];
	u64 qpedx_rrl0;
	u64 qpedx_rrrkey0;
	u64 qpedx_rrva0;
	u64 reserved2;
	u64 qpedx_rrl1;
	u64 qpedx_rrrkey1;
	u64 qpedx_rrva1;
	u64 reserved3;
	u64 qpedx_rrl2;
	u64 qpedx_rrrkey2;
	u64 qpedx_rrva2;
	u64 reserved4;
	u64 qpedx_rrl3;
	u64 qpedx_rrrkey3;
	u64 qpedx_rrva3;
};
149
/* cqx_feca: count added by ehea_update_feca() lives in bits 32..63 */
#define CQX_FECADDER EHEA_BMASK_IBM(32, 63)
#define CQX_FEC_CQE_CNT EHEA_BMASK_IBM(32, 63)
/* cqx_n1: set to 1 by ehea_reset_cq_n1() */
#define CQX_N1_GENERATE_COMP_EVENT EHEA_BMASK_IBM(0, 0)
/* cqx_ep: cleared to 0 by ehea_reset_cq_ep() */
#define CQX_EP_EVENT_PENDING EHEA_BMASK_IBM(0, 0)

/* Byte offset of a CQ register within the CQ register page */
#define CQTEMM_OFFSET(x) offsetof(struct ehea_cqtemm, x)

/*
 * Completion-queue register map, accessed via epa_load_cq()/
 * epa_store_cq(); total size 0x1000 bytes.
 */
struct ehea_cqtemm {
	u64 cqx_hcr;
	u64 cqx_c;
	u64 cqx_herr;
	u64 cqx_aer;
	u64 cqx_ptp;
	u64 cqx_tp;
	u64 cqx_fec;
	u64 cqx_feca;
	u64 cqx_ep;
	u64 cqx_eq;
	u64 reserved1;
	u64 cqx_n0;
	u64 cqx_n1;
	u64 reserved2[(0x1000 - 0x60) / 8];	/* pad to 0x1000 */
};
173
/* Byte offset of an EQ register within the EQ register page */
#define EQTEMM_OFFSET(x) offsetof(struct ehea_eqtemm, x)

/*
 * Event-queue register map, accessed via epa_load_eq()/epa_store_eq().
 */
struct ehea_eqtemm {
	u64 eqx_hcr;
	u64 eqx_c;
	u64 eqx_herr;
	u64 eqx_aer;
	u64 eqx_ptp;
	u64 eqx_tp;
	u64 eqx_ssba;
	u64 eqx_psba;
	u64 eqx_cec;
	u64 eqx_meql;
	u64 eqx_xisbi;
	u64 eqx_xisc;
	u64 eqx_it;
};
191
/* Read the 64-bit register at byte @offset in the mapped page @epa */
static inline u64 epa_load(struct h_epa epa, u32 offset)
{
	return readq((void __iomem *)(epa.addr + offset));
}
196
/*
 * Write the 64-bit register at byte @offset in the mapped page @epa,
 * then read it back so the write is flushed to the adapter before we
 * return.
 */
static inline void epa_store(struct h_epa epa, u32 offset, u64 value)
{
	writeq(value, (void __iomem *)(epa.addr + offset));
	epa_load(epa, offset);	/* synchronize explicitly to eHEA */
}
202
/*
 * Write the register at byte @offset in page @epa without the
 * read-back that epa_store() performs (no explicit flush).
 */
static inline void epa_store_acc(struct h_epa epa, u32 offset, u64 value)
{
	writeq(value, (void __iomem *)(epa.addr + offset));
}
207
/*
 * Named-register accessors: map a register name in the matching
 * register-map struct (eq/cq/qp/qped/mrmw) to its byte offset and
 * load/store it through epa_load()/epa_store().
 */
#define epa_store_eq(epa, offset, value)\
	epa_store(epa, EQTEMM_OFFSET(offset), value)
#define epa_load_eq(epa, offset)\
	epa_load(epa, EQTEMM_OFFSET(offset))

#define epa_store_cq(epa, offset, value)\
	epa_store(epa, CQTEMM_OFFSET(offset), value)
#define epa_load_cq(epa, offset)\
	epa_load(epa, CQTEMM_OFFSET(offset))

#define epa_store_qp(epa, offset, value)\
	epa_store(epa, QPTEMM_OFFSET(offset), value)
#define epa_load_qp(epa, offset)\
	epa_load(epa, QPTEMM_OFFSET(offset))

#define epa_store_qped(epa, offset, value)\
	epa_store(epa, QPEDMM_OFFSET(offset), value)
#define epa_load_qped(epa, offset)\
	epa_load(epa, QPEDMM_OFFSET(offset))

#define epa_store_mrmw(epa, offset, value)\
	epa_store(epa, MRMWMM_OFFSET(offset), value)
#define epa_load_mrmw(epa, offset)\
	epa_load(epa, MRMWMM_OFFSET(offset))

/* NOTE(review): HCAGR_OFFSET is not defined in this header. */
#define epa_store_base(epa, offset, value)\
	epa_store(epa, HCAGR_OFFSET(offset), value)
#define epa_load_base(epa, offset)\
	epa_load(epa, HCAGR_OFFSET(offset))
237
238static inline void ehea_update_sqa(struct ehea_qp *qp, u16 nr_wqes)
239{
240 struct h_epa epa = qp->epas.kernel;
241 epa_store_acc(epa, QPTEMM_OFFSET(qpx_sqa),
242 EHEA_BMASK_SET(QPX_SQA_VALUE, nr_wqes));
243}
244
245static inline void ehea_update_rq3a(struct ehea_qp *qp, u16 nr_wqes)
246{
247 struct h_epa epa = qp->epas.kernel;
248 epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq3a),
249 EHEA_BMASK_SET(QPX_RQ1A_VALUE, nr_wqes));
250}
251
252static inline void ehea_update_rq2a(struct ehea_qp *qp, u16 nr_wqes)
253{
254 struct h_epa epa = qp->epas.kernel;
255 epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq2a),
256 EHEA_BMASK_SET(QPX_RQ2A_VALUE, nr_wqes));
257}
258
259static inline void ehea_update_rq1a(struct ehea_qp *qp, u16 nr_wqes)
260{
261 struct h_epa epa = qp->epas.kernel;
262 epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq1a),
263 EHEA_BMASK_SET(QPX_RQ3A_VALUE, nr_wqes));
264}
265
266static inline void ehea_update_feca(struct ehea_cq *cq, u32 nr_cqes)
267{
268 struct h_epa epa = cq->epas.kernel;
269 epa_store_acc(epa, CQTEMM_OFFSET(cqx_feca),
270 EHEA_BMASK_SET(CQX_FECADDER, nr_cqes));
271}
272
273static inline void ehea_reset_cq_n1(struct ehea_cq *cq)
274{
275 struct h_epa epa = cq->epas.kernel;
276 epa_store_cq(epa, cqx_n1,
277 EHEA_BMASK_SET(CQX_N1_GENERATE_COMP_EVENT, 1));
278}
279
280static inline void ehea_reset_cq_ep(struct ehea_cq *my_cq)
281{
282 struct h_epa epa = my_cq->epas.kernel;
283 epa_store_acc(epa, CQTEMM_OFFSET(cqx_ep),
284 EHEA_BMASK_SET(CQX_EP_EVENT_PENDING, 0));
285}
286
287#endif /* __EHEA_HW_H__ */