Diffstat (limited to 'drivers/infiniband/hw/ipath')
34 files changed, 2507 insertions, 2319 deletions
diff --git a/drivers/infiniband/hw/ipath/Kconfig b/drivers/infiniband/hw/ipath/Kconfig
index 1db9489f1e82..574a678e7fdd 100644
--- a/drivers/infiniband/hw/ipath/Kconfig
+++ b/drivers/infiniband/hw/ipath/Kconfig
@@ -1,16 +1,9 @@ | |||
1 | config IPATH_CORE | ||
2 | tristate "QLogic InfiniPath Driver" | ||
3 | depends on 64BIT && PCI_MSI && NET | ||
4 | ---help--- | ||
5 | This is a low-level driver for QLogic InfiniPath host channel | ||
6 | adapters (HCAs) based on the HT-400 and PE-800 chips. | ||
7 | |||
8 | config INFINIBAND_IPATH | 1 | config INFINIBAND_IPATH |
9 | tristate "QLogic InfiniPath Verbs Driver" | 2 | tristate "QLogic InfiniPath Driver" |
10 | depends on IPATH_CORE && INFINIBAND | 3 | depends on PCI_MSI && 64BIT && INFINIBAND |
11 | ---help--- | 4 | ---help--- |
12 | This is a driver that provides InfiniBand verbs support for | 5 | This is a driver for QLogic InfiniPath host channel adapters, |
13 | QLogic InfiniPath host channel adapters (HCAs). This | 6 | including InfiniBand verbs support. This driver allows these |
14 | allows these devices to be used with both kernel upper level | 7 | devices to be used with both kernel upper level protocols such |
15 | protocols such as IP-over-InfiniBand as well as with userspace | 8 | as IP-over-InfiniBand as well as with userspace applications |
16 | applications (in conjunction with InfiniBand userspace access). | 9 | (in conjunction with InfiniBand userspace access). |
diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/infiniband/hw/ipath/Makefile
index b0bf72864130..5e29cb0095e5 100644
--- a/drivers/infiniband/hw/ipath/Makefile
+++ b/drivers/infiniband/hw/ipath/Makefile
@@ -1,36 +1,35 @@ | |||
1 | EXTRA_CFLAGS += -DIPATH_IDSTR='"QLogic kernel.org driver"' \ | 1 | EXTRA_CFLAGS += -DIPATH_IDSTR='"QLogic kernel.org driver"' \ |
2 | -DIPATH_KERN_TYPE=0 | 2 | -DIPATH_KERN_TYPE=0 |
3 | 3 | ||
4 | obj-$(CONFIG_IPATH_CORE) += ipath_core.o | ||
5 | obj-$(CONFIG_INFINIBAND_IPATH) += ib_ipath.o | 4 | obj-$(CONFIG_INFINIBAND_IPATH) += ib_ipath.o |
6 | 5 | ||
7 | ipath_core-y := \ | 6 | ib_ipath-y := \ |
7 | ipath_cq.o \ | ||
8 | ipath_diag.o \ | 8 | ipath_diag.o \ |
9 | ipath_driver.o \ | 9 | ipath_driver.o \ |
10 | ipath_eeprom.o \ | 10 | ipath_eeprom.o \ |
11 | ipath_file_ops.o \ | 11 | ipath_file_ops.o \ |
12 | ipath_fs.o \ | 12 | ipath_fs.o \ |
13 | ipath_ht400.o \ | 13 | ipath_iba6110.o \ |
14 | ipath_iba6120.o \ | ||
14 | ipath_init_chip.o \ | 15 | ipath_init_chip.o \ |
15 | ipath_intr.o \ | 16 | ipath_intr.o \ |
16 | ipath_layer.o \ | ||
17 | ipath_pe800.o \ | ||
18 | ipath_stats.o \ | ||
19 | ipath_sysfs.o \ | ||
20 | ipath_user_pages.o | ||
21 | |||
22 | ipath_core-$(CONFIG_X86_64) += ipath_wc_x86_64.o | ||
23 | |||
24 | ib_ipath-y := \ | ||
25 | ipath_cq.o \ | ||
26 | ipath_keys.o \ | 17 | ipath_keys.o \ |
18 | ipath_layer.o \ | ||
27 | ipath_mad.o \ | 19 | ipath_mad.o \ |
20 | ipath_mmap.o \ | ||
28 | ipath_mr.o \ | 21 | ipath_mr.o \ |
29 | ipath_qp.o \ | 22 | ipath_qp.o \ |
30 | ipath_rc.o \ | 23 | ipath_rc.o \ |
31 | ipath_ruc.o \ | 24 | ipath_ruc.o \ |
32 | ipath_srq.o \ | 25 | ipath_srq.o \ |
26 | ipath_stats.o \ | ||
27 | ipath_sysfs.o \ | ||
33 | ipath_uc.o \ | 28 | ipath_uc.o \ |
34 | ipath_ud.o \ | 29 | ipath_ud.o \ |
35 | ipath_verbs.o \ | 30 | ipath_user_pages.o \ |
36 | ipath_verbs_mcast.o | 31 | ipath_verbs_mcast.o \ |
32 | ipath_verbs.o | ||
33 | |||
34 | ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o | ||
35 | ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o | ||
diff --git a/drivers/infiniband/hw/ipath/ipath_common.h b/drivers/infiniband/hw/ipath/ipath_common.h
index 062bd392e7e5..f577905e3aca 100644
--- a/drivers/infiniband/hw/ipath/ipath_common.h
+++ b/drivers/infiniband/hw/ipath/ipath_common.h
@@ -106,9 +106,9 @@ struct infinipath_stats { | |||
106 | __u64 sps_ether_spkts; | 106 | __u64 sps_ether_spkts; |
107 | /* number of "ethernet" packets received by driver */ | 107 | /* number of "ethernet" packets received by driver */ |
108 | __u64 sps_ether_rpkts; | 108 | __u64 sps_ether_rpkts; |
109 | /* number of SMA packets sent by driver */ | 109 | /* number of SMA packets sent by driver. Obsolete. */ |
110 | __u64 sps_sma_spkts; | 110 | __u64 sps_sma_spkts; |
111 | /* number of SMA packets received by driver */ | 111 | /* number of SMA packets received by driver. Obsolete. */ |
112 | __u64 sps_sma_rpkts; | 112 | __u64 sps_sma_rpkts; |
113 | /* number of times all ports rcvhdrq was full and packet dropped */ | 113 | /* number of times all ports rcvhdrq was full and packet dropped */ |
114 | __u64 sps_hdrqfull; | 114 | __u64 sps_hdrqfull; |
@@ -138,7 +138,7 @@ struct infinipath_stats { | |||
138 | __u64 sps_pageunlocks; | 138 | __u64 sps_pageunlocks; |
139 | /* | 139 | /* |
140 | * Number of packets dropped in kernel other than errors (ether | 140 | * Number of packets dropped in kernel other than errors (ether |
141 | * packets if ipath not configured, sma/mad, etc.) | 141 | * packets if ipath not configured, etc.) |
142 | */ | 142 | */ |
143 | __u64 sps_krdrops; | 143 | __u64 sps_krdrops; |
144 | /* pad for future growth */ | 144 | /* pad for future growth */ |
@@ -153,8 +153,6 @@ struct infinipath_stats { | |||
153 | #define IPATH_STATUS_DISABLED 0x2 /* hardware disabled */ | 153 | #define IPATH_STATUS_DISABLED 0x2 /* hardware disabled */ |
154 | /* Device has been disabled via admin request */ | 154 | /* Device has been disabled via admin request */ |
155 | #define IPATH_STATUS_ADMIN_DISABLED 0x4 | 155 | #define IPATH_STATUS_ADMIN_DISABLED 0x4 |
156 | #define IPATH_STATUS_OIB_SMA 0x8 /* ipath_mad kernel SMA running */ | ||
157 | #define IPATH_STATUS_SMA 0x10 /* user SMA running */ | ||
158 | /* Chip has been found and initted */ | 156 | /* Chip has been found and initted */ |
159 | #define IPATH_STATUS_CHIP_PRESENT 0x20 | 157 | #define IPATH_STATUS_CHIP_PRESENT 0x20 |
160 | /* IB link is at ACTIVE, usable for data traffic */ | 158 | /* IB link is at ACTIVE, usable for data traffic */ |
@@ -465,12 +463,11 @@ struct __ipath_sendpkt { | |||
465 | struct ipath_iovec sps_iov[4]; | 463 | struct ipath_iovec sps_iov[4]; |
466 | }; | 464 | }; |
467 | 465 | ||
468 | /* Passed into SMA special file's ->read and ->write methods. */ | 466 | /* Passed into diag data special file's ->write method. */ |
469 | struct ipath_sma_pkt | 467 | struct ipath_diag_pkt { |
470 | { | 468 | __u32 unit; |
471 | __u32 unit; /* unit on which to send packet */ | 469 | __u64 data; |
472 | __u64 data; /* address of payload in userspace */ | 470 | __u32 len; |
473 | __u32 len; /* length of payload */ | ||
474 | }; | 471 | }; |
475 | 472 | ||
476 | /* | 473 | /* |
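For context, a hedged userspace sketch (not part of this patch) of how a diagnostic tool might drive the new interface: the device node name /dev/ipath_diagpkt, the unit number, and the payload contents are illustrative assumptions; only the structure layout comes from the header above.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* must match the kernel's struct ipath_diag_pkt above */
struct ipath_diag_pkt {
        uint32_t unit;          /* which InfiniPath unit to send on */
        uint64_t data;          /* user address of the payload */
        uint32_t len;           /* payload length in bytes, multiple of 4 */
};

int main(void)
{
        uint32_t payload[16];   /* dword-sized payload; fill with a real packet */
        struct ipath_diag_pkt dp;
        int fd;

        memset(payload, 0, sizeof(payload));
        dp.unit = 0;                                   /* assumed unit number */
        dp.data = (uint64_t)(unsigned long)payload;
        dp.len = sizeof(payload);

        fd = open("/dev/ipath_diagpkt", O_WRONLY);     /* assumed node name */
        if (fd < 0 || write(fd, &dp, sizeof(dp)) != (ssize_t)sizeof(dp))
                perror("ipath_diagpkt");
        if (fd >= 0)
                close(fd);
        return 0;
}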
diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c
index 3efee341c9bc..049221bc590e 100644
--- a/drivers/infiniband/hw/ipath/ipath_cq.c
+++ b/drivers/infiniband/hw/ipath/ipath_cq.c
@@ -42,20 +42,28 @@ | |||
42 | * @entry: work completion entry to add | 42 | * @entry: work completion entry to add |
43 | * @sig: true if @entry is a solicited entry | 43 | * @sig: true if @entry is a solicited entry |
44 | * | 44 | * |
45 | * This may be called with one of the qp->s_lock or qp->r_rq.lock held. | 45 | * This may be called with qp->s_lock held. |
46 | */ | 46 | */ |
47 | void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited) | 47 | void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited) |
48 | { | 48 | { |
49 | struct ipath_cq_wc *wc = cq->queue; | ||
49 | unsigned long flags; | 50 | unsigned long flags; |
51 | u32 head; | ||
50 | u32 next; | 52 | u32 next; |
51 | 53 | ||
52 | spin_lock_irqsave(&cq->lock, flags); | 54 | spin_lock_irqsave(&cq->lock, flags); |
53 | 55 | ||
54 | if (cq->head == cq->ibcq.cqe) | 56 | /* |
57 | * Note that the head pointer might be writable by user processes. | ||
58 | * Take care to verify it is a sane value. | ||
59 | */ | ||
60 | head = wc->head; | ||
61 | if (head >= (unsigned) cq->ibcq.cqe) { | ||
62 | head = cq->ibcq.cqe; | ||
55 | next = 0; | 63 | next = 0; |
56 | else | 64 | } else |
57 | next = cq->head + 1; | 65 | next = head + 1; |
58 | if (unlikely(next == cq->tail)) { | 66 | if (unlikely(next == wc->tail)) { |
59 | spin_unlock_irqrestore(&cq->lock, flags); | 67 | spin_unlock_irqrestore(&cq->lock, flags); |
60 | if (cq->ibcq.event_handler) { | 68 | if (cq->ibcq.event_handler) { |
61 | struct ib_event ev; | 69 | struct ib_event ev; |
@@ -67,8 +75,8 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited) | |||
67 | } | 75 | } |
68 | return; | 76 | return; |
69 | } | 77 | } |
70 | cq->queue[cq->head] = *entry; | 78 | wc->queue[head] = *entry; |
71 | cq->head = next; | 79 | wc->head = next; |
72 | 80 | ||
73 | if (cq->notify == IB_CQ_NEXT_COMP || | 81 | if (cq->notify == IB_CQ_NEXT_COMP || |
74 | (cq->notify == IB_CQ_SOLICITED && solicited)) { | 82 | (cq->notify == IB_CQ_SOLICITED && solicited)) { |
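The point of the new head handling is that once the completion queue can be mapped into user space, the driver can no longer trust the head index it reads back. A minimal sketch of that clamping pattern, using placeholder types rather than the driver's structures:

#include <stdint.h>

struct wc_entry {                        /* stands in for struct ib_wc */
        uint64_t wr_id;
        uint32_t status;
};

struct shared_ring {
        uint32_t head;                   /* may be overwritten by user space */
        uint32_t tail;                   /* may be overwritten by user space */
        struct wc_entry queue[];         /* cqe + 1 slots */
};

/* Returns 0 on success, -1 if the ring is full (caller reports overflow). */
static int ring_insert(struct shared_ring *r, uint32_t cqe,
                       const struct wc_entry *entry)
{
        uint32_t head = r->head;
        uint32_t next;

        if (head > cqe)                  /* clamp an insane user-written index */
                head = cqe;
        next = (head == cqe) ? 0 : head + 1;
        if (next == r->tail)
                return -1;
        r->queue[head] = *entry;
        r->head = next;
        return 0;
}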
@@ -101,19 +109,20 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited) | |||
101 | int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) | 109 | int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) |
102 | { | 110 | { |
103 | struct ipath_cq *cq = to_icq(ibcq); | 111 | struct ipath_cq *cq = to_icq(ibcq); |
112 | struct ipath_cq_wc *wc = cq->queue; | ||
104 | unsigned long flags; | 113 | unsigned long flags; |
105 | int npolled; | 114 | int npolled; |
106 | 115 | ||
107 | spin_lock_irqsave(&cq->lock, flags); | 116 | spin_lock_irqsave(&cq->lock, flags); |
108 | 117 | ||
109 | for (npolled = 0; npolled < num_entries; ++npolled, ++entry) { | 118 | for (npolled = 0; npolled < num_entries; ++npolled, ++entry) { |
110 | if (cq->tail == cq->head) | 119 | if (wc->tail == wc->head) |
111 | break; | 120 | break; |
112 | *entry = cq->queue[cq->tail]; | 121 | *entry = wc->queue[wc->tail]; |
113 | if (cq->tail == cq->ibcq.cqe) | 122 | if (wc->tail >= cq->ibcq.cqe) |
114 | cq->tail = 0; | 123 | wc->tail = 0; |
115 | else | 124 | else |
116 | cq->tail++; | 125 | wc->tail++; |
117 | } | 126 | } |
118 | 127 | ||
119 | spin_unlock_irqrestore(&cq->lock, flags); | 128 | spin_unlock_irqrestore(&cq->lock, flags); |
@@ -160,38 +169,74 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, | |||
160 | { | 169 | { |
161 | struct ipath_ibdev *dev = to_idev(ibdev); | 170 | struct ipath_ibdev *dev = to_idev(ibdev); |
162 | struct ipath_cq *cq; | 171 | struct ipath_cq *cq; |
163 | struct ib_wc *wc; | 172 | struct ipath_cq_wc *wc; |
164 | struct ib_cq *ret; | 173 | struct ib_cq *ret; |
165 | 174 | ||
166 | if (entries > ib_ipath_max_cqes) { | 175 | if (entries < 1 || entries > ib_ipath_max_cqes) { |
167 | ret = ERR_PTR(-EINVAL); | 176 | ret = ERR_PTR(-EINVAL); |
168 | goto bail; | 177 | goto done; |
169 | } | 178 | } |
170 | 179 | ||
171 | if (dev->n_cqs_allocated == ib_ipath_max_cqs) { | 180 | if (dev->n_cqs_allocated == ib_ipath_max_cqs) { |
172 | ret = ERR_PTR(-ENOMEM); | 181 | ret = ERR_PTR(-ENOMEM); |
173 | goto bail; | 182 | goto done; |
174 | } | 183 | } |
175 | 184 | ||
176 | /* | 185 | /* Allocate the completion queue structure. */ |
177 | * Need to use vmalloc() if we want to support large #s of | ||
178 | * entries. | ||
179 | */ | ||
180 | cq = kmalloc(sizeof(*cq), GFP_KERNEL); | 186 | cq = kmalloc(sizeof(*cq), GFP_KERNEL); |
181 | if (!cq) { | 187 | if (!cq) { |
182 | ret = ERR_PTR(-ENOMEM); | 188 | ret = ERR_PTR(-ENOMEM); |
183 | goto bail; | 189 | goto done; |
184 | } | 190 | } |
185 | 191 | ||
186 | /* | 192 | /* |
187 | * Need to use vmalloc() if we want to support large #s of entries. | 193 | * Allocate the completion queue entries and head/tail pointers. |
194 | * This is allocated separately so that it can be resized and | ||
195 | * also mapped into user space. | ||
196 | * We need to use vmalloc() in order to support mmap and large | ||
197 | * numbers of entries. | ||
188 | */ | 198 | */ |
189 | wc = vmalloc(sizeof(*wc) * (entries + 1)); | 199 | wc = vmalloc_user(sizeof(*wc) + sizeof(struct ib_wc) * entries); |
190 | if (!wc) { | 200 | if (!wc) { |
191 | kfree(cq); | ||
192 | ret = ERR_PTR(-ENOMEM); | 201 | ret = ERR_PTR(-ENOMEM); |
193 | goto bail; | 202 | goto bail_cq; |
194 | } | 203 | } |
204 | |||
205 | /* | ||
206 | * Return the address of the WC as the offset to mmap. | ||
207 | * See ipath_mmap() for details. | ||
208 | */ | ||
209 | if (udata && udata->outlen >= sizeof(__u64)) { | ||
210 | struct ipath_mmap_info *ip; | ||
211 | __u64 offset = (__u64) wc; | ||
212 | int err; | ||
213 | |||
214 | err = ib_copy_to_udata(udata, &offset, sizeof(offset)); | ||
215 | if (err) { | ||
216 | ret = ERR_PTR(err); | ||
217 | goto bail_wc; | ||
218 | } | ||
219 | |||
220 | /* Allocate info for ipath_mmap(). */ | ||
221 | ip = kmalloc(sizeof(*ip), GFP_KERNEL); | ||
222 | if (!ip) { | ||
223 | ret = ERR_PTR(-ENOMEM); | ||
224 | goto bail_wc; | ||
225 | } | ||
226 | cq->ip = ip; | ||
227 | ip->context = context; | ||
228 | ip->obj = wc; | ||
229 | kref_init(&ip->ref); | ||
230 | ip->mmap_cnt = 0; | ||
231 | ip->size = PAGE_ALIGN(sizeof(*wc) + | ||
232 | sizeof(struct ib_wc) * entries); | ||
233 | spin_lock_irq(&dev->pending_lock); | ||
234 | ip->next = dev->pending_mmaps; | ||
235 | dev->pending_mmaps = ip; | ||
236 | spin_unlock_irq(&dev->pending_lock); | ||
237 | } else | ||
238 | cq->ip = NULL; | ||
239 | |||
195 | /* | 240 | /* |
196 | * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe. | 241 | * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe. |
197 | * The number of entries should be >= the number requested or return | 242 | * The number of entries should be >= the number requested or return |
@@ -202,15 +247,22 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, | |||
202 | cq->triggered = 0; | 247 | cq->triggered = 0; |
203 | spin_lock_init(&cq->lock); | 248 | spin_lock_init(&cq->lock); |
204 | tasklet_init(&cq->comptask, send_complete, (unsigned long)cq); | 249 | tasklet_init(&cq->comptask, send_complete, (unsigned long)cq); |
205 | cq->head = 0; | 250 | wc->head = 0; |
206 | cq->tail = 0; | 251 | wc->tail = 0; |
207 | cq->queue = wc; | 252 | cq->queue = wc; |
208 | 253 | ||
209 | ret = &cq->ibcq; | 254 | ret = &cq->ibcq; |
210 | 255 | ||
211 | dev->n_cqs_allocated++; | 256 | dev->n_cqs_allocated++; |
257 | goto done; | ||
212 | 258 | ||
213 | bail: | 259 | bail_wc: |
260 | vfree(wc); | ||
261 | |||
262 | bail_cq: | ||
263 | kfree(cq); | ||
264 | |||
265 | done: | ||
214 | return ret; | 266 | return ret; |
215 | } | 267 | } |
216 | 268 | ||
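On the consumer side, the 64-bit value returned through udata is meant to be passed back as an mmap offset so a userspace library can watch the queue without system calls. A hedged sketch of that usage (the command fd, offset, and mapping size are whatever the library's create-CQ call returned; the layout mirrors ipath_cq_wc):

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

struct ipath_cq_wc_map {
        uint32_t head;                   /* driver adds completions at head */
        uint32_t tail;                   /* consumer advances tail */
        /* struct ib_wc queue[cqe + 1] follows in the same mapping */
};

static struct ipath_cq_wc_map *map_cq_queue(int cmd_fd, uint64_t offset,
                                            size_t bytes)
{
        void *p = mmap(NULL, bytes, PROT_READ | PROT_WRITE, MAP_SHARED,
                       cmd_fd, (off_t)offset);
        return p == MAP_FAILED ? NULL : p;
}

The consumer then reads queue[tail] and advances tail itself, which is why the kernel treats both indices as untrusted.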
@@ -229,7 +281,10 @@ int ipath_destroy_cq(struct ib_cq *ibcq) | |||
229 | 281 | ||
230 | tasklet_kill(&cq->comptask); | 282 | tasklet_kill(&cq->comptask); |
231 | dev->n_cqs_allocated--; | 283 | dev->n_cqs_allocated--; |
232 | vfree(cq->queue); | 284 | if (cq->ip) |
285 | kref_put(&cq->ip->ref, ipath_release_mmap_info); | ||
286 | else | ||
287 | vfree(cq->queue); | ||
233 | kfree(cq); | 288 | kfree(cq); |
234 | 289 | ||
235 | return 0; | 290 | return 0; |
@@ -253,7 +308,7 @@ int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) | |||
253 | spin_lock_irqsave(&cq->lock, flags); | 308 | spin_lock_irqsave(&cq->lock, flags); |
254 | /* | 309 | /* |
255 | * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow | 310 | * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow |
256 | * any other transitions. | 311 | * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2). |
257 | */ | 312 | */ |
258 | if (cq->notify != IB_CQ_NEXT_COMP) | 313 | if (cq->notify != IB_CQ_NEXT_COMP) |
259 | cq->notify = notify; | 314 | cq->notify = notify; |
@@ -264,46 +319,86 @@ int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) | |||
264 | int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) | 319 | int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) |
265 | { | 320 | { |
266 | struct ipath_cq *cq = to_icq(ibcq); | 321 | struct ipath_cq *cq = to_icq(ibcq); |
267 | struct ib_wc *wc, *old_wc; | 322 | struct ipath_cq_wc *old_wc = cq->queue; |
268 | u32 n; | 323 | struct ipath_cq_wc *wc; |
324 | u32 head, tail, n; | ||
269 | int ret; | 325 | int ret; |
270 | 326 | ||
327 | if (cqe < 1 || cqe > ib_ipath_max_cqes) { | ||
328 | ret = -EINVAL; | ||
329 | goto bail; | ||
330 | } | ||
331 | |||
271 | /* | 332 | /* |
272 | * Need to use vmalloc() if we want to support large #s of entries. | 333 | * Need to use vmalloc() if we want to support large #s of entries. |
273 | */ | 334 | */ |
274 | wc = vmalloc(sizeof(*wc) * (cqe + 1)); | 335 | wc = vmalloc_user(sizeof(*wc) + sizeof(struct ib_wc) * cqe); |
275 | if (!wc) { | 336 | if (!wc) { |
276 | ret = -ENOMEM; | 337 | ret = -ENOMEM; |
277 | goto bail; | 338 | goto bail; |
278 | } | 339 | } |
279 | 340 | ||
341 | /* | ||
342 | * Return the address of the WC as the offset to mmap. | ||
343 | * See ipath_mmap() for details. | ||
344 | */ | ||
345 | if (udata && udata->outlen >= sizeof(__u64)) { | ||
346 | __u64 offset = (__u64) wc; | ||
347 | |||
348 | ret = ib_copy_to_udata(udata, &offset, sizeof(offset)); | ||
349 | if (ret) | ||
350 | goto bail; | ||
351 | } | ||
352 | |||
280 | spin_lock_irq(&cq->lock); | 353 | spin_lock_irq(&cq->lock); |
281 | if (cq->head < cq->tail) | 354 | /* |
282 | n = cq->ibcq.cqe + 1 + cq->head - cq->tail; | 355 | * Make sure head and tail are sane since they |
356 | * might be user writable. | ||
357 | */ | ||
358 | head = old_wc->head; | ||
359 | if (head > (u32) cq->ibcq.cqe) | ||
360 | head = (u32) cq->ibcq.cqe; | ||
361 | tail = old_wc->tail; | ||
362 | if (tail > (u32) cq->ibcq.cqe) | ||
363 | tail = (u32) cq->ibcq.cqe; | ||
364 | if (head < tail) | ||
365 | n = cq->ibcq.cqe + 1 + head - tail; | ||
283 | else | 366 | else |
284 | n = cq->head - cq->tail; | 367 | n = head - tail; |
285 | if (unlikely((u32)cqe < n)) { | 368 | if (unlikely((u32)cqe < n)) { |
286 | spin_unlock_irq(&cq->lock); | 369 | spin_unlock_irq(&cq->lock); |
287 | vfree(wc); | 370 | vfree(wc); |
288 | ret = -EOVERFLOW; | 371 | ret = -EOVERFLOW; |
289 | goto bail; | 372 | goto bail; |
290 | } | 373 | } |
291 | for (n = 0; cq->tail != cq->head; n++) { | 374 | for (n = 0; tail != head; n++) { |
292 | wc[n] = cq->queue[cq->tail]; | 375 | wc->queue[n] = old_wc->queue[tail]; |
293 | if (cq->tail == cq->ibcq.cqe) | 376 | if (tail == (u32) cq->ibcq.cqe) |
294 | cq->tail = 0; | 377 | tail = 0; |
295 | else | 378 | else |
296 | cq->tail++; | 379 | tail++; |
297 | } | 380 | } |
298 | cq->ibcq.cqe = cqe; | 381 | cq->ibcq.cqe = cqe; |
299 | cq->head = n; | 382 | wc->head = n; |
300 | cq->tail = 0; | 383 | wc->tail = 0; |
301 | old_wc = cq->queue; | ||
302 | cq->queue = wc; | 384 | cq->queue = wc; |
303 | spin_unlock_irq(&cq->lock); | 385 | spin_unlock_irq(&cq->lock); |
304 | 386 | ||
305 | vfree(old_wc); | 387 | vfree(old_wc); |
306 | 388 | ||
389 | if (cq->ip) { | ||
390 | struct ipath_ibdev *dev = to_idev(ibcq->device); | ||
391 | struct ipath_mmap_info *ip = cq->ip; | ||
392 | |||
393 | ip->obj = wc; | ||
394 | ip->size = PAGE_ALIGN(sizeof(*wc) + | ||
395 | sizeof(struct ib_wc) * cqe); | ||
396 | spin_lock_irq(&dev->pending_lock); | ||
397 | ip->next = dev->pending_mmaps; | ||
398 | dev->pending_mmaps = ip; | ||
399 | spin_unlock_irq(&dev->pending_lock); | ||
400 | } | ||
401 | |||
307 | ret = 0; | 402 | ret = 0; |
308 | 403 | ||
309 | bail: | 404 | bail: |
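The occupancy arithmetic in the resize path is the standard one for a ring of cqe + 1 slots; a one-line restatement, assuming head and tail have already been sanitized as above:

#include <stdint.h>

static uint32_t ring_count(uint32_t head, uint32_t tail, uint32_t cqe)
{
        return (head >= tail) ? head - tail : cqe + 1 + head - tail;
}

Resize bails out with -EOVERFLOW whenever this count exceeds the requested new cqe.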
diff --git a/drivers/infiniband/hw/ipath/ipath_debug.h b/drivers/infiniband/hw/ipath/ipath_debug.h
index f415beda0d32..df69f0d80b8b 100644
--- a/drivers/infiniband/hw/ipath/ipath_debug.h
+++ b/drivers/infiniband/hw/ipath/ipath_debug.h
@@ -60,7 +60,6 @@ | |||
60 | #define __IPATH_USER_SEND 0x1000 /* use user mode send */ | 60 | #define __IPATH_USER_SEND 0x1000 /* use user mode send */ |
61 | #define __IPATH_KERNEL_SEND 0x2000 /* use kernel mode send */ | 61 | #define __IPATH_KERNEL_SEND 0x2000 /* use kernel mode send */ |
62 | #define __IPATH_EPKTDBG 0x4000 /* print ethernet packet data */ | 62 | #define __IPATH_EPKTDBG 0x4000 /* print ethernet packet data */ |
63 | #define __IPATH_SMADBG 0x8000 /* sma packet debug */ | ||
64 | #define __IPATH_IPATHDBG 0x10000 /* Ethernet (IPATH) gen debug */ | 63 | #define __IPATH_IPATHDBG 0x10000 /* Ethernet (IPATH) gen debug */ |
65 | #define __IPATH_IPATHWARN 0x20000 /* Ethernet (IPATH) warnings */ | 64 | #define __IPATH_IPATHWARN 0x20000 /* Ethernet (IPATH) warnings */ |
66 | #define __IPATH_IPATHERR 0x40000 /* Ethernet (IPATH) errors */ | 65 | #define __IPATH_IPATHERR 0x40000 /* Ethernet (IPATH) errors */ |
@@ -84,7 +83,6 @@ | |||
84 | /* print mmap/nopage stuff, not using VDBG any more */ | 83 | /* print mmap/nopage stuff, not using VDBG any more */ |
85 | #define __IPATH_MMDBG 0x0 | 84 | #define __IPATH_MMDBG 0x0 |
86 | #define __IPATH_EPKTDBG 0x0 /* print ethernet packet data */ | 85 | #define __IPATH_EPKTDBG 0x0 /* print ethernet packet data */ |
87 | #define __IPATH_SMADBG 0x0 /* process startup (init)/exit messages */ | ||
88 | #define __IPATH_IPATHDBG 0x0 /* Ethernet (IPATH) table dump on */ | 86 | #define __IPATH_IPATHDBG 0x0 /* Ethernet (IPATH) table dump on */ |
89 | #define __IPATH_IPATHWARN 0x0 /* Ethernet (IPATH) warnings on */ | 87 | #define __IPATH_IPATHWARN 0x0 /* Ethernet (IPATH) warnings on */ |
90 | #define __IPATH_IPATHERR 0x0 /* Ethernet (IPATH) errors on */ | 88 | #define __IPATH_IPATHERR 0x0 /* Ethernet (IPATH) errors on */ |
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
index 147dd89e21c9..28b6b46c106a 100644
--- a/drivers/infiniband/hw/ipath/ipath_diag.c
+++ b/drivers/infiniband/hw/ipath/ipath_diag.c
@@ -41,11 +41,11 @@ | |||
41 | * through the /sys/bus/pci resource mmap interface. | 41 | * through the /sys/bus/pci resource mmap interface. |
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <linux/io.h> | ||
44 | #include <linux/pci.h> | 45 | #include <linux/pci.h> |
45 | #include <asm/uaccess.h> | 46 | #include <asm/uaccess.h> |
46 | 47 | ||
47 | #include "ipath_kernel.h" | 48 | #include "ipath_kernel.h" |
48 | #include "ipath_layer.h" | ||
49 | #include "ipath_common.h" | 49 | #include "ipath_common.h" |
50 | 50 | ||
51 | int ipath_diag_inuse; | 51 | int ipath_diag_inuse; |
@@ -274,6 +274,158 @@ bail: | |||
274 | return ret; | 274 | return ret; |
275 | } | 275 | } |
276 | 276 | ||
277 | static ssize_t ipath_diagpkt_write(struct file *fp, | ||
278 | const char __user *data, | ||
279 | size_t count, loff_t *off); | ||
280 | |||
281 | static struct file_operations diagpkt_file_ops = { | ||
282 | .owner = THIS_MODULE, | ||
283 | .write = ipath_diagpkt_write, | ||
284 | }; | ||
285 | |||
286 | static struct cdev *diagpkt_cdev; | ||
287 | static struct class_device *diagpkt_class_dev; | ||
288 | |||
289 | int __init ipath_diagpkt_add(void) | ||
290 | { | ||
291 | return ipath_cdev_init(IPATH_DIAGPKT_MINOR, | ||
292 | "ipath_diagpkt", &diagpkt_file_ops, | ||
293 | &diagpkt_cdev, &diagpkt_class_dev); | ||
294 | } | ||
295 | |||
296 | void __exit ipath_diagpkt_remove(void) | ||
297 | { | ||
298 | ipath_cdev_cleanup(&diagpkt_cdev, &diagpkt_class_dev); | ||
299 | } | ||
300 | |||
301 | /** | ||
302 | * ipath_diagpkt_write - write an IB packet | ||
303 | * @fp: the diag data device file pointer | ||
304 | * @data: ipath_diag_pkt structure saying where to get the packet | ||
305 | * @count: size of data to write | ||
306 | * @off: unused by this code | ||
307 | */ | ||
308 | static ssize_t ipath_diagpkt_write(struct file *fp, | ||
309 | const char __user *data, | ||
310 | size_t count, loff_t *off) | ||
311 | { | ||
312 | u32 __iomem *piobuf; | ||
313 | u32 plen, clen, pbufn; | ||
314 | struct ipath_diag_pkt dp; | ||
315 | u32 *tmpbuf = NULL; | ||
316 | struct ipath_devdata *dd; | ||
317 | ssize_t ret = 0; | ||
318 | u64 val; | ||
319 | |||
320 | if (count < sizeof(dp)) { | ||
321 | ret = -EINVAL; | ||
322 | goto bail; | ||
323 | } | ||
324 | |||
325 | if (copy_from_user(&dp, data, sizeof(dp))) { | ||
326 | ret = -EFAULT; | ||
327 | goto bail; | ||
328 | } | ||
329 | |||
330 | /* send count must be an exact number of dwords */ | ||
331 | if (dp.len & 3) { | ||
332 | ret = -EINVAL; | ||
333 | goto bail; | ||
334 | } | ||
335 | |||
336 | clen = dp.len >> 2; | ||
337 | |||
338 | dd = ipath_lookup(dp.unit); | ||
339 | if (!dd || !(dd->ipath_flags & IPATH_PRESENT) || | ||
340 | !dd->ipath_kregbase) { | ||
341 | ipath_cdbg(VERBOSE, "illegal unit %u for diag data send\n", | ||
342 | dp.unit); | ||
343 | ret = -ENODEV; | ||
344 | goto bail; | ||
345 | } | ||
346 | |||
347 | if (ipath_diag_inuse && !diag_set_link && | ||
348 | !(dd->ipath_flags & IPATH_LINKACTIVE)) { | ||
349 | diag_set_link = 1; | ||
350 | ipath_cdbg(VERBOSE, "Trying to set to set link active for " | ||
351 | "diag pkt\n"); | ||
352 | ipath_set_linkstate(dd, IPATH_IB_LINKARM); | ||
353 | ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE); | ||
354 | } | ||
355 | |||
356 | if (!(dd->ipath_flags & IPATH_INITTED)) { | ||
357 | /* no hardware, freeze, etc. */ | ||
358 | ipath_cdbg(VERBOSE, "unit %u not usable\n", dd->ipath_unit); | ||
359 | ret = -ENODEV; | ||
360 | goto bail; | ||
361 | } | ||
362 | val = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK; | ||
363 | if (val != IPATH_IBSTATE_INIT && val != IPATH_IBSTATE_ARM && | ||
364 | val != IPATH_IBSTATE_ACTIVE) { | ||
365 | ipath_cdbg(VERBOSE, "unit %u not ready (state %llx)\n", | ||
366 | dd->ipath_unit, (unsigned long long) val); | ||
367 | ret = -EINVAL; | ||
368 | goto bail; | ||
369 | } | ||
370 | |||
371 | /* need total length before first word written */ | ||
372 | /* +1 word is for the qword padding */ | ||
373 | plen = sizeof(u32) + dp.len; | ||
374 | |||
375 | if ((plen + 4) > dd->ipath_ibmaxlen) { | ||
376 | ipath_dbg("Pkt len 0x%x > ibmaxlen %x\n", | ||
377 | plen - 4, dd->ipath_ibmaxlen); | ||
378 | ret = -EINVAL; | ||
379 | goto bail; /* before writing pbc */ | ||
380 | } | ||
381 | tmpbuf = vmalloc(plen); | ||
382 | if (!tmpbuf) { | ||
383 | dev_info(&dd->pcidev->dev, "Unable to allocate tmp buffer, " | ||
384 | "failing\n"); | ||
385 | ret = -ENOMEM; | ||
386 | goto bail; | ||
387 | } | ||
388 | |||
389 | if (copy_from_user(tmpbuf, | ||
390 | (const void __user *) (unsigned long) dp.data, | ||
391 | dp.len)) { | ||
392 | ret = -EFAULT; | ||
393 | goto bail; | ||
394 | } | ||
395 | |||
396 | piobuf = ipath_getpiobuf(dd, &pbufn); | ||
397 | if (!piobuf) { | ||
398 | ipath_cdbg(VERBOSE, "No PIO buffers avail for unit %u\n", | ||
399 | dd->ipath_unit); | ||
400 | ret = -EBUSY; | ||
401 | goto bail; | ||
402 | } | ||
403 | |||
404 | plen >>= 2; /* in dwords */ | ||
405 | |||
406 | if (ipath_debug & __IPATH_PKTDBG) | ||
407 | ipath_cdbg(VERBOSE, "unit %u 0x%x+1w pio%d\n", | ||
408 | dd->ipath_unit, plen - 1, pbufn); | ||
409 | |||
410 | /* we have to flush after the PBC for correctness on some cpus | ||
411 | * or WC buffer can be written out of order */ | ||
412 | writeq(plen, piobuf); | ||
413 | ipath_flush_wc(); | ||
414 | /* copy all but the trigger word, then flush, so it's written | ||
415 | * to chip before trigger word, then write trigger word, then | ||
416 | * flush again, so packet is sent. */ | ||
417 | __iowrite32_copy(piobuf + 2, tmpbuf, clen - 1); | ||
418 | ipath_flush_wc(); | ||
419 | __raw_writel(tmpbuf[clen - 1], piobuf + clen + 1); | ||
420 | ipath_flush_wc(); | ||
421 | |||
422 | ret = sizeof(dp); | ||
423 | |||
424 | bail: | ||
425 | vfree(tmpbuf); | ||
426 | return ret; | ||
427 | } | ||
428 | |||
277 | static int ipath_diag_release(struct inode *in, struct file *fp) | 429 | static int ipath_diag_release(struct inode *in, struct file *fp) |
278 | { | 430 | { |
279 | mutex_lock(&ipath_mutex); | 431 | mutex_lock(&ipath_mutex); |
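The heart of the new diag write path is the ordering it imposes on a write-combining PIO buffer. A condensed restatement (not a drop-in helper; buffer accounting and error handling are omitted, and the names are those already used in the patch):

/* buf holds clen dwords, the last of which is the trigger word;
 * plen is the PBC length in dwords. */
static void pio_copy_ordered(u32 __iomem *piobuf, const u32 *buf,
                             u32 plen, u32 clen)
{
        writeq(plen, piobuf);                           /* PBC goes first */
        ipath_flush_wc();                               /* PBC must leave the WC buffer */
        __iowrite32_copy(piobuf + 2, buf, clen - 1);    /* everything but the trigger */
        ipath_flush_wc();                               /* body reaches the chip first */
        __raw_writel(buf[clen - 1], piobuf + clen + 1); /* trigger word */
        ipath_flush_wc();                               /* packet is sent */
}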
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index f98518d912b5..2108466c7e33 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -39,7 +39,7 @@ | |||
39 | #include <linux/vmalloc.h> | 39 | #include <linux/vmalloc.h> |
40 | 40 | ||
41 | #include "ipath_kernel.h" | 41 | #include "ipath_kernel.h" |
42 | #include "ipath_layer.h" | 42 | #include "ipath_verbs.h" |
43 | #include "ipath_common.h" | 43 | #include "ipath_common.h" |
44 | 44 | ||
45 | static void ipath_update_pio_bufs(struct ipath_devdata *); | 45 | static void ipath_update_pio_bufs(struct ipath_devdata *); |
@@ -51,8 +51,6 @@ const char *ipath_get_unit_name(int unit) | |||
51 | return iname; | 51 | return iname; |
52 | } | 52 | } |
53 | 53 | ||
54 | EXPORT_SYMBOL_GPL(ipath_get_unit_name); | ||
55 | |||
56 | #define DRIVER_LOAD_MSG "QLogic " IPATH_DRV_NAME " loaded: " | 54 | #define DRIVER_LOAD_MSG "QLogic " IPATH_DRV_NAME " loaded: " |
57 | #define PFX IPATH_DRV_NAME ": " | 55 | #define PFX IPATH_DRV_NAME ": " |
58 | 56 | ||
@@ -60,13 +58,13 @@ EXPORT_SYMBOL_GPL(ipath_get_unit_name); | |||
60 | * The size has to be longer than this string, so we can append | 58 | * The size has to be longer than this string, so we can append |
61 | * board/chip information to it in the init code. | 59 | * board/chip information to it in the init code. |
62 | */ | 60 | */ |
63 | const char ipath_core_version[] = IPATH_IDSTR "\n"; | 61 | const char ib_ipath_version[] = IPATH_IDSTR "\n"; |
64 | 62 | ||
65 | static struct idr unit_table; | 63 | static struct idr unit_table; |
66 | DEFINE_SPINLOCK(ipath_devs_lock); | 64 | DEFINE_SPINLOCK(ipath_devs_lock); |
67 | LIST_HEAD(ipath_dev_list); | 65 | LIST_HEAD(ipath_dev_list); |
68 | 66 | ||
69 | wait_queue_head_t ipath_sma_state_wait; | 67 | wait_queue_head_t ipath_state_wait; |
70 | 68 | ||
71 | unsigned ipath_debug = __IPATH_INFO; | 69 | unsigned ipath_debug = __IPATH_INFO; |
72 | 70 | ||
@@ -403,10 +401,10 @@ static int __devinit ipath_init_one(struct pci_dev *pdev, | |||
403 | /* setup the chip-specific functions, as early as possible. */ | 401 | /* setup the chip-specific functions, as early as possible. */ |
404 | switch (ent->device) { | 402 | switch (ent->device) { |
405 | case PCI_DEVICE_ID_INFINIPATH_HT: | 403 | case PCI_DEVICE_ID_INFINIPATH_HT: |
406 | ipath_init_ht400_funcs(dd); | 404 | ipath_init_iba6110_funcs(dd); |
407 | break; | 405 | break; |
408 | case PCI_DEVICE_ID_INFINIPATH_PE800: | 406 | case PCI_DEVICE_ID_INFINIPATH_PE800: |
409 | ipath_init_pe800_funcs(dd); | 407 | ipath_init_iba6120_funcs(dd); |
410 | break; | 408 | break; |
411 | default: | 409 | default: |
412 | ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, " | 410 | ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, " |
@@ -440,7 +438,13 @@ static int __devinit ipath_init_one(struct pci_dev *pdev, | |||
440 | } | 438 | } |
441 | dd->ipath_pcirev = rev; | 439 | dd->ipath_pcirev = rev; |
442 | 440 | ||
441 | #if defined(__powerpc__) | ||
442 | /* There isn't a generic way to specify writethrough mappings */ | ||
443 | dd->ipath_kregbase = __ioremap(addr, len, | ||
444 | (_PAGE_NO_CACHE|_PAGE_WRITETHRU)); | ||
445 | #else | ||
443 | dd->ipath_kregbase = ioremap_nocache(addr, len); | 446 | dd->ipath_kregbase = ioremap_nocache(addr, len); |
447 | #endif | ||
444 | 448 | ||
445 | if (!dd->ipath_kregbase) { | 449 | if (!dd->ipath_kregbase) { |
446 | ipath_dbg("Unable to map io addr %llx to kvirt, failing\n", | 450 | ipath_dbg("Unable to map io addr %llx to kvirt, failing\n", |
@@ -503,7 +507,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev, | |||
503 | ipathfs_add_device(dd); | 507 | ipathfs_add_device(dd); |
504 | ipath_user_add(dd); | 508 | ipath_user_add(dd); |
505 | ipath_diag_add(dd); | 509 | ipath_diag_add(dd); |
506 | ipath_layer_add(dd); | 510 | ipath_register_ib_device(dd); |
507 | 511 | ||
508 | goto bail; | 512 | goto bail; |
509 | 513 | ||
@@ -532,7 +536,7 @@ static void __devexit ipath_remove_one(struct pci_dev *pdev) | |||
532 | return; | 536 | return; |
533 | 537 | ||
534 | dd = pci_get_drvdata(pdev); | 538 | dd = pci_get_drvdata(pdev); |
535 | ipath_layer_remove(dd); | 539 | ipath_unregister_ib_device(dd->verbs_dev); |
536 | ipath_diag_remove(dd); | 540 | ipath_diag_remove(dd); |
537 | ipath_user_remove(dd); | 541 | ipath_user_remove(dd); |
538 | ipathfs_remove_device(dd); | 542 | ipathfs_remove_device(dd); |
@@ -607,21 +611,23 @@ void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first, | |||
607 | * | 611 | * |
608 | * wait up to msecs milliseconds for IB link state change to occur for | 612 | * wait up to msecs milliseconds for IB link state change to occur for |
609 | * now, take the easy polling route. Currently used only by | 613 | * now, take the easy polling route. Currently used only by |
610 | * ipath_layer_set_linkstate. Returns 0 if state reached, otherwise | 614 | * ipath_set_linkstate. Returns 0 if state reached, otherwise |
611 | * -ETIMEDOUT state can have multiple states set, for any of several | 615 | * -ETIMEDOUT state can have multiple states set, for any of several |
612 | * transitions. | 616 | * transitions. |
613 | */ | 617 | */ |
614 | int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, int msecs) | 618 | static int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, |
619 | int msecs) | ||
615 | { | 620 | { |
616 | dd->ipath_sma_state_wanted = state; | 621 | dd->ipath_state_wanted = state; |
617 | wait_event_interruptible_timeout(ipath_sma_state_wait, | 622 | wait_event_interruptible_timeout(ipath_state_wait, |
618 | (dd->ipath_flags & state), | 623 | (dd->ipath_flags & state), |
619 | msecs_to_jiffies(msecs)); | 624 | msecs_to_jiffies(msecs)); |
620 | dd->ipath_sma_state_wanted = 0; | 625 | dd->ipath_state_wanted = 0; |
621 | 626 | ||
622 | if (!(dd->ipath_flags & state)) { | 627 | if (!(dd->ipath_flags & state)) { |
623 | u64 val; | 628 | u64 val; |
624 | ipath_cdbg(SMA, "Didn't reach linkstate %s within %u ms\n", | 629 | ipath_cdbg(VERBOSE, "Didn't reach linkstate %s within %u" |
630 | " ms\n", | ||
625 | /* test INIT ahead of DOWN, both can be set */ | 631 | /* test INIT ahead of DOWN, both can be set */ |
626 | (state & IPATH_LINKINIT) ? "INIT" : | 632 | (state & IPATH_LINKINIT) ? "INIT" : |
627 | ((state & IPATH_LINKDOWN) ? "DOWN" : | 633 | ((state & IPATH_LINKDOWN) ? "DOWN" : |
@@ -807,58 +813,6 @@ bail: | |||
807 | return skb; | 813 | return skb; |
808 | } | 814 | } |
809 | 815 | ||
810 | /** | ||
811 | * ipath_rcv_layer - receive a packet for the layered (ethernet) driver | ||
812 | * @dd: the infinipath device | ||
813 | * @etail: the sk_buff number | ||
814 | * @tlen: the total packet length | ||
815 | * @hdr: the ethernet header | ||
816 | * | ||
817 | * Separate routine for better overall optimization | ||
818 | */ | ||
819 | static void ipath_rcv_layer(struct ipath_devdata *dd, u32 etail, | ||
820 | u32 tlen, struct ether_header *hdr) | ||
821 | { | ||
822 | u32 elen; | ||
823 | u8 pad, *bthbytes; | ||
824 | struct sk_buff *skb, *nskb; | ||
825 | |||
826 | if (dd->ipath_port0_skbs && | ||
827 | hdr->sub_opcode == IPATH_ITH4X_OPCODE_ENCAP) { | ||
828 | /* | ||
829 | * Allocate a new sk_buff to replace the one we give | ||
830 | * to the network stack. | ||
831 | */ | ||
832 | nskb = ipath_alloc_skb(dd, GFP_ATOMIC); | ||
833 | if (!nskb) { | ||
834 | /* count OK packets that we drop */ | ||
835 | ipath_stats.sps_krdrops++; | ||
836 | return; | ||
837 | } | ||
838 | |||
839 | bthbytes = (u8 *) hdr->bth; | ||
840 | pad = (bthbytes[1] >> 4) & 3; | ||
841 | /* +CRC32 */ | ||
842 | elen = tlen - (sizeof(*hdr) + pad + sizeof(u32)); | ||
843 | |||
844 | skb = dd->ipath_port0_skbs[etail]; | ||
845 | dd->ipath_port0_skbs[etail] = nskb; | ||
846 | skb_put(skb, elen); | ||
847 | |||
848 | dd->ipath_f_put_tid(dd, etail + (u64 __iomem *) | ||
849 | ((char __iomem *) dd->ipath_kregbase | ||
850 | + dd->ipath_rcvegrbase), 0, | ||
851 | virt_to_phys(nskb->data)); | ||
852 | |||
853 | __ipath_layer_rcv(dd, hdr, skb); | ||
854 | |||
855 | /* another ether packet received */ | ||
856 | ipath_stats.sps_ether_rpkts++; | ||
857 | } | ||
858 | else if (hdr->sub_opcode == IPATH_ITH4X_OPCODE_LID_ARP) | ||
859 | __ipath_layer_rcv_lid(dd, hdr); | ||
860 | } | ||
861 | |||
862 | static void ipath_rcv_hdrerr(struct ipath_devdata *dd, | 816 | static void ipath_rcv_hdrerr(struct ipath_devdata *dd, |
863 | u32 eflags, | 817 | u32 eflags, |
864 | u32 l, | 818 | u32 l, |
@@ -972,26 +926,17 @@ reloop: | |||
972 | if (unlikely(eflags)) | 926 | if (unlikely(eflags)) |
973 | ipath_rcv_hdrerr(dd, eflags, l, etail, rc); | 927 | ipath_rcv_hdrerr(dd, eflags, l, etail, rc); |
974 | else if (etype == RCVHQ_RCV_TYPE_NON_KD) { | 928 | else if (etype == RCVHQ_RCV_TYPE_NON_KD) { |
975 | int ret = __ipath_verbs_rcv(dd, rc + 1, | 929 | ipath_ib_rcv(dd->verbs_dev, rc + 1, ebuf, tlen); |
976 | ebuf, tlen); | 930 | if (dd->ipath_lli_counter) |
977 | if (ret == -ENODEV) | 931 | dd->ipath_lli_counter--; |
978 | ipath_cdbg(VERBOSE, | 932 | ipath_cdbg(PKT, "typ %x, opcode %x (eager, " |
979 | "received IB packet, " | 933 | "qp=%x), len %x; ignored\n", |
980 | "not SMA (QP=%x)\n", qp); | 934 | etype, bthbytes[0], qp, tlen); |
981 | if (dd->ipath_lli_counter) | ||
982 | dd->ipath_lli_counter--; | ||
983 | |||
984 | } else if (etype == RCVHQ_RCV_TYPE_EAGER) { | ||
985 | if (qp == IPATH_KD_QP && | ||
986 | bthbytes[0] == ipath_layer_rcv_opcode && | ||
987 | ebuf) | ||
988 | ipath_rcv_layer(dd, etail, tlen, | ||
989 | (struct ether_header *)hdr); | ||
990 | else | ||
991 | ipath_cdbg(PKT, "typ %x, opcode %x (eager, " | ||
992 | "qp=%x), len %x; ignored\n", | ||
993 | etype, bthbytes[0], qp, tlen); | ||
994 | } | 935 | } |
936 | else if (etype == RCVHQ_RCV_TYPE_EAGER) | ||
937 | ipath_cdbg(PKT, "typ %x, opcode %x (eager, " | ||
938 | "qp=%x), len %x; ignored\n", | ||
939 | etype, bthbytes[0], qp, tlen); | ||
995 | else if (etype == RCVHQ_RCV_TYPE_EXPECTED) | 940 | else if (etype == RCVHQ_RCV_TYPE_EXPECTED) |
996 | ipath_dbg("Bug: Expected TID, opcode %x; ignored\n", | 941 | ipath_dbg("Bug: Expected TID, opcode %x; ignored\n", |
997 | be32_to_cpu(hdr->bth[0]) & 0xff); | 942 | be32_to_cpu(hdr->bth[0]) & 0xff); |
@@ -1024,7 +969,8 @@ reloop: | |||
1024 | */ | 969 | */ |
1025 | if (l == hdrqtail || (i && !(i&0xf))) { | 970 | if (l == hdrqtail || (i && !(i&0xf))) { |
1026 | u64 lval; | 971 | u64 lval; |
1027 | if (l == hdrqtail) /* PE-800 interrupt only on last */ | 972 | if (l == hdrqtail) |
973 | /* request IBA6120 interrupt only on last */ | ||
1028 | lval = dd->ipath_rhdrhead_intr_off | l; | 974 | lval = dd->ipath_rhdrhead_intr_off | l; |
1029 | else | 975 | else |
1030 | lval = l; | 976 | lval = l; |
@@ -1038,7 +984,7 @@ reloop: | |||
1038 | } | 984 | } |
1039 | 985 | ||
1040 | if (!dd->ipath_rhdrhead_intr_off && !reloop) { | 986 | if (!dd->ipath_rhdrhead_intr_off && !reloop) { |
1041 | /* HT-400 workaround; we can have a race clearing chip | 987 | /* IBA6110 workaround; we can have a race clearing chip |
1042 | * interrupt with another interrupt about to be delivered, | 988 | * interrupt with another interrupt about to be delivered, |
1043 | * and can clear it before it is delivered on the GPIO | 989 | * and can clear it before it is delivered on the GPIO |
1044 | * workaround. By doing the extra check here for the | 990 | * workaround. By doing the extra check here for the |
@@ -1211,7 +1157,7 @@ int ipath_setrcvhdrsize(struct ipath_devdata *dd, unsigned rhdrsize) | |||
1211 | * | 1157 | * |
1212 | * do appropriate marking as busy, etc. | 1158 | * do appropriate marking as busy, etc. |
1213 | * returns buffer number if one found (>=0), negative number is error. | 1159 | * returns buffer number if one found (>=0), negative number is error. |
1214 | * Used by ipath_sma_send_pkt and ipath_layer_send | 1160 | * Used by ipath_layer_send |
1215 | */ | 1161 | */ |
1216 | u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 * pbufnum) | 1162 | u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 * pbufnum) |
1217 | { | 1163 | { |
@@ -1317,13 +1263,6 @@ rescan: | |||
1317 | goto bail; | 1263 | goto bail; |
1318 | } | 1264 | } |
1319 | 1265 | ||
1320 | if (updated) | ||
1321 | /* | ||
1322 | * ran out of bufs, now some (at least this one we just | ||
1323 | * got) are now available, so tell the layered driver. | ||
1324 | */ | ||
1325 | __ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE); | ||
1326 | |||
1327 | /* | 1266 | /* |
1328 | * set next starting place. Since it's just an optimization, | 1267 | * set next starting place. Since it's just an optimization, |
1329 | * it doesn't matter who wins on this, so no locking | 1268 | * it doesn't matter who wins on this, so no locking |
@@ -1500,7 +1439,7 @@ int ipath_waitfor_mdio_cmdready(struct ipath_devdata *dd) | |||
1500 | return ret; | 1439 | return ret; |
1501 | } | 1440 | } |
1502 | 1441 | ||
1503 | void ipath_set_ib_lstate(struct ipath_devdata *dd, int which) | 1442 | static void ipath_set_ib_lstate(struct ipath_devdata *dd, int which) |
1504 | { | 1443 | { |
1505 | static const char *what[4] = { | 1444 | static const char *what[4] = { |
1506 | [0] = "DOWN", | 1445 | [0] = "DOWN", |
@@ -1511,7 +1450,7 @@ void ipath_set_ib_lstate(struct ipath_devdata *dd, int which) | |||
1511 | int linkcmd = (which >> INFINIPATH_IBCC_LINKCMD_SHIFT) & | 1450 | int linkcmd = (which >> INFINIPATH_IBCC_LINKCMD_SHIFT) & |
1512 | INFINIPATH_IBCC_LINKCMD_MASK; | 1451 | INFINIPATH_IBCC_LINKCMD_MASK; |
1513 | 1452 | ||
1514 | ipath_cdbg(SMA, "Trying to move unit %u to %s, current ltstate " | 1453 | ipath_cdbg(VERBOSE, "Trying to move unit %u to %s, current ltstate " |
1515 | "is %s\n", dd->ipath_unit, | 1454 | "is %s\n", dd->ipath_unit, |
1516 | what[linkcmd], | 1455 | what[linkcmd], |
1517 | ipath_ibcstatus_str[ | 1456 | ipath_ibcstatus_str[ |
@@ -1520,7 +1459,7 @@ void ipath_set_ib_lstate(struct ipath_devdata *dd, int which) | |||
1520 | INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) & | 1459 | INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) & |
1521 | INFINIPATH_IBCS_LINKTRAININGSTATE_MASK]); | 1460 | INFINIPATH_IBCS_LINKTRAININGSTATE_MASK]); |
1522 | /* flush all queued sends when going to DOWN or INIT, to be sure that | 1461 | /* flush all queued sends when going to DOWN or INIT, to be sure that |
1523 | * they don't block SMA and other MAD packets */ | 1462 | * they don't block MAD packets */ |
1524 | if (!linkcmd || linkcmd == INFINIPATH_IBCC_LINKCMD_INIT) { | 1463 | if (!linkcmd || linkcmd == INFINIPATH_IBCC_LINKCMD_INIT) { |
1525 | ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, | 1464 | ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, |
1526 | INFINIPATH_S_ABORT); | 1465 | INFINIPATH_S_ABORT); |
@@ -1534,6 +1473,180 @@ void ipath_set_ib_lstate(struct ipath_devdata *dd, int which) | |||
1534 | dd->ipath_ibcctrl | which); | 1473 | dd->ipath_ibcctrl | which); |
1535 | } | 1474 | } |
1536 | 1475 | ||
1476 | int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate) | ||
1477 | { | ||
1478 | u32 lstate; | ||
1479 | int ret; | ||
1480 | |||
1481 | switch (newstate) { | ||
1482 | case IPATH_IB_LINKDOWN: | ||
1483 | ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL << | ||
1484 | INFINIPATH_IBCC_LINKINITCMD_SHIFT); | ||
1485 | /* don't wait */ | ||
1486 | ret = 0; | ||
1487 | goto bail; | ||
1488 | |||
1489 | case IPATH_IB_LINKDOWN_SLEEP: | ||
1490 | ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP << | ||
1491 | INFINIPATH_IBCC_LINKINITCMD_SHIFT); | ||
1492 | /* don't wait */ | ||
1493 | ret = 0; | ||
1494 | goto bail; | ||
1495 | |||
1496 | case IPATH_IB_LINKDOWN_DISABLE: | ||
1497 | ipath_set_ib_lstate(dd, | ||
1498 | INFINIPATH_IBCC_LINKINITCMD_DISABLE << | ||
1499 | INFINIPATH_IBCC_LINKINITCMD_SHIFT); | ||
1500 | /* don't wait */ | ||
1501 | ret = 0; | ||
1502 | goto bail; | ||
1503 | |||
1504 | case IPATH_IB_LINKINIT: | ||
1505 | if (dd->ipath_flags & IPATH_LINKINIT) { | ||
1506 | ret = 0; | ||
1507 | goto bail; | ||
1508 | } | ||
1509 | ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT << | ||
1510 | INFINIPATH_IBCC_LINKCMD_SHIFT); | ||
1511 | lstate = IPATH_LINKINIT; | ||
1512 | break; | ||
1513 | |||
1514 | case IPATH_IB_LINKARM: | ||
1515 | if (dd->ipath_flags & IPATH_LINKARMED) { | ||
1516 | ret = 0; | ||
1517 | goto bail; | ||
1518 | } | ||
1519 | if (!(dd->ipath_flags & | ||
1520 | (IPATH_LINKINIT | IPATH_LINKACTIVE))) { | ||
1521 | ret = -EINVAL; | ||
1522 | goto bail; | ||
1523 | } | ||
1524 | ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED << | ||
1525 | INFINIPATH_IBCC_LINKCMD_SHIFT); | ||
1526 | /* | ||
1527 | * Since the port can transition to ACTIVE by receiving | ||
1528 | * a non VL 15 packet, wait for either state. | ||
1529 | */ | ||
1530 | lstate = IPATH_LINKARMED | IPATH_LINKACTIVE; | ||
1531 | break; | ||
1532 | |||
1533 | case IPATH_IB_LINKACTIVE: | ||
1534 | if (dd->ipath_flags & IPATH_LINKACTIVE) { | ||
1535 | ret = 0; | ||
1536 | goto bail; | ||
1537 | } | ||
1538 | if (!(dd->ipath_flags & IPATH_LINKARMED)) { | ||
1539 | ret = -EINVAL; | ||
1540 | goto bail; | ||
1541 | } | ||
1542 | ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE << | ||
1543 | INFINIPATH_IBCC_LINKCMD_SHIFT); | ||
1544 | lstate = IPATH_LINKACTIVE; | ||
1545 | break; | ||
1546 | |||
1547 | default: | ||
1548 | ipath_dbg("Invalid linkstate 0x%x requested\n", newstate); | ||
1549 | ret = -EINVAL; | ||
1550 | goto bail; | ||
1551 | } | ||
1552 | ret = ipath_wait_linkstate(dd, lstate, 2000); | ||
1553 | |||
1554 | bail: | ||
1555 | return ret; | ||
1556 | } | ||
1557 | |||
1558 | /** | ||
1559 | * ipath_set_mtu - set the MTU | ||
1560 | * @dd: the infinipath device | ||
1561 | * @arg: the new MTU | ||
1562 | * | ||
1563 | * we can handle "any" incoming size, the issue here is whether we | ||
1564 | * need to restrict our outgoing size. For now, we don't do any | ||
1565 | * sanity checking on this, and we don't deal with what happens to | ||
1566 | * programs that are already running when the size changes. | ||
1567 | * NOTE: changing the MTU will usually cause the IBC to go back to | ||
1568 | * link initialize (IPATH_IBSTATE_INIT) state... | ||
1569 | */ | ||
1570 | int ipath_set_mtu(struct ipath_devdata *dd, u16 arg) | ||
1571 | { | ||
1572 | u32 piosize; | ||
1573 | int changed = 0; | ||
1574 | int ret; | ||
1575 | |||
1576 | /* | ||
1577 | * mtu is IB data payload max. It's the largest power of 2 less | ||
1578 | * than piosize (or even larger, since it only really controls the | ||
1579 | * largest we can receive; we can send the max of the mtu and | ||
1580 | * piosize). We check that it's one of the valid IB sizes. | ||
1581 | */ | ||
1582 | if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 && | ||
1583 | arg != 4096) { | ||
1584 | ipath_dbg("Trying to set invalid mtu %u, failing\n", arg); | ||
1585 | ret = -EINVAL; | ||
1586 | goto bail; | ||
1587 | } | ||
1588 | if (dd->ipath_ibmtu == arg) { | ||
1589 | ret = 0; /* same as current */ | ||
1590 | goto bail; | ||
1591 | } | ||
1592 | |||
1593 | piosize = dd->ipath_ibmaxlen; | ||
1594 | dd->ipath_ibmtu = arg; | ||
1595 | |||
1596 | if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) { | ||
1597 | /* Only if it's not the initial value (or reset to it) */ | ||
1598 | if (piosize != dd->ipath_init_ibmaxlen) { | ||
1599 | dd->ipath_ibmaxlen = piosize; | ||
1600 | changed = 1; | ||
1601 | } | ||
1602 | } else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) { | ||
1603 | piosize = arg + IPATH_PIO_MAXIBHDR; | ||
1604 | ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x " | ||
1605 | "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize, | ||
1606 | arg); | ||
1607 | dd->ipath_ibmaxlen = piosize; | ||
1608 | changed = 1; | ||
1609 | } | ||
1610 | |||
1611 | if (changed) { | ||
1612 | /* | ||
1613 | * set the IBC maxpktlength to the size of our pio | ||
1614 | * buffers in words | ||
1615 | */ | ||
1616 | u64 ibc = dd->ipath_ibcctrl; | ||
1617 | ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK << | ||
1618 | INFINIPATH_IBCC_MAXPKTLEN_SHIFT); | ||
1619 | |||
1620 | piosize = piosize - 2 * sizeof(u32); /* ignore pbc */ | ||
1621 | dd->ipath_ibmaxlen = piosize; | ||
1622 | piosize /= sizeof(u32); /* in words */ | ||
1623 | /* | ||
1624 | * for ICRC, which we only send in diag test pkt mode, and | ||
1625 | * we don't need to worry about that for mtu | ||
1626 | */ | ||
1627 | piosize += 1; | ||
1628 | |||
1629 | ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT; | ||
1630 | dd->ipath_ibcctrl = ibc; | ||
1631 | ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, | ||
1632 | dd->ipath_ibcctrl); | ||
1633 | dd->ipath_f_tidtemplate(dd); | ||
1634 | } | ||
1635 | |||
1636 | ret = 0; | ||
1637 | |||
1638 | bail: | ||
1639 | return ret; | ||
1640 | } | ||
1641 | |||
1642 | int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc) | ||
1643 | { | ||
1644 | dd->ipath_lid = arg; | ||
1645 | dd->ipath_lmc = lmc; | ||
1646 | |||
1647 | return 0; | ||
1648 | } | ||
1649 | |||
1537 | /** | 1650 | /** |
1538 | * ipath_read_kreg64_port - read a device's per-port 64-bit kernel register | 1651 | * ipath_read_kreg64_port - read a device's per-port 64-bit kernel register |
1539 | * @dd: the infinipath device | 1652 | * @dd: the infinipath device |
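The MTU check in the new ipath_set_mtu() reduces to membership in the set of IB payload sizes; a minimal standalone restatement:

/* Accept only the IB-defined payload sizes, as ipath_set_mtu() does. */
static int ib_mtu_is_valid(unsigned int mtu)
{
        switch (mtu) {
        case 256: case 512: case 1024: case 2048: case 4096:
                return 1;
        default:
                return 0;
        }
}

Once a valid MTU is accepted, the IBC max packet length is recomputed from the PIO buffer size (minus the two PBC words, plus one word for the ICRC), as the hunk above shows.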
@@ -1637,13 +1750,6 @@ void ipath_shutdown_device(struct ipath_devdata *dd) | |||
1637 | ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_DISABLE << | 1750 | ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_DISABLE << |
1638 | INFINIPATH_IBCC_LINKINITCMD_SHIFT); | 1751 | INFINIPATH_IBCC_LINKINITCMD_SHIFT); |
1639 | 1752 | ||
1640 | /* | ||
1641 | * we are shutting down, so tell the layered driver. We don't do | ||
1642 | * this on just a link state change, much like ethernet, a cable | ||
1643 | * unplug, etc. doesn't change driver state | ||
1644 | */ | ||
1645 | ipath_layer_intr(dd, IPATH_LAYER_INT_IF_DOWN); | ||
1646 | |||
1647 | /* disable IBC */ | 1753 | /* disable IBC */ |
1648 | dd->ipath_control &= ~INFINIPATH_C_LINKENABLE; | 1754 | dd->ipath_control &= ~INFINIPATH_C_LINKENABLE; |
1649 | ipath_write_kreg(dd, dd->ipath_kregs->kr_control, | 1755 | ipath_write_kreg(dd, dd->ipath_kregs->kr_control, |
@@ -1743,7 +1849,7 @@ static int __init infinipath_init(void) | |||
1743 | { | 1849 | { |
1744 | int ret; | 1850 | int ret; |
1745 | 1851 | ||
1746 | ipath_dbg(KERN_INFO DRIVER_LOAD_MSG "%s", ipath_core_version); | 1852 | ipath_dbg(KERN_INFO DRIVER_LOAD_MSG "%s", ib_ipath_version); |
1747 | 1853 | ||
1748 | /* | 1854 | /* |
1749 | * These must be called before the driver is registered with | 1855 | * These must be called before the driver is registered with |
@@ -1776,8 +1882,18 @@ static int __init infinipath_init(void) | |||
1776 | goto bail_group; | 1882 | goto bail_group; |
1777 | } | 1883 | } |
1778 | 1884 | ||
1885 | ret = ipath_diagpkt_add(); | ||
1886 | if (ret < 0) { | ||
1887 | printk(KERN_ERR IPATH_DRV_NAME ": Unable to create " | ||
1888 | "diag data device: error %d\n", -ret); | ||
1889 | goto bail_ipathfs; | ||
1890 | } | ||
1891 | |||
1779 | goto bail; | 1892 | goto bail; |
1780 | 1893 | ||
1894 | bail_ipathfs: | ||
1895 | ipath_exit_ipathfs(); | ||
1896 | |||
1781 | bail_group: | 1897 | bail_group: |
1782 | ipath_driver_remove_group(&ipath_driver.driver); | 1898 | ipath_driver_remove_group(&ipath_driver.driver); |
1783 | 1899 | ||
@@ -1888,6 +2004,8 @@ static void __exit infinipath_cleanup(void) | |||
1888 | struct ipath_devdata *dd, *tmp; | 2004 | struct ipath_devdata *dd, *tmp; |
1889 | unsigned long flags; | 2005 | unsigned long flags; |
1890 | 2006 | ||
2007 | ipath_diagpkt_remove(); | ||
2008 | |||
1891 | ipath_exit_ipathfs(); | 2009 | ipath_exit_ipathfs(); |
1892 | 2010 | ||
1893 | ipath_driver_remove_group(&ipath_driver.driver); | 2011 | ipath_driver_remove_group(&ipath_driver.driver); |
@@ -1998,5 +2116,22 @@ bail: | |||
1998 | return ret; | 2116 | return ret; |
1999 | } | 2117 | } |
2000 | 2118 | ||
2119 | int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv) | ||
2120 | { | ||
2121 | u64 val; | ||
2122 | if ( new_pol_inv > INFINIPATH_XGXS_RX_POL_MASK ) { | ||
2123 | return -1; | ||
2124 | } | ||
2125 | if ( dd->ipath_rx_pol_inv != new_pol_inv ) { | ||
2126 | dd->ipath_rx_pol_inv = new_pol_inv; | ||
2127 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig); | ||
2128 | val &= ~(INFINIPATH_XGXS_RX_POL_MASK << | ||
2129 | INFINIPATH_XGXS_RX_POL_SHIFT); | ||
2130 | val |= ((u64)dd->ipath_rx_pol_inv) << | ||
2131 | INFINIPATH_XGXS_RX_POL_SHIFT; | ||
2132 | ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val); | ||
2133 | } | ||
2134 | return 0; | ||
2135 | } | ||
2001 | module_init(infinipath_init); | 2136 | module_init(infinipath_init); |
2002 | module_exit(infinipath_cleanup); | 2137 | module_exit(infinipath_cleanup); |
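The new ipath_set_rx_pol_inv() follows the usual clear-then-insert pattern for updating a bit-field in a 64-bit register; a generic sketch (the names here are illustrative, not driver symbols):

#include <stdint.h>

/* Replace the field (mask << shift) in reg with val, preserving other bits. */
static inline uint64_t set_reg_field(uint64_t reg, uint64_t mask,
                                     unsigned int shift, uint64_t val)
{
        reg &= ~(mask << shift);             /* clear the old field */
        reg |= (val & mask) << shift;        /* insert the new value */
        return reg;
}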
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index bbaa70e57db1..29930e22318e 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -39,7 +39,6 @@ | |||
39 | #include <asm/pgtable.h> | 39 | #include <asm/pgtable.h> |
40 | 40 | ||
41 | #include "ipath_kernel.h" | 41 | #include "ipath_kernel.h" |
42 | #include "ipath_layer.h" | ||
43 | #include "ipath_common.h" | 42 | #include "ipath_common.h" |
44 | 43 | ||
45 | static int ipath_open(struct inode *, struct file *); | 44 | static int ipath_open(struct inode *, struct file *); |
@@ -985,15 +984,17 @@ static int mmap_piobufs(struct vm_area_struct *vma, | |||
985 | * write combining behavior we want on the PIO buffers! | 984 | * write combining behavior we want on the PIO buffers! |
986 | */ | 985 | */ |
987 | 986 | ||
988 | if (vma->vm_flags & VM_READ) { | 987 | #if defined(__powerpc__) |
989 | dev_info(&dd->pcidev->dev, | 988 | /* There isn't a generic way to specify writethrough mappings */ |
990 | "Can't map piobufs as readable (flags=%lx)\n", | 989 | pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; |
991 | vma->vm_flags); | 990 | pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU; |
992 | ret = -EPERM; | 991 | pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED; |
993 | goto bail; | 992 | #endif |
994 | } | ||
995 | 993 | ||
996 | /* don't allow them to later change to readable with mprotect */ | 994 | /* |
995 | * don't allow them to later change to readable with mprotect (for when | ||
996 | * not initially mapped readable, as is normally the case) | ||
997 | */ | ||
997 | vma->vm_flags &= ~VM_MAYREAD; | 998 | vma->vm_flags &= ~VM_MAYREAD; |
998 | vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; | 999 | vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; |
999 | 1000 | ||
@@ -1109,7 +1110,7 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma) | |||
1109 | ret = mmap_rcvegrbufs(vma, pd); | 1110 | ret = mmap_rcvegrbufs(vma, pd); |
1110 | else if (pgaddr == (u64) pd->port_rcvhdrq_phys) { | 1111 | else if (pgaddr == (u64) pd->port_rcvhdrq_phys) { |
1111 | /* | 1112 | /* |
1112 | * The rcvhdrq itself; readonly except on HT-400 (so have | 1113 | * The rcvhdrq itself; readonly except on HT (so have |
1113 | * to allow writable mapping), multiple pages, contiguous | 1114 | * to allow writable mapping), multiple pages, contiguous |
1114 | * from an i/o perspective. | 1115 | * from an i/o perspective. |
1115 | */ | 1116 | */ |
@@ -1149,6 +1150,7 @@ static unsigned int ipath_poll(struct file *fp, | |||
1149 | struct ipath_portdata *pd; | 1150 | struct ipath_portdata *pd; |
1150 | u32 head, tail; | 1151 | u32 head, tail; |
1151 | int bit; | 1152 | int bit; |
1153 | unsigned pollflag = 0; | ||
1152 | struct ipath_devdata *dd; | 1154 | struct ipath_devdata *dd; |
1153 | 1155 | ||
1154 | pd = port_fp(fp); | 1156 | pd = port_fp(fp); |
@@ -1185,9 +1187,12 @@ static unsigned int ipath_poll(struct file *fp, | |||
1185 | clear_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag); | 1187 | clear_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag); |
1186 | pd->port_rcvwait_to++; | 1188 | pd->port_rcvwait_to++; |
1187 | } | 1189 | } |
1190 | else | ||
1191 | pollflag = POLLIN | POLLRDNORM; | ||
1188 | } | 1192 | } |
1189 | else { | 1193 | else { |
1190 | /* it's already happened; don't do wait_event overhead */ | 1194 | /* it's already happened; don't do wait_event overhead */ |
1195 | pollflag = POLLIN | POLLRDNORM; | ||
1191 | pd->port_rcvnowait++; | 1196 | pd->port_rcvnowait++; |
1192 | } | 1197 | } |
1193 | 1198 | ||
@@ -1195,7 +1200,7 @@ static unsigned int ipath_poll(struct file *fp, | |||
1195 | ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, | 1200 | ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, |
1196 | dd->ipath_rcvctrl); | 1201 | dd->ipath_rcvctrl); |
1197 | 1202 | ||
1198 | return 0; | 1203 | return pollflag; |
1199 | } | 1204 | } |
1200 | 1205 | ||
1201 | static int try_alloc_port(struct ipath_devdata *dd, int port, | 1206 | static int try_alloc_port(struct ipath_devdata *dd, int port, |
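The one-line poll() fix above matters because poll methods are expected to return a readiness mask, not 0, when data is already available. A generic sketch of the contract (example_waitq and data_is_ready() are placeholders, not driver symbols; needs <linux/poll.h>):

static DECLARE_WAIT_QUEUE_HEAD(example_waitq);  /* placeholder wait queue */

static unsigned int example_poll(struct file *fp, struct poll_table_struct *pt)
{
        unsigned int pollflag = 0;

        poll_wait(fp, &example_waitq, pt);      /* always register before testing */
        if (data_is_ready())                    /* placeholder readiness test */
                pollflag = POLLIN | POLLRDNORM;
        return pollflag;                        /* 0 means "sleep until woken" */
}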
@@ -1297,14 +1302,14 @@ static int find_best_unit(struct file *fp) | |||
1297 | * This code is present to allow a knowledgeable person to | 1302 | * This code is present to allow a knowledgeable person to |
1298 | * specify the layout of processes to processors before opening | 1303 | * specify the layout of processes to processors before opening |
1299 | * this driver, and then we'll assign the process to the "closest" | 1304 | * this driver, and then we'll assign the process to the "closest" |
1300 | * HT-400 to that processor (we assume reasonable connectivity, | 1305 | * InfiniPath chip to that processor (we assume reasonable connectivity, |
1301 | * for now). This code assumes that if affinity has been set | 1306 | * for now). This code assumes that if affinity has been set |
1302 | * before this point, that at most one cpu is set; for now this | 1307 | * before this point, that at most one cpu is set; for now this |
1303 | * is reasonable. I check for both cpus_empty() and cpus_full(), | 1308 | * is reasonable. I check for both cpus_empty() and cpus_full(), |
1304 | * in case some kernel variant sets none of the bits when no | 1309 | * in case some kernel variant sets none of the bits when no |
1305 | * affinity is set. 2.6.11 and 12 kernels have all present | 1310 | * affinity is set. 2.6.11 and 12 kernels have all present |
1306 | * cpus set. Some day we'll have to fix it up further to handle | 1311 | * cpus set. Some day we'll have to fix it up further to handle |
1307 | * a cpu subset. This algorithm fails for two HT-400's connected | 1312 | * a cpu subset. This algorithm fails for two HT chips connected |
1308 | * in tunnel fashion. Eventually this needs real topology | 1313 | * in tunnel fashion. Eventually this needs real topology |
1309 | * information. There may be some issues with dual core numbering | 1314 | * information. There may be some issues with dual core numbering |
1310 | * as well. This needs more work prior to release. | 1315 | * as well. This needs more work prior to release. |
@@ -1815,7 +1820,7 @@ int ipath_user_add(struct ipath_devdata *dd) | |||
1815 | if (ret < 0) { | 1820 | if (ret < 0) { |
1816 | ipath_dev_err(dd, "Could not create wildcard " | 1821 | ipath_dev_err(dd, "Could not create wildcard " |
1817 | "minor: error %d\n", -ret); | 1822 | "minor: error %d\n", -ret); |
1818 | goto bail_sma; | 1823 | goto bail_user; |
1819 | } | 1824 | } |
1820 | 1825 | ||
1821 | atomic_set(&user_setup, 1); | 1826 | atomic_set(&user_setup, 1); |
@@ -1831,7 +1836,7 @@ int ipath_user_add(struct ipath_devdata *dd) | |||
1831 | 1836 | ||
1832 | goto bail; | 1837 | goto bail; |
1833 | 1838 | ||
1834 | bail_sma: | 1839 | bail_user: |
1835 | user_cleanup(); | 1840 | user_cleanup(); |
1836 | bail: | 1841 | bail: |
1837 | return ret; | 1842 | return ret; |
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c index 0936d8e8d704..a5eb30a06a5c 100644 --- a/drivers/infiniband/hw/ipath/ipath_fs.c +++ b/drivers/infiniband/hw/ipath/ipath_fs.c | |||
@@ -191,8 +191,8 @@ static ssize_t atomic_port_info_read(struct file *file, char __user *buf, | |||
191 | portinfo[4] = (dd->ipath_lid << 16); | 191 | portinfo[4] = (dd->ipath_lid << 16); |
192 | 192 | ||
193 | /* | 193 | /* |
194 | * Notimpl yet SMLID (should we store this in the driver, in case | 194 | * Notimpl yet SMLID. |
195 | * SMA dies?) CapabilityMask is 0, we don't support any of these | 195 | * CapabilityMask is 0, we don't support any of these |
196 | * DiagCode is 0; we don't store any diag info for now Notimpl yet | 196 | * DiagCode is 0; we don't store any diag info for now Notimpl yet |
197 | * M_KeyLeasePeriod (we don't support M_Key) | 197 | * M_KeyLeasePeriod (we don't support M_Key) |
198 | */ | 198 | */ |
diff --git a/drivers/infiniband/hw/ipath/ipath_ht400.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c index 3db015da6e77..bf2455a6d562 100644 --- a/drivers/infiniband/hw/ipath/ipath_ht400.c +++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c | |||
@@ -33,7 +33,7 @@ | |||
33 | 33 | ||
34 | /* | 34 | /* |
35 | * This file contains all of the code that is specific to the InfiniPath | 35 | * This file contains all of the code that is specific to the InfiniPath |
36 | * HT-400 chip. | 36 | * HT chip. |
37 | */ | 37 | */ |
38 | 38 | ||
39 | #include <linux/pci.h> | 39 | #include <linux/pci.h> |
@@ -43,7 +43,7 @@ | |||
43 | #include "ipath_registers.h" | 43 | #include "ipath_registers.h" |
44 | 44 | ||
45 | /* | 45 | /* |
46 | * This lists the InfiniPath HT400 registers, in the actual chip layout. | 46 | * This lists the InfiniPath registers, in the actual chip layout. |
47 | * This structure should never be directly accessed. | 47 | * This structure should never be directly accessed. |
48 | * | 48 | * |
49 | * The names are in InterCap form because they're taken straight from | 49 | * The names are in InterCap form because they're taken straight from |
@@ -461,8 +461,9 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg, | |||
461 | * times. | 461 | * times. |
462 | */ | 462 | */ |
463 | if (dd->ipath_flags & IPATH_INITTED) { | 463 | if (dd->ipath_flags & IPATH_INITTED) { |
464 | ipath_dev_err(dd, "Fatal Error (freeze " | 464 | ipath_dev_err(dd, "Fatal Hardware Error (freeze " |
465 | "mode), no longer usable\n"); | 465 | "mode), no longer usable, SN %.16s\n", |
466 | dd->ipath_serial); | ||
466 | isfatal = 1; | 467 | isfatal = 1; |
467 | } | 468 | } |
468 | *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY; | 469 | *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY; |
@@ -537,7 +538,7 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg, | |||
537 | if (hwerrs & INFINIPATH_HWE_HTCMISCERR7) | 538 | if (hwerrs & INFINIPATH_HWE_HTCMISCERR7) |
538 | strlcat(msg, "[HT core Misc7]", msgl); | 539 | strlcat(msg, "[HT core Misc7]", msgl); |
539 | if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) { | 540 | if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) { |
540 | strlcat(msg, "[Memory BIST test failed, HT-400 unusable]", | 541 | strlcat(msg, "[Memory BIST test failed, InfiniPath hardware unusable]", |
541 | msgl); | 542 | msgl); |
542 | /* ignore from now on, so disable until driver reloaded */ | 543 | /* ignore from now on, so disable until driver reloaded */ |
543 | dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED; | 544 | dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED; |
@@ -553,7 +554,7 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg, | |||
553 | 554 | ||
554 | if (hwerrs & _IPATH_PLL_FAIL) { | 555 | if (hwerrs & _IPATH_PLL_FAIL) { |
555 | snprintf(bitsmsg, sizeof bitsmsg, | 556 | snprintf(bitsmsg, sizeof bitsmsg, |
556 | "[PLL failed (%llx), HT-400 unusable]", | 557 | "[PLL failed (%llx), InfiniPath hardware unusable]", |
557 | (unsigned long long) (hwerrs & _IPATH_PLL_FAIL)); | 558 | (unsigned long long) (hwerrs & _IPATH_PLL_FAIL)); |
558 | strlcat(msg, bitsmsg, msgl); | 559 | strlcat(msg, bitsmsg, msgl); |
559 | /* ignore from now on, so disable until driver reloaded */ | 560 | /* ignore from now on, so disable until driver reloaded */ |
@@ -610,18 +611,18 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name, | |||
610 | break; | 611 | break; |
611 | case 5: | 612 | case 5: |
612 | /* | 613 | /* |
613 | * HT-460 original production board; two production levels, with | 614 | * original production board; two production levels, with |
614 | * different serial number ranges. See ipath_ht_early_init() for | 615 | * different serial number ranges. See ipath_ht_early_init() for |
615 | * case where we enable IPATH_GPIO_INTR for later serial # range. | 616 | * case where we enable IPATH_GPIO_INTR for later serial # range. |
616 | */ | 617 | */ |
617 | n = "InfiniPath_HT-460"; | 618 | n = "InfiniPath_QHT7040"; |
618 | break; | 619 | break; |
619 | case 6: | 620 | case 6: |
620 | n = "OEM_Board_3"; | 621 | n = "OEM_Board_3"; |
621 | break; | 622 | break; |
622 | case 7: | 623 | case 7: |
623 | /* HT-460 small form factor production board */ | 624 | /* small form factor production board */ |
624 | n = "InfiniPath_HT-465"; | 625 | n = "InfiniPath_QHT7140"; |
625 | break; | 626 | break; |
626 | case 8: | 627 | case 8: |
627 | n = "LS/X-1"; | 628 | n = "LS/X-1"; |
@@ -633,7 +634,7 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name, | |||
633 | n = "OEM_Board_2"; | 634 | n = "OEM_Board_2"; |
634 | break; | 635 | break; |
635 | case 11: | 636 | case 11: |
636 | n = "InfiniPath_HT-470"; | 637 | n = "InfiniPath_HT-470"; /* obsoleted */ |
637 | break; | 638 | break; |
638 | case 12: | 639 | case 12: |
639 | n = "OEM_Board_4"; | 640 | n = "OEM_Board_4"; |
@@ -641,7 +642,7 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name, | |||
641 | default: /* don't know, just print the number */ | 642 | default: /* don't know, just print the number */ |
642 | ipath_dev_err(dd, "Don't yet know about board " | 643 | ipath_dev_err(dd, "Don't yet know about board " |
643 | "with ID %u\n", boardrev); | 644 | "with ID %u\n", boardrev); |
644 | snprintf(name, namelen, "Unknown_InfiniPath_HT-4xx_%u", | 645 | snprintf(name, namelen, "Unknown_InfiniPath_QHT7xxx_%u", |
645 | boardrev); | 646 | boardrev); |
646 | break; | 647 | break; |
647 | } | 648 | } |
@@ -650,11 +651,10 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name, | |||
650 | 651 | ||
651 | if (dd->ipath_majrev != 3 || (dd->ipath_minrev < 2 || dd->ipath_minrev > 3)) { | 652 | if (dd->ipath_majrev != 3 || (dd->ipath_minrev < 2 || dd->ipath_minrev > 3)) { |
652 | /* | 653 | /* |
653 | * This version of the driver only supports the HT-400 | 654 | * This version of the driver only supports Rev 3.2 and 3.3 |
654 | * Rev 3.2 | ||
655 | */ | 655 | */ |
656 | ipath_dev_err(dd, | 656 | ipath_dev_err(dd, |
657 | "Unsupported HT-400 revision %u.%u!\n", | 657 | "Unsupported InfiniPath hardware revision %u.%u!\n", |
658 | dd->ipath_majrev, dd->ipath_minrev); | 658 | dd->ipath_majrev, dd->ipath_minrev); |
659 | ret = 1; | 659 | ret = 1; |
660 | goto bail; | 660 | goto bail; |
@@ -738,7 +738,7 @@ static void ipath_check_htlink(struct ipath_devdata *dd) | |||
738 | 738 | ||
739 | static int ipath_setup_ht_reset(struct ipath_devdata *dd) | 739 | static int ipath_setup_ht_reset(struct ipath_devdata *dd) |
740 | { | 740 | { |
741 | ipath_dbg("No reset possible for HT-400\n"); | 741 | ipath_dbg("No reset possible for this InfiniPath hardware\n"); |
742 | return 0; | 742 | return 0; |
743 | } | 743 | } |
744 | 744 | ||
@@ -925,7 +925,7 @@ static int set_int_handler(struct ipath_devdata *dd, struct pci_dev *pdev, | |||
925 | 925 | ||
926 | /* | 926 | /* |
927 | * kernels with CONFIG_PCI_MSI set the vector in the irq field of | 927 | * kernels with CONFIG_PCI_MSI set the vector in the irq field of |
928 | * struct pci_device, so we use that to program the HT-400 internal | 928 | * struct pci_device, so we use that to program the internal |
929 | * interrupt register (not config space) with that value. The BIOS | 929 | * interrupt register (not config space) with that value. The BIOS |
930 | * must still have done the basic MSI setup. | 930 | * must still have done the basic MSI setup. |
931 | */ | 931 | */ |
@@ -1013,7 +1013,7 @@ bail: | |||
1013 | * @dd: the infinipath device | 1013 | * @dd: the infinipath device |
1014 | * | 1014 | * |
1015 | * Called during driver unload. | 1015 | * Called during driver unload. |
1016 | * This is currently a nop for the HT-400, not for all chips | 1016 | * This is currently a nop for the HT chip, not for all chips |
1017 | */ | 1017 | */ |
1018 | static void ipath_setup_ht_cleanup(struct ipath_devdata *dd) | 1018 | static void ipath_setup_ht_cleanup(struct ipath_devdata *dd) |
1019 | { | 1019 | { |
@@ -1290,6 +1290,15 @@ static int ipath_ht_bringup_serdes(struct ipath_devdata *dd) | |||
1290 | val &= ~INFINIPATH_XGXS_RESET; | 1290 | val &= ~INFINIPATH_XGXS_RESET; |
1291 | change = 1; | 1291 | change = 1; |
1292 | } | 1292 | } |
1293 | if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) & | ||
1294 | INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv ) { | ||
1295 | /* need to compensate for Tx inversion in partner */ | ||
1296 | val &= ~(INFINIPATH_XGXS_RX_POL_MASK << | ||
1297 | INFINIPATH_XGXS_RX_POL_SHIFT); | ||
1298 | val |= dd->ipath_rx_pol_inv << | ||
1299 | INFINIPATH_XGXS_RX_POL_SHIFT; | ||
1300 | change = 1; | ||
1301 | } | ||
1293 | if (change) | 1302 | if (change) |
1294 | ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val); | 1303 | ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val); |
1295 | 1304 | ||
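Both serdes bringup paths (HT here, PCIe below) gain the same read-modify-write of the XGXS config register: when the RX polarity field does not already match the requested ipath_rx_pol_inv (used to compensate for a link partner whose Tx is inverted), the old field is masked out, the new value is shifted in, and the register is written back only if something changed. A condensed sketch of the added logic, with ipath_read_kreg64 assumed as the 64-bit read accessor:

	u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
	int change = 0;

	if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) &
	     INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv) {
		val &= ~((u64) INFINIPATH_XGXS_RX_POL_MASK <<
			 INFINIPATH_XGXS_RX_POL_SHIFT);	/* clear old polarity bits */
		val |= (u64) dd->ipath_rx_pol_inv <<
			INFINIPATH_XGXS_RX_POL_SHIFT;	/* program requested inversion */
		change = 1;
	}
	if (change)
		ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);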
@@ -1470,7 +1479,7 @@ static int ipath_ht_early_init(struct ipath_devdata *dd) | |||
1470 | dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE; | 1479 | dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE; |
1471 | 1480 | ||
1472 | /* | 1481 | /* |
1473 | * For HT-400, we allocate a somewhat overly large eager buffer, | 1482 | * For HT, we allocate a somewhat overly large eager buffer, |
1474 | * such that we can guarantee that we can receive the largest | 1483 | * such that we can guarantee that we can receive the largest |
1475 | * packet that we can send out. To truly support a 4KB MTU, | 1484 | * packet that we can send out. To truly support a 4KB MTU, |
1476 | * we need to bump this to a large value. To date, other than | 1485 | * we need to bump this to a large value. To date, other than |
@@ -1531,7 +1540,7 @@ static int ipath_ht_early_init(struct ipath_devdata *dd) | |||
1531 | if(dd->ipath_boardrev == 5 && dd->ipath_serial[0] == '1' && | 1540 | if(dd->ipath_boardrev == 5 && dd->ipath_serial[0] == '1' && |
1532 | dd->ipath_serial[1] == '2' && dd->ipath_serial[2] == '8') { | 1541 | dd->ipath_serial[1] == '2' && dd->ipath_serial[2] == '8') { |
1533 | /* | 1542 | /* |
1534 | * Later production HT-460 has same changes as HT-465, so | 1543 | * Later production QHT7040 has same changes as QHT7140, so |
1535 | * can use GPIO interrupts. They have serial #'s starting | 1544 | * can use GPIO interrupts. They have serial #'s starting |
1536 | * with 128, rather than 112. | 1545 | * with 128, rather than 112. |
1537 | */ | 1546 | */ |
@@ -1560,13 +1569,13 @@ static int ipath_ht_get_base_info(struct ipath_portdata *pd, void *kbase) | |||
1560 | } | 1569 | } |
1561 | 1570 | ||
1562 | /** | 1571 | /** |
1563 | * ipath_init_ht400_funcs - set up the chip-specific function pointers | 1572 | * ipath_init_iba6110_funcs - set up the chip-specific function pointers |
1564 | * @dd: the infinipath device | 1573 | * @dd: the infinipath device |
1565 | * | 1574 | * |
1566 | * This is global, and is called directly at init to set up the | 1575 | * This is global, and is called directly at init to set up the |
1567 | * chip-specific function pointers for later use. | 1576 | * chip-specific function pointers for later use. |
1568 | */ | 1577 | */ |
1569 | void ipath_init_ht400_funcs(struct ipath_devdata *dd) | 1578 | void ipath_init_iba6110_funcs(struct ipath_devdata *dd) |
1570 | { | 1579 | { |
1571 | dd->ipath_f_intrsetup = ipath_ht_intconfig; | 1580 | dd->ipath_f_intrsetup = ipath_ht_intconfig; |
1572 | dd->ipath_f_bus = ipath_setup_ht_config; | 1581 | dd->ipath_f_bus = ipath_setup_ht_config; |
diff --git a/drivers/infiniband/hw/ipath/ipath_pe800.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c index b83f66d8262c..d86516d23df6 100644 --- a/drivers/infiniband/hw/ipath/ipath_pe800.c +++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c | |||
@@ -32,7 +32,7 @@ | |||
32 | */ | 32 | */ |
33 | /* | 33 | /* |
34 | * This file contains all of the code that is specific to the | 34 | * This file contains all of the code that is specific to the |
35 | * InfiniPath PE-800 chip. | 35 | * InfiniPath PCIe chip. |
36 | */ | 36 | */ |
37 | 37 | ||
38 | #include <linux/interrupt.h> | 38 | #include <linux/interrupt.h> |
@@ -45,9 +45,9 @@ | |||
45 | 45 | ||
46 | /* | 46 | /* |
47 | * This file contains all the chip-specific register information and | 47 | * This file contains all the chip-specific register information and |
48 | * access functions for the QLogic InfiniPath PE800, the PCI-Express chip. | 48 | * access functions for the QLogic InfiniPath PCI-Express chip. |
49 | * | 49 | * |
50 | * This lists the InfiniPath PE800 registers, in the actual chip layout. | 50 | * This lists the InfiniPath registers, in the actual chip layout. |
51 | * This structure should never be directly accessed. | 51 | * This structure should never be directly accessed. |
52 | */ | 52 | */ |
53 | struct _infinipath_do_not_use_kernel_regs { | 53 | struct _infinipath_do_not_use_kernel_regs { |
@@ -213,7 +213,6 @@ static const struct ipath_kregs ipath_pe_kregs = { | |||
213 | .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0), | 213 | .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0), |
214 | .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0), | 214 | .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0), |
215 | 215 | ||
216 | /* This group is pe-800-specific; and used only in this file */ | ||
217 | /* The rcvpktled register controls one of the debug port signals, so | 216 | /* The rcvpktled register controls one of the debug port signals, so |
218 | * a packet activity LED can be connected to it. */ | 217 | * a packet activity LED can be connected to it. */ |
219 | .kr_rcvpktledcnt = IPATH_KREG_OFFSET(RcvPktLEDCnt), | 218 | .kr_rcvpktledcnt = IPATH_KREG_OFFSET(RcvPktLEDCnt), |
@@ -364,8 +363,9 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg, | |||
364 | * and we get here multiple times | 363 | * and we get here multiple times |
365 | */ | 364 | */ |
366 | if (dd->ipath_flags & IPATH_INITTED) { | 365 | if (dd->ipath_flags & IPATH_INITTED) { |
367 | ipath_dev_err(dd, "Fatal Error (freeze " | 366 | ipath_dev_err(dd, "Fatal Hardware Error (freeze " |
368 | "mode), no longer usable\n"); | 367 | "mode), no longer usable, SN %.16s\n", |
368 | dd->ipath_serial); | ||
369 | isfatal = 1; | 369 | isfatal = 1; |
370 | } | 370 | } |
371 | /* | 371 | /* |
@@ -388,7 +388,7 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg, | |||
388 | *msg = '\0'; | 388 | *msg = '\0'; |
389 | 389 | ||
390 | if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) { | 390 | if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) { |
391 | strlcat(msg, "[Memory BIST test failed, PE-800 unusable]", | 391 | strlcat(msg, "[Memory BIST test failed, InfiniPath hardware unusable]", |
392 | msgl); | 392 | msgl); |
393 | /* ignore from now on, so disable until driver reloaded */ | 393 | /* ignore from now on, so disable until driver reloaded */ |
394 | *dd->ipath_statusp |= IPATH_STATUS_HWERROR; | 394 | *dd->ipath_statusp |= IPATH_STATUS_HWERROR; |
@@ -433,7 +433,7 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg, | |||
433 | 433 | ||
434 | if (hwerrs & _IPATH_PLL_FAIL) { | 434 | if (hwerrs & _IPATH_PLL_FAIL) { |
435 | snprintf(bitsmsg, sizeof bitsmsg, | 435 | snprintf(bitsmsg, sizeof bitsmsg, |
436 | "[PLL failed (%llx), PE-800 unusable]", | 436 | "[PLL failed (%llx), InfiniPath hardware unusable]", |
437 | (unsigned long long) hwerrs & _IPATH_PLL_FAIL); | 437 | (unsigned long long) hwerrs & _IPATH_PLL_FAIL); |
438 | strlcat(msg, bitsmsg, msgl); | 438 | strlcat(msg, bitsmsg, msgl); |
439 | /* ignore from now on, so disable until driver reloaded */ | 439 | /* ignore from now on, so disable until driver reloaded */ |
@@ -511,22 +511,25 @@ static int ipath_pe_boardname(struct ipath_devdata *dd, char *name, | |||
511 | n = "InfiniPath_Emulation"; | 511 | n = "InfiniPath_Emulation"; |
512 | break; | 512 | break; |
513 | case 1: | 513 | case 1: |
514 | n = "InfiniPath_PE-800-Bringup"; | 514 | n = "InfiniPath_QLE7140-Bringup"; |
515 | break; | 515 | break; |
516 | case 2: | 516 | case 2: |
517 | n = "InfiniPath_PE-880"; | 517 | n = "InfiniPath_QLE7140"; |
518 | break; | 518 | break; |
519 | case 3: | 519 | case 3: |
520 | n = "InfiniPath_PE-850"; | 520 | n = "InfiniPath_QMI7140"; |
521 | break; | 521 | break; |
522 | case 4: | 522 | case 4: |
523 | n = "InfiniPath_PE-860"; | 523 | n = "InfiniPath_QEM7140"; |
524 | break; | ||
525 | case 5: | ||
526 | n = "InfiniPath_QMH7140"; | ||
524 | break; | 527 | break; |
525 | default: | 528 | default: |
526 | ipath_dev_err(dd, | 529 | ipath_dev_err(dd, |
527 | "Don't yet know about board with ID %u\n", | 530 | "Don't yet know about board with ID %u\n", |
528 | boardrev); | 531 | boardrev); |
529 | snprintf(name, namelen, "Unknown_InfiniPath_PE-8xx_%u", | 532 | snprintf(name, namelen, "Unknown_InfiniPath_PCIe_%u", |
530 | boardrev); | 533 | boardrev); |
531 | break; | 534 | break; |
532 | } | 535 | } |
@@ -534,7 +537,7 @@ static int ipath_pe_boardname(struct ipath_devdata *dd, char *name, | |||
534 | snprintf(name, namelen, "%s", n); | 537 | snprintf(name, namelen, "%s", n); |
535 | 538 | ||
536 | if (dd->ipath_majrev != 4 || !dd->ipath_minrev || dd->ipath_minrev>2) { | 539 | if (dd->ipath_majrev != 4 || !dd->ipath_minrev || dd->ipath_minrev>2) { |
537 | ipath_dev_err(dd, "Unsupported PE-800 revision %u.%u!\n", | 540 | ipath_dev_err(dd, "Unsupported InfiniPath hardware revision %u.%u!\n", |
538 | dd->ipath_majrev, dd->ipath_minrev); | 541 | dd->ipath_majrev, dd->ipath_minrev); |
539 | ret = 1; | 542 | ret = 1; |
540 | } else | 543 | } else |
@@ -651,6 +654,15 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd) | |||
651 | val &= ~INFINIPATH_XGXS_RESET; | 654 | val &= ~INFINIPATH_XGXS_RESET; |
652 | change = 1; | 655 | change = 1; |
653 | } | 656 | } |
657 | if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) & | ||
658 | INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv ) { | ||
659 | /* need to compensate for Tx inversion in partner */ | ||
660 | val &= ~(INFINIPATH_XGXS_RX_POL_MASK << | ||
661 | INFINIPATH_XGXS_RX_POL_SHIFT); | ||
662 | val |= dd->ipath_rx_pol_inv << | ||
663 | INFINIPATH_XGXS_RX_POL_SHIFT; | ||
664 | change = 1; | ||
665 | } | ||
654 | if (change) | 666 | if (change) |
655 | ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val); | 667 | ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val); |
656 | 668 | ||
@@ -705,7 +717,7 @@ static void ipath_pe_quiet_serdes(struct ipath_devdata *dd) | |||
705 | ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); | 717 | ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); |
706 | } | 718 | } |
707 | 719 | ||
708 | /* this is not yet needed on the PE800, so just return 0. */ | 720 | /* this is not yet needed on this chip, so just return 0. */ |
709 | static int ipath_pe_intconfig(struct ipath_devdata *dd) | 721 | static int ipath_pe_intconfig(struct ipath_devdata *dd) |
710 | { | 722 | { |
711 | return 0; | 723 | return 0; |
@@ -759,8 +771,8 @@ static void ipath_setup_pe_setextled(struct ipath_devdata *dd, u64 lst, | |||
759 | * | 771 | * |
760 | * This is called during driver unload. | 772 | * This is called during driver unload. |
761 | * We do the pci_disable_msi here, not in generic code, because it | 773 | * We do the pci_disable_msi here, not in generic code, because it |
762 | * isn't used for the HT-400. If we do end up needing pci_enable_msi | 774 | * isn't used for the HT chips. If we do end up needing pci_enable_msi |
763 | * at some point in the future for HT-400, we'll move the call back | 775 | * at some point in the future for HT, we'll move the call back |
764 | * into the main init_one code. | 776 | * into the main init_one code. |
765 | */ | 777 | */ |
766 | static void ipath_setup_pe_cleanup(struct ipath_devdata *dd) | 778 | static void ipath_setup_pe_cleanup(struct ipath_devdata *dd) |
@@ -780,10 +792,10 @@ static void ipath_setup_pe_cleanup(struct ipath_devdata *dd) | |||
780 | * late in 2.6.16). | 792 | * late in 2.6.16). |
781 | * All that can be done is to edit the kernel source to remove the quirk | 793 | * All that can be done is to edit the kernel source to remove the quirk |
782 | * check until that is fixed. | 794 | * check until that is fixed. |
783 | * We do not need to call enable_msi() for our HyperTransport chip (HT-400), | 795 | * We do not need to call enable_msi() for our HyperTransport chip, |
784 | * even those it uses MSI, and we want to avoid the quirk warning, so | 796 | * even though it uses MSI, and we want to avoid the quirk warning, so |
785 | * So we call enable_msi only for the PE-800. If we do end up needing | 797 | * So we call enable_msi only for PCIe. If we do end up needing |
786 | * pci_enable_msi at some point in the future for HT-400, we'll move the | 798 | * pci_enable_msi at some point in the future for HT, we'll move the |
787 | * call back into the main init_one code. | 799 | * call back into the main init_one code. |
788 | * We save the msi lo and hi values, so we can restore them after | 800 | * We save the msi lo and hi values, so we can restore them after |
789 | * chip reset (the kernel PCI infrastructure doesn't yet handle that | 801 | * chip reset (the kernel PCI infrastructure doesn't yet handle that |
@@ -971,8 +983,7 @@ static int ipath_setup_pe_reset(struct ipath_devdata *dd) | |||
971 | int ret; | 983 | int ret; |
972 | 984 | ||
973 | /* Use ERROR so it shows up in logs, etc. */ | 985 | /* Use ERROR so it shows up in logs, etc. */ |
974 | ipath_dev_err(dd, "Resetting PE-800 unit %u\n", | 986 | ipath_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->ipath_unit); |
975 | dd->ipath_unit); | ||
976 | /* keep chip from being accessed in a few places */ | 987 | /* keep chip from being accessed in a few places */ |
977 | dd->ipath_flags &= ~(IPATH_INITTED|IPATH_PRESENT); | 988 | dd->ipath_flags &= ~(IPATH_INITTED|IPATH_PRESENT); |
978 | val = dd->ipath_control | INFINIPATH_C_RESET; | 989 | val = dd->ipath_control | INFINIPATH_C_RESET; |
@@ -1078,7 +1089,7 @@ static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr, | |||
1078 | * @port: the port | 1089 | * @port: the port |
1079 | * | 1090 | * |
1080 | * clear all TID entries for a port, expected and eager. | 1091 | * clear all TID entries for a port, expected and eager. |
1081 | * Used from ipath_close(). On PE800, TIDs are only 32 bits, | 1092 | * Used from ipath_close(). On this chip, TIDs are only 32 bits, |
1082 | * not 64, but they are still on 64 bit boundaries, so tidbase | 1093 | * not 64, but they are still on 64 bit boundaries, so tidbase |
1083 | * is declared as u64 * for the pointer math, even though we write 32 bits | 1094 | * is declared as u64 * for the pointer math, even though we write 32 bits |
1084 | */ | 1095 | */ |
@@ -1148,9 +1159,9 @@ static int ipath_pe_early_init(struct ipath_devdata *dd) | |||
1148 | dd->ipath_flags |= IPATH_4BYTE_TID; | 1159 | dd->ipath_flags |= IPATH_4BYTE_TID; |
1149 | 1160 | ||
1150 | /* | 1161 | /* |
1151 | * For openib, we need to be able to handle an IB header of 96 bytes | 1162 | * For openfabrics, we need to be able to handle an IB header of |
1152 | * or 24 dwords. HT-400 has arbitrary sized receive buffers, so we | 1163 | * 24 dwords. HT chip has arbitrary sized receive buffers, so we |
1153 | * made them the same size as the PIO buffers. The PE-800 does not | 1164 | * made them the same size as the PIO buffers. This chip does not |
1154 | * handle arbitrary size buffers, so we need the header large enough | 1165 | * handle arbitrary size buffers, so we need the header large enough |
1155 | * to handle largest IB header, but still have room for a 2KB MTU | 1166 | * to handle largest IB header, but still have room for a 2KB MTU |
1156 | * standard IB packet. | 1167 | * standard IB packet. |
@@ -1158,11 +1169,10 @@ static int ipath_pe_early_init(struct ipath_devdata *dd) | |||
1158 | dd->ipath_rcvhdrentsize = 24; | 1169 | dd->ipath_rcvhdrentsize = 24; |
1159 | dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE; | 1170 | dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE; |
1160 | 1171 | ||
1161 | /* For HT-400, we allocate a somewhat overly large eager buffer, | 1172 | /* |
1162 | * such that we can guarantee that we can receive the largest packet | 1173 | * To truly support a 4KB MTU (for usermode), we need to |
1163 | * that we can send out. To truly support a 4KB MTU, we need to | 1174 | * bump this to a larger value. For now, we use them for |
1164 | * bump this to a larger value. We'll do this when I get around to | 1175 | * the kernel only. |
1165 | * testing 4KB sends on the PE-800, which I have not yet done. | ||
1166 | */ | 1176 | */ |
1167 | dd->ipath_rcvegrbufsize = 2048; | 1177 | dd->ipath_rcvegrbufsize = 2048; |
1168 | /* | 1178 | /* |
@@ -1175,9 +1185,9 @@ static int ipath_pe_early_init(struct ipath_devdata *dd) | |||
1175 | dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen; | 1185 | dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen; |
1176 | 1186 | ||
1177 | /* | 1187 | /* |
1178 | * For PE-800, we can request a receive interrupt for 1 or | 1188 | * We can request a receive interrupt for 1 or |
1179 | * more packets from current offset. For now, we set this | 1189 | * more packets from current offset. For now, we set this |
1180 | * up for a single packet, to match the HT-400 behavior. | 1190 | * up for a single packet. |
1181 | */ | 1191 | */ |
1182 | dd->ipath_rhdrhead_intr_off = 1ULL<<32; | 1192 | dd->ipath_rhdrhead_intr_off = 1ULL<<32; |
1183 | 1193 | ||
@@ -1216,13 +1226,13 @@ static int ipath_pe_get_base_info(struct ipath_portdata *pd, void *kbase) | |||
1216 | } | 1226 | } |
1217 | 1227 | ||
1218 | /** | 1228 | /** |
1219 | * ipath_init_pe800_funcs - set up the chip-specific function pointers | 1229 | * ipath_init_iba6120_funcs - set up the chip-specific function pointers |
1220 | * @dd: the infinipath device | 1230 | * @dd: the infinipath device |
1221 | * | 1231 | * |
1222 | * This is global, and is called directly at init to set up the | 1232 | * This is global, and is called directly at init to set up the |
1223 | * chip-specific function pointers for later use. | 1233 | * chip-specific function pointers for later use. |
1224 | */ | 1234 | */ |
1225 | void ipath_init_pe800_funcs(struct ipath_devdata *dd) | 1235 | void ipath_init_iba6120_funcs(struct ipath_devdata *dd) |
1226 | { | 1236 | { |
1227 | dd->ipath_f_intrsetup = ipath_pe_intconfig; | 1237 | dd->ipath_f_intrsetup = ipath_pe_intconfig; |
1228 | dd->ipath_f_bus = ipath_setup_pe_config; | 1238 | dd->ipath_f_bus = ipath_setup_pe_config; |
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c index 414cdd1d80a6..44669dc2e22d 100644 --- a/drivers/infiniband/hw/ipath/ipath_init_chip.c +++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c | |||
@@ -53,8 +53,8 @@ module_param_named(cfgports, ipath_cfgports, ushort, S_IRUGO); | |||
53 | MODULE_PARM_DESC(cfgports, "Set max number of ports to use"); | 53 | MODULE_PARM_DESC(cfgports, "Set max number of ports to use"); |
54 | 54 | ||
55 | /* | 55 | /* |
56 | * Number of buffers reserved for driver (layered drivers and SMA | 56 | * Number of buffers reserved for driver (verbs and layered drivers.) |
57 | * send). Reserved at end of buffer list. Initialized based on | 57 | * Reserved at end of buffer list. Initialized based on |
58 | * number of PIO buffers if not set via module interface. | 58 | * number of PIO buffers if not set via module interface. |
59 | * The problem with this is that it's global, but we'll use different | 59 | * The problem with this is that it's global, but we'll use different |
60 | * numbers for different chip types. So the default value is not | 60 | * numbers for different chip types. So the default value is not |
@@ -80,7 +80,7 @@ MODULE_PARM_DESC(kpiobufs, "Set number of PIO buffers for driver"); | |||
80 | * | 80 | * |
81 | * Allocate the eager TID buffers and program them into infinipath. | 81 | * Allocate the eager TID buffers and program them into infinipath. |
82 | * We use the network layer alloc_skb() allocator to allocate the | 82 | * We use the network layer alloc_skb() allocator to allocate the |
83 | * memory, and either use the buffers as is for things like SMA | 83 | * memory, and either use the buffers as is for things like verbs |
84 | * packets, or pass the buffers up to the ipath layered driver and | 84 | * packets, or pass the buffers up to the ipath layered driver and |
85 | * thence the network layer, replacing them as we do so (see | 85 | * thence the network layer, replacing them as we do so (see |
86 | * ipath_rcv_layer()). | 86 | * ipath_rcv_layer()). |
@@ -240,7 +240,11 @@ static int init_chip_first(struct ipath_devdata *dd, | |||
240 | "only supports %u\n", ipath_cfgports, | 240 | "only supports %u\n", ipath_cfgports, |
241 | dd->ipath_portcnt); | 241 | dd->ipath_portcnt); |
242 | } | 242 | } |
243 | dd->ipath_pd = kzalloc(sizeof(*dd->ipath_pd) * dd->ipath_cfgports, | 243 | /* |
244 | * Allocate full portcnt array, rather than just cfgports, because | ||
245 | * cleanup iterates across all possible ports. | ||
246 | */ | ||
247 | dd->ipath_pd = kzalloc(sizeof(*dd->ipath_pd) * dd->ipath_portcnt, | ||
244 | GFP_KERNEL); | 248 | GFP_KERNEL); |
245 | 249 | ||
246 | if (!dd->ipath_pd) { | 250 | if (!dd->ipath_pd) { |
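The allocation above is now sized by dd->ipath_portcnt (every port the chip supports) rather than dd->ipath_cfgports, because the cleanup path iterates over all possible ports; sizing by the smaller configured count could index past the end of the array during teardown. The same allocation could equally be written with kcalloc, which also zeroes the memory; a sketch, with the error handling assumed:

	dd->ipath_pd = kcalloc(dd->ipath_portcnt, sizeof(*dd->ipath_pd),
			       GFP_KERNEL);
	if (!dd->ipath_pd) {
		ipath_dev_err(dd, "Unable to allocate portdata array, failing\n");
		ret = -ENOMEM;		/* assumed error path */
		goto done;		/* hypothetical label in the caller */
	}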
@@ -446,9 +450,9 @@ static void enable_chip(struct ipath_devdata *dd, | |||
446 | u32 val; | 450 | u32 val; |
447 | int i; | 451 | int i; |
448 | 452 | ||
449 | if (!reinit) { | 453 | if (!reinit) |
450 | init_waitqueue_head(&ipath_sma_state_wait); | 454 | init_waitqueue_head(&ipath_state_wait); |
451 | } | 455 | |
452 | ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, | 456 | ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, |
453 | dd->ipath_rcvctrl); | 457 | dd->ipath_rcvctrl); |
454 | 458 | ||
@@ -687,7 +691,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit) | |||
687 | dd->ipath_pioavregs = ALIGN(val, sizeof(u64) * BITS_PER_BYTE / 2) | 691 | dd->ipath_pioavregs = ALIGN(val, sizeof(u64) * BITS_PER_BYTE / 2) |
688 | / (sizeof(u64) * BITS_PER_BYTE / 2); | 692 | / (sizeof(u64) * BITS_PER_BYTE / 2); |
689 | if (ipath_kpiobufs == 0) { | 693 | if (ipath_kpiobufs == 0) { |
690 | /* not set by user, or set explictly to default */ | 694 | /* not set by user (this is default) */ |
691 | if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) > 128) | 695 | if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) > 128) |
692 | kpiobufs = 32; | 696 | kpiobufs = 32; |
693 | else | 697 | else |
@@ -946,6 +950,7 @@ static int ipath_set_kpiobufs(const char *str, struct kernel_param *kp) | |||
946 | dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - val; | 950 | dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - val; |
947 | } | 951 | } |
948 | 952 | ||
953 | ipath_kpiobufs = val; | ||
949 | ret = 0; | 954 | ret = 0; |
950 | bail: | 955 | bail: |
951 | spin_unlock_irqrestore(&ipath_devs_lock, flags); | 956 | spin_unlock_irqrestore(&ipath_devs_lock, flags); |
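The one-line addition above fixes ipath_set_kpiobufs() so the module-parameter setter records the parsed value in ipath_kpiobufs after applying it; previously a later read of the parameter (or the next device init) still saw the stale value. The general shape of such a setter, sketched with a hypothetical apply helper and the era's simple_strtoul parser:

	static int example_set_kpiobufs(const char *str, struct kernel_param *kp)
	{
		unsigned long val;
		char *end;

		val = simple_strtoul(str, &end, 0);
		if (end == str)
			return -EINVAL;		/* nothing parsed */

		example_apply_kpiobufs(val);	/* hypothetical: rebudget PIO buffers */
		ipath_kpiobufs = val;		/* record it so later reads match */
		return 0;
	}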
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c index 280e732660a1..49bf7bb15b04 100644 --- a/drivers/infiniband/hw/ipath/ipath_intr.c +++ b/drivers/infiniband/hw/ipath/ipath_intr.c | |||
@@ -34,7 +34,7 @@ | |||
34 | #include <linux/pci.h> | 34 | #include <linux/pci.h> |
35 | 35 | ||
36 | #include "ipath_kernel.h" | 36 | #include "ipath_kernel.h" |
37 | #include "ipath_layer.h" | 37 | #include "ipath_verbs.h" |
38 | #include "ipath_common.h" | 38 | #include "ipath_common.h" |
39 | 39 | ||
40 | /* These are all rcv-related errors which we want to count for stats */ | 40 | /* These are all rcv-related errors which we want to count for stats */ |
@@ -201,7 +201,7 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd, | |||
201 | ib_linkstate(lstate)); | 201 | ib_linkstate(lstate)); |
202 | } | 202 | } |
203 | else | 203 | else |
204 | ipath_cdbg(SMA, "Unit %u link state %s, last " | 204 | ipath_cdbg(VERBOSE, "Unit %u link state %s, last " |
205 | "was %s\n", dd->ipath_unit, | 205 | "was %s\n", dd->ipath_unit, |
206 | ib_linkstate(lstate), | 206 | ib_linkstate(lstate), |
207 | ib_linkstate((unsigned) | 207 | ib_linkstate((unsigned) |
@@ -213,7 +213,7 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd, | |||
213 | if (lstate == IPATH_IBSTATE_INIT || | 213 | if (lstate == IPATH_IBSTATE_INIT || |
214 | lstate == IPATH_IBSTATE_ARM || | 214 | lstate == IPATH_IBSTATE_ARM || |
215 | lstate == IPATH_IBSTATE_ACTIVE) | 215 | lstate == IPATH_IBSTATE_ACTIVE) |
216 | ipath_cdbg(SMA, "Unit %u link state down" | 216 | ipath_cdbg(VERBOSE, "Unit %u link state down" |
217 | " (state 0x%x), from %s\n", | 217 | " (state 0x%x), from %s\n", |
218 | dd->ipath_unit, | 218 | dd->ipath_unit, |
219 | (u32)val & IPATH_IBSTATE_MASK, | 219 | (u32)val & IPATH_IBSTATE_MASK, |
@@ -269,7 +269,7 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd, | |||
269 | INFINIPATH_IBCS_LINKSTATE_MASK) | 269 | INFINIPATH_IBCS_LINKSTATE_MASK) |
270 | == INFINIPATH_IBCS_L_STATE_ACTIVE) | 270 | == INFINIPATH_IBCS_L_STATE_ACTIVE) |
271 | /* if from up to down be more vocal */ | 271 | /* if from up to down be more vocal */ |
272 | ipath_cdbg(SMA, | 272 | ipath_cdbg(VERBOSE, |
273 | "Unit %u link now down (%s)\n", | 273 | "Unit %u link now down (%s)\n", |
274 | dd->ipath_unit, | 274 | dd->ipath_unit, |
275 | ipath_ibcstatus_str[ltstate]); | 275 | ipath_ibcstatus_str[ltstate]); |
@@ -289,8 +289,6 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd, | |||
289 | *dd->ipath_statusp |= | 289 | *dd->ipath_statusp |= |
290 | IPATH_STATUS_IB_READY | IPATH_STATUS_IB_CONF; | 290 | IPATH_STATUS_IB_READY | IPATH_STATUS_IB_CONF; |
291 | dd->ipath_f_setextled(dd, lstate, ltstate); | 291 | dd->ipath_f_setextled(dd, lstate, ltstate); |
292 | |||
293 | __ipath_layer_intr(dd, IPATH_LAYER_INT_IF_UP); | ||
294 | } else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_INIT) { | 292 | } else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_INIT) { |
295 | /* | 293 | /* |
296 | * set INIT and DOWN. Down is checked by most of the other | 294 | * set INIT and DOWN. Down is checked by most of the other |
@@ -598,11 +596,11 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs) | |||
598 | 596 | ||
599 | if (!noprint && *msg) | 597 | if (!noprint && *msg) |
600 | ipath_dev_err(dd, "%s error\n", msg); | 598 | ipath_dev_err(dd, "%s error\n", msg); |
601 | if (dd->ipath_sma_state_wanted & dd->ipath_flags) { | 599 | if (dd->ipath_state_wanted & dd->ipath_flags) { |
602 | ipath_cdbg(VERBOSE, "sma wanted state %x, iflags now %x, " | 600 | ipath_cdbg(VERBOSE, "driver wanted state %x, iflags now %x, " |
603 | "waking\n", dd->ipath_sma_state_wanted, | 601 | "waking\n", dd->ipath_state_wanted, |
604 | dd->ipath_flags); | 602 | dd->ipath_flags); |
605 | wake_up_interruptible(&ipath_sma_state_wait); | 603 | wake_up_interruptible(&ipath_state_wait); |
606 | } | 604 | } |
607 | 605 | ||
608 | return chkerrpkts; | 606 | return chkerrpkts; |
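With the in-kernel SMA gone, the wait machinery above is renamed from ipath_sma_state_wait/ipath_sma_state_wanted to ipath_state_wait/ipath_state_wanted, but the pattern is unchanged: a caller records the flags it needs and sleeps, and the error/interrupt path wakes it once those flags become true. A hedged sketch of both halves, with the 2-second timeout chosen purely for illustration:

	/* waiting side (e.g. a link-state change request) */
	dd->ipath_state_wanted = IPATH_LINKACTIVE;
	wait_event_interruptible_timeout(ipath_state_wait,
					 (dd->ipath_flags & IPATH_LINKACTIVE),
					 msecs_to_jiffies(2000));
	dd->ipath_state_wanted = 0;

	/* interrupt side, as in handle_errors() above */
	if (dd->ipath_state_wanted & dd->ipath_flags)
		wake_up_interruptible(&ipath_state_wait);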
@@ -708,11 +706,7 @@ static void handle_layer_pioavail(struct ipath_devdata *dd) | |||
708 | { | 706 | { |
709 | int ret; | 707 | int ret; |
710 | 708 | ||
711 | ret = __ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE); | 709 | ret = ipath_ib_piobufavail(dd->verbs_dev); |
712 | if (ret > 0) | ||
713 | goto set; | ||
714 | |||
715 | ret = __ipath_verbs_piobufavail(dd); | ||
716 | if (ret > 0) | 710 | if (ret > 0) |
717 | goto set; | 711 | goto set; |
718 | 712 | ||
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h index e9f374fb641e..a8a56276ff1d 100644 --- a/drivers/infiniband/hw/ipath/ipath_kernel.h +++ b/drivers/infiniband/hw/ipath/ipath_kernel.h | |||
@@ -132,12 +132,6 @@ struct _ipath_layer { | |||
132 | void *l_arg; | 132 | void *l_arg; |
133 | }; | 133 | }; |
134 | 134 | ||
135 | /* Verbs layer interface */ | ||
136 | struct _verbs_layer { | ||
137 | void *l_arg; | ||
138 | struct timer_list l_timer; | ||
139 | }; | ||
140 | |||
141 | struct ipath_devdata { | 135 | struct ipath_devdata { |
142 | struct list_head ipath_list; | 136 | struct list_head ipath_list; |
143 | 137 | ||
@@ -198,7 +192,8 @@ struct ipath_devdata { | |||
198 | void (*ipath_f_setextled)(struct ipath_devdata *, u64, u64); | 192 | void (*ipath_f_setextled)(struct ipath_devdata *, u64, u64); |
199 | /* fill out chip-specific fields */ | 193 | /* fill out chip-specific fields */ |
200 | int (*ipath_f_get_base_info)(struct ipath_portdata *, void *); | 194 | int (*ipath_f_get_base_info)(struct ipath_portdata *, void *); |
201 | struct _verbs_layer verbs_layer; | 195 | struct ipath_ibdev *verbs_dev; |
196 | struct timer_list verbs_timer; | ||
202 | /* total dwords sent (summed from counter) */ | 197 | /* total dwords sent (summed from counter) */ |
203 | u64 ipath_sword; | 198 | u64 ipath_sword; |
204 | /* total dwords rcvd (summed from counter) */ | 199 | /* total dwords rcvd (summed from counter) */ |
@@ -241,7 +236,7 @@ struct ipath_devdata { | |||
241 | u64 ipath_tidtemplate; | 236 | u64 ipath_tidtemplate; |
242 | /* value to write to free TIDs */ | 237 | /* value to write to free TIDs */ |
243 | u64 ipath_tidinvalid; | 238 | u64 ipath_tidinvalid; |
244 | /* PE-800 rcv interrupt setup */ | 239 | /* IBA6120 rcv interrupt setup */ |
245 | u64 ipath_rhdrhead_intr_off; | 240 | u64 ipath_rhdrhead_intr_off; |
246 | 241 | ||
247 | /* size of memory at ipath_kregbase */ | 242 | /* size of memory at ipath_kregbase */ |
@@ -250,8 +245,8 @@ struct ipath_devdata { | |||
250 | u32 ipath_pioavregs; | 245 | u32 ipath_pioavregs; |
251 | /* IPATH_POLL, etc. */ | 246 | /* IPATH_POLL, etc. */ |
252 | u32 ipath_flags; | 247 | u32 ipath_flags; |
253 | /* ipath_flags sma is waiting for */ | 248 | /* ipath_flags driver is waiting for */ |
254 | u32 ipath_sma_state_wanted; | 249 | u32 ipath_state_wanted; |
255 | /* last buffer for user use, first buf for kernel use is this | 250 | /* last buffer for user use, first buf for kernel use is this |
256 | * index. */ | 251 | * index. */ |
257 | u32 ipath_lastport_piobuf; | 252 | u32 ipath_lastport_piobuf; |
@@ -311,10 +306,6 @@ struct ipath_devdata { | |||
311 | u32 ipath_pcibar0; | 306 | u32 ipath_pcibar0; |
312 | /* so we can rewrite it after a chip reset */ | 307 | /* so we can rewrite it after a chip reset */ |
313 | u32 ipath_pcibar1; | 308 | u32 ipath_pcibar1; |
314 | /* sequential tries for SMA send and no bufs */ | ||
315 | u32 ipath_nosma_bufs; | ||
316 | /* duration (seconds) ipath_nosma_bufs set */ | ||
317 | u32 ipath_nosma_secs; | ||
318 | 309 | ||
319 | /* HT/PCI Vendor ID (here for NodeInfo) */ | 310 | /* HT/PCI Vendor ID (here for NodeInfo) */ |
320 | u16 ipath_vendorid; | 311 | u16 ipath_vendorid; |
@@ -512,6 +503,8 @@ struct ipath_devdata { | |||
512 | u8 ipath_pci_cacheline; | 503 | u8 ipath_pci_cacheline; |
513 | /* LID mask control */ | 504 | /* LID mask control */ |
514 | u8 ipath_lmc; | 505 | u8 ipath_lmc; |
506 | /* Rx Polarity inversion (compensate for ~tx on partner) */ | ||
507 | u8 ipath_rx_pol_inv; | ||
515 | 508 | ||
516 | /* local link integrity counter */ | 509 | /* local link integrity counter */ |
517 | u32 ipath_lli_counter; | 510 | u32 ipath_lli_counter; |
@@ -523,18 +516,6 @@ extern struct list_head ipath_dev_list; | |||
523 | extern spinlock_t ipath_devs_lock; | 516 | extern spinlock_t ipath_devs_lock; |
524 | extern struct ipath_devdata *ipath_lookup(int unit); | 517 | extern struct ipath_devdata *ipath_lookup(int unit); |
525 | 518 | ||
526 | extern u16 ipath_layer_rcv_opcode; | ||
527 | extern int __ipath_layer_intr(struct ipath_devdata *, u32); | ||
528 | extern int ipath_layer_intr(struct ipath_devdata *, u32); | ||
529 | extern int __ipath_layer_rcv(struct ipath_devdata *, void *, | ||
530 | struct sk_buff *); | ||
531 | extern int __ipath_layer_rcv_lid(struct ipath_devdata *, void *); | ||
532 | extern int __ipath_verbs_piobufavail(struct ipath_devdata *); | ||
533 | extern int __ipath_verbs_rcv(struct ipath_devdata *, void *, void *, u32); | ||
534 | |||
535 | void ipath_layer_add(struct ipath_devdata *); | ||
536 | void ipath_layer_remove(struct ipath_devdata *); | ||
537 | |||
538 | int ipath_init_chip(struct ipath_devdata *, int); | 519 | int ipath_init_chip(struct ipath_devdata *, int); |
539 | int ipath_enable_wc(struct ipath_devdata *dd); | 520 | int ipath_enable_wc(struct ipath_devdata *dd); |
540 | void ipath_disable_wc(struct ipath_devdata *dd); | 521 | void ipath_disable_wc(struct ipath_devdata *dd); |
@@ -549,9 +530,8 @@ void ipath_cdev_cleanup(struct cdev **cdevp, | |||
549 | 530 | ||
550 | int ipath_diag_add(struct ipath_devdata *); | 531 | int ipath_diag_add(struct ipath_devdata *); |
551 | void ipath_diag_remove(struct ipath_devdata *); | 532 | void ipath_diag_remove(struct ipath_devdata *); |
552 | void ipath_diag_bringup_link(struct ipath_devdata *); | ||
553 | 533 | ||
554 | extern wait_queue_head_t ipath_sma_state_wait; | 534 | extern wait_queue_head_t ipath_state_wait; |
555 | 535 | ||
556 | int ipath_user_add(struct ipath_devdata *dd); | 536 | int ipath_user_add(struct ipath_devdata *dd); |
557 | void ipath_user_remove(struct ipath_devdata *dd); | 537 | void ipath_user_remove(struct ipath_devdata *dd); |
@@ -582,12 +562,14 @@ void ipath_free_pddata(struct ipath_devdata *, struct ipath_portdata *); | |||
582 | 562 | ||
583 | int ipath_parse_ushort(const char *str, unsigned short *valp); | 563 | int ipath_parse_ushort(const char *str, unsigned short *valp); |
584 | 564 | ||
585 | int ipath_wait_linkstate(struct ipath_devdata *, u32, int); | ||
586 | void ipath_set_ib_lstate(struct ipath_devdata *, int); | ||
587 | void ipath_kreceive(struct ipath_devdata *); | 565 | void ipath_kreceive(struct ipath_devdata *); |
588 | int ipath_setrcvhdrsize(struct ipath_devdata *, unsigned); | 566 | int ipath_setrcvhdrsize(struct ipath_devdata *, unsigned); |
589 | int ipath_reset_device(int); | 567 | int ipath_reset_device(int); |
590 | void ipath_get_faststats(unsigned long); | 568 | void ipath_get_faststats(unsigned long); |
569 | int ipath_set_linkstate(struct ipath_devdata *, u8); | ||
570 | int ipath_set_mtu(struct ipath_devdata *, u16); | ||
571 | int ipath_set_lid(struct ipath_devdata *, u32, u8); | ||
572 | int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv); | ||
591 | 573 | ||
592 | /* for use in system calls, where we want to know device type, etc. */ | 574 | /* for use in system calls, where we want to know device type, etc. */ |
593 | #define port_fp(fp) ((struct ipath_portdata *) (fp)->private_data) | 575 | #define port_fp(fp) ((struct ipath_portdata *) (fp)->private_data) |
@@ -642,10 +624,8 @@ void ipath_free_data(struct ipath_portdata *dd); | |||
642 | int ipath_waitfor_mdio_cmdready(struct ipath_devdata *); | 624 | int ipath_waitfor_mdio_cmdready(struct ipath_devdata *); |
643 | int ipath_waitfor_complete(struct ipath_devdata *, ipath_kreg, u64, u64 *); | 625 | int ipath_waitfor_complete(struct ipath_devdata *, ipath_kreg, u64, u64 *); |
644 | u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32 *); | 626 | u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32 *); |
645 | /* init PE-800-specific func */ | 627 | void ipath_init_iba6120_funcs(struct ipath_devdata *); |
646 | void ipath_init_pe800_funcs(struct ipath_devdata *); | 628 | void ipath_init_iba6110_funcs(struct ipath_devdata *); |
647 | /* init HT-400-specific func */ | ||
648 | void ipath_init_ht400_funcs(struct ipath_devdata *); | ||
649 | void ipath_get_eeprom_info(struct ipath_devdata *); | 629 | void ipath_get_eeprom_info(struct ipath_devdata *); |
650 | u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg); | 630 | u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg); |
651 | 631 | ||
@@ -801,7 +781,7 @@ static inline u32 ipath_read_creg32(const struct ipath_devdata *dd, | |||
801 | 781 | ||
802 | struct device_driver; | 782 | struct device_driver; |
803 | 783 | ||
804 | extern const char ipath_core_version[]; | 784 | extern const char ib_ipath_version[]; |
805 | 785 | ||
806 | int ipath_driver_create_group(struct device_driver *); | 786 | int ipath_driver_create_group(struct device_driver *); |
807 | void ipath_driver_remove_group(struct device_driver *); | 787 | void ipath_driver_remove_group(struct device_driver *); |
@@ -810,6 +790,9 @@ int ipath_device_create_group(struct device *, struct ipath_devdata *); | |||
810 | void ipath_device_remove_group(struct device *, struct ipath_devdata *); | 790 | void ipath_device_remove_group(struct device *, struct ipath_devdata *); |
811 | int ipath_expose_reset(struct device *); | 791 | int ipath_expose_reset(struct device *); |
812 | 792 | ||
793 | int ipath_diagpkt_add(void); | ||
794 | void ipath_diagpkt_remove(void); | ||
795 | |||
813 | int ipath_init_ipathfs(void); | 796 | int ipath_init_ipathfs(void); |
814 | void ipath_exit_ipathfs(void); | 797 | void ipath_exit_ipathfs(void); |
815 | int ipathfs_add_device(struct ipath_devdata *); | 798 | int ipathfs_add_device(struct ipath_devdata *); |
@@ -831,10 +814,10 @@ const char *ipath_get_unit_name(int unit); | |||
831 | 814 | ||
832 | extern struct mutex ipath_mutex; | 815 | extern struct mutex ipath_mutex; |
833 | 816 | ||
834 | #define IPATH_DRV_NAME "ipath_core" | 817 | #define IPATH_DRV_NAME "ib_ipath" |
835 | #define IPATH_MAJOR 233 | 818 | #define IPATH_MAJOR 233 |
836 | #define IPATH_USER_MINOR_BASE 0 | 819 | #define IPATH_USER_MINOR_BASE 0 |
837 | #define IPATH_SMA_MINOR 128 | 820 | #define IPATH_DIAGPKT_MINOR 127 |
838 | #define IPATH_DIAG_MINOR_BASE 129 | 821 | #define IPATH_DIAG_MINOR_BASE 129 |
839 | #define IPATH_NMINORS 255 | 822 | #define IPATH_NMINORS 255 |
840 | 823 | ||
diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c index a5ca279370aa..ba1b93226caa 100644 --- a/drivers/infiniband/hw/ipath/ipath_keys.c +++ b/drivers/infiniband/hw/ipath/ipath_keys.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <asm/io.h> | 34 | #include <asm/io.h> |
35 | 35 | ||
36 | #include "ipath_verbs.h" | 36 | #include "ipath_verbs.h" |
37 | #include "ipath_kernel.h" | ||
37 | 38 | ||
38 | /** | 39 | /** |
39 | * ipath_alloc_lkey - allocate an lkey | 40 | * ipath_alloc_lkey - allocate an lkey |
@@ -60,7 +61,7 @@ int ipath_alloc_lkey(struct ipath_lkey_table *rkt, struct ipath_mregion *mr) | |||
60 | r = (r + 1) & (rkt->max - 1); | 61 | r = (r + 1) & (rkt->max - 1); |
61 | if (r == n) { | 62 | if (r == n) { |
62 | spin_unlock_irqrestore(&rkt->lock, flags); | 63 | spin_unlock_irqrestore(&rkt->lock, flags); |
63 | _VERBS_INFO("LKEY table full\n"); | 64 | ipath_dbg(KERN_INFO "LKEY table full\n"); |
64 | ret = 0; | 65 | ret = 0; |
65 | goto bail; | 66 | goto bail; |
66 | } | 67 | } |
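For context on the hunk above: ipath_alloc_lkey() scans the LKEY table for a free slot with a wrap-around search, advancing the index modulo the power-of-two table size via the (r + 1) & (rkt->max - 1) mask; if the index comes back to the starting point the table is full, the lock is dropped, and the (now ipath_dbg) message is printed. A condensed sketch of that search, assuming the table/next fields used by the surrounding code:

	r = n = rkt->next;			/* resume from the last allocation point */
	for (;;) {
		if (!rkt->table[r])
			break;			/* free slot found at index r */
		r = (r + 1) & (rkt->max - 1);	/* power-of-two wrap-around */
		if (r == n) {			/* wrapped all the way: table is full */
			spin_unlock_irqrestore(&rkt->lock, flags);
			ipath_dbg(KERN_INFO "LKEY table full\n");
			return 0;
		}
	}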
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.c b/drivers/infiniband/hw/ipath/ipath_layer.c index b28c6f81c731..e46aa4ed2a7e 100644 --- a/drivers/infiniband/hw/ipath/ipath_layer.c +++ b/drivers/infiniband/hw/ipath/ipath_layer.c | |||
@@ -42,26 +42,20 @@ | |||
42 | 42 | ||
43 | #include "ipath_kernel.h" | 43 | #include "ipath_kernel.h" |
44 | #include "ipath_layer.h" | 44 | #include "ipath_layer.h" |
45 | #include "ipath_verbs.h" | ||
45 | #include "ipath_common.h" | 46 | #include "ipath_common.h" |
46 | 47 | ||
47 | /* Acquire before ipath_devs_lock. */ | 48 | /* Acquire before ipath_devs_lock. */ |
48 | static DEFINE_MUTEX(ipath_layer_mutex); | 49 | static DEFINE_MUTEX(ipath_layer_mutex); |
49 | 50 | ||
50 | static int ipath_verbs_registered; | ||
51 | |||
52 | u16 ipath_layer_rcv_opcode; | 51 | u16 ipath_layer_rcv_opcode; |
53 | 52 | ||
54 | static int (*layer_intr)(void *, u32); | 53 | static int (*layer_intr)(void *, u32); |
55 | static int (*layer_rcv)(void *, void *, struct sk_buff *); | 54 | static int (*layer_rcv)(void *, void *, struct sk_buff *); |
56 | static int (*layer_rcv_lid)(void *, void *); | 55 | static int (*layer_rcv_lid)(void *, void *); |
57 | static int (*verbs_piobufavail)(void *); | ||
58 | static void (*verbs_rcv)(void *, void *, void *, u32); | ||
59 | 56 | ||
60 | static void *(*layer_add_one)(int, struct ipath_devdata *); | 57 | static void *(*layer_add_one)(int, struct ipath_devdata *); |
61 | static void (*layer_remove_one)(void *); | 58 | static void (*layer_remove_one)(void *); |
62 | static void *(*verbs_add_one)(int, struct ipath_devdata *); | ||
63 | static void (*verbs_remove_one)(void *); | ||
64 | static void (*verbs_timer_cb)(void *); | ||
65 | 59 | ||
66 | int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg) | 60 | int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg) |
67 | { | 61 | { |
@@ -107,302 +101,16 @@ int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr) | |||
107 | return ret; | 101 | return ret; |
108 | } | 102 | } |
109 | 103 | ||
110 | int __ipath_verbs_piobufavail(struct ipath_devdata *dd) | 104 | void ipath_layer_lid_changed(struct ipath_devdata *dd) |
111 | { | ||
112 | int ret = -ENODEV; | ||
113 | |||
114 | if (dd->verbs_layer.l_arg && verbs_piobufavail) | ||
115 | ret = verbs_piobufavail(dd->verbs_layer.l_arg); | ||
116 | |||
117 | return ret; | ||
118 | } | ||
119 | |||
120 | int __ipath_verbs_rcv(struct ipath_devdata *dd, void *rc, void *ebuf, | ||
121 | u32 tlen) | ||
122 | { | ||
123 | int ret = -ENODEV; | ||
124 | |||
125 | if (dd->verbs_layer.l_arg && verbs_rcv) { | ||
126 | verbs_rcv(dd->verbs_layer.l_arg, rc, ebuf, tlen); | ||
127 | ret = 0; | ||
128 | } | ||
129 | |||
130 | return ret; | ||
131 | } | ||
132 | |||
133 | int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 newstate) | ||
134 | { | 105 | { |
135 | u32 lstate; | ||
136 | int ret; | ||
137 | |||
138 | switch (newstate) { | ||
139 | case IPATH_IB_LINKDOWN: | ||
140 | ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL << | ||
141 | INFINIPATH_IBCC_LINKINITCMD_SHIFT); | ||
142 | /* don't wait */ | ||
143 | ret = 0; | ||
144 | goto bail; | ||
145 | |||
146 | case IPATH_IB_LINKDOWN_SLEEP: | ||
147 | ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP << | ||
148 | INFINIPATH_IBCC_LINKINITCMD_SHIFT); | ||
149 | /* don't wait */ | ||
150 | ret = 0; | ||
151 | goto bail; | ||
152 | |||
153 | case IPATH_IB_LINKDOWN_DISABLE: | ||
154 | ipath_set_ib_lstate(dd, | ||
155 | INFINIPATH_IBCC_LINKINITCMD_DISABLE << | ||
156 | INFINIPATH_IBCC_LINKINITCMD_SHIFT); | ||
157 | /* don't wait */ | ||
158 | ret = 0; | ||
159 | goto bail; | ||
160 | |||
161 | case IPATH_IB_LINKINIT: | ||
162 | if (dd->ipath_flags & IPATH_LINKINIT) { | ||
163 | ret = 0; | ||
164 | goto bail; | ||
165 | } | ||
166 | ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT << | ||
167 | INFINIPATH_IBCC_LINKCMD_SHIFT); | ||
168 | lstate = IPATH_LINKINIT; | ||
169 | break; | ||
170 | |||
171 | case IPATH_IB_LINKARM: | ||
172 | if (dd->ipath_flags & IPATH_LINKARMED) { | ||
173 | ret = 0; | ||
174 | goto bail; | ||
175 | } | ||
176 | if (!(dd->ipath_flags & | ||
177 | (IPATH_LINKINIT | IPATH_LINKACTIVE))) { | ||
178 | ret = -EINVAL; | ||
179 | goto bail; | ||
180 | } | ||
181 | ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED << | ||
182 | INFINIPATH_IBCC_LINKCMD_SHIFT); | ||
183 | /* | ||
184 | * Since the port can transition to ACTIVE by receiving | ||
185 | * a non VL 15 packet, wait for either state. | ||
186 | */ | ||
187 | lstate = IPATH_LINKARMED | IPATH_LINKACTIVE; | ||
188 | break; | ||
189 | |||
190 | case IPATH_IB_LINKACTIVE: | ||
191 | if (dd->ipath_flags & IPATH_LINKACTIVE) { | ||
192 | ret = 0; | ||
193 | goto bail; | ||
194 | } | ||
195 | if (!(dd->ipath_flags & IPATH_LINKARMED)) { | ||
196 | ret = -EINVAL; | ||
197 | goto bail; | ||
198 | } | ||
199 | ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE << | ||
200 | INFINIPATH_IBCC_LINKCMD_SHIFT); | ||
201 | lstate = IPATH_LINKACTIVE; | ||
202 | break; | ||
203 | |||
204 | default: | ||
205 | ipath_dbg("Invalid linkstate 0x%x requested\n", newstate); | ||
206 | ret = -EINVAL; | ||
207 | goto bail; | ||
208 | } | ||
209 | ret = ipath_wait_linkstate(dd, lstate, 2000); | ||
210 | |||
211 | bail: | ||
212 | return ret; | ||
213 | } | ||
214 | |||
215 | EXPORT_SYMBOL_GPL(ipath_layer_set_linkstate); | ||
216 | |||
217 | /** | ||
218 | * ipath_layer_set_mtu - set the MTU | ||
219 | * @dd: the infinipath device | ||
220 | * @arg: the new MTU | ||
221 | * | ||
222 | * we can handle "any" incoming size, the issue here is whether we | ||
223 | * need to restrict our outgoing size. For now, we don't do any | ||
224 | * sanity checking on this, and we don't deal with what happens to | ||
225 | * programs that are already running when the size changes. | ||
226 | * NOTE: changing the MTU will usually cause the IBC to go back to | ||
227 | * link initialize (IPATH_IBSTATE_INIT) state... | ||
228 | */ | ||
229 | int ipath_layer_set_mtu(struct ipath_devdata *dd, u16 arg) | ||
230 | { | ||
231 | u32 piosize; | ||
232 | int changed = 0; | ||
233 | int ret; | ||
234 | |||
235 | /* | ||
236 | * mtu is IB data payload max. It's the largest power of 2 less | ||
237 | * than piosize (or even larger, since it only really controls the | ||
238 | * largest we can receive; we can send the max of the mtu and | ||
239 | * piosize). We check that it's one of the valid IB sizes. | ||
240 | */ | ||
241 | if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 && | ||
242 | arg != 4096) { | ||
243 | ipath_dbg("Trying to set invalid mtu %u, failing\n", arg); | ||
244 | ret = -EINVAL; | ||
245 | goto bail; | ||
246 | } | ||
247 | if (dd->ipath_ibmtu == arg) { | ||
248 | ret = 0; /* same as current */ | ||
249 | goto bail; | ||
250 | } | ||
251 | |||
252 | piosize = dd->ipath_ibmaxlen; | ||
253 | dd->ipath_ibmtu = arg; | ||
254 | |||
255 | if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) { | ||
256 | /* Only if it's not the initial value (or reset to it) */ | ||
257 | if (piosize != dd->ipath_init_ibmaxlen) { | ||
258 | dd->ipath_ibmaxlen = piosize; | ||
259 | changed = 1; | ||
260 | } | ||
261 | } else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) { | ||
262 | piosize = arg + IPATH_PIO_MAXIBHDR; | ||
263 | ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x " | ||
264 | "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize, | ||
265 | arg); | ||
266 | dd->ipath_ibmaxlen = piosize; | ||
267 | changed = 1; | ||
268 | } | ||
269 | |||
270 | if (changed) { | ||
271 | /* | ||
272 | * set the IBC maxpktlength to the size of our pio | ||
273 | * buffers in words | ||
274 | */ | ||
275 | u64 ibc = dd->ipath_ibcctrl; | ||
276 | ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK << | ||
277 | INFINIPATH_IBCC_MAXPKTLEN_SHIFT); | ||
278 | |||
279 | piosize = piosize - 2 * sizeof(u32); /* ignore pbc */ | ||
280 | dd->ipath_ibmaxlen = piosize; | ||
281 | piosize /= sizeof(u32); /* in words */ | ||
282 | /* | ||
283 | * for ICRC, which we only send in diag test pkt mode, and | ||
284 | * we don't need to worry about that for mtu | ||
285 | */ | ||
286 | piosize += 1; | ||
287 | |||
288 | ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT; | ||
289 | dd->ipath_ibcctrl = ibc; | ||
290 | ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, | ||
291 | dd->ipath_ibcctrl); | ||
292 | dd->ipath_f_tidtemplate(dd); | ||
293 | } | ||
294 | |||
295 | ret = 0; | ||
296 | |||
297 | bail: | ||
298 | return ret; | ||
299 | } | ||
300 | |||
301 | EXPORT_SYMBOL_GPL(ipath_layer_set_mtu); | ||
302 | |||
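A minimal standalone sketch of the MTU validation performed above (the helper name is hypothetical; only the five IB data payload sizes checked by ipath_layer_set_mtu() are accepted):

static int ib_payload_mtu_is_valid(unsigned mtu)
{
	/* Mirrors the check in ipath_layer_set_mtu() above. */
	return mtu == 256 || mtu == 512 || mtu == 1024 ||
	       mtu == 2048 || mtu == 4096;
}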
303 | int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc) | ||
304 | { | ||
305 | dd->ipath_lid = arg; | ||
306 | dd->ipath_lmc = lmc; | ||
307 | |||
308 | mutex_lock(&ipath_layer_mutex); | 106 | mutex_lock(&ipath_layer_mutex); |
309 | 107 | ||
310 | if (dd->ipath_layer.l_arg && layer_intr) | 108 | if (dd->ipath_layer.l_arg && layer_intr) |
311 | layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID); | 109 | layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID); |
312 | 110 | ||
313 | mutex_unlock(&ipath_layer_mutex); | 111 | mutex_unlock(&ipath_layer_mutex); |
314 | |||
315 | return 0; | ||
316 | } | ||
317 | |||
318 | EXPORT_SYMBOL_GPL(ipath_set_lid); | ||
319 | |||
320 | int ipath_layer_set_guid(struct ipath_devdata *dd, __be64 guid) | ||
321 | { | ||
322 | /* XXX - need to inform anyone who cares this just happened. */ | ||
323 | dd->ipath_guid = guid; | ||
324 | return 0; | ||
325 | } | ||
326 | |||
327 | EXPORT_SYMBOL_GPL(ipath_layer_set_guid); | ||
328 | |||
329 | __be64 ipath_layer_get_guid(struct ipath_devdata *dd) | ||
330 | { | ||
331 | return dd->ipath_guid; | ||
332 | } | ||
333 | |||
334 | EXPORT_SYMBOL_GPL(ipath_layer_get_guid); | ||
335 | |||
336 | u32 ipath_layer_get_nguid(struct ipath_devdata *dd) | ||
337 | { | ||
338 | return dd->ipath_nguid; | ||
339 | } | ||
340 | |||
341 | EXPORT_SYMBOL_GPL(ipath_layer_get_nguid); | ||
342 | |||
343 | u32 ipath_layer_get_majrev(struct ipath_devdata *dd) | ||
344 | { | ||
345 | return dd->ipath_majrev; | ||
346 | } | 112 | } |
347 | 113 | ||
348 | EXPORT_SYMBOL_GPL(ipath_layer_get_majrev); | ||
349 | |||
350 | u32 ipath_layer_get_minrev(struct ipath_devdata *dd) | ||
351 | { | ||
352 | return dd->ipath_minrev; | ||
353 | } | ||
354 | |||
355 | EXPORT_SYMBOL_GPL(ipath_layer_get_minrev); | ||
356 | |||
357 | u32 ipath_layer_get_pcirev(struct ipath_devdata *dd) | ||
358 | { | ||
359 | return dd->ipath_pcirev; | ||
360 | } | ||
361 | |||
362 | EXPORT_SYMBOL_GPL(ipath_layer_get_pcirev); | ||
363 | |||
364 | u32 ipath_layer_get_flags(struct ipath_devdata *dd) | ||
365 | { | ||
366 | return dd->ipath_flags; | ||
367 | } | ||
368 | |||
369 | EXPORT_SYMBOL_GPL(ipath_layer_get_flags); | ||
370 | |||
371 | struct device *ipath_layer_get_device(struct ipath_devdata *dd) | ||
372 | { | ||
373 | return &dd->pcidev->dev; | ||
374 | } | ||
375 | |||
376 | EXPORT_SYMBOL_GPL(ipath_layer_get_device); | ||
377 | |||
378 | u16 ipath_layer_get_deviceid(struct ipath_devdata *dd) | ||
379 | { | ||
380 | return dd->ipath_deviceid; | ||
381 | } | ||
382 | |||
383 | EXPORT_SYMBOL_GPL(ipath_layer_get_deviceid); | ||
384 | |||
385 | u32 ipath_layer_get_vendorid(struct ipath_devdata *dd) | ||
386 | { | ||
387 | return dd->ipath_vendorid; | ||
388 | } | ||
389 | |||
390 | EXPORT_SYMBOL_GPL(ipath_layer_get_vendorid); | ||
391 | |||
392 | u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd) | ||
393 | { | ||
394 | return dd->ipath_lastibcstat; | ||
395 | } | ||
396 | |||
397 | EXPORT_SYMBOL_GPL(ipath_layer_get_lastibcstat); | ||
398 | |||
399 | u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd) | ||
400 | { | ||
401 | return dd->ipath_ibmtu; | ||
402 | } | ||
403 | |||
404 | EXPORT_SYMBOL_GPL(ipath_layer_get_ibmtu); | ||
405 | |||
406 | void ipath_layer_add(struct ipath_devdata *dd) | 114 | void ipath_layer_add(struct ipath_devdata *dd) |
407 | { | 115 | { |
408 | mutex_lock(&ipath_layer_mutex); | 116 | mutex_lock(&ipath_layer_mutex); |
@@ -411,10 +119,6 @@ void ipath_layer_add(struct ipath_devdata *dd) | |||
411 | dd->ipath_layer.l_arg = | 119 | dd->ipath_layer.l_arg = |
412 | layer_add_one(dd->ipath_unit, dd); | 120 | layer_add_one(dd->ipath_unit, dd); |
413 | 121 | ||
414 | if (verbs_add_one) | ||
415 | dd->verbs_layer.l_arg = | ||
416 | verbs_add_one(dd->ipath_unit, dd); | ||
417 | |||
418 | mutex_unlock(&ipath_layer_mutex); | 122 | mutex_unlock(&ipath_layer_mutex); |
419 | } | 123 | } |
420 | 124 | ||
@@ -427,11 +131,6 @@ void ipath_layer_remove(struct ipath_devdata *dd) | |||
427 | dd->ipath_layer.l_arg = NULL; | 131 | dd->ipath_layer.l_arg = NULL; |
428 | } | 132 | } |
429 | 133 | ||
430 | if (dd->verbs_layer.l_arg && verbs_remove_one) { | ||
431 | verbs_remove_one(dd->verbs_layer.l_arg); | ||
432 | dd->verbs_layer.l_arg = NULL; | ||
433 | } | ||
434 | |||
435 | mutex_unlock(&ipath_layer_mutex); | 134 | mutex_unlock(&ipath_layer_mutex); |
436 | } | 135 | } |
437 | 136 | ||
@@ -463,9 +162,6 @@ int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *), | |||
463 | if (dd->ipath_layer.l_arg) | 162 | if (dd->ipath_layer.l_arg) |
464 | continue; | 163 | continue; |
465 | 164 | ||
466 | if (!(*dd->ipath_statusp & IPATH_STATUS_SMA)) | ||
467 | *dd->ipath_statusp |= IPATH_STATUS_OIB_SMA; | ||
468 | |||
469 | spin_unlock_irqrestore(&ipath_devs_lock, flags); | 165 | spin_unlock_irqrestore(&ipath_devs_lock, flags); |
470 | dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd); | 166 | dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd); |
471 | spin_lock_irqsave(&ipath_devs_lock, flags); | 167 | spin_lock_irqsave(&ipath_devs_lock, flags); |
@@ -509,107 +205,6 @@ void ipath_layer_unregister(void) | |||
509 | 205 | ||
510 | EXPORT_SYMBOL_GPL(ipath_layer_unregister); | 206 | EXPORT_SYMBOL_GPL(ipath_layer_unregister); |
511 | 207 | ||
512 | static void __ipath_verbs_timer(unsigned long arg) | ||
513 | { | ||
514 | struct ipath_devdata *dd = (struct ipath_devdata *) arg; | ||
515 | |||
516 | /* | ||
517 | * If port 0 receive packet interrupts are not available, or | ||
518 | * can be missed, poll the receive queue | ||
519 | */ | ||
520 | if (dd->ipath_flags & IPATH_POLL_RX_INTR) | ||
521 | ipath_kreceive(dd); | ||
522 | |||
523 | /* Handle verbs layer timeouts. */ | ||
524 | if (dd->verbs_layer.l_arg && verbs_timer_cb) | ||
525 | verbs_timer_cb(dd->verbs_layer.l_arg); | ||
526 | |||
527 | mod_timer(&dd->verbs_layer.l_timer, jiffies + 1); | ||
528 | } | ||
529 | |||
530 | /** | ||
531 | * ipath_verbs_register - verbs layer registration | ||
532 | * @l_piobufavail: callback for when PIO buffers become available | ||
533 | * @l_rcv: callback for receiving a packet | ||
534 | * @l_timer_cb: timer callback | ||
535 | * @l_add: device-add callback; returns the per-device verbs data | ||
536 | */ | ||
537 | int ipath_verbs_register(void *(*l_add)(int, struct ipath_devdata *), | ||
538 | void (*l_remove)(void *arg), | ||
539 | int (*l_piobufavail) (void *arg), | ||
540 | void (*l_rcv) (void *arg, void *rhdr, | ||
541 | void *data, u32 tlen), | ||
542 | void (*l_timer_cb) (void *arg)) | ||
543 | { | ||
544 | struct ipath_devdata *dd, *tmp; | ||
545 | unsigned long flags; | ||
546 | |||
547 | mutex_lock(&ipath_layer_mutex); | ||
548 | |||
549 | verbs_add_one = l_add; | ||
550 | verbs_remove_one = l_remove; | ||
551 | verbs_piobufavail = l_piobufavail; | ||
552 | verbs_rcv = l_rcv; | ||
553 | verbs_timer_cb = l_timer_cb; | ||
554 | |||
555 | spin_lock_irqsave(&ipath_devs_lock, flags); | ||
556 | |||
557 | list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) { | ||
558 | if (!(dd->ipath_flags & IPATH_INITTED)) | ||
559 | continue; | ||
560 | |||
561 | if (dd->verbs_layer.l_arg) | ||
562 | continue; | ||
563 | |||
564 | spin_unlock_irqrestore(&ipath_devs_lock, flags); | ||
565 | dd->verbs_layer.l_arg = l_add(dd->ipath_unit, dd); | ||
566 | spin_lock_irqsave(&ipath_devs_lock, flags); | ||
567 | } | ||
568 | |||
569 | spin_unlock_irqrestore(&ipath_devs_lock, flags); | ||
570 | mutex_unlock(&ipath_layer_mutex); | ||
571 | |||
572 | ipath_verbs_registered = 1; | ||
573 | |||
574 | return 0; | ||
575 | } | ||
576 | |||
577 | EXPORT_SYMBOL_GPL(ipath_verbs_register); | ||
578 | |||
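A hypothetical caller of ipath_verbs_register() might look like the sketch below; the my_* callback names and empty bodies are illustrative only, not part of the driver:

static void *my_add_one(int unit, struct ipath_devdata *dd)
{
	return dd;		/* per-device verbs data */
}

static void my_remove_one(void *arg)
{
}

static int my_piobufavail(void *arg)
{
	return 0;
}

static void my_rcv(void *arg, void *rhdr, void *data, u32 tlen)
{
}

static void my_timer_cb(void *arg)
{
}

static int __init my_verbs_init(void)
{
	return ipath_verbs_register(my_add_one, my_remove_one,
				    my_piobufavail, my_rcv, my_timer_cb);
}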
579 | void ipath_verbs_unregister(void) | ||
580 | { | ||
581 | struct ipath_devdata *dd, *tmp; | ||
582 | unsigned long flags; | ||
583 | |||
584 | mutex_lock(&ipath_layer_mutex); | ||
585 | spin_lock_irqsave(&ipath_devs_lock, flags); | ||
586 | |||
587 | list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) { | ||
588 | *dd->ipath_statusp &= ~IPATH_STATUS_OIB_SMA; | ||
589 | |||
590 | if (dd->verbs_layer.l_arg && verbs_remove_one) { | ||
591 | spin_unlock_irqrestore(&ipath_devs_lock, flags); | ||
592 | verbs_remove_one(dd->verbs_layer.l_arg); | ||
593 | spin_lock_irqsave(&ipath_devs_lock, flags); | ||
594 | dd->verbs_layer.l_arg = NULL; | ||
595 | } | ||
596 | } | ||
597 | |||
598 | spin_unlock_irqrestore(&ipath_devs_lock, flags); | ||
599 | |||
600 | verbs_add_one = NULL; | ||
601 | verbs_remove_one = NULL; | ||
602 | verbs_piobufavail = NULL; | ||
603 | verbs_rcv = NULL; | ||
604 | verbs_timer_cb = NULL; | ||
605 | |||
606 | ipath_verbs_registered = 0; | ||
607 | |||
608 | mutex_unlock(&ipath_layer_mutex); | ||
609 | } | ||
610 | |||
611 | EXPORT_SYMBOL_GPL(ipath_verbs_unregister); | ||
612 | |||
613 | int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax) | 208 | int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax) |
614 | { | 209 | { |
615 | int ret; | 210 | int ret; |
@@ -698,390 +293,6 @@ u16 ipath_layer_get_bcast(struct ipath_devdata *dd) | |||
698 | 293 | ||
699 | EXPORT_SYMBOL_GPL(ipath_layer_get_bcast); | 294 | EXPORT_SYMBOL_GPL(ipath_layer_get_bcast); |
700 | 295 | ||
701 | u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd) | ||
702 | { | ||
703 | return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey); | ||
704 | } | ||
705 | |||
706 | EXPORT_SYMBOL_GPL(ipath_layer_get_cr_errpkey); | ||
707 | |||
708 | static void update_sge(struct ipath_sge_state *ss, u32 length) | ||
709 | { | ||
710 | struct ipath_sge *sge = &ss->sge; | ||
711 | |||
712 | sge->vaddr += length; | ||
713 | sge->length -= length; | ||
714 | sge->sge_length -= length; | ||
715 | if (sge->sge_length == 0) { | ||
716 | if (--ss->num_sge) | ||
717 | *sge = *ss->sg_list++; | ||
718 | } else if (sge->length == 0 && sge->mr != NULL) { | ||
719 | if (++sge->n >= IPATH_SEGSZ) { | ||
720 | if (++sge->m >= sge->mr->mapsz) | ||
721 | return; | ||
722 | sge->n = 0; | ||
723 | } | ||
724 | sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; | ||
725 | sge->length = sge->mr->map[sge->m]->segs[sge->n].length; | ||
726 | } | ||
727 | } | ||
728 | |||
729 | #ifdef __LITTLE_ENDIAN | ||
730 | static inline u32 get_upper_bits(u32 data, u32 shift) | ||
731 | { | ||
732 | return data >> shift; | ||
733 | } | ||
734 | |||
735 | static inline u32 set_upper_bits(u32 data, u32 shift) | ||
736 | { | ||
737 | return data << shift; | ||
738 | } | ||
739 | |||
740 | static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off) | ||
741 | { | ||
742 | data <<= ((sizeof(u32) - n) * BITS_PER_BYTE); | ||
743 | data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE); | ||
744 | return data; | ||
745 | } | ||
746 | #else | ||
747 | static inline u32 get_upper_bits(u32 data, u32 shift) | ||
748 | { | ||
749 | return data << shift; | ||
750 | } | ||
751 | |||
752 | static inline u32 set_upper_bits(u32 data, u32 shift) | ||
753 | { | ||
754 | return data >> shift; | ||
755 | } | ||
756 | |||
757 | static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off) | ||
758 | { | ||
759 | data >>= ((sizeof(u32) - n) * BITS_PER_BYTE); | ||
760 | data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE); | ||
761 | return data; | ||
762 | } | ||
763 | #endif | ||
764 | |||
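A worked example of the little-endian clear_upper_bytes() above (assuming BITS_PER_BYTE is 8), packing two source bytes at byte offset 1 of a word:

/*
 * clear_upper_bytes(0x44332211, 2, 1):
 *   data <<= (4 - 2) * 8;      -> 0x22110000
 *   data >>= (4 - 2 - 1) * 8;  -> 0x00221100
 * The two low-order source bytes 0x11 and 0x22 land at byte offsets
 * 1 and 2 of the result; every other byte is zero.
 */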
765 | static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss, | ||
766 | u32 length) | ||
767 | { | ||
768 | u32 extra = 0; | ||
769 | u32 data = 0; | ||
770 | u32 last; | ||
771 | |||
772 | while (1) { | ||
773 | u32 len = ss->sge.length; | ||
774 | u32 off; | ||
775 | |||
776 | BUG_ON(len == 0); | ||
777 | if (len > length) | ||
778 | len = length; | ||
779 | if (len > ss->sge.sge_length) | ||
780 | len = ss->sge.sge_length; | ||
781 | /* If the source address is not aligned, try to align it. */ | ||
782 | off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1); | ||
783 | if (off) { | ||
784 | u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr & | ||
785 | ~(sizeof(u32) - 1)); | ||
786 | u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE); | ||
787 | u32 y; | ||
788 | |||
789 | y = sizeof(u32) - off; | ||
790 | if (len > y) | ||
791 | len = y; | ||
792 | if (len + extra >= sizeof(u32)) { | ||
793 | data |= set_upper_bits(v, extra * | ||
794 | BITS_PER_BYTE); | ||
795 | len = sizeof(u32) - extra; | ||
796 | if (len == length) { | ||
797 | last = data; | ||
798 | break; | ||
799 | } | ||
800 | __raw_writel(data, piobuf); | ||
801 | piobuf++; | ||
802 | extra = 0; | ||
803 | data = 0; | ||
804 | } else { | ||
805 | /* Clear unused upper bytes */ | ||
806 | data |= clear_upper_bytes(v, len, extra); | ||
807 | if (len == length) { | ||
808 | last = data; | ||
809 | break; | ||
810 | } | ||
811 | extra += len; | ||
812 | } | ||
813 | } else if (extra) { | ||
814 | /* Source address is aligned. */ | ||
815 | u32 *addr = (u32 *) ss->sge.vaddr; | ||
816 | int shift = extra * BITS_PER_BYTE; | ||
817 | int ushift = 32 - shift; | ||
818 | u32 l = len; | ||
819 | |||
820 | while (l >= sizeof(u32)) { | ||
821 | u32 v = *addr; | ||
822 | |||
823 | data |= set_upper_bits(v, shift); | ||
824 | __raw_writel(data, piobuf); | ||
825 | data = get_upper_bits(v, ushift); | ||
826 | piobuf++; | ||
827 | addr++; | ||
828 | l -= sizeof(u32); | ||
829 | } | ||
830 | /* | ||
831 | * We still have 'l' bytes left over. | ||
832 | */ | ||
833 | if (l) { | ||
834 | u32 v = *addr; | ||
835 | |||
836 | if (l + extra >= sizeof(u32)) { | ||
837 | data |= set_upper_bits(v, shift); | ||
838 | len -= l + extra - sizeof(u32); | ||
839 | if (len == length) { | ||
840 | last = data; | ||
841 | break; | ||
842 | } | ||
843 | __raw_writel(data, piobuf); | ||
844 | piobuf++; | ||
845 | extra = 0; | ||
846 | data = 0; | ||
847 | } else { | ||
848 | /* Clear unused upper bytes */ | ||
849 | data |= clear_upper_bytes(v, l, | ||
850 | extra); | ||
851 | if (len == length) { | ||
852 | last = data; | ||
853 | break; | ||
854 | } | ||
855 | extra += l; | ||
856 | } | ||
857 | } else if (len == length) { | ||
858 | last = data; | ||
859 | break; | ||
860 | } | ||
861 | } else if (len == length) { | ||
862 | u32 w; | ||
863 | |||
864 | /* | ||
865 | * Need to round up for the last dword in the | ||
866 | * packet. | ||
867 | */ | ||
868 | w = (len + 3) >> 2; | ||
869 | __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1); | ||
870 | piobuf += w - 1; | ||
871 | last = ((u32 *) ss->sge.vaddr)[w - 1]; | ||
872 | break; | ||
873 | } else { | ||
874 | u32 w = len >> 2; | ||
875 | |||
876 | __iowrite32_copy(piobuf, ss->sge.vaddr, w); | ||
877 | piobuf += w; | ||
878 | |||
879 | extra = len & (sizeof(u32) - 1); | ||
880 | if (extra) { | ||
881 | u32 v = ((u32 *) ss->sge.vaddr)[w]; | ||
882 | |||
883 | /* Clear unused upper bytes */ | ||
884 | data = clear_upper_bytes(v, extra, 0); | ||
885 | } | ||
886 | } | ||
887 | update_sge(ss, len); | ||
888 | length -= len; | ||
889 | } | ||
890 | /* Update address before sending packet. */ | ||
891 | update_sge(ss, length); | ||
892 | /* must flush early everything before trigger word */ | ||
893 | ipath_flush_wc(); | ||
894 | __raw_writel(last, piobuf); | ||
895 | /* be sure trigger word is written */ | ||
896 | ipath_flush_wc(); | ||
897 | } | ||
898 | |||
899 | /** | ||
900 | * ipath_verbs_send - send a packet from the verbs layer | ||
901 | * @dd: the infinipath device | ||
902 | * @hdrwords: the number of words in the header | ||
903 | * @hdr: the packet header | ||
904 | * @len: the length of the packet in bytes | ||
905 | * @ss: the SGE to send | ||
906 | * | ||
907 | * This is like ipath_sma_send_pkt() in that we need to be able to send | ||
908 | * packets after the chip is initialized (MADs) but also like | ||
909 | * ipath_layer_send_hdr() since it's used by the verbs layer. | ||
910 | */ | ||
911 | int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords, | ||
912 | u32 *hdr, u32 len, struct ipath_sge_state *ss) | ||
913 | { | ||
914 | u32 __iomem *piobuf; | ||
915 | u32 plen; | ||
916 | int ret; | ||
917 | |||
918 | /* +1 is for the qword padding of pbc */ | ||
919 | plen = hdrwords + ((len + 3) >> 2) + 1; | ||
920 | if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) { | ||
921 | ipath_dbg("packet len 0x%x too long, failing\n", plen); | ||
922 | ret = -EINVAL; | ||
923 | goto bail; | ||
924 | } | ||
925 | |||
926 | /* Get a PIO buffer to use. */ | ||
927 | piobuf = ipath_getpiobuf(dd, NULL); | ||
928 | if (unlikely(piobuf == NULL)) { | ||
929 | ret = -EBUSY; | ||
930 | goto bail; | ||
931 | } | ||
932 | |||
933 | /* | ||
934 | * Write len to control qword, no flags. | ||
935 | * We have to flush after the PBC for correctness on some cpus | ||
936 | * or WC buffer can be written out of order. | ||
937 | */ | ||
938 | writeq(plen, piobuf); | ||
939 | ipath_flush_wc(); | ||
940 | piobuf += 2; | ||
941 | if (len == 0) { | ||
942 | /* | ||
943 | * If there is just the header portion, must flush before | ||
944 | * writing last word of header for correctness, and after | ||
945 | * the last header word (trigger word). | ||
946 | */ | ||
947 | __iowrite32_copy(piobuf, hdr, hdrwords - 1); | ||
948 | ipath_flush_wc(); | ||
949 | __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1); | ||
950 | ipath_flush_wc(); | ||
951 | ret = 0; | ||
952 | goto bail; | ||
953 | } | ||
954 | |||
955 | __iowrite32_copy(piobuf, hdr, hdrwords); | ||
956 | piobuf += hdrwords; | ||
957 | |||
958 | /* The common case is aligned and contained in one segment. */ | ||
959 | if (likely(ss->num_sge == 1 && len <= ss->sge.length && | ||
960 | !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) { | ||
961 | u32 w; | ||
962 | u32 *addr = (u32 *) ss->sge.vaddr; | ||
963 | |||
964 | /* Update address before sending packet. */ | ||
965 | update_sge(ss, len); | ||
966 | /* Need to round up for the last dword in the packet. */ | ||
967 | w = (len + 3) >> 2; | ||
968 | __iowrite32_copy(piobuf, addr, w - 1); | ||
969 | /* must flush early everything before trigger word */ | ||
970 | ipath_flush_wc(); | ||
971 | __raw_writel(addr[w - 1], piobuf + w - 1); | ||
972 | /* be sure trigger word is written */ | ||
973 | ipath_flush_wc(); | ||
974 | ret = 0; | ||
975 | goto bail; | ||
976 | } | ||
977 | copy_io(piobuf, ss, len); | ||
978 | ret = 0; | ||
979 | |||
980 | bail: | ||
981 | return ret; | ||
982 | } | ||
983 | |||
984 | EXPORT_SYMBOL_GPL(ipath_verbs_send); | ||
985 | |||
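A worked example of the length check at the top of ipath_verbs_send() (the header size of 7 words is just an illustrative number):

/*
 * hdrwords = 7, len = 13 bytes of payload:
 *   (len + 3) >> 2 = 4        dwords of rounded-up payload
 *   plen = 7 + 4 + 1 = 12     dwords, the +1 being the PBC qword pad
 * The send is rejected with -EINVAL if plen << 2 (48 bytes here)
 * exceeds dd->ipath_ibmaxlen.
 */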
986 | int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords, | ||
987 | u64 *rwords, u64 *spkts, u64 *rpkts, | ||
988 | u64 *xmit_wait) | ||
989 | { | ||
990 | int ret; | ||
991 | |||
992 | if (!(dd->ipath_flags & IPATH_INITTED)) { | ||
993 | /* no hardware, freeze, etc. */ | ||
994 | ipath_dbg("unit %u not usable\n", dd->ipath_unit); | ||
995 | ret = -EINVAL; | ||
996 | goto bail; | ||
997 | } | ||
998 | *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt); | ||
999 | *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt); | ||
1000 | *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt); | ||
1001 | *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt); | ||
1002 | *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt); | ||
1003 | |||
1004 | ret = 0; | ||
1005 | |||
1006 | bail: | ||
1007 | return ret; | ||
1008 | } | ||
1009 | |||
1010 | EXPORT_SYMBOL_GPL(ipath_layer_snapshot_counters); | ||
1011 | |||
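A hypothetical caller of ipath_layer_snapshot_counters() (sketch only; the wrapper function and log message are illustrative):

static void example_dump_counters(struct ipath_devdata *dd)
{
	u64 swords, rwords, spkts, rpkts, xmit_wait;

	if (ipath_layer_snapshot_counters(dd, &swords, &rwords, &spkts,
					  &rpkts, &xmit_wait))
		return;		/* unit not initialized/usable */

	printk(KERN_INFO "sent %llu words in %llu packets, "
	       "received %llu words in %llu packets\n",
	       (unsigned long long) swords, (unsigned long long) spkts,
	       (unsigned long long) rwords, (unsigned long long) rpkts);
}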
1012 | /** | ||
1013 | * ipath_layer_get_counters - get various chip counters | ||
1014 | * @dd: the infinipath device | ||
1015 | * @cntrs: counters are placed here | ||
1016 | * | ||
1017 | * Return the counters needed by recv_pma_get_portcounters(). | ||
1018 | */ | ||
1019 | int ipath_layer_get_counters(struct ipath_devdata *dd, | ||
1020 | struct ipath_layer_counters *cntrs) | ||
1021 | { | ||
1022 | int ret; | ||
1023 | |||
1024 | if (!(dd->ipath_flags & IPATH_INITTED)) { | ||
1025 | /* no hardware, freeze, etc. */ | ||
1026 | ipath_dbg("unit %u not usable\n", dd->ipath_unit); | ||
1027 | ret = -EINVAL; | ||
1028 | goto bail; | ||
1029 | } | ||
1030 | cntrs->symbol_error_counter = | ||
1031 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt); | ||
1032 | cntrs->link_error_recovery_counter = | ||
1033 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt); | ||
1034 | /* | ||
1035 | * The link downed counter counts when the other side downs the | ||
1036 | * connection. We add in the number of times we downed the link | ||
1037 | * due to local link integrity errors to compensate. | ||
1038 | */ | ||
1039 | cntrs->link_downed_counter = | ||
1040 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt); | ||
1041 | cntrs->port_rcv_errors = | ||
1042 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) + | ||
1043 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) + | ||
1044 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) + | ||
1045 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) + | ||
1046 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) + | ||
1047 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) + | ||
1048 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) + | ||
1049 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) + | ||
1050 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt); | ||
1051 | cntrs->port_rcv_remphys_errors = | ||
1052 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt); | ||
1053 | cntrs->port_xmit_discards = | ||
1054 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt); | ||
1055 | cntrs->port_xmit_data = | ||
1056 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt); | ||
1057 | cntrs->port_rcv_data = | ||
1058 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt); | ||
1059 | cntrs->port_xmit_packets = | ||
1060 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt); | ||
1061 | cntrs->port_rcv_packets = | ||
1062 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt); | ||
1063 | cntrs->local_link_integrity_errors = dd->ipath_lli_errors; | ||
1064 | cntrs->excessive_buffer_overrun_errors = 0; /* XXX */ | ||
1065 | |||
1066 | ret = 0; | ||
1067 | |||
1068 | bail: | ||
1069 | return ret; | ||
1070 | } | ||
1071 | |||
1072 | EXPORT_SYMBOL_GPL(ipath_layer_get_counters); | ||
1073 | |||
1074 | int ipath_layer_want_buffer(struct ipath_devdata *dd) | ||
1075 | { | ||
1076 | set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl); | ||
1077 | ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, | ||
1078 | dd->ipath_sendctrl); | ||
1079 | |||
1080 | return 0; | ||
1081 | } | ||
1082 | |||
1083 | EXPORT_SYMBOL_GPL(ipath_layer_want_buffer); | ||
1084 | |||
1085 | int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr) | 296 | int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr) |
1086 | { | 297 | { |
1087 | int ret = 0; | 298 | int ret = 0; |
@@ -1153,389 +364,3 @@ int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd) | |||
1153 | } | 364 | } |
1154 | 365 | ||
1155 | EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int); | 366 | EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int); |
1156 | |||
1157 | int ipath_layer_enable_timer(struct ipath_devdata *dd) | ||
1158 | { | ||
1159 | /* | ||
1160 | * The HT-400 has a design flaw where the chip's and the kernel's ideas | ||
1161 | * of the tail register don't always agree, and therefore we won't | ||
1162 | * get an interrupt on the next packet received. | ||
1163 | * If the board supports per packet receive interrupts, use it. | ||
1164 | * Otherwise, the timer function periodically checks for packets | ||
1165 | * to cover this case. | ||
1166 | * Either way, the timer is needed for verbs layer related | ||
1167 | * processing. | ||
1168 | */ | ||
1169 | if (dd->ipath_flags & IPATH_GPIO_INTR) { | ||
1170 | ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect, | ||
1171 | 0x2074076542310ULL); | ||
1172 | /* Enable GPIO bit 2 interrupt */ | ||
1173 | ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, | ||
1174 | (u64) (1 << 2)); | ||
1175 | } | ||
1176 | |||
1177 | init_timer(&dd->verbs_layer.l_timer); | ||
1178 | dd->verbs_layer.l_timer.function = __ipath_verbs_timer; | ||
1179 | dd->verbs_layer.l_timer.data = (unsigned long)dd; | ||
1180 | dd->verbs_layer.l_timer.expires = jiffies + 1; | ||
1181 | add_timer(&dd->verbs_layer.l_timer); | ||
1182 | |||
1183 | return 0; | ||
1184 | } | ||
1185 | |||
1186 | EXPORT_SYMBOL_GPL(ipath_layer_enable_timer); | ||
1187 | |||
1188 | int ipath_layer_disable_timer(struct ipath_devdata *dd) | ||
1189 | { | ||
1190 | /* Disable GPIO bit 2 interrupt */ | ||
1191 | if (dd->ipath_flags & IPATH_GPIO_INTR) | ||
1192 | ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0); | ||
1193 | |||
1194 | del_timer_sync(&dd->verbs_layer.l_timer); | ||
1195 | |||
1196 | return 0; | ||
1197 | } | ||
1198 | |||
1199 | EXPORT_SYMBOL_GPL(ipath_layer_disable_timer); | ||
1200 | |||
1201 | /** | ||
1202 | * ipath_layer_set_verbs_flags - set the verbs layer flags | ||
1203 | * @dd: the infinipath device | ||
1204 | * @flags: the flags to set | ||
1205 | */ | ||
1206 | int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags) | ||
1207 | { | ||
1208 | struct ipath_devdata *ss; | ||
1209 | unsigned long lflags; | ||
1210 | |||
1211 | spin_lock_irqsave(&ipath_devs_lock, lflags); | ||
1212 | |||
1213 | list_for_each_entry(ss, &ipath_dev_list, ipath_list) { | ||
1214 | if (!(ss->ipath_flags & IPATH_INITTED)) | ||
1215 | continue; | ||
1216 | if ((flags & IPATH_VERBS_KERNEL_SMA) && | ||
1217 | !(*ss->ipath_statusp & IPATH_STATUS_SMA)) | ||
1218 | *ss->ipath_statusp |= IPATH_STATUS_OIB_SMA; | ||
1219 | else | ||
1220 | *ss->ipath_statusp &= ~IPATH_STATUS_OIB_SMA; | ||
1221 | } | ||
1222 | |||
1223 | spin_unlock_irqrestore(&ipath_devs_lock, lflags); | ||
1224 | |||
1225 | return 0; | ||
1226 | } | ||
1227 | |||
1228 | EXPORT_SYMBOL_GPL(ipath_layer_set_verbs_flags); | ||
1229 | |||
1230 | /** | ||
1231 | * ipath_layer_get_npkeys - return the size of the PKEY table for port 0 | ||
1232 | * @dd: the infinipath device | ||
1233 | */ | ||
1234 | unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd) | ||
1235 | { | ||
1236 | return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys); | ||
1237 | } | ||
1238 | |||
1239 | EXPORT_SYMBOL_GPL(ipath_layer_get_npkeys); | ||
1240 | |||
1241 | /** | ||
1242 | * ipath_layer_get_pkey - return the indexed PKEY from the port 0 PKEY table | ||
1243 | * @dd: the infinipath device | ||
1244 | * @index: the PKEY index | ||
1245 | */ | ||
1246 | unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index) | ||
1247 | { | ||
1248 | unsigned ret; | ||
1249 | |||
1250 | if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys)) | ||
1251 | ret = 0; | ||
1252 | else | ||
1253 | ret = dd->ipath_pd[0]->port_pkeys[index]; | ||
1254 | |||
1255 | return ret; | ||
1256 | } | ||
1257 | |||
1258 | EXPORT_SYMBOL_GPL(ipath_layer_get_pkey); | ||
1259 | |||
1260 | /** | ||
1261 | * ipath_layer_get_pkeys - return the PKEY table for port 0 | ||
1262 | * @dd: the infinipath device | ||
1263 | * @pkeys: the pkey table is placed here | ||
1264 | */ | ||
1265 | int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 * pkeys) | ||
1266 | { | ||
1267 | struct ipath_portdata *pd = dd->ipath_pd[0]; | ||
1268 | |||
1269 | memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys)); | ||
1270 | |||
1271 | return 0; | ||
1272 | } | ||
1273 | |||
1274 | EXPORT_SYMBOL_GPL(ipath_layer_get_pkeys); | ||
1275 | |||
1276 | /** | ||
1277 | * rm_pkey - decrement the reference count for the given PKEY | ||
1278 | * @dd: the infinipath device | ||
1279 | * @key: the PKEY index | ||
1280 | * | ||
1281 | * Return true if this was the last reference and the hardware table entry | ||
1282 | * needs to be changed. | ||
1283 | */ | ||
1284 | static int rm_pkey(struct ipath_devdata *dd, u16 key) | ||
1285 | { | ||
1286 | int i; | ||
1287 | int ret; | ||
1288 | |||
1289 | for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) { | ||
1290 | if (dd->ipath_pkeys[i] != key) | ||
1291 | continue; | ||
1292 | if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) { | ||
1293 | dd->ipath_pkeys[i] = 0; | ||
1294 | ret = 1; | ||
1295 | goto bail; | ||
1296 | } | ||
1297 | break; | ||
1298 | } | ||
1299 | |||
1300 | ret = 0; | ||
1301 | |||
1302 | bail: | ||
1303 | return ret; | ||
1304 | } | ||
1305 | |||
1306 | /** | ||
1307 | * add_pkey - add the given PKEY to the hardware table | ||
1308 | * @dd: the infinipath device | ||
1309 | * @key: the PKEY | ||
1310 | * | ||
1311 | * Return an error code if unable to add the entry, zero if no change, | ||
1312 | * or 1 if the hardware PKEY register needs to be updated. | ||
1313 | */ | ||
1314 | static int add_pkey(struct ipath_devdata *dd, u16 key) | ||
1315 | { | ||
1316 | int i; | ||
1317 | u16 lkey = key & 0x7FFF; | ||
1318 | int any = 0; | ||
1319 | int ret; | ||
1320 | |||
1321 | if (lkey == 0x7FFF) { | ||
1322 | ret = 0; | ||
1323 | goto bail; | ||
1324 | } | ||
1325 | |||
1326 | /* Look for an empty slot or a matching PKEY. */ | ||
1327 | for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) { | ||
1328 | if (!dd->ipath_pkeys[i]) { | ||
1329 | any++; | ||
1330 | continue; | ||
1331 | } | ||
1332 | /* If it matches exactly, try to increment the ref count */ | ||
1333 | if (dd->ipath_pkeys[i] == key) { | ||
1334 | if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) { | ||
1335 | ret = 0; | ||
1336 | goto bail; | ||
1337 | } | ||
1338 | /* Lost the race. Look for an empty slot below. */ | ||
1339 | atomic_dec(&dd->ipath_pkeyrefs[i]); | ||
1340 | any++; | ||
1341 | } | ||
1342 | /* | ||
1343 | * It makes no sense to have both the limited and unlimited | ||
1344 | * PKEY set at the same time since the unlimited one will | ||
1345 | * disable the limited one. | ||
1346 | */ | ||
1347 | if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) { | ||
1348 | ret = -EEXIST; | ||
1349 | goto bail; | ||
1350 | } | ||
1351 | } | ||
1352 | if (!any) { | ||
1353 | ret = -EBUSY; | ||
1354 | goto bail; | ||
1355 | } | ||
1356 | for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) { | ||
1357 | if (!dd->ipath_pkeys[i] && | ||
1358 | atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) { | ||
1359 | /* for ipathstats, etc. */ | ||
1360 | ipath_stats.sps_pkeys[i] = lkey; | ||
1361 | dd->ipath_pkeys[i] = key; | ||
1362 | ret = 1; | ||
1363 | goto bail; | ||
1364 | } | ||
1365 | } | ||
1366 | ret = -EBUSY; | ||
1367 | |||
1368 | bail: | ||
1369 | return ret; | ||
1370 | } | ||
1371 | |||
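A worked example of the limited/full membership handling in add_pkey() above (the values are illustrative):

/*
 * key  = 0x8001                full-member PKEY 0x0001
 * lkey = key & 0x7FFF = 0x0001
 * If the table already holds 0x0001 (limited member), adding 0x8001
 * fails with -EEXIST, and vice versa: only one membership form of a
 * given PKEY may be present, since the full-member entry would
 * effectively disable the limited one.
 */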
1372 | /** | ||
1373 | * ipath_layer_set_pkeys - set the PKEY table for port 0 | ||
1374 | * @dd: the infinipath device | ||
1375 | * @pkeys: the PKEY table | ||
1376 | */ | ||
1377 | int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 * pkeys) | ||
1378 | { | ||
1379 | struct ipath_portdata *pd; | ||
1380 | int i; | ||
1381 | int changed = 0; | ||
1382 | |||
1383 | pd = dd->ipath_pd[0]; | ||
1384 | |||
1385 | for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) { | ||
1386 | u16 key = pkeys[i]; | ||
1387 | u16 okey = pd->port_pkeys[i]; | ||
1388 | |||
1389 | if (key == okey) | ||
1390 | continue; | ||
1391 | /* | ||
1392 | * The value of this PKEY table entry is changing. | ||
1393 | * Remove the old entry in the hardware's array of PKEYs. | ||
1394 | */ | ||
1395 | if (okey & 0x7FFF) | ||
1396 | changed |= rm_pkey(dd, okey); | ||
1397 | if (key & 0x7FFF) { | ||
1398 | int ret = add_pkey(dd, key); | ||
1399 | |||
1400 | if (ret < 0) | ||
1401 | key = 0; | ||
1402 | else | ||
1403 | changed |= ret; | ||
1404 | } | ||
1405 | pd->port_pkeys[i] = key; | ||
1406 | } | ||
1407 | if (changed) { | ||
1408 | u64 pkey; | ||
1409 | |||
1410 | pkey = (u64) dd->ipath_pkeys[0] | | ||
1411 | ((u64) dd->ipath_pkeys[1] << 16) | | ||
1412 | ((u64) dd->ipath_pkeys[2] << 32) | | ||
1413 | ((u64) dd->ipath_pkeys[3] << 48); | ||
1414 | ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n", | ||
1415 | (unsigned long long) pkey); | ||
1416 | ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey, | ||
1417 | pkey); | ||
1418 | } | ||
1419 | return 0; | ||
1420 | } | ||
1421 | |||
1422 | EXPORT_SYMBOL_GPL(ipath_layer_set_pkeys); | ||
1423 | |||
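A worked example of the partition key register layout written by ipath_layer_set_pkeys() above (table contents are illustrative):

/*
 * ipath_pkeys[] = { 0xFFFF, 0x8001, 0x0000, 0x0000 }
 * pkey = 0xFFFF
 *      | (u64) 0x8001 << 16
 *      | (u64) 0x0000 << 32
 *      | (u64) 0x0000 << 48
 *      = 0x000000008001FFFF
 * Each 16-bit PKEY occupies one quarter of kr_partitionkey.
 */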
1424 | /** | ||
1425 | * ipath_layer_get_linkdowndefaultstate - get the default linkdown state | ||
1426 | * @dd: the infinipath device | ||
1427 | * | ||
1428 | * Returns zero if the default is POLL, 1 if the default is SLEEP. | ||
1429 | */ | ||
1430 | int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd) | ||
1431 | { | ||
1432 | return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE); | ||
1433 | } | ||
1434 | |||
1435 | EXPORT_SYMBOL_GPL(ipath_layer_get_linkdowndefaultstate); | ||
1436 | |||
1437 | /** | ||
1438 | * ipath_layer_set_linkdowndefaultstate - set the default linkdown state | ||
1439 | * @dd: the infinipath device | ||
1440 | * @sleep: the new state | ||
1441 | * | ||
1442 | * Note that this will only take effect when the link state changes. | ||
1443 | */ | ||
1444 | int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd, | ||
1445 | int sleep) | ||
1446 | { | ||
1447 | if (sleep) | ||
1448 | dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE; | ||
1449 | else | ||
1450 | dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE; | ||
1451 | ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, | ||
1452 | dd->ipath_ibcctrl); | ||
1453 | return 0; | ||
1454 | } | ||
1455 | |||
1456 | EXPORT_SYMBOL_GPL(ipath_layer_set_linkdowndefaultstate); | ||
1457 | |||
1458 | int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd) | ||
1459 | { | ||
1460 | return (dd->ipath_ibcctrl >> | ||
1461 | INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) & | ||
1462 | INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK; | ||
1463 | } | ||
1464 | |||
1465 | EXPORT_SYMBOL_GPL(ipath_layer_get_phyerrthreshold); | ||
1466 | |||
1467 | /** | ||
1468 | * ipath_layer_set_phyerrthreshold - set the physical error threshold | ||
1469 | * @dd: the infinipath device | ||
1470 | * @n: the new threshold | ||
1471 | * | ||
1472 | * Note that this will only take effect when the link state changes. | ||
1473 | */ | ||
1474 | int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n) | ||
1475 | { | ||
1476 | unsigned v; | ||
1477 | |||
1478 | v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) & | ||
1479 | INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK; | ||
1480 | if (v != n) { | ||
1481 | dd->ipath_ibcctrl &= | ||
1482 | ~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK << | ||
1483 | INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT); | ||
1484 | dd->ipath_ibcctrl |= | ||
1485 | (u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT; | ||
1486 | ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, | ||
1487 | dd->ipath_ibcctrl); | ||
1488 | } | ||
1489 | return 0; | ||
1490 | } | ||
1491 | |||
1492 | EXPORT_SYMBOL_GPL(ipath_layer_set_phyerrthreshold); | ||
1493 | |||
1494 | int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd) | ||
1495 | { | ||
1496 | return (dd->ipath_ibcctrl >> | ||
1497 | INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) & | ||
1498 | INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK; | ||
1499 | } | ||
1500 | |||
1501 | EXPORT_SYMBOL_GPL(ipath_layer_get_overrunthreshold); | ||
1502 | |||
1503 | /** | ||
1504 | * ipath_layer_set_overrunthreshold - set the overrun threshold | ||
1505 | * @dd: the infinipath device | ||
1506 | * @n: the new threshold | ||
1507 | * | ||
1508 | * Note that this will only take effect when the link state changes. | ||
1509 | */ | ||
1510 | int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n) | ||
1511 | { | ||
1512 | unsigned v; | ||
1513 | |||
1514 | v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) & | ||
1515 | INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK; | ||
1516 | if (v != n) { | ||
1517 | dd->ipath_ibcctrl &= | ||
1518 | ~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK << | ||
1519 | INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT); | ||
1520 | dd->ipath_ibcctrl |= | ||
1521 | (u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT; | ||
1522 | ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, | ||
1523 | dd->ipath_ibcctrl); | ||
1524 | } | ||
1525 | return 0; | ||
1526 | } | ||
1527 | |||
1528 | EXPORT_SYMBOL_GPL(ipath_layer_set_overrunthreshold); | ||
1529 | |||
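The threshold accessors above all use the same read-modify-write pattern on dd->ipath_ibcctrl; a generic sketch of that pattern (hypothetical helper names, not part of the driver):

static unsigned ibcc_get_field(u64 reg, u64 mask, unsigned shift)
{
	return (reg >> shift) & mask;
}

static u64 ibcc_set_field(u64 reg, u64 mask, unsigned shift, u64 val)
{
	reg &= ~(mask << shift);		/* clear the old field */
	reg |= (val & mask) << shift;		/* insert the new value */
	return reg;				/* caller writes kr_ibcctrl back */
}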
1530 | int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name, | ||
1531 | size_t namelen) | ||
1532 | { | ||
1533 | return dd->ipath_f_get_boardname(dd, name, namelen); | ||
1534 | } | ||
1535 | EXPORT_SYMBOL_GPL(ipath_layer_get_boardname); | ||
1536 | |||
1537 | u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd) | ||
1538 | { | ||
1539 | return dd->ipath_rcvhdrentsize; | ||
1540 | } | ||
1541 | EXPORT_SYMBOL_GPL(ipath_layer_get_rcvhdrentsize); | ||
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.h b/drivers/infiniband/hw/ipath/ipath_layer.h index 71485096fcac..3854a4eae684 100644 --- a/drivers/infiniband/hw/ipath/ipath_layer.h +++ b/drivers/infiniband/hw/ipath/ipath_layer.h | |||
@@ -40,73 +40,9 @@ | |||
40 | */ | 40 | */ |
41 | 41 | ||
42 | struct sk_buff; | 42 | struct sk_buff; |
43 | struct ipath_sge_state; | ||
44 | struct ipath_devdata; | 43 | struct ipath_devdata; |
45 | struct ether_header; | 44 | struct ether_header; |
46 | 45 | ||
47 | struct ipath_layer_counters { | ||
48 | u64 symbol_error_counter; | ||
49 | u64 link_error_recovery_counter; | ||
50 | u64 link_downed_counter; | ||
51 | u64 port_rcv_errors; | ||
52 | u64 port_rcv_remphys_errors; | ||
53 | u64 port_xmit_discards; | ||
54 | u64 port_xmit_data; | ||
55 | u64 port_rcv_data; | ||
56 | u64 port_xmit_packets; | ||
57 | u64 port_rcv_packets; | ||
58 | u32 local_link_integrity_errors; | ||
59 | u32 excessive_buffer_overrun_errors; | ||
60 | }; | ||
61 | |||
62 | /* | ||
63 | * A segment is a linear region of low physical memory. | ||
64 | * XXX Maybe we should use phys addr here and kmap()/kunmap(). | ||
65 | * Used by the verbs layer. | ||
66 | */ | ||
67 | struct ipath_seg { | ||
68 | void *vaddr; | ||
69 | size_t length; | ||
70 | }; | ||
71 | |||
72 | /* The number of ipath_segs that fit in a page. */ | ||
73 | #define IPATH_SEGSZ (PAGE_SIZE / sizeof (struct ipath_seg)) | ||
74 | |||
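A worked example of IPATH_SEGSZ, assuming 4 KiB pages and a 64-bit build (8-byte pointer and size_t):

/*
 * sizeof(struct ipath_seg) = 8 (vaddr) + 8 (length) = 16 bytes
 * IPATH_SEGSZ = PAGE_SIZE / 16 = 4096 / 16 = 256 segments per page
 */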
75 | struct ipath_segarray { | ||
76 | struct ipath_seg segs[IPATH_SEGSZ]; | ||
77 | }; | ||
78 | |||
79 | struct ipath_mregion { | ||
80 | u64 user_base; /* User's address for this region */ | ||
81 | u64 iova; /* IB start address of this region */ | ||
82 | size_t length; | ||
83 | u32 lkey; | ||
84 | u32 offset; /* offset (bytes) to start of region */ | ||
85 | int access_flags; | ||
86 | u32 max_segs; /* number of ipath_segs in all the arrays */ | ||
87 | u32 mapsz; /* size of the map array */ | ||
88 | struct ipath_segarray *map[0]; /* the segments */ | ||
89 | }; | ||
90 | |||
91 | /* | ||
92 | * These keep track of the copy progress within a memory region. | ||
93 | * Used by the verbs layer. | ||
94 | */ | ||
95 | struct ipath_sge { | ||
96 | struct ipath_mregion *mr; | ||
97 | void *vaddr; /* current pointer into the segment */ | ||
98 | u32 sge_length; /* length of the SGE */ | ||
99 | u32 length; /* remaining length of the segment */ | ||
100 | u16 m; /* current index: mr->map[m] */ | ||
101 | u16 n; /* current index: mr->map[m]->segs[n] */ | ||
102 | }; | ||
103 | |||
104 | struct ipath_sge_state { | ||
105 | struct ipath_sge *sg_list; /* next SGE to be used if any */ | ||
106 | struct ipath_sge sge; /* progress state for the current SGE */ | ||
107 | u8 num_sge; | ||
108 | }; | ||
109 | |||
110 | int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *), | 46 | int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *), |
111 | void (*l_remove)(void *), | 47 | void (*l_remove)(void *), |
112 | int (*l_intr)(void *, u32), | 48 | int (*l_intr)(void *, u32), |
@@ -114,62 +50,14 @@ int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *), | |||
114 | struct sk_buff *), | 50 | struct sk_buff *), |
115 | u16 rcv_opcode, | 51 | u16 rcv_opcode, |
116 | int (*l_rcv_lid)(void *, void *)); | 52 | int (*l_rcv_lid)(void *, void *)); |
117 | int ipath_verbs_register(void *(*l_add)(int, struct ipath_devdata *), | ||
118 | void (*l_remove)(void *arg), | ||
119 | int (*l_piobufavail)(void *arg), | ||
120 | void (*l_rcv)(void *arg, void *rhdr, | ||
121 | void *data, u32 tlen), | ||
122 | void (*l_timer_cb)(void *arg)); | ||
123 | void ipath_layer_unregister(void); | 53 | void ipath_layer_unregister(void); |
124 | void ipath_verbs_unregister(void); | ||
125 | int ipath_layer_open(struct ipath_devdata *, u32 * pktmax); | 54 | int ipath_layer_open(struct ipath_devdata *, u32 * pktmax); |
126 | u16 ipath_layer_get_lid(struct ipath_devdata *dd); | 55 | u16 ipath_layer_get_lid(struct ipath_devdata *dd); |
127 | int ipath_layer_get_mac(struct ipath_devdata *dd, u8 *); | 56 | int ipath_layer_get_mac(struct ipath_devdata *dd, u8 *); |
128 | u16 ipath_layer_get_bcast(struct ipath_devdata *dd); | 57 | u16 ipath_layer_get_bcast(struct ipath_devdata *dd); |
129 | u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd); | ||
130 | int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 state); | ||
131 | int ipath_layer_set_mtu(struct ipath_devdata *, u16); | ||
132 | int ipath_set_lid(struct ipath_devdata *, u32, u8); | ||
133 | int ipath_layer_send_hdr(struct ipath_devdata *dd, | 58 | int ipath_layer_send_hdr(struct ipath_devdata *dd, |
134 | struct ether_header *hdr); | 59 | struct ether_header *hdr); |
135 | int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords, | ||
136 | u32 * hdr, u32 len, struct ipath_sge_state *ss); | ||
137 | int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd); | 60 | int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd); |
138 | int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name, | ||
139 | size_t namelen); | ||
140 | int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords, | ||
141 | u64 *rwords, u64 *spkts, u64 *rpkts, | ||
142 | u64 *xmit_wait); | ||
143 | int ipath_layer_get_counters(struct ipath_devdata *dd, | ||
144 | struct ipath_layer_counters *cntrs); | ||
145 | int ipath_layer_want_buffer(struct ipath_devdata *dd); | ||
146 | int ipath_layer_set_guid(struct ipath_devdata *, __be64 guid); | ||
147 | __be64 ipath_layer_get_guid(struct ipath_devdata *); | ||
148 | u32 ipath_layer_get_nguid(struct ipath_devdata *); | ||
149 | u32 ipath_layer_get_majrev(struct ipath_devdata *); | ||
150 | u32 ipath_layer_get_minrev(struct ipath_devdata *); | ||
151 | u32 ipath_layer_get_pcirev(struct ipath_devdata *); | ||
152 | u32 ipath_layer_get_flags(struct ipath_devdata *dd); | ||
153 | struct device *ipath_layer_get_device(struct ipath_devdata *dd); | ||
154 | u16 ipath_layer_get_deviceid(struct ipath_devdata *dd); | ||
155 | u32 ipath_layer_get_vendorid(struct ipath_devdata *); | ||
156 | u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd); | ||
157 | u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd); | ||
158 | int ipath_layer_enable_timer(struct ipath_devdata *dd); | ||
159 | int ipath_layer_disable_timer(struct ipath_devdata *dd); | ||
160 | int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags); | ||
161 | unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd); | ||
162 | unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index); | ||
163 | int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 *pkeys); | ||
164 | int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 *pkeys); | ||
165 | int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd); | ||
166 | int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd, | ||
167 | int sleep); | ||
168 | int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd); | ||
169 | int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n); | ||
170 | int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd); | ||
171 | int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n); | ||
172 | u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd); | ||
173 | 61 | ||
174 | /* ipath_ether interrupt values */ | 62 | /* ipath_ether interrupt values */ |
175 | #define IPATH_LAYER_INT_IF_UP 0x2 | 63 | #define IPATH_LAYER_INT_IF_UP 0x2 |
@@ -178,9 +66,6 @@ u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd); | |||
178 | #define IPATH_LAYER_INT_SEND_CONTINUE 0x10 | 66 | #define IPATH_LAYER_INT_SEND_CONTINUE 0x10 |
179 | #define IPATH_LAYER_INT_BCAST 0x40 | 67 | #define IPATH_LAYER_INT_BCAST 0x40 |
180 | 68 | ||
181 | /* _verbs_layer.l_flags */ | ||
182 | #define IPATH_VERBS_KERNEL_SMA 0x1 | ||
183 | |||
184 | extern unsigned ipath_debug; /* debugging bit mask */ | 69 | extern unsigned ipath_debug; /* debugging bit mask */ |
185 | 70 | ||
186 | #endif /* _IPATH_LAYER_H */ | 71 | #endif /* _IPATH_LAYER_H */ |
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c index d3402341b7d0..72d1db89db8f 100644 --- a/drivers/infiniband/hw/ipath/ipath_mad.c +++ b/drivers/infiniband/hw/ipath/ipath_mad.c | |||
@@ -101,15 +101,15 @@ static int recv_subn_get_nodeinfo(struct ib_smp *smp, | |||
101 | nip->num_ports = ibdev->phys_port_cnt; | 101 | nip->num_ports = ibdev->phys_port_cnt; |
102 | /* This is already in network order */ | 102 | /* This is already in network order */ |
103 | nip->sys_guid = to_idev(ibdev)->sys_image_guid; | 103 | nip->sys_guid = to_idev(ibdev)->sys_image_guid; |
104 | nip->node_guid = ipath_layer_get_guid(dd); | 104 | nip->node_guid = dd->ipath_guid; |
105 | nip->port_guid = nip->sys_guid; | 105 | nip->port_guid = nip->sys_guid; |
106 | nip->partition_cap = cpu_to_be16(ipath_layer_get_npkeys(dd)); | 106 | nip->partition_cap = cpu_to_be16(ipath_get_npkeys(dd)); |
107 | nip->device_id = cpu_to_be16(ipath_layer_get_deviceid(dd)); | 107 | nip->device_id = cpu_to_be16(dd->ipath_deviceid); |
108 | majrev = ipath_layer_get_majrev(dd); | 108 | majrev = dd->ipath_majrev; |
109 | minrev = ipath_layer_get_minrev(dd); | 109 | minrev = dd->ipath_minrev; |
110 | nip->revision = cpu_to_be32((majrev << 16) | minrev); | 110 | nip->revision = cpu_to_be32((majrev << 16) | minrev); |
111 | nip->local_port_num = port; | 111 | nip->local_port_num = port; |
112 | vendor = ipath_layer_get_vendorid(dd); | 112 | vendor = dd->ipath_vendorid; |
113 | nip->vendor_id[0] = 0; | 113 | nip->vendor_id[0] = 0; |
114 | nip->vendor_id[1] = vendor >> 8; | 114 | nip->vendor_id[1] = vendor >> 8; |
115 | nip->vendor_id[2] = vendor; | 115 | nip->vendor_id[2] = vendor; |
@@ -133,13 +133,89 @@ static int recv_subn_get_guidinfo(struct ib_smp *smp, | |||
133 | */ | 133 | */ |
134 | if (startgx == 0) | 134 | if (startgx == 0) |
135 | /* The first is a copy of the read-only HW GUID. */ | 135 | /* The first is a copy of the read-only HW GUID. */ |
136 | *p = ipath_layer_get_guid(to_idev(ibdev)->dd); | 136 | *p = to_idev(ibdev)->dd->ipath_guid; |
137 | else | 137 | else |
138 | smp->status |= IB_SMP_INVALID_FIELD; | 138 | smp->status |= IB_SMP_INVALID_FIELD; |
139 | 139 | ||
140 | return reply(smp); | 140 | return reply(smp); |
141 | } | 141 | } |
142 | 142 | ||
143 | |||
144 | static int get_overrunthreshold(struct ipath_devdata *dd) | ||
145 | { | ||
146 | return (dd->ipath_ibcctrl >> | ||
147 | INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) & | ||
148 | INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK; | ||
149 | } | ||
150 | |||
151 | /** | ||
152 | * set_overrunthreshold - set the overrun threshold | ||
153 | * @dd: the infinipath device | ||
154 | * @n: the new threshold | ||
155 | * | ||
156 | * Note that this will only take effect when the link state changes. | ||
157 | */ | ||
158 | static int set_overrunthreshold(struct ipath_devdata *dd, unsigned n) | ||
159 | { | ||
160 | unsigned v; | ||
161 | |||
162 | v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) & | ||
163 | INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK; | ||
164 | if (v != n) { | ||
165 | dd->ipath_ibcctrl &= | ||
166 | ~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK << | ||
167 | INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT); | ||
168 | dd->ipath_ibcctrl |= | ||
169 | (u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT; | ||
170 | ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, | ||
171 | dd->ipath_ibcctrl); | ||
172 | } | ||
173 | return 0; | ||
174 | } | ||
175 | |||
176 | static int get_phyerrthreshold(struct ipath_devdata *dd) | ||
177 | { | ||
178 | return (dd->ipath_ibcctrl >> | ||
179 | INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) & | ||
180 | INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK; | ||
181 | } | ||
182 | |||
183 | /** | ||
184 | * set_phyerrthreshold - set the physical error threshold | ||
185 | * @dd: the infinipath device | ||
186 | * @n: the new threshold | ||
187 | * | ||
188 | * Note that this will only take effect when the link state changes. | ||
189 | */ | ||
190 | static int set_phyerrthreshold(struct ipath_devdata *dd, unsigned n) | ||
191 | { | ||
192 | unsigned v; | ||
193 | |||
194 | v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) & | ||
195 | INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK; | ||
196 | if (v != n) { | ||
197 | dd->ipath_ibcctrl &= | ||
198 | ~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK << | ||
199 | INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT); | ||
200 | dd->ipath_ibcctrl |= | ||
201 | (u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT; | ||
202 | ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, | ||
203 | dd->ipath_ibcctrl); | ||
204 | } | ||
205 | return 0; | ||
206 | } | ||
207 | |||
208 | /** | ||
209 | * get_linkdowndefaultstate - get the default linkdown state | ||
210 | * @dd: the infinipath device | ||
211 | * | ||
212 | * Returns zero if the default is POLL, 1 if the default is SLEEP. | ||
213 | */ | ||
214 | static int get_linkdowndefaultstate(struct ipath_devdata *dd) | ||
215 | { | ||
216 | return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE); | ||
217 | } | ||
218 | |||
143 | static int recv_subn_get_portinfo(struct ib_smp *smp, | 219 | static int recv_subn_get_portinfo(struct ib_smp *smp, |
144 | struct ib_device *ibdev, u8 port) | 220 | struct ib_device *ibdev, u8 port) |
145 | { | 221 | { |
@@ -166,7 +242,7 @@ static int recv_subn_get_portinfo(struct ib_smp *smp, | |||
166 | (dev->mkeyprot_resv_lmc >> 6) == 0) | 242 | (dev->mkeyprot_resv_lmc >> 6) == 0) |
167 | pip->mkey = dev->mkey; | 243 | pip->mkey = dev->mkey; |
168 | pip->gid_prefix = dev->gid_prefix; | 244 | pip->gid_prefix = dev->gid_prefix; |
169 | lid = ipath_layer_get_lid(dev->dd); | 245 | lid = dev->dd->ipath_lid; |
170 | pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE; | 246 | pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE; |
171 | pip->sm_lid = cpu_to_be16(dev->sm_lid); | 247 | pip->sm_lid = cpu_to_be16(dev->sm_lid); |
172 | pip->cap_mask = cpu_to_be32(dev->port_cap_flags); | 248 | pip->cap_mask = cpu_to_be32(dev->port_cap_flags); |
@@ -177,14 +253,14 @@ static int recv_subn_get_portinfo(struct ib_smp *smp, | |||
177 | pip->link_width_supported = 3; /* 1x or 4x */ | 253 | pip->link_width_supported = 3; /* 1x or 4x */ |
178 | pip->link_width_active = 2; /* 4x */ | 254 | pip->link_width_active = 2; /* 4x */ |
179 | pip->linkspeed_portstate = 0x10; /* 2.5Gbps */ | 255 | pip->linkspeed_portstate = 0x10; /* 2.5Gbps */ |
180 | ibcstat = ipath_layer_get_lastibcstat(dev->dd); | 256 | ibcstat = dev->dd->ipath_lastibcstat; |
181 | pip->linkspeed_portstate |= ((ibcstat >> 4) & 0x3) + 1; | 257 | pip->linkspeed_portstate |= ((ibcstat >> 4) & 0x3) + 1; |
182 | pip->portphysstate_linkdown = | 258 | pip->portphysstate_linkdown = |
183 | (ipath_cvt_physportstate[ibcstat & 0xf] << 4) | | 259 | (ipath_cvt_physportstate[ibcstat & 0xf] << 4) | |
184 | (ipath_layer_get_linkdowndefaultstate(dev->dd) ? 1 : 2); | 260 | (get_linkdowndefaultstate(dev->dd) ? 1 : 2); |
185 | pip->mkeyprot_resv_lmc = dev->mkeyprot_resv_lmc; | 261 | pip->mkeyprot_resv_lmc = dev->mkeyprot_resv_lmc; |
186 | pip->linkspeedactive_enabled = 0x11; /* 2.5Gbps, 2.5Gbps */ | 262 | pip->linkspeedactive_enabled = 0x11; /* 2.5Gbps, 2.5Gbps */ |
187 | switch (ipath_layer_get_ibmtu(dev->dd)) { | 263 | switch (dev->dd->ipath_ibmtu) { |
188 | case 4096: | 264 | case 4096: |
189 | mtu = IB_MTU_4096; | 265 | mtu = IB_MTU_4096; |
190 | break; | 266 | break; |
@@ -217,7 +293,7 @@ static int recv_subn_get_portinfo(struct ib_smp *smp, | |||
217 | pip->mkey_violations = cpu_to_be16(dev->mkey_violations); | 293 | pip->mkey_violations = cpu_to_be16(dev->mkey_violations); |
218 | /* P_KeyViolations are counted by hardware. */ | 294 | /* P_KeyViolations are counted by hardware. */ |
219 | pip->pkey_violations = | 295 | pip->pkey_violations = |
220 | cpu_to_be16((ipath_layer_get_cr_errpkey(dev->dd) - | 296 | cpu_to_be16((ipath_get_cr_errpkey(dev->dd) - |
221 | dev->z_pkey_violations) & 0xFFFF); | 297 | dev->z_pkey_violations) & 0xFFFF); |
222 | pip->qkey_violations = cpu_to_be16(dev->qkey_violations); | 298 | pip->qkey_violations = cpu_to_be16(dev->qkey_violations); |
223 | /* Only the hardware GUID is supported for now */ | 299 | /* Only the hardware GUID is supported for now */ |
@@ -226,8 +302,8 @@ static int recv_subn_get_portinfo(struct ib_smp *smp, | |||
226 | /* 32.768 usec. response time (guessing) */ | 302 | /* 32.768 usec. response time (guessing) */ |
227 | pip->resv_resptimevalue = 3; | 303 | pip->resv_resptimevalue = 3; |
228 | pip->localphyerrors_overrunerrors = | 304 | pip->localphyerrors_overrunerrors = |
229 | (ipath_layer_get_phyerrthreshold(dev->dd) << 4) | | 305 | (get_phyerrthreshold(dev->dd) << 4) | |
230 | ipath_layer_get_overrunthreshold(dev->dd); | 306 | get_overrunthreshold(dev->dd); |
231 | /* pip->max_credit_hint; */ | 307 | /* pip->max_credit_hint; */ |
232 | /* pip->link_roundtrip_latency[3]; */ | 308 | /* pip->link_roundtrip_latency[3]; */ |
233 | 309 | ||
@@ -237,6 +313,20 @@ bail: | |||
237 | return ret; | 313 | return ret; |
238 | } | 314 | } |
239 | 315 | ||
316 | /** | ||
317 | * get_pkeys - return the PKEY table for port 0 | ||
318 | * @dd: the infinipath device | ||
319 | * @pkeys: the pkey table is placed here | ||
320 | */ | ||
321 | static int get_pkeys(struct ipath_devdata *dd, u16 * pkeys) | ||
322 | { | ||
323 | struct ipath_portdata *pd = dd->ipath_pd[0]; | ||
324 | |||
325 | memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys)); | ||
326 | |||
327 | return 0; | ||
328 | } | ||
329 | |||
240 | static int recv_subn_get_pkeytable(struct ib_smp *smp, | 330 | static int recv_subn_get_pkeytable(struct ib_smp *smp, |
241 | struct ib_device *ibdev) | 331 | struct ib_device *ibdev) |
242 | { | 332 | { |
@@ -249,9 +339,9 @@ static int recv_subn_get_pkeytable(struct ib_smp *smp, | |||
249 | memset(smp->data, 0, sizeof(smp->data)); | 339 | memset(smp->data, 0, sizeof(smp->data)); |
250 | if (startpx == 0) { | 340 | if (startpx == 0) { |
251 | struct ipath_ibdev *dev = to_idev(ibdev); | 341 | struct ipath_ibdev *dev = to_idev(ibdev); |
252 | unsigned i, n = ipath_layer_get_npkeys(dev->dd); | 342 | unsigned i, n = ipath_get_npkeys(dev->dd); |
253 | 343 | ||
254 | ipath_layer_get_pkeys(dev->dd, p); | 344 | get_pkeys(dev->dd, p); |
255 | 345 | ||
256 | for (i = 0; i < n; i++) | 346 | for (i = 0; i < n; i++) |
257 | q[i] = cpu_to_be16(p[i]); | 347 | q[i] = cpu_to_be16(p[i]); |
@@ -269,6 +359,24 @@ static int recv_subn_set_guidinfo(struct ib_smp *smp, | |||
269 | } | 359 | } |
270 | 360 | ||
271 | /** | 361 | /** |
362 | * set_linkdowndefaultstate - set the default linkdown state | ||
363 | * @dd: the infinipath device | ||
364 | * @sleep: the new state | ||
365 | * | ||
366 | * Note that this will only take effect when the link state changes. | ||
367 | */ | ||
368 | static int set_linkdowndefaultstate(struct ipath_devdata *dd, int sleep) | ||
369 | { | ||
370 | if (sleep) | ||
371 | dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE; | ||
372 | else | ||
373 | dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE; | ||
374 | ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, | ||
375 | dd->ipath_ibcctrl); | ||
376 | return 0; | ||
377 | } | ||
378 | |||
379 | /** | ||
272 | * recv_subn_set_portinfo - set port information | 380 | * recv_subn_set_portinfo - set port information |
273 | * @smp: the incoming SM packet | 381 | * @smp: the incoming SM packet |
274 | * @ibdev: the infiniband device | 382 | * @ibdev: the infiniband device |
@@ -290,7 +398,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp, | |||
290 | u8 state; | 398 | u8 state; |
291 | u16 lstate; | 399 | u16 lstate; |
292 | u32 mtu; | 400 | u32 mtu; |
293 | int ret; | 401 | int ret, ore; |
294 | 402 | ||
295 | if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt) | 403 | if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt) |
296 | goto err; | 404 | goto err; |
@@ -304,7 +412,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp, | |||
304 | dev->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period); | 412 | dev->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period); |
305 | 413 | ||
306 | lid = be16_to_cpu(pip->lid); | 414 | lid = be16_to_cpu(pip->lid); |
307 | if (lid != ipath_layer_get_lid(dev->dd)) { | 415 | if (lid != dev->dd->ipath_lid) { |
308 | /* Must be a valid unicast LID address. */ | 416 | /* Must be a valid unicast LID address. */ |
309 | if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE) | 417 | if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE) |
310 | goto err; | 418 | goto err; |
@@ -342,11 +450,11 @@ static int recv_subn_set_portinfo(struct ib_smp *smp, | |||
342 | case 0: /* NOP */ | 450 | case 0: /* NOP */ |
343 | break; | 451 | break; |
344 | case 1: /* SLEEP */ | 452 | case 1: /* SLEEP */ |
345 | if (ipath_layer_set_linkdowndefaultstate(dev->dd, 1)) | 453 | if (set_linkdowndefaultstate(dev->dd, 1)) |
346 | goto err; | 454 | goto err; |
347 | break; | 455 | break; |
348 | case 2: /* POLL */ | 456 | case 2: /* POLL */ |
349 | if (ipath_layer_set_linkdowndefaultstate(dev->dd, 0)) | 457 | if (set_linkdowndefaultstate(dev->dd, 0)) |
350 | goto err; | 458 | goto err; |
351 | break; | 459 | break; |
352 | default: | 460 | default: |
@@ -376,7 +484,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp, | |||
376 | /* XXX We have already partially updated our state! */ | 484 | /* XXX We have already partially updated our state! */ |
377 | goto err; | 485 | goto err; |
378 | } | 486 | } |
379 | ipath_layer_set_mtu(dev->dd, mtu); | 487 | ipath_set_mtu(dev->dd, mtu); |
380 | 488 | ||
381 | dev->sm_sl = pip->neighbormtu_mastersmsl & 0xF; | 489 | dev->sm_sl = pip->neighbormtu_mastersmsl & 0xF; |
382 | 490 | ||
@@ -392,20 +500,16 @@ static int recv_subn_set_portinfo(struct ib_smp *smp, | |||
392 | * later. | 500 | * later. |
393 | */ | 501 | */ |
394 | if (pip->pkey_violations == 0) | 502 | if (pip->pkey_violations == 0) |
395 | dev->z_pkey_violations = | 503 | dev->z_pkey_violations = ipath_get_cr_errpkey(dev->dd); |
396 | ipath_layer_get_cr_errpkey(dev->dd); | ||
397 | 504 | ||
398 | if (pip->qkey_violations == 0) | 505 | if (pip->qkey_violations == 0) |
399 | dev->qkey_violations = 0; | 506 | dev->qkey_violations = 0; |
400 | 507 | ||
401 | if (ipath_layer_set_phyerrthreshold( | 508 | ore = pip->localphyerrors_overrunerrors; |
402 | dev->dd, | 509 | if (set_phyerrthreshold(dev->dd, (ore >> 4) & 0xF)) |
403 | (pip->localphyerrors_overrunerrors >> 4) & 0xF)) | ||
404 | goto err; | 510 | goto err; |
405 | 511 | ||
406 | if (ipath_layer_set_overrunthreshold( | 512 | if (set_overrunthreshold(dev->dd, (ore & 0xF))) |
407 | dev->dd, | ||
408 | (pip->localphyerrors_overrunerrors & 0xF))) | ||
409 | goto err; | 513 | goto err; |
410 | 514 | ||
411 | dev->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F; | 515 | dev->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F; |
@@ -423,7 +527,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp, | |||
423 | * is down or is being set to down. | 527 | * is down or is being set to down. |
424 | */ | 528 | */ |
425 | state = pip->linkspeed_portstate & 0xF; | 529 | state = pip->linkspeed_portstate & 0xF; |
426 | flags = ipath_layer_get_flags(dev->dd); | 530 | flags = dev->dd->ipath_flags; |
427 | lstate = (pip->portphysstate_linkdown >> 4) & 0xF; | 531 | lstate = (pip->portphysstate_linkdown >> 4) & 0xF; |
428 | if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP)) | 532 | if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP)) |
429 | goto err; | 533 | goto err; |
@@ -439,7 +543,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp, | |||
439 | /* FALLTHROUGH */ | 543 | /* FALLTHROUGH */ |
440 | case IB_PORT_DOWN: | 544 | case IB_PORT_DOWN: |
441 | if (lstate == 0) | 545 | if (lstate == 0) |
442 | if (ipath_layer_get_linkdowndefaultstate(dev->dd)) | 546 | if (get_linkdowndefaultstate(dev->dd)) |
443 | lstate = IPATH_IB_LINKDOWN_SLEEP; | 547 | lstate = IPATH_IB_LINKDOWN_SLEEP; |
444 | else | 548 | else |
445 | lstate = IPATH_IB_LINKDOWN; | 549 | lstate = IPATH_IB_LINKDOWN; |
@@ -451,7 +555,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp, | |||
451 | lstate = IPATH_IB_LINKDOWN_DISABLE; | 555 | lstate = IPATH_IB_LINKDOWN_DISABLE; |
452 | else | 556 | else |
453 | goto err; | 557 | goto err; |
454 | ipath_layer_set_linkstate(dev->dd, lstate); | 558 | ipath_set_linkstate(dev->dd, lstate); |
455 | if (flags & IPATH_LINKACTIVE) { | 559 | if (flags & IPATH_LINKACTIVE) { |
456 | event.event = IB_EVENT_PORT_ERR; | 560 | event.event = IB_EVENT_PORT_ERR; |
457 | ib_dispatch_event(&event); | 561 | ib_dispatch_event(&event); |
@@ -460,7 +564,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp, | |||
460 | case IB_PORT_ARMED: | 564 | case IB_PORT_ARMED: |
461 | if (!(flags & (IPATH_LINKINIT | IPATH_LINKACTIVE))) | 565 | if (!(flags & (IPATH_LINKINIT | IPATH_LINKACTIVE))) |
462 | break; | 566 | break; |
463 | ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKARM); | 567 | ipath_set_linkstate(dev->dd, IPATH_IB_LINKARM); |
464 | if (flags & IPATH_LINKACTIVE) { | 568 | if (flags & IPATH_LINKACTIVE) { |
465 | event.event = IB_EVENT_PORT_ERR; | 569 | event.event = IB_EVENT_PORT_ERR; |
466 | ib_dispatch_event(&event); | 570 | ib_dispatch_event(&event); |
@@ -469,7 +573,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp, | |||
469 | case IB_PORT_ACTIVE: | 573 | case IB_PORT_ACTIVE: |
470 | if (!(flags & IPATH_LINKARMED)) | 574 | if (!(flags & IPATH_LINKARMED)) |
471 | break; | 575 | break; |
472 | ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKACTIVE); | 576 | ipath_set_linkstate(dev->dd, IPATH_IB_LINKACTIVE); |
473 | event.event = IB_EVENT_PORT_ACTIVE; | 577 | event.event = IB_EVENT_PORT_ACTIVE; |
474 | ib_dispatch_event(&event); | 578 | ib_dispatch_event(&event); |
475 | break; | 579 | break; |
@@ -493,6 +597,152 @@ done: | |||
493 | return ret; | 597 | return ret; |
494 | } | 598 | } |
495 | 599 | ||
600 | /** | ||
601 | * rm_pkey - decrement the reference count for the given PKEY | ||
602 | * @dd: the infinipath device | ||
603 | * @key: the PKEY index | ||
604 | * | ||
605 | * Return true if this was the last reference and the hardware table entry | ||
606 | * needs to be changed. | ||
607 | */ | ||
608 | static int rm_pkey(struct ipath_devdata *dd, u16 key) | ||
609 | { | ||
610 | int i; | ||
611 | int ret; | ||
612 | |||
613 | for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) { | ||
614 | if (dd->ipath_pkeys[i] != key) | ||
615 | continue; | ||
616 | if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) { | ||
617 | dd->ipath_pkeys[i] = 0; | ||
618 | ret = 1; | ||
619 | goto bail; | ||
620 | } | ||
621 | break; | ||
622 | } | ||
623 | |||
624 | ret = 0; | ||
625 | |||
626 | bail: | ||
627 | return ret; | ||
628 | } | ||
629 | |||
630 | /** | ||
631 | * add_pkey - add the given PKEY to the hardware table | ||
632 | * @dd: the infinipath device | ||
633 | * @key: the PKEY | ||
634 | * | ||
635 | * Return an error code if unable to add the entry, zero if no change, | ||
636 | * or 1 if the hardware PKEY register needs to be updated. | ||
637 | */ | ||
638 | static int add_pkey(struct ipath_devdata *dd, u16 key) | ||
639 | { | ||
640 | int i; | ||
641 | u16 lkey = key & 0x7FFF; | ||
642 | int any = 0; | ||
643 | int ret; | ||
644 | |||
645 | if (lkey == 0x7FFF) { | ||
646 | ret = 0; | ||
647 | goto bail; | ||
648 | } | ||
649 | |||
650 | /* Look for an empty slot or a matching PKEY. */ | ||
651 | for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) { | ||
652 | if (!dd->ipath_pkeys[i]) { | ||
653 | any++; | ||
654 | continue; | ||
655 | } | ||
656 | /* If it matches exactly, try to increment the ref count */ | ||
657 | if (dd->ipath_pkeys[i] == key) { | ||
658 | if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) { | ||
659 | ret = 0; | ||
660 | goto bail; | ||
661 | } | ||
662 | /* Lost the race. Look for an empty slot below. */ | ||
663 | atomic_dec(&dd->ipath_pkeyrefs[i]); | ||
664 | any++; | ||
665 | } | ||
666 | /* | ||
667 | * It makes no sense to have both the limited and unlimited | ||
668 | * PKEY set at the same time since the unlimited one will | ||
669 | * disable the limited one. | ||
670 | */ | ||
671 | if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) { | ||
672 | ret = -EEXIST; | ||
673 | goto bail; | ||
674 | } | ||
675 | } | ||
676 | if (!any) { | ||
677 | ret = -EBUSY; | ||
678 | goto bail; | ||
679 | } | ||
680 | for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) { | ||
681 | if (!dd->ipath_pkeys[i] && | ||
682 | atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) { | ||
683 | /* for ipathstats, etc. */ | ||
684 | ipath_stats.sps_pkeys[i] = lkey; | ||
685 | dd->ipath_pkeys[i] = key; | ||
686 | ret = 1; | ||
687 | goto bail; | ||
688 | } | ||
689 | } | ||
690 | ret = -EBUSY; | ||
691 | |||
692 | bail: | ||
693 | return ret; | ||
694 | } | ||
695 | |||
696 | /** | ||
697 | * set_pkeys - set the PKEY table for port 0 | ||
698 | * @dd: the infinipath device | ||
699 | * @pkeys: the PKEY table | ||
700 | */ | ||
701 | static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys) | ||
702 | { | ||
703 | struct ipath_portdata *pd; | ||
704 | int i; | ||
705 | int changed = 0; | ||
706 | |||
707 | pd = dd->ipath_pd[0]; | ||
708 | |||
709 | for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) { | ||
710 | u16 key = pkeys[i]; | ||
711 | u16 okey = pd->port_pkeys[i]; | ||
712 | |||
713 | if (key == okey) | ||
714 | continue; | ||
715 | /* | ||
716 | * The value of this PKEY table entry is changing. | ||
717 | * Remove the old entry in the hardware's array of PKEYs. | ||
718 | */ | ||
719 | if (okey & 0x7FFF) | ||
720 | changed |= rm_pkey(dd, okey); | ||
721 | if (key & 0x7FFF) { | ||
722 | int ret = add_pkey(dd, key); | ||
723 | |||
724 | if (ret < 0) | ||
725 | key = 0; | ||
726 | else | ||
727 | changed |= ret; | ||
728 | } | ||
729 | pd->port_pkeys[i] = key; | ||
730 | } | ||
731 | if (changed) { | ||
732 | u64 pkey; | ||
733 | |||
734 | pkey = (u64) dd->ipath_pkeys[0] | | ||
735 | ((u64) dd->ipath_pkeys[1] << 16) | | ||
736 | ((u64) dd->ipath_pkeys[2] << 32) | | ||
737 | ((u64) dd->ipath_pkeys[3] << 48); | ||
738 | ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n", | ||
739 | (unsigned long long) pkey); | ||
740 | ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey, | ||
741 | pkey); | ||
742 | } | ||
743 | return 0; | ||
744 | } | ||
745 | |||
496 | static int recv_subn_set_pkeytable(struct ib_smp *smp, | 746 | static int recv_subn_set_pkeytable(struct ib_smp *smp, |
497 | struct ib_device *ibdev) | 747 | struct ib_device *ibdev) |
498 | { | 748 | { |
@@ -500,13 +750,12 @@ static int recv_subn_set_pkeytable(struct ib_smp *smp, | |||
500 | __be16 *p = (__be16 *) smp->data; | 750 | __be16 *p = (__be16 *) smp->data; |
501 | u16 *q = (u16 *) smp->data; | 751 | u16 *q = (u16 *) smp->data; |
502 | struct ipath_ibdev *dev = to_idev(ibdev); | 752 | struct ipath_ibdev *dev = to_idev(ibdev); |
503 | unsigned i, n = ipath_layer_get_npkeys(dev->dd); | 753 | unsigned i, n = ipath_get_npkeys(dev->dd); |
504 | 754 | ||
505 | for (i = 0; i < n; i++) | 755 | for (i = 0; i < n; i++) |
506 | q[i] = be16_to_cpu(p[i]); | 756 | q[i] = be16_to_cpu(p[i]); |
507 | 757 | ||
508 | if (startpx != 0 || | 758 | if (startpx != 0 || set_pkeys(dev->dd, q) != 0) |
509 | ipath_layer_set_pkeys(dev->dd, q) != 0) | ||
510 | smp->status |= IB_SMP_INVALID_FIELD; | 759 | smp->status |= IB_SMP_INVALID_FIELD; |
511 | 760 | ||
512 | return recv_subn_get_pkeytable(smp, ibdev); | 761 | return recv_subn_get_pkeytable(smp, ibdev); |
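The rm_pkey()/add_pkey()/set_pkeys() helpers added above replace the old ipath_layer_set_pkeys() call with reference-counted management of the four hardware partition-key slots, and they refuse to install the limited and full-membership forms of the same key at once. A stand-alone model of that bookkeeping, using made-up names and plain ints instead of atomic_t, is sketched below; it is an illustration of the slot logic, not code from the patch.

#include <stdio.h>

#define NSLOTS 4

static unsigned short pkeys[NSLOTS];
static int pkeyrefs[NSLOTS];

/*
 * Returns 1 if a hardware slot changed, 0 if only a reference count moved,
 * -1 on a full table or a limited/full-membership conflict.
 */
static int toy_add_pkey(unsigned short key)
{
	unsigned short lkey = key & 0x7FFF;	/* strip the full-member bit */
	int i, empty = -1;

	if (lkey == 0x7FFF)			/* default partition key: nothing to add */
		return 0;
	for (i = 0; i < NSLOTS; i++) {
		if (!pkeys[i]) {
			if (empty < 0)
				empty = i;
			continue;
		}
		if (pkeys[i] == key) {		/* exact match: share the slot */
			pkeyrefs[i]++;
			return 0;
		}
		if ((pkeys[i] & 0x7FFF) == lkey)
			return -1;		/* limited vs. full conflict */
	}
	if (empty < 0)
		return -1;			/* no free slot */
	pkeys[empty] = key;
	pkeyrefs[empty] = 1;
	return 1;				/* hardware register must be rewritten */
}

int main(void)
{
	printf("%d\n", toy_add_pkey(0x8001));	/* 1: new slot */
	printf("%d\n", toy_add_pkey(0x8001));	/* 0: refcount bumped */
	printf("%d\n", toy_add_pkey(0x0001));	/* -1: limited/full conflict */
	return 0;
}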
@@ -844,10 +1093,10 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp, | |||
844 | struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) | 1093 | struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) |
845 | pmp->data; | 1094 | pmp->data; |
846 | struct ipath_ibdev *dev = to_idev(ibdev); | 1095 | struct ipath_ibdev *dev = to_idev(ibdev); |
847 | struct ipath_layer_counters cntrs; | 1096 | struct ipath_verbs_counters cntrs; |
848 | u8 port_select = p->port_select; | 1097 | u8 port_select = p->port_select; |
849 | 1098 | ||
850 | ipath_layer_get_counters(dev->dd, &cntrs); | 1099 | ipath_get_counters(dev->dd, &cntrs); |
851 | 1100 | ||
852 | /* Adjust counters for any resets done. */ | 1101 | /* Adjust counters for any resets done. */ |
853 | cntrs.symbol_error_counter -= dev->z_symbol_error_counter; | 1102 | cntrs.symbol_error_counter -= dev->z_symbol_error_counter; |
@@ -944,8 +1193,8 @@ static int recv_pma_get_portcounters_ext(struct ib_perf *pmp, | |||
944 | u64 swords, rwords, spkts, rpkts, xwait; | 1193 | u64 swords, rwords, spkts, rpkts, xwait; |
945 | u8 port_select = p->port_select; | 1194 | u8 port_select = p->port_select; |
946 | 1195 | ||
947 | ipath_layer_snapshot_counters(dev->dd, &swords, &rwords, &spkts, | 1196 | ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts, |
948 | &rpkts, &xwait); | 1197 | &rpkts, &xwait); |
949 | 1198 | ||
950 | /* Adjust counters for any resets done. */ | 1199 | /* Adjust counters for any resets done. */ |
951 | swords -= dev->z_port_xmit_data; | 1200 | swords -= dev->z_port_xmit_data; |
@@ -978,13 +1227,13 @@ static int recv_pma_set_portcounters(struct ib_perf *pmp, | |||
978 | struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) | 1227 | struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) |
979 | pmp->data; | 1228 | pmp->data; |
980 | struct ipath_ibdev *dev = to_idev(ibdev); | 1229 | struct ipath_ibdev *dev = to_idev(ibdev); |
981 | struct ipath_layer_counters cntrs; | 1230 | struct ipath_verbs_counters cntrs; |
982 | 1231 | ||
983 | /* | 1232 | /* |
984 | * Since the HW doesn't support clearing counters, we save the | 1233 | * Since the HW doesn't support clearing counters, we save the |
985 | * current count and subtract it from future responses. | 1234 | * current count and subtract it from future responses. |
986 | */ | 1235 | */ |
987 | ipath_layer_get_counters(dev->dd, &cntrs); | 1236 | ipath_get_counters(dev->dd, &cntrs); |
988 | 1237 | ||
989 | if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR) | 1238 | if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR) |
990 | dev->z_symbol_error_counter = cntrs.symbol_error_counter; | 1239 | dev->z_symbol_error_counter = cntrs.symbol_error_counter; |
@@ -1041,8 +1290,8 @@ static int recv_pma_set_portcounters_ext(struct ib_perf *pmp, | |||
1041 | struct ipath_ibdev *dev = to_idev(ibdev); | 1290 | struct ipath_ibdev *dev = to_idev(ibdev); |
1042 | u64 swords, rwords, spkts, rpkts, xwait; | 1291 | u64 swords, rwords, spkts, rpkts, xwait; |
1043 | 1292 | ||
1044 | ipath_layer_snapshot_counters(dev->dd, &swords, &rwords, &spkts, | 1293 | ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts, |
1045 | &rpkts, &xwait); | 1294 | &rpkts, &xwait); |
1046 | 1295 | ||
1047 | if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA) | 1296 | if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA) |
1048 | dev->z_port_xmit_data = swords; | 1297 | dev->z_port_xmit_data = swords; |
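The counter handling above relies on the scheme described in the comment: the chip cannot clear its counters, so a "clear" merely records the current value (the dev->z_* fields) and later reads subtract it. A minimal sketch of the idea, with stand-in names for the hardware read and the baseline:

#include <stdint.h>

static uint64_t hw_count;	/* stands in for the free-running chip counter */
static uint64_t baseline;	/* plays the role of dev->z_symbol_error_counter etc. */

static uint64_t read_hw_counter(void)
{
	return hw_count;	/* the real driver reads a chip register */
}

/* PMA Set(PortCounters) with this field selected: remember a baseline. */
static void clear_counter(void)
{
	baseline = read_hw_counter();
}

/* PMA Get(PortCounters): report only what accumulated since the "clear". */
static uint64_t get_counter(void)
{
	return read_hw_counter() - baseline;
}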
diff --git a/drivers/infiniband/hw/ipath/ipath_mmap.c b/drivers/infiniband/hw/ipath/ipath_mmap.c new file mode 100644 index 000000000000..11b7378ff214 --- /dev/null +++ b/drivers/infiniband/hw/ipath/ipath_mmap.c | |||
@@ -0,0 +1,122 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006 QLogic, Inc. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #include <linux/config.h> | ||
34 | #include <linux/module.h> | ||
35 | #include <linux/vmalloc.h> | ||
36 | #include <linux/mm.h> | ||
37 | #include <linux/errno.h> | ||
38 | #include <asm/pgtable.h> | ||
39 | |||
40 | #include "ipath_verbs.h" | ||
41 | |||
42 | /** | ||
43 | * ipath_release_mmap_info - free mmap info structure | ||
44 | * @ref: a pointer to the kref within struct ipath_mmap_info | ||
45 | */ | ||
46 | void ipath_release_mmap_info(struct kref *ref) | ||
47 | { | ||
48 | struct ipath_mmap_info *ip = | ||
49 | container_of(ref, struct ipath_mmap_info, ref); | ||
50 | |||
51 | vfree(ip->obj); | ||
52 | kfree(ip); | ||
53 | } | ||
54 | |||
55 | /* | ||
56 | * open and close keep track of how many times the CQ is mapped, | ||
57 | * to avoid releasing it. | ||
58 | */ | ||
59 | static void ipath_vma_open(struct vm_area_struct *vma) | ||
60 | { | ||
61 | struct ipath_mmap_info *ip = vma->vm_private_data; | ||
62 | |||
63 | kref_get(&ip->ref); | ||
64 | ip->mmap_cnt++; | ||
65 | } | ||
66 | |||
67 | static void ipath_vma_close(struct vm_area_struct *vma) | ||
68 | { | ||
69 | struct ipath_mmap_info *ip = vma->vm_private_data; | ||
70 | |||
71 | ip->mmap_cnt--; | ||
72 | kref_put(&ip->ref, ipath_release_mmap_info); | ||
73 | } | ||
74 | |||
75 | static struct vm_operations_struct ipath_vm_ops = { | ||
76 | .open = ipath_vma_open, | ||
77 | .close = ipath_vma_close, | ||
78 | }; | ||
79 | |||
80 | /** | ||
81 | * ipath_mmap - create a new mmap region | ||
82 | * @context: the IB user context of the process making the mmap() call | ||
83 | * @vma: the VMA to be initialized | ||
84 | * Return zero if the mmap is OK. Otherwise, return an errno. | ||
85 | */ | ||
86 | int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) | ||
87 | { | ||
88 | struct ipath_ibdev *dev = to_idev(context->device); | ||
89 | unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; | ||
90 | unsigned long size = vma->vm_end - vma->vm_start; | ||
91 | struct ipath_mmap_info *ip, **pp; | ||
92 | int ret = -EINVAL; | ||
93 | |||
94 | /* | ||
95 | * Search the device's list of objects waiting for a mmap call. | ||
96 | * Normally, this list is very short since a call to create a | ||
97 | * CQ, QP, or SRQ is soon followed by a call to mmap(). | ||
98 | */ | ||
99 | spin_lock_irq(&dev->pending_lock); | ||
100 | for (pp = &dev->pending_mmaps; (ip = *pp); pp = &ip->next) { | ||
101 | /* Only the creator is allowed to mmap the object */ | ||
102 | if (context != ip->context || (void *) offset != ip->obj) | ||
103 | continue; | ||
104 | /* Don't allow a mmap larger than the object. */ | ||
105 | if (size > ip->size) | ||
106 | break; | ||
107 | |||
108 | *pp = ip->next; | ||
109 | spin_unlock_irq(&dev->pending_lock); | ||
110 | |||
111 | ret = remap_vmalloc_range(vma, ip->obj, 0); | ||
112 | if (ret) | ||
113 | goto done; | ||
114 | vma->vm_ops = &ipath_vm_ops; | ||
115 | vma->vm_private_data = ip; | ||
116 | ipath_vma_open(vma); | ||
117 | goto done; | ||
118 | } | ||
119 | spin_unlock_irq(&dev->pending_lock); | ||
120 | done: | ||
121 | return ret; | ||
122 | } | ||
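ipath_mmap() pairs with code elsewhere in this patch (see ipath_create_qp() below) that queues a vmalloc_user() buffer on dev->pending_mmaps and hands its kernel address back to userspace as the mmap offset. The sketch below condenses that producer side into one hypothetical helper; queue_for_mmap() is not part of the patch and assumes the driver's usual headers.

#include "ipath_verbs.h"

/*
 * Hypothetical helper (not in the patch) condensing the steps
 * ipath_create_qp() performs: allocate the bookkeeping record, remember
 * the creator's context and the vmalloc_user() buffer, and queue it so a
 * later mmap() call can find it.
 */
static struct ipath_mmap_info *queue_for_mmap(struct ipath_ibdev *dev,
					      struct ib_ucontext *context,
					      void *obj, u32 size)
{
	struct ipath_mmap_info *ip;

	ip = kmalloc(sizeof(*ip), GFP_KERNEL);
	if (!ip)
		return NULL;

	ip->context = context;	/* only this context may mmap the object */
	ip->obj = obj;		/* matched against the mmap offset later */
	kref_init(&ip->ref);
	ip->mmap_cnt = 0;
	ip->size = PAGE_ALIGN(size);

	spin_lock_irq(&dev->pending_lock);
	ip->next = dev->pending_mmaps;
	dev->pending_mmaps = ip;
	spin_unlock_irq(&dev->pending_lock);

	return ip;
}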
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c index 4ac31a5da330..b36f6fb3e37a 100644 --- a/drivers/infiniband/hw/ipath/ipath_mr.c +++ b/drivers/infiniband/hw/ipath/ipath_mr.c | |||
@@ -36,6 +36,18 @@ | |||
36 | 36 | ||
37 | #include "ipath_verbs.h" | 37 | #include "ipath_verbs.h" |
38 | 38 | ||
39 | /* Fast memory region */ | ||
40 | struct ipath_fmr { | ||
41 | struct ib_fmr ibfmr; | ||
42 | u8 page_shift; | ||
43 | struct ipath_mregion mr; /* must be last */ | ||
44 | }; | ||
45 | |||
46 | static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr) | ||
47 | { | ||
48 | return container_of(ibfmr, struct ipath_fmr, ibfmr); | ||
49 | } | ||
50 | |||
39 | /** | 51 | /** |
40 | * ipath_get_dma_mr - get a DMA memory region | 52 | * ipath_get_dma_mr - get a DMA memory region |
41 | * @pd: protection domain for this memory region | 53 | * @pd: protection domain for this memory region |
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c index 83e557be591e..224b0f40767f 100644 --- a/drivers/infiniband/hw/ipath/ipath_qp.c +++ b/drivers/infiniband/hw/ipath/ipath_qp.c | |||
@@ -35,7 +35,7 @@ | |||
35 | #include <linux/vmalloc.h> | 35 | #include <linux/vmalloc.h> |
36 | 36 | ||
37 | #include "ipath_verbs.h" | 37 | #include "ipath_verbs.h" |
38 | #include "ipath_common.h" | 38 | #include "ipath_kernel.h" |
39 | 39 | ||
40 | #define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE) | 40 | #define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE) |
41 | #define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1) | 41 | #define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1) |
@@ -44,19 +44,6 @@ | |||
44 | #define find_next_offset(map, off) find_next_zero_bit((map)->page, \ | 44 | #define find_next_offset(map, off) find_next_zero_bit((map)->page, \ |
45 | BITS_PER_PAGE, off) | 45 | BITS_PER_PAGE, off) |
46 | 46 | ||
47 | #define TRANS_INVALID 0 | ||
48 | #define TRANS_ANY2RST 1 | ||
49 | #define TRANS_RST2INIT 2 | ||
50 | #define TRANS_INIT2INIT 3 | ||
51 | #define TRANS_INIT2RTR 4 | ||
52 | #define TRANS_RTR2RTS 5 | ||
53 | #define TRANS_RTS2RTS 6 | ||
54 | #define TRANS_SQERR2RTS 7 | ||
55 | #define TRANS_ANY2ERR 8 | ||
56 | #define TRANS_RTS2SQD 9 /* XXX Wait for expected ACKs & signal event */ | ||
57 | #define TRANS_SQD2SQD 10 /* error if not drained & parameter change */ | ||
58 | #define TRANS_SQD2RTS 11 /* error if not drained */ | ||
59 | |||
60 | /* | 47 | /* |
61 | * Convert the AETH credit code into the number of credits. | 48 | * Convert the AETH credit code into the number of credits. |
62 | */ | 49 | */ |
@@ -287,7 +274,7 @@ void ipath_free_all_qps(struct ipath_qp_table *qpt) | |||
287 | free_qpn(qpt, qp->ibqp.qp_num); | 274 | free_qpn(qpt, qp->ibqp.qp_num); |
288 | if (!atomic_dec_and_test(&qp->refcount) || | 275 | if (!atomic_dec_and_test(&qp->refcount) || |
289 | !ipath_destroy_qp(&qp->ibqp)) | 276 | !ipath_destroy_qp(&qp->ibqp)) |
290 | _VERBS_INFO("QP memory leak!\n"); | 277 | ipath_dbg(KERN_INFO "QP memory leak!\n"); |
291 | qp = nqp; | 278 | qp = nqp; |
292 | } | 279 | } |
293 | } | 280 | } |
@@ -355,8 +342,10 @@ static void ipath_reset_qp(struct ipath_qp *qp) | |||
355 | qp->s_last = 0; | 342 | qp->s_last = 0; |
356 | qp->s_ssn = 1; | 343 | qp->s_ssn = 1; |
357 | qp->s_lsn = 0; | 344 | qp->s_lsn = 0; |
358 | qp->r_rq.head = 0; | 345 | if (qp->r_rq.wq) { |
359 | qp->r_rq.tail = 0; | 346 | qp->r_rq.wq->head = 0; |
347 | qp->r_rq.wq->tail = 0; | ||
348 | } | ||
360 | qp->r_reuse_sge = 0; | 349 | qp->r_reuse_sge = 0; |
361 | } | 350 | } |
362 | 351 | ||
@@ -373,8 +362,8 @@ void ipath_error_qp(struct ipath_qp *qp) | |||
373 | struct ipath_ibdev *dev = to_idev(qp->ibqp.device); | 362 | struct ipath_ibdev *dev = to_idev(qp->ibqp.device); |
374 | struct ib_wc wc; | 363 | struct ib_wc wc; |
375 | 364 | ||
376 | _VERBS_INFO("QP%d/%d in error state\n", | 365 | ipath_dbg(KERN_INFO "QP%d/%d in error state\n", |
377 | qp->ibqp.qp_num, qp->remote_qpn); | 366 | qp->ibqp.qp_num, qp->remote_qpn); |
378 | 367 | ||
379 | spin_lock(&dev->pending_lock); | 368 | spin_lock(&dev->pending_lock); |
380 | /* XXX What if it's already removed by the timeout code? */ | 369 | /* XXX What if it's already removed by the timeout code? */ |
@@ -410,15 +399,32 @@ void ipath_error_qp(struct ipath_qp *qp) | |||
410 | qp->s_hdrwords = 0; | 399 | qp->s_hdrwords = 0; |
411 | qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; | 400 | qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; |
412 | 401 | ||
413 | wc.opcode = IB_WC_RECV; | 402 | if (qp->r_rq.wq) { |
414 | spin_lock(&qp->r_rq.lock); | 403 | struct ipath_rwq *wq; |
415 | while (qp->r_rq.tail != qp->r_rq.head) { | 404 | u32 head; |
416 | wc.wr_id = get_rwqe_ptr(&qp->r_rq, qp->r_rq.tail)->wr_id; | 405 | u32 tail; |
417 | if (++qp->r_rq.tail >= qp->r_rq.size) | 406 | |
418 | qp->r_rq.tail = 0; | 407 | spin_lock(&qp->r_rq.lock); |
419 | ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); | 408 | |
409 | /* sanity check pointers before trusting them */ | ||
410 | wq = qp->r_rq.wq; | ||
411 | head = wq->head; | ||
412 | if (head >= qp->r_rq.size) | ||
413 | head = 0; | ||
414 | tail = wq->tail; | ||
415 | if (tail >= qp->r_rq.size) | ||
416 | tail = 0; | ||
417 | wc.opcode = IB_WC_RECV; | ||
418 | while (tail != head) { | ||
419 | wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id; | ||
420 | if (++tail >= qp->r_rq.size) | ||
421 | tail = 0; | ||
422 | ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); | ||
423 | } | ||
424 | wq->tail = tail; | ||
425 | |||
426 | spin_unlock(&qp->r_rq.lock); | ||
420 | } | 427 | } |
421 | spin_unlock(&qp->r_rq.lock); | ||
422 | } | 428 | } |
423 | 429 | ||
424 | /** | 430 | /** |
@@ -426,11 +432,12 @@ void ipath_error_qp(struct ipath_qp *qp) | |||
426 | * @ibqp: the queue pair whose attributes we're modifying | 432 | * @ibqp: the queue pair whose attributes we're modifying |
427 | * @attr: the new attributes | 433 | * @attr: the new attributes |
428 | * @attr_mask: the mask of attributes to modify | 434 | * @attr_mask: the mask of attributes to modify |
435 | * @udata: user data for ipathverbs.so | ||
429 | * | 436 | * |
430 | * Returns 0 on success, otherwise returns an errno. | 437 | * Returns 0 on success, otherwise returns an errno. |
431 | */ | 438 | */ |
432 | int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | 439 | int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, |
433 | int attr_mask) | 440 | int attr_mask, struct ib_udata *udata) |
434 | { | 441 | { |
435 | struct ipath_ibdev *dev = to_idev(ibqp->device); | 442 | struct ipath_ibdev *dev = to_idev(ibqp->device); |
436 | struct ipath_qp *qp = to_iqp(ibqp); | 443 | struct ipath_qp *qp = to_iqp(ibqp); |
@@ -448,19 +455,46 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
448 | attr_mask)) | 455 | attr_mask)) |
449 | goto inval; | 456 | goto inval; |
450 | 457 | ||
451 | if (attr_mask & IB_QP_AV) | 458 | if (attr_mask & IB_QP_AV) { |
452 | if (attr->ah_attr.dlid == 0 || | 459 | if (attr->ah_attr.dlid == 0 || |
453 | attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE) | 460 | attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE) |
454 | goto inval; | 461 | goto inval; |
455 | 462 | ||
463 | if ((attr->ah_attr.ah_flags & IB_AH_GRH) && | ||
464 | (attr->ah_attr.grh.sgid_index > 1)) | ||
465 | goto inval; | ||
466 | } | ||
467 | |||
456 | if (attr_mask & IB_QP_PKEY_INDEX) | 468 | if (attr_mask & IB_QP_PKEY_INDEX) |
457 | if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd)) | 469 | if (attr->pkey_index >= ipath_get_npkeys(dev->dd)) |
458 | goto inval; | 470 | goto inval; |
459 | 471 | ||
460 | if (attr_mask & IB_QP_MIN_RNR_TIMER) | 472 | if (attr_mask & IB_QP_MIN_RNR_TIMER) |
461 | if (attr->min_rnr_timer > 31) | 473 | if (attr->min_rnr_timer > 31) |
462 | goto inval; | 474 | goto inval; |
463 | 475 | ||
476 | if (attr_mask & IB_QP_PORT) | ||
477 | if (attr->port_num == 0 || | ||
478 | attr->port_num > ibqp->device->phys_port_cnt) | ||
479 | goto inval; | ||
480 | |||
481 | if (attr_mask & IB_QP_PATH_MTU) | ||
482 | if (attr->path_mtu > IB_MTU_4096) | ||
483 | goto inval; | ||
484 | |||
485 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) | ||
486 | if (attr->max_dest_rd_atomic > 1) | ||
487 | goto inval; | ||
488 | |||
489 | if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) | ||
490 | if (attr->max_rd_atomic > 1) | ||
491 | goto inval; | ||
492 | |||
493 | if (attr_mask & IB_QP_PATH_MIG_STATE) | ||
494 | if (attr->path_mig_state != IB_MIG_MIGRATED && | ||
495 | attr->path_mig_state != IB_MIG_REARM) | ||
496 | goto inval; | ||
497 | |||
464 | switch (new_state) { | 498 | switch (new_state) { |
465 | case IB_QPS_RESET: | 499 | case IB_QPS_RESET: |
466 | ipath_reset_qp(qp); | 500 | ipath_reset_qp(qp); |
@@ -511,6 +545,9 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
511 | if (attr_mask & IB_QP_MIN_RNR_TIMER) | 545 | if (attr_mask & IB_QP_MIN_RNR_TIMER) |
512 | qp->r_min_rnr_timer = attr->min_rnr_timer; | 546 | qp->r_min_rnr_timer = attr->min_rnr_timer; |
513 | 547 | ||
548 | if (attr_mask & IB_QP_TIMEOUT) | ||
549 | qp->timeout = attr->timeout; | ||
550 | |||
514 | if (attr_mask & IB_QP_QKEY) | 551 | if (attr_mask & IB_QP_QKEY) |
515 | qp->qkey = attr->qkey; | 552 | qp->qkey = attr->qkey; |
516 | 553 | ||
@@ -543,7 +580,7 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
543 | attr->dest_qp_num = qp->remote_qpn; | 580 | attr->dest_qp_num = qp->remote_qpn; |
544 | attr->qp_access_flags = qp->qp_access_flags; | 581 | attr->qp_access_flags = qp->qp_access_flags; |
545 | attr->cap.max_send_wr = qp->s_size - 1; | 582 | attr->cap.max_send_wr = qp->s_size - 1; |
546 | attr->cap.max_recv_wr = qp->r_rq.size - 1; | 583 | attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1; |
547 | attr->cap.max_send_sge = qp->s_max_sge; | 584 | attr->cap.max_send_sge = qp->s_max_sge; |
548 | attr->cap.max_recv_sge = qp->r_rq.max_sge; | 585 | attr->cap.max_recv_sge = qp->r_rq.max_sge; |
549 | attr->cap.max_inline_data = 0; | 586 | attr->cap.max_inline_data = 0; |
@@ -557,7 +594,7 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
557 | attr->max_dest_rd_atomic = 1; | 594 | attr->max_dest_rd_atomic = 1; |
558 | attr->min_rnr_timer = qp->r_min_rnr_timer; | 595 | attr->min_rnr_timer = qp->r_min_rnr_timer; |
559 | attr->port_num = 1; | 596 | attr->port_num = 1; |
560 | attr->timeout = 0; | 597 | attr->timeout = qp->timeout; |
561 | attr->retry_cnt = qp->s_retry_cnt; | 598 | attr->retry_cnt = qp->s_retry_cnt; |
562 | attr->rnr_retry = qp->s_rnr_retry; | 599 | attr->rnr_retry = qp->s_rnr_retry; |
563 | attr->alt_port_num = 0; | 600 | attr->alt_port_num = 0; |
@@ -569,9 +606,10 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
569 | init_attr->recv_cq = qp->ibqp.recv_cq; | 606 | init_attr->recv_cq = qp->ibqp.recv_cq; |
570 | init_attr->srq = qp->ibqp.srq; | 607 | init_attr->srq = qp->ibqp.srq; |
571 | init_attr->cap = attr->cap; | 608 | init_attr->cap = attr->cap; |
572 | init_attr->sq_sig_type = | 609 | if (qp->s_flags & (1 << IPATH_S_SIGNAL_REQ_WR)) |
573 | (qp->s_flags & (1 << IPATH_S_SIGNAL_REQ_WR)) | 610 | init_attr->sq_sig_type = IB_SIGNAL_REQ_WR; |
574 | ? IB_SIGNAL_REQ_WR : 0; | 611 | else |
612 | init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; | ||
575 | init_attr->qp_type = qp->ibqp.qp_type; | 613 | init_attr->qp_type = qp->ibqp.qp_type; |
576 | init_attr->port_num = 1; | 614 | init_attr->port_num = 1; |
577 | return 0; | 615 | return 0; |
@@ -596,13 +634,23 @@ __be32 ipath_compute_aeth(struct ipath_qp *qp) | |||
596 | } else { | 634 | } else { |
597 | u32 min, max, x; | 635 | u32 min, max, x; |
598 | u32 credits; | 636 | u32 credits; |
599 | 637 | struct ipath_rwq *wq = qp->r_rq.wq; | |
638 | u32 head; | ||
639 | u32 tail; | ||
640 | |||
641 | /* sanity check pointers before trusting them */ | ||
642 | head = wq->head; | ||
643 | if (head >= qp->r_rq.size) | ||
644 | head = 0; | ||
645 | tail = wq->tail; | ||
646 | if (tail >= qp->r_rq.size) | ||
647 | tail = 0; | ||
600 | /* | 648 | /* |
601 | * Compute the number of credits available (RWQEs). | 649 | * Compute the number of credits available (RWQEs). |
602 | * XXX Not holding the r_rq.lock here so there is a small | 650 | * XXX Not holding the r_rq.lock here so there is a small |
603 | * chance that the pair of reads is not atomic. | 651 | * chance that the pair of reads is not atomic. |
604 | */ | 652 | */ |
605 | credits = qp->r_rq.head - qp->r_rq.tail; | 653 | credits = head - tail; |
606 | if ((int)credits < 0) | 654 | if ((int)credits < 0) |
607 | credits += qp->r_rq.size; | 655 | credits += qp->r_rq.size; |
608 | /* | 656 | /* |
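The credit calculation above treats head and tail as indices into a circular queue, so a negative difference simply means the head index has wrapped past the end of the ring. The arithmetic in isolation, with illustrative names:

#include <stdint.h>

/* Wraparound-safe count of posted-but-unconsumed RWQEs, restating the
 * computation in ipath_compute_aeth() above. */
static uint32_t credits_available(uint32_t head, uint32_t tail, uint32_t size)
{
	int32_t credits = (int32_t)(head - tail);

	if (credits < 0)
		credits += (int32_t)size;	/* head wrapped around the ring */
	return (uint32_t)credits;
}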
@@ -679,27 +727,37 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, | |||
679 | case IB_QPT_UD: | 727 | case IB_QPT_UD: |
680 | case IB_QPT_SMI: | 728 | case IB_QPT_SMI: |
681 | case IB_QPT_GSI: | 729 | case IB_QPT_GSI: |
682 | qp = kmalloc(sizeof(*qp), GFP_KERNEL); | 730 | sz = sizeof(*qp); |
731 | if (init_attr->srq) { | ||
732 | struct ipath_srq *srq = to_isrq(init_attr->srq); | ||
733 | |||
734 | sz += sizeof(*qp->r_sg_list) * | ||
735 | srq->rq.max_sge; | ||
736 | } else | ||
737 | sz += sizeof(*qp->r_sg_list) * | ||
738 | init_attr->cap.max_recv_sge; | ||
739 | qp = kmalloc(sz, GFP_KERNEL); | ||
683 | if (!qp) { | 740 | if (!qp) { |
684 | vfree(swq); | ||
685 | ret = ERR_PTR(-ENOMEM); | 741 | ret = ERR_PTR(-ENOMEM); |
686 | goto bail; | 742 | goto bail_swq; |
687 | } | 743 | } |
688 | if (init_attr->srq) { | 744 | if (init_attr->srq) { |
745 | sz = 0; | ||
689 | qp->r_rq.size = 0; | 746 | qp->r_rq.size = 0; |
690 | qp->r_rq.max_sge = 0; | 747 | qp->r_rq.max_sge = 0; |
691 | qp->r_rq.wq = NULL; | 748 | qp->r_rq.wq = NULL; |
749 | init_attr->cap.max_recv_wr = 0; | ||
750 | init_attr->cap.max_recv_sge = 0; | ||
692 | } else { | 751 | } else { |
693 | qp->r_rq.size = init_attr->cap.max_recv_wr + 1; | 752 | qp->r_rq.size = init_attr->cap.max_recv_wr + 1; |
694 | qp->r_rq.max_sge = init_attr->cap.max_recv_sge; | 753 | qp->r_rq.max_sge = init_attr->cap.max_recv_sge; |
695 | sz = (sizeof(struct ipath_sge) * qp->r_rq.max_sge) + | 754 | sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) + |
696 | sizeof(struct ipath_rwqe); | 755 | sizeof(struct ipath_rwqe); |
697 | qp->r_rq.wq = vmalloc(qp->r_rq.size * sz); | 756 | qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + |
757 | qp->r_rq.size * sz); | ||
698 | if (!qp->r_rq.wq) { | 758 | if (!qp->r_rq.wq) { |
699 | kfree(qp); | ||
700 | vfree(swq); | ||
701 | ret = ERR_PTR(-ENOMEM); | 759 | ret = ERR_PTR(-ENOMEM); |
702 | goto bail; | 760 | goto bail_qp; |
703 | } | 761 | } |
704 | } | 762 | } |
705 | 763 | ||
@@ -719,24 +777,19 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, | |||
719 | qp->s_wq = swq; | 777 | qp->s_wq = swq; |
720 | qp->s_size = init_attr->cap.max_send_wr + 1; | 778 | qp->s_size = init_attr->cap.max_send_wr + 1; |
721 | qp->s_max_sge = init_attr->cap.max_send_sge; | 779 | qp->s_max_sge = init_attr->cap.max_send_sge; |
722 | qp->s_flags = init_attr->sq_sig_type == IB_SIGNAL_REQ_WR ? | 780 | if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR) |
723 | 1 << IPATH_S_SIGNAL_REQ_WR : 0; | 781 | qp->s_flags = 1 << IPATH_S_SIGNAL_REQ_WR; |
782 | else | ||
783 | qp->s_flags = 0; | ||
724 | dev = to_idev(ibpd->device); | 784 | dev = to_idev(ibpd->device); |
725 | err = ipath_alloc_qpn(&dev->qp_table, qp, | 785 | err = ipath_alloc_qpn(&dev->qp_table, qp, |
726 | init_attr->qp_type); | 786 | init_attr->qp_type); |
727 | if (err) { | 787 | if (err) { |
728 | vfree(swq); | ||
729 | vfree(qp->r_rq.wq); | ||
730 | kfree(qp); | ||
731 | ret = ERR_PTR(err); | 788 | ret = ERR_PTR(err); |
732 | goto bail; | 789 | goto bail_rwq; |
733 | } | 790 | } |
791 | qp->ip = NULL; | ||
734 | ipath_reset_qp(qp); | 792 | ipath_reset_qp(qp); |
735 | |||
736 | /* Tell the core driver that the kernel SMA is present. */ | ||
737 | if (init_attr->qp_type == IB_QPT_SMI) | ||
738 | ipath_layer_set_verbs_flags(dev->dd, | ||
739 | IPATH_VERBS_KERNEL_SMA); | ||
740 | break; | 793 | break; |
741 | 794 | ||
742 | default: | 795 | default: |
@@ -747,8 +800,63 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, | |||
747 | 800 | ||
748 | init_attr->cap.max_inline_data = 0; | 801 | init_attr->cap.max_inline_data = 0; |
749 | 802 | ||
803 | /* | ||
804 | * Return the address of the RWQ as the offset to mmap. | ||
805 | * See ipath_mmap() for details. | ||
806 | */ | ||
807 | if (udata && udata->outlen >= sizeof(__u64)) { | ||
808 | struct ipath_mmap_info *ip; | ||
809 | __u64 offset = (__u64) qp->r_rq.wq; | ||
810 | int err; | ||
811 | |||
812 | err = ib_copy_to_udata(udata, &offset, sizeof(offset)); | ||
813 | if (err) { | ||
814 | ret = ERR_PTR(err); | ||
815 | goto bail_rwq; | ||
816 | } | ||
817 | |||
818 | if (qp->r_rq.wq) { | ||
819 | /* Allocate info for ipath_mmap(). */ | ||
820 | ip = kmalloc(sizeof(*ip), GFP_KERNEL); | ||
821 | if (!ip) { | ||
822 | ret = ERR_PTR(-ENOMEM); | ||
823 | goto bail_rwq; | ||
824 | } | ||
825 | qp->ip = ip; | ||
826 | ip->context = ibpd->uobject->context; | ||
827 | ip->obj = qp->r_rq.wq; | ||
828 | kref_init(&ip->ref); | ||
829 | ip->mmap_cnt = 0; | ||
830 | ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) + | ||
831 | qp->r_rq.size * sz); | ||
832 | spin_lock_irq(&dev->pending_lock); | ||
833 | ip->next = dev->pending_mmaps; | ||
834 | dev->pending_mmaps = ip; | ||
835 | spin_unlock_irq(&dev->pending_lock); | ||
836 | } | ||
837 | } | ||
838 | |||
839 | spin_lock(&dev->n_qps_lock); | ||
840 | if (dev->n_qps_allocated == ib_ipath_max_qps) { | ||
841 | spin_unlock(&dev->n_qps_lock); | ||
842 | ret = ERR_PTR(-ENOMEM); | ||
843 | goto bail_ip; | ||
844 | } | ||
845 | |||
846 | dev->n_qps_allocated++; | ||
847 | spin_unlock(&dev->n_qps_lock); | ||
848 | |||
750 | ret = &qp->ibqp; | 849 | ret = &qp->ibqp; |
850 | goto bail; | ||
751 | 851 | ||
852 | bail_ip: | ||
853 | kfree(qp->ip); | ||
854 | bail_rwq: | ||
855 | vfree(qp->r_rq.wq); | ||
856 | bail_qp: | ||
857 | kfree(qp); | ||
858 | bail_swq: | ||
859 | vfree(swq); | ||
752 | bail: | 860 | bail: |
753 | return ret; | 861 | return ret; |
754 | } | 862 | } |
@@ -768,15 +876,12 @@ int ipath_destroy_qp(struct ib_qp *ibqp) | |||
768 | struct ipath_ibdev *dev = to_idev(ibqp->device); | 876 | struct ipath_ibdev *dev = to_idev(ibqp->device); |
769 | unsigned long flags; | 877 | unsigned long flags; |
770 | 878 | ||
771 | /* Tell the core driver that the kernel SMA is gone. */ | 879 | spin_lock_irqsave(&qp->s_lock, flags); |
772 | if (qp->ibqp.qp_type == IB_QPT_SMI) | ||
773 | ipath_layer_set_verbs_flags(dev->dd, 0); | ||
774 | |||
775 | spin_lock_irqsave(&qp->r_rq.lock, flags); | ||
776 | spin_lock(&qp->s_lock); | ||
777 | qp->state = IB_QPS_ERR; | 880 | qp->state = IB_QPS_ERR; |
778 | spin_unlock(&qp->s_lock); | 881 | spin_unlock_irqrestore(&qp->s_lock, flags); |
779 | spin_unlock_irqrestore(&qp->r_rq.lock, flags); | 882 | spin_lock(&dev->n_qps_lock); |
883 | dev->n_qps_allocated--; | ||
884 | spin_unlock(&dev->n_qps_lock); | ||
780 | 885 | ||
781 | /* Stop the sending tasklet. */ | 886 | /* Stop the sending tasklet. */ |
782 | tasklet_kill(&qp->s_task); | 887 | tasklet_kill(&qp->s_task); |
@@ -797,8 +902,11 @@ int ipath_destroy_qp(struct ib_qp *ibqp) | |||
797 | if (atomic_read(&qp->refcount) != 0) | 902 | if (atomic_read(&qp->refcount) != 0) |
798 | ipath_free_qp(&dev->qp_table, qp); | 903 | ipath_free_qp(&dev->qp_table, qp); |
799 | 904 | ||
905 | if (qp->ip) | ||
906 | kref_put(&qp->ip->ref, ipath_release_mmap_info); | ||
907 | else | ||
908 | vfree(qp->r_rq.wq); | ||
800 | vfree(qp->s_wq); | 909 | vfree(qp->s_wq); |
801 | vfree(qp->r_rq.wq); | ||
802 | kfree(qp); | 910 | kfree(qp); |
803 | return 0; | 911 | return 0; |
804 | } | 912 | } |
@@ -850,8 +958,8 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc) | |||
850 | struct ipath_ibdev *dev = to_idev(qp->ibqp.device); | 958 | struct ipath_ibdev *dev = to_idev(qp->ibqp.device); |
851 | struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last); | 959 | struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last); |
852 | 960 | ||
853 | _VERBS_INFO("Send queue error on QP%d/%d: err: %d\n", | 961 | ipath_dbg(KERN_INFO "Send queue error on QP%d/%d: err: %d\n", |
854 | qp->ibqp.qp_num, qp->remote_qpn, wc->status); | 962 | qp->ibqp.qp_num, qp->remote_qpn, wc->status); |
855 | 963 | ||
856 | spin_lock(&dev->pending_lock); | 964 | spin_lock(&dev->pending_lock); |
857 | /* XXX What if it's already removed by the timeout code? */ | 965 | /* XXX What if it's already removed by the timeout code? */ |
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c index 774d1615ce2f..a08654042c03 100644 --- a/drivers/infiniband/hw/ipath/ipath_rc.c +++ b/drivers/infiniband/hw/ipath/ipath_rc.c | |||
@@ -32,7 +32,7 @@ | |||
32 | */ | 32 | */ |
33 | 33 | ||
34 | #include "ipath_verbs.h" | 34 | #include "ipath_verbs.h" |
35 | #include "ipath_common.h" | 35 | #include "ipath_kernel.h" |
36 | 36 | ||
37 | /* cut down ridiculously long IB macro names */ | 37 | /* cut down ridiculously long IB macro names */ |
38 | #define OP(x) IB_OPCODE_RC_##x | 38 | #define OP(x) IB_OPCODE_RC_##x |
@@ -540,7 +540,7 @@ static void send_rc_ack(struct ipath_qp *qp) | |||
540 | lrh0 = IPATH_LRH_GRH; | 540 | lrh0 = IPATH_LRH_GRH; |
541 | } | 541 | } |
542 | /* read pkey_index w/o lock (its atomic) */ | 542 | /* read pkey_index w/o lock (its atomic) */ |
543 | bth0 = ipath_layer_get_pkey(dev->dd, qp->s_pkey_index); | 543 | bth0 = ipath_get_pkey(dev->dd, qp->s_pkey_index); |
544 | if (qp->r_nak_state) | 544 | if (qp->r_nak_state) |
545 | ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) | | 545 | ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) | |
546 | (qp->r_nak_state << | 546 | (qp->r_nak_state << |
@@ -557,7 +557,7 @@ static void send_rc_ack(struct ipath_qp *qp) | |||
557 | hdr.lrh[0] = cpu_to_be16(lrh0); | 557 | hdr.lrh[0] = cpu_to_be16(lrh0); |
558 | hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); | 558 | hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); |
559 | hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC); | 559 | hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC); |
560 | hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd)); | 560 | hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid); |
561 | ohdr->bth[0] = cpu_to_be32(bth0); | 561 | ohdr->bth[0] = cpu_to_be32(bth0); |
562 | ohdr->bth[1] = cpu_to_be32(qp->remote_qpn); | 562 | ohdr->bth[1] = cpu_to_be32(qp->remote_qpn); |
563 | ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK); | 563 | ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK); |
@@ -1323,8 +1323,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, | |||
1323 | * the eager header buffer size to 56 bytes so the last 4 | 1323 | * the eager header buffer size to 56 bytes so the last 4 |
1324 | * bytes of the BTH header (PSN) is in the data buffer. | 1324 | * bytes of the BTH header (PSN) is in the data buffer. |
1325 | */ | 1325 | */ |
1326 | header_in_data = | 1326 | header_in_data = dev->dd->ipath_rcvhdrentsize == 16; |
1327 | ipath_layer_get_rcvhdrentsize(dev->dd) == 16; | ||
1328 | if (header_in_data) { | 1327 | if (header_in_data) { |
1329 | psn = be32_to_cpu(((__be32 *) data)[0]); | 1328 | psn = be32_to_cpu(((__be32 *) data)[0]); |
1330 | data += sizeof(__be32); | 1329 | data += sizeof(__be32); |
diff --git a/drivers/infiniband/hw/ipath/ipath_registers.h b/drivers/infiniband/hw/ipath/ipath_registers.h index 89df8f5ea998..6e23b3d632b8 100644 --- a/drivers/infiniband/hw/ipath/ipath_registers.h +++ b/drivers/infiniband/hw/ipath/ipath_registers.h | |||
@@ -36,8 +36,7 @@ | |||
36 | 36 | ||
37 | /* | 37 | /* |
38 | * This file should only be included by kernel source, and by the diags. It | 38 | * This file should only be included by kernel source, and by the diags. It |
39 | * defines the registers, and their contents, for the InfiniPath HT-400 | 39 | * defines the registers, and their contents, for InfiniPath chips. |
40 | * chip. | ||
41 | */ | 40 | */ |
42 | 41 | ||
43 | /* | 42 | /* |
@@ -283,10 +282,12 @@ | |||
283 | #define INFINIPATH_XGXS_RESET 0x7ULL | 282 | #define INFINIPATH_XGXS_RESET 0x7ULL |
284 | #define INFINIPATH_XGXS_MDIOADDR_MASK 0xfULL | 283 | #define INFINIPATH_XGXS_MDIOADDR_MASK 0xfULL |
285 | #define INFINIPATH_XGXS_MDIOADDR_SHIFT 4 | 284 | #define INFINIPATH_XGXS_MDIOADDR_SHIFT 4 |
285 | #define INFINIPATH_XGXS_RX_POL_SHIFT 19 | ||
286 | #define INFINIPATH_XGXS_RX_POL_MASK 0xfULL | ||
286 | 287 | ||
287 | #define INFINIPATH_RT_ADDR_MASK 0xFFFFFFFFFFULL /* 40 bits valid */ | 288 | #define INFINIPATH_RT_ADDR_MASK 0xFFFFFFFFFFULL /* 40 bits valid */ |
288 | 289 | ||
289 | /* TID entries (memory), HT400-only */ | 290 | /* TID entries (memory), HT-only */ |
290 | #define INFINIPATH_RT_VALID 0x8000000000000000ULL | 291 | #define INFINIPATH_RT_VALID 0x8000000000000000ULL |
291 | #define INFINIPATH_RT_ADDR_SHIFT 0 | 292 | #define INFINIPATH_RT_ADDR_SHIFT 0 |
292 | #define INFINIPATH_RT_BUFSIZE_MASK 0x3FFF | 293 | #define INFINIPATH_RT_BUFSIZE_MASK 0x3FFF |
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c index 772bc59fb85c..5c1da2d25e03 100644 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c | |||
@@ -32,7 +32,7 @@ | |||
32 | */ | 32 | */ |
33 | 33 | ||
34 | #include "ipath_verbs.h" | 34 | #include "ipath_verbs.h" |
35 | #include "ipath_common.h" | 35 | #include "ipath_kernel.h" |
36 | 36 | ||
37 | /* | 37 | /* |
38 | * Convert the AETH RNR timeout code into the number of milliseconds. | 38 | * Convert the AETH RNR timeout code into the number of milliseconds. |
@@ -106,6 +106,54 @@ void ipath_insert_rnr_queue(struct ipath_qp *qp) | |||
106 | spin_unlock_irqrestore(&dev->pending_lock, flags); | 106 | spin_unlock_irqrestore(&dev->pending_lock, flags); |
107 | } | 107 | } |
108 | 108 | ||
109 | static int init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe) | ||
110 | { | ||
111 | struct ipath_ibdev *dev = to_idev(qp->ibqp.device); | ||
112 | int user = to_ipd(qp->ibqp.pd)->user; | ||
113 | int i, j, ret; | ||
114 | struct ib_wc wc; | ||
115 | |||
116 | qp->r_len = 0; | ||
117 | for (i = j = 0; i < wqe->num_sge; i++) { | ||
118 | if (wqe->sg_list[i].length == 0) | ||
119 | continue; | ||
120 | /* Check LKEY */ | ||
121 | if ((user && wqe->sg_list[i].lkey == 0) || | ||
122 | !ipath_lkey_ok(&dev->lk_table, | ||
123 | &qp->r_sg_list[j], &wqe->sg_list[i], | ||
124 | IB_ACCESS_LOCAL_WRITE)) | ||
125 | goto bad_lkey; | ||
126 | qp->r_len += wqe->sg_list[i].length; | ||
127 | j++; | ||
128 | } | ||
129 | qp->r_sge.sge = qp->r_sg_list[0]; | ||
130 | qp->r_sge.sg_list = qp->r_sg_list + 1; | ||
131 | qp->r_sge.num_sge = j; | ||
132 | ret = 1; | ||
133 | goto bail; | ||
134 | |||
135 | bad_lkey: | ||
136 | wc.wr_id = wqe->wr_id; | ||
137 | wc.status = IB_WC_LOC_PROT_ERR; | ||
138 | wc.opcode = IB_WC_RECV; | ||
139 | wc.vendor_err = 0; | ||
140 | wc.byte_len = 0; | ||
141 | wc.imm_data = 0; | ||
142 | wc.qp_num = qp->ibqp.qp_num; | ||
143 | wc.src_qp = 0; | ||
144 | wc.wc_flags = 0; | ||
145 | wc.pkey_index = 0; | ||
146 | wc.slid = 0; | ||
147 | wc.sl = 0; | ||
148 | wc.dlid_path_bits = 0; | ||
149 | wc.port_num = 0; | ||
150 | /* Signal solicited completion event. */ | ||
151 | ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); | ||
152 | ret = 0; | ||
153 | bail: | ||
154 | return ret; | ||
155 | } | ||
156 | |||
109 | /** | 157 | /** |
110 | * ipath_get_rwqe - copy the next RWQE into the QP's RWQE | 158 | * ipath_get_rwqe - copy the next RWQE into the QP's RWQE |
111 | * @qp: the QP | 159 | * @qp: the QP |
@@ -119,71 +167,71 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only) | |||
119 | { | 167 | { |
120 | unsigned long flags; | 168 | unsigned long flags; |
121 | struct ipath_rq *rq; | 169 | struct ipath_rq *rq; |
170 | struct ipath_rwq *wq; | ||
122 | struct ipath_srq *srq; | 171 | struct ipath_srq *srq; |
123 | struct ipath_rwqe *wqe; | 172 | struct ipath_rwqe *wqe; |
124 | int ret = 1; | 173 | void (*handler)(struct ib_event *, void *); |
174 | u32 tail; | ||
175 | int ret; | ||
125 | 176 | ||
126 | if (!qp->ibqp.srq) { | 177 | if (qp->ibqp.srq) { |
178 | srq = to_isrq(qp->ibqp.srq); | ||
179 | handler = srq->ibsrq.event_handler; | ||
180 | rq = &srq->rq; | ||
181 | } else { | ||
182 | srq = NULL; | ||
183 | handler = NULL; | ||
127 | rq = &qp->r_rq; | 184 | rq = &qp->r_rq; |
128 | spin_lock_irqsave(&rq->lock, flags); | ||
129 | |||
130 | if (unlikely(rq->tail == rq->head)) { | ||
131 | ret = 0; | ||
132 | goto done; | ||
133 | } | ||
134 | wqe = get_rwqe_ptr(rq, rq->tail); | ||
135 | qp->r_wr_id = wqe->wr_id; | ||
136 | if (!wr_id_only) { | ||
137 | qp->r_sge.sge = wqe->sg_list[0]; | ||
138 | qp->r_sge.sg_list = wqe->sg_list + 1; | ||
139 | qp->r_sge.num_sge = wqe->num_sge; | ||
140 | qp->r_len = wqe->length; | ||
141 | } | ||
142 | if (++rq->tail >= rq->size) | ||
143 | rq->tail = 0; | ||
144 | goto done; | ||
145 | } | 185 | } |
146 | 186 | ||
147 | srq = to_isrq(qp->ibqp.srq); | ||
148 | rq = &srq->rq; | ||
149 | spin_lock_irqsave(&rq->lock, flags); | 187 | spin_lock_irqsave(&rq->lock, flags); |
150 | 188 | wq = rq->wq; | |
151 | if (unlikely(rq->tail == rq->head)) { | 189 | tail = wq->tail; |
152 | ret = 0; | 190 | /* Validate tail before using it since it is user writable. */ |
153 | goto done; | 191 | if (tail >= rq->size) |
154 | } | 192 | tail = 0; |
155 | wqe = get_rwqe_ptr(rq, rq->tail); | 193 | do { |
194 | if (unlikely(tail == wq->head)) { | ||
195 | spin_unlock_irqrestore(&rq->lock, flags); | ||
196 | ret = 0; | ||
197 | goto bail; | ||
198 | } | ||
199 | wqe = get_rwqe_ptr(rq, tail); | ||
200 | if (++tail >= rq->size) | ||
201 | tail = 0; | ||
202 | } while (!wr_id_only && !init_sge(qp, wqe)); | ||
156 | qp->r_wr_id = wqe->wr_id; | 203 | qp->r_wr_id = wqe->wr_id; |
157 | if (!wr_id_only) { | 204 | wq->tail = tail; |
158 | qp->r_sge.sge = wqe->sg_list[0]; | 205 | |
159 | qp->r_sge.sg_list = wqe->sg_list + 1; | 206 | ret = 1; |
160 | qp->r_sge.num_sge = wqe->num_sge; | 207 | if (handler) { |
161 | qp->r_len = wqe->length; | ||
162 | } | ||
163 | if (++rq->tail >= rq->size) | ||
164 | rq->tail = 0; | ||
165 | if (srq->ibsrq.event_handler) { | ||
166 | struct ib_event ev; | ||
167 | u32 n; | 208 | u32 n; |
168 | 209 | ||
169 | if (rq->head < rq->tail) | 210 | /* |
170 | n = rq->size + rq->head - rq->tail; | 211 | * validate head pointer value and compute |
212 | * the number of remaining WQEs. | ||
213 | */ | ||
214 | n = wq->head; | ||
215 | if (n >= rq->size) | ||
216 | n = 0; | ||
217 | if (n < tail) | ||
218 | n += rq->size - tail; | ||
171 | else | 219 | else |
172 | n = rq->head - rq->tail; | 220 | n -= tail; |
173 | if (n < srq->limit) { | 221 | if (n < srq->limit) { |
222 | struct ib_event ev; | ||
223 | |||
174 | srq->limit = 0; | 224 | srq->limit = 0; |
175 | spin_unlock_irqrestore(&rq->lock, flags); | 225 | spin_unlock_irqrestore(&rq->lock, flags); |
176 | ev.device = qp->ibqp.device; | 226 | ev.device = qp->ibqp.device; |
177 | ev.element.srq = qp->ibqp.srq; | 227 | ev.element.srq = qp->ibqp.srq; |
178 | ev.event = IB_EVENT_SRQ_LIMIT_REACHED; | 228 | ev.event = IB_EVENT_SRQ_LIMIT_REACHED; |
179 | srq->ibsrq.event_handler(&ev, | 229 | handler(&ev, srq->ibsrq.srq_context); |
180 | srq->ibsrq.srq_context); | ||
181 | goto bail; | 230 | goto bail; |
182 | } | 231 | } |
183 | } | 232 | } |
184 | |||
185 | done: | ||
186 | spin_unlock_irqrestore(&rq->lock, flags); | 233 | spin_unlock_irqrestore(&rq->lock, flags); |
234 | |||
187 | bail: | 235 | bail: |
188 | return ret; | 236 | return ret; |
189 | } | 237 | } |
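Because the receive queue can now live in memory mapped by userspace, its head and tail indices are untrusted: the code above snapshots them, clamps anything out of range to zero, and only then walks the ring. The same check appears in ipath_error_qp() and ipath_compute_aeth(). A stripped-down version of that defensive read, with illustrative types:

#include <stdint.h>

struct shared_rq {
	uint32_t size;			/* kernel-owned, trusted */
	volatile uint32_t head;		/* lives in user-mapped memory */
	volatile uint32_t tail;		/* lives in user-mapped memory */
};

/* Consume one ring entry; returns 1 and the entry index, or 0 if empty. */
static int consume_entry(struct shared_rq *rq, uint32_t *entry)
{
	uint32_t tail = rq->tail;	/* snapshot the untrusted value */

	if (tail >= rq->size)		/* validate before using it */
		tail = 0;
	if (tail == rq->head)		/* ring empty */
		return 0;
	*entry = tail;			/* caller works on this slot */
	if (++tail >= rq->size)
		tail = 0;
	rq->tail = tail;		/* publish the advanced index */
	return 1;
}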
@@ -422,6 +470,15 @@ done: | |||
422 | wake_up(&qp->wait); | 470 | wake_up(&qp->wait); |
423 | } | 471 | } |
424 | 472 | ||
473 | static int want_buffer(struct ipath_devdata *dd) | ||
474 | { | ||
475 | set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl); | ||
476 | ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, | ||
477 | dd->ipath_sendctrl); | ||
478 | |||
479 | return 0; | ||
480 | } | ||
481 | |||
425 | /** | 482 | /** |
426 | * ipath_no_bufs_available - tell the layer driver we need buffers | 483 | * ipath_no_bufs_available - tell the layer driver we need buffers |
427 | * @qp: the QP that caused the problem | 484 | * @qp: the QP that caused the problem |
@@ -438,7 +495,7 @@ void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev) | |||
438 | list_add_tail(&qp->piowait, &dev->piowait); | 495 | list_add_tail(&qp->piowait, &dev->piowait); |
439 | spin_unlock_irqrestore(&dev->pending_lock, flags); | 496 | spin_unlock_irqrestore(&dev->pending_lock, flags); |
440 | /* | 497 | /* |
441 | * Note that as soon as ipath_layer_want_buffer() is called and | 498 | * Note that as soon as want_buffer() is called and |
442 | * possibly before it returns, ipath_ib_piobufavail() | 499 | * possibly before it returns, ipath_ib_piobufavail() |
443 | * could be called. If we are still in the tasklet function, | 500 | * could be called. If we are still in the tasklet function, |
444 | * tasklet_hi_schedule() will not call us until the next time | 501 | * tasklet_hi_schedule() will not call us until the next time |
@@ -448,7 +505,7 @@ void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev) | |||
448 | */ | 505 | */ |
449 | clear_bit(IPATH_S_BUSY, &qp->s_flags); | 506 | clear_bit(IPATH_S_BUSY, &qp->s_flags); |
450 | tasklet_unlock(&qp->s_task); | 507 | tasklet_unlock(&qp->s_task); |
451 | ipath_layer_want_buffer(dev->dd); | 508 | want_buffer(dev->dd); |
452 | dev->n_piowait++; | 509 | dev->n_piowait++; |
453 | } | 510 | } |
454 | 511 | ||
@@ -563,7 +620,7 @@ u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr, | |||
563 | hdr->hop_limit = grh->hop_limit; | 620 | hdr->hop_limit = grh->hop_limit; |
564 | /* The SGID is 32-bit aligned. */ | 621 | /* The SGID is 32-bit aligned. */ |
565 | hdr->sgid.global.subnet_prefix = dev->gid_prefix; | 622 | hdr->sgid.global.subnet_prefix = dev->gid_prefix; |
566 | hdr->sgid.global.interface_id = ipath_layer_get_guid(dev->dd); | 623 | hdr->sgid.global.interface_id = dev->dd->ipath_guid; |
567 | hdr->dgid = grh->dgid; | 624 | hdr->dgid = grh->dgid; |
568 | 625 | ||
569 | /* GRH header size in 32-bit words. */ | 626 | /* GRH header size in 32-bit words. */ |
@@ -595,8 +652,7 @@ void ipath_do_ruc_send(unsigned long data) | |||
595 | if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags)) | 652 | if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags)) |
596 | goto bail; | 653 | goto bail; |
597 | 654 | ||
598 | if (unlikely(qp->remote_ah_attr.dlid == | 655 | if (unlikely(qp->remote_ah_attr.dlid == dev->dd->ipath_lid)) { |
599 | ipath_layer_get_lid(dev->dd))) { | ||
600 | ipath_ruc_loopback(qp); | 656 | ipath_ruc_loopback(qp); |
601 | goto clear; | 657 | goto clear; |
602 | } | 658 | } |
@@ -663,8 +719,8 @@ again: | |||
663 | qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); | 719 | qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); |
664 | qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + | 720 | qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + |
665 | SIZE_OF_CRC); | 721 | SIZE_OF_CRC); |
666 | qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd)); | 722 | qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid); |
667 | bth0 |= ipath_layer_get_pkey(dev->dd, qp->s_pkey_index); | 723 | bth0 |= ipath_get_pkey(dev->dd, qp->s_pkey_index); |
668 | bth0 |= extra_bytes << 20; | 724 | bth0 |= extra_bytes << 20; |
669 | ohdr->bth[0] = cpu_to_be32(bth0); | 725 | ohdr->bth[0] = cpu_to_be32(bth0); |
670 | ohdr->bth[1] = cpu_to_be32(qp->remote_qpn); | 726 | ohdr->bth[1] = cpu_to_be32(qp->remote_qpn); |
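The new want_buffer() helper replaces the old ipath_layer_want_buffer() call: ipath_no_bufs_available() queues the QP on dev->piowait, clears IPATH_S_BUSY, and sets IPATH_S_PIOINTBUFAVAIL in the send control register so the chip interrupts once a PIO buffer frees up and ipath_ib_piobufavail() can restart the queued senders. The fragment below is a minimal, self-contained model of that queue-and-arm pattern in plain C; the names and list handling are illustrative only, not the driver's code.

    #include <stdio.h>

    struct waiter {                     /* stands in for a QP on dev->piowait */
        const char *name;
        struct waiter *next;
    };

    static struct waiter *piowait;      /* models dev->piowait */
    static int pio_intr_armed;          /* models IPATH_S_PIOINTBUFAVAIL */

    /* models ipath_no_bufs_available(): park the sender, then arm the
     * "PIO buffer available" interrupt (what want_buffer() does above) */
    static void no_bufs_available(struct waiter *w)
    {
        w->next = piowait;
        piowait = w;
        pio_intr_armed = 1;
    }

    /* models ipath_ib_piobufavail(): buffers freed, restart every waiter */
    static void piobufavail(void)
    {
        while (piowait) {
            struct waiter *w = piowait;

            piowait = w->next;
            printf("rescheduling send for %s\n", w->name);
        }
        pio_intr_armed = 0;
    }

    int main(void)
    {
        struct waiter qp = { "qp0", NULL };

        no_bufs_available(&qp);
        piobufavail();
        return 0;
    }

As the comment in the hunk notes, the notification can arrive before want_buffer() even returns, which is why the QP is queued and IPATH_S_BUSY cleared first.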
diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c index f760434660bd..941e866d9517 100644 --- a/drivers/infiniband/hw/ipath/ipath_srq.c +++ b/drivers/infiniband/hw/ipath/ipath_srq.c | |||
@@ -48,66 +48,39 @@ int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr, | |||
48 | struct ib_recv_wr **bad_wr) | 48 | struct ib_recv_wr **bad_wr) |
49 | { | 49 | { |
50 | struct ipath_srq *srq = to_isrq(ibsrq); | 50 | struct ipath_srq *srq = to_isrq(ibsrq); |
51 | struct ipath_ibdev *dev = to_idev(ibsrq->device); | 51 | struct ipath_rwq *wq; |
52 | unsigned long flags; | 52 | unsigned long flags; |
53 | int ret; | 53 | int ret; |
54 | 54 | ||
55 | for (; wr; wr = wr->next) { | 55 | for (; wr; wr = wr->next) { |
56 | struct ipath_rwqe *wqe; | 56 | struct ipath_rwqe *wqe; |
57 | u32 next; | 57 | u32 next; |
58 | int i, j; | 58 | int i; |
59 | 59 | ||
60 | if (wr->num_sge > srq->rq.max_sge) { | 60 | if ((unsigned) wr->num_sge > srq->rq.max_sge) { |
61 | *bad_wr = wr; | 61 | *bad_wr = wr; |
62 | ret = -ENOMEM; | 62 | ret = -ENOMEM; |
63 | goto bail; | 63 | goto bail; |
64 | } | 64 | } |
65 | 65 | ||
66 | spin_lock_irqsave(&srq->rq.lock, flags); | 66 | spin_lock_irqsave(&srq->rq.lock, flags); |
67 | next = srq->rq.head + 1; | 67 | wq = srq->rq.wq; |
68 | next = wq->head + 1; | ||
68 | if (next >= srq->rq.size) | 69 | if (next >= srq->rq.size) |
69 | next = 0; | 70 | next = 0; |
70 | if (next == srq->rq.tail) { | 71 | if (next == wq->tail) { |
71 | spin_unlock_irqrestore(&srq->rq.lock, flags); | 72 | spin_unlock_irqrestore(&srq->rq.lock, flags); |
72 | *bad_wr = wr; | 73 | *bad_wr = wr; |
73 | ret = -ENOMEM; | 74 | ret = -ENOMEM; |
74 | goto bail; | 75 | goto bail; |
75 | } | 76 | } |
76 | 77 | ||
77 | wqe = get_rwqe_ptr(&srq->rq, srq->rq.head); | 78 | wqe = get_rwqe_ptr(&srq->rq, wq->head); |
78 | wqe->wr_id = wr->wr_id; | 79 | wqe->wr_id = wr->wr_id; |
79 | wqe->sg_list[0].mr = NULL; | 80 | wqe->num_sge = wr->num_sge; |
80 | wqe->sg_list[0].vaddr = NULL; | 81 | for (i = 0; i < wr->num_sge; i++) |
81 | wqe->sg_list[0].length = 0; | 82 | wqe->sg_list[i] = wr->sg_list[i]; |
82 | wqe->sg_list[0].sge_length = 0; | 83 | wq->head = next; |
83 | wqe->length = 0; | ||
84 | for (i = 0, j = 0; i < wr->num_sge; i++) { | ||
85 | /* Check LKEY */ | ||
86 | if (to_ipd(srq->ibsrq.pd)->user && | ||
87 | wr->sg_list[i].lkey == 0) { | ||
88 | spin_unlock_irqrestore(&srq->rq.lock, | ||
89 | flags); | ||
90 | *bad_wr = wr; | ||
91 | ret = -EINVAL; | ||
92 | goto bail; | ||
93 | } | ||
94 | if (wr->sg_list[i].length == 0) | ||
95 | continue; | ||
96 | if (!ipath_lkey_ok(&dev->lk_table, | ||
97 | &wqe->sg_list[j], | ||
98 | &wr->sg_list[i], | ||
99 | IB_ACCESS_LOCAL_WRITE)) { | ||
100 | spin_unlock_irqrestore(&srq->rq.lock, | ||
101 | flags); | ||
102 | *bad_wr = wr; | ||
103 | ret = -EINVAL; | ||
104 | goto bail; | ||
105 | } | ||
106 | wqe->length += wr->sg_list[i].length; | ||
107 | j++; | ||
108 | } | ||
109 | wqe->num_sge = j; | ||
110 | srq->rq.head = next; | ||
111 | spin_unlock_irqrestore(&srq->rq.lock, flags); | 84 | spin_unlock_irqrestore(&srq->rq.lock, flags); |
112 | } | 85 | } |
113 | ret = 0; | 86 | ret = 0; |
@@ -133,53 +106,95 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd, | |||
133 | 106 | ||
134 | if (dev->n_srqs_allocated == ib_ipath_max_srqs) { | 107 | if (dev->n_srqs_allocated == ib_ipath_max_srqs) { |
135 | ret = ERR_PTR(-ENOMEM); | 108 | ret = ERR_PTR(-ENOMEM); |
136 | goto bail; | 109 | goto done; |
137 | } | 110 | } |
138 | 111 | ||
139 | if (srq_init_attr->attr.max_wr == 0) { | 112 | if (srq_init_attr->attr.max_wr == 0) { |
140 | ret = ERR_PTR(-EINVAL); | 113 | ret = ERR_PTR(-EINVAL); |
141 | goto bail; | 114 | goto done; |
142 | } | 115 | } |
143 | 116 | ||
144 | if ((srq_init_attr->attr.max_sge > ib_ipath_max_srq_sges) || | 117 | if ((srq_init_attr->attr.max_sge > ib_ipath_max_srq_sges) || |
145 | (srq_init_attr->attr.max_wr > ib_ipath_max_srq_wrs)) { | 118 | (srq_init_attr->attr.max_wr > ib_ipath_max_srq_wrs)) { |
146 | ret = ERR_PTR(-EINVAL); | 119 | ret = ERR_PTR(-EINVAL); |
147 | goto bail; | 120 | goto done; |
148 | } | 121 | } |
149 | 122 | ||
150 | srq = kmalloc(sizeof(*srq), GFP_KERNEL); | 123 | srq = kmalloc(sizeof(*srq), GFP_KERNEL); |
151 | if (!srq) { | 124 | if (!srq) { |
152 | ret = ERR_PTR(-ENOMEM); | 125 | ret = ERR_PTR(-ENOMEM); |
153 | goto bail; | 126 | goto done; |
154 | } | 127 | } |
155 | 128 | ||
156 | /* | 129 | /* |
157 | * Need to use vmalloc() if we want to support large #s of entries. | 130 | * Need to use vmalloc() if we want to support large #s of entries. |
158 | */ | 131 | */ |
159 | srq->rq.size = srq_init_attr->attr.max_wr + 1; | 132 | srq->rq.size = srq_init_attr->attr.max_wr + 1; |
160 | sz = sizeof(struct ipath_sge) * srq_init_attr->attr.max_sge + | 133 | srq->rq.max_sge = srq_init_attr->attr.max_sge; |
134 | sz = sizeof(struct ib_sge) * srq->rq.max_sge + | ||
161 | sizeof(struct ipath_rwqe); | 135 | sizeof(struct ipath_rwqe); |
162 | srq->rq.wq = vmalloc(srq->rq.size * sz); | 136 | srq->rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + srq->rq.size * sz); |
163 | if (!srq->rq.wq) { | 137 | if (!srq->rq.wq) { |
164 | kfree(srq); | ||
165 | ret = ERR_PTR(-ENOMEM); | 138 | ret = ERR_PTR(-ENOMEM); |
166 | goto bail; | 139 | goto bail_srq; |
167 | } | 140 | } |
168 | 141 | ||
169 | /* | 142 | /* |
143 | * Return the address of the RWQ as the offset to mmap. | ||
144 | * See ipath_mmap() for details. | ||
145 | */ | ||
146 | if (udata && udata->outlen >= sizeof(__u64)) { | ||
147 | struct ipath_mmap_info *ip; | ||
148 | __u64 offset = (__u64) srq->rq.wq; | ||
149 | int err; | ||
150 | |||
151 | err = ib_copy_to_udata(udata, &offset, sizeof(offset)); | ||
152 | if (err) { | ||
153 | ret = ERR_PTR(err); | ||
154 | goto bail_wq; | ||
155 | } | ||
156 | |||
157 | /* Allocate info for ipath_mmap(). */ | ||
158 | ip = kmalloc(sizeof(*ip), GFP_KERNEL); | ||
159 | if (!ip) { | ||
160 | ret = ERR_PTR(-ENOMEM); | ||
161 | goto bail_wq; | ||
162 | } | ||
163 | srq->ip = ip; | ||
164 | ip->context = ibpd->uobject->context; | ||
165 | ip->obj = srq->rq.wq; | ||
166 | kref_init(&ip->ref); | ||
167 | ip->mmap_cnt = 0; | ||
168 | ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) + | ||
169 | srq->rq.size * sz); | ||
170 | spin_lock_irq(&dev->pending_lock); | ||
171 | ip->next = dev->pending_mmaps; | ||
172 | dev->pending_mmaps = ip; | ||
173 | spin_unlock_irq(&dev->pending_lock); | ||
174 | } else | ||
175 | srq->ip = NULL; | ||
176 | |||
177 | /* | ||
170 | * ib_create_srq() will initialize srq->ibsrq. | 178 | * ib_create_srq() will initialize srq->ibsrq. |
171 | */ | 179 | */ |
172 | spin_lock_init(&srq->rq.lock); | 180 | spin_lock_init(&srq->rq.lock); |
173 | srq->rq.head = 0; | 181 | srq->rq.wq->head = 0; |
174 | srq->rq.tail = 0; | 182 | srq->rq.wq->tail = 0; |
175 | srq->rq.max_sge = srq_init_attr->attr.max_sge; | 183 | srq->rq.max_sge = srq_init_attr->attr.max_sge; |
176 | srq->limit = srq_init_attr->attr.srq_limit; | 184 | srq->limit = srq_init_attr->attr.srq_limit; |
177 | 185 | ||
186 | dev->n_srqs_allocated++; | ||
187 | |||
178 | ret = &srq->ibsrq; | 188 | ret = &srq->ibsrq; |
189 | goto done; | ||
179 | 190 | ||
180 | dev->n_srqs_allocated++; | 191 | bail_wq: |
192 | vfree(srq->rq.wq); | ||
181 | 193 | ||
182 | bail: | 194 | bail_srq: |
195 | kfree(srq); | ||
196 | |||
197 | done: | ||
183 | return ret; | 198 | return ret; |
184 | } | 199 | } |
185 | 200 | ||
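ipath_create_srq() now allocates the receive work queue with vmalloc_user() and, when a udata buffer is present, returns the kernel virtual address of that RWQ to userspace as an opaque mmap offset, queueing an ipath_mmap_info on dev->pending_mmaps for ipath_mmap() to match later. The sketch below shows what the userspace side of that handshake might look like; the function name and response layout are assumptions for illustration, not the actual libipathverbs code.

    #include <stdint.h>
    #include <sys/mman.h>
    #include <sys/types.h>

    /* Hypothetical provider-library helper: 'offset' is the 64-bit value the
     * kernel copied into the create-SRQ response, 'size' the page-aligned
     * length of struct ipath_rwq plus the ring of ipath_rwqe entries. */
    static void *map_srq_rwq(int dev_fd, uint64_t offset, size_t size)
    {
        /* ipath_mmap() looks this offset up on dev->pending_mmaps and maps
         * the vmalloc_user() memory into the process. */
        void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                       dev_fd, (off_t) offset);

        return p == MAP_FAILED ? NULL : p;
    }

Once mapped, userspace can post receive WQEs directly by filling ipath_rwqe slots and advancing the shared head index, which is why the kernel code below treats head and tail as untrusted values.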
@@ -188,83 +203,130 @@ bail: | |||
188 | * @ibsrq: the SRQ to modify | 203 | * @ibsrq: the SRQ to modify |
189 | * @attr: the new attributes of the SRQ | 204 | * @attr: the new attributes of the SRQ |
190 | * @attr_mask: indicates which attributes to modify | 205 | * @attr_mask: indicates which attributes to modify |
206 | * @udata: user data for ipathverbs.so | ||
191 | */ | 207 | */ |
192 | int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, | 208 | int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, |
193 | enum ib_srq_attr_mask attr_mask) | 209 | enum ib_srq_attr_mask attr_mask, |
210 | struct ib_udata *udata) | ||
194 | { | 211 | { |
195 | struct ipath_srq *srq = to_isrq(ibsrq); | 212 | struct ipath_srq *srq = to_isrq(ibsrq); |
196 | unsigned long flags; | 213 | int ret = 0; |
197 | int ret; | ||
198 | 214 | ||
199 | if (attr_mask & IB_SRQ_MAX_WR) | 215 | if (attr_mask & IB_SRQ_MAX_WR) { |
200 | if ((attr->max_wr > ib_ipath_max_srq_wrs) || | 216 | struct ipath_rwq *owq; |
201 | (attr->max_sge > srq->rq.max_sge)) { | 217 | struct ipath_rwq *wq; |
202 | ret = -EINVAL; | 218 | struct ipath_rwqe *p; |
203 | goto bail; | 219 | u32 sz, size, n, head, tail; |
204 | } | ||
205 | 220 | ||
206 | if (attr_mask & IB_SRQ_LIMIT) | 221 | /* Check that the requested sizes are below the limits. */ |
207 | if (attr->srq_limit >= srq->rq.size) { | 222 | if ((attr->max_wr > ib_ipath_max_srq_wrs) || |
223 | ((attr_mask & IB_SRQ_LIMIT) ? | ||
224 | attr->srq_limit : srq->limit) > attr->max_wr) { | ||
208 | ret = -EINVAL; | 225 | ret = -EINVAL; |
209 | goto bail; | 226 | goto bail; |
210 | } | 227 | } |
211 | 228 | ||
212 | if (attr_mask & IB_SRQ_MAX_WR) { | ||
213 | struct ipath_rwqe *wq, *p; | ||
214 | u32 sz, size, n; | ||
215 | |||
216 | sz = sizeof(struct ipath_rwqe) + | 229 | sz = sizeof(struct ipath_rwqe) + |
217 | attr->max_sge * sizeof(struct ipath_sge); | 230 | srq->rq.max_sge * sizeof(struct ib_sge); |
218 | size = attr->max_wr + 1; | 231 | size = attr->max_wr + 1; |
219 | wq = vmalloc(size * sz); | 232 | wq = vmalloc_user(sizeof(struct ipath_rwq) + size * sz); |
220 | if (!wq) { | 233 | if (!wq) { |
221 | ret = -ENOMEM; | 234 | ret = -ENOMEM; |
222 | goto bail; | 235 | goto bail; |
223 | } | 236 | } |
224 | 237 | ||
225 | spin_lock_irqsave(&srq->rq.lock, flags); | 238 | /* |
226 | if (srq->rq.head < srq->rq.tail) | 239 | * Return the address of the RWQ as the offset to mmap. |
227 | n = srq->rq.size + srq->rq.head - srq->rq.tail; | 240 | * See ipath_mmap() for details. |
241 | */ | ||
242 | if (udata && udata->inlen >= sizeof(__u64)) { | ||
243 | __u64 offset_addr; | ||
244 | __u64 offset = (__u64) wq; | ||
245 | |||
246 | ret = ib_copy_from_udata(&offset_addr, udata, | ||
247 | sizeof(offset_addr)); | ||
248 | if (ret) { | ||
249 | vfree(wq); | ||
250 | goto bail; | ||
251 | } | ||
252 | udata->outbuf = (void __user *) offset_addr; | ||
253 | ret = ib_copy_to_udata(udata, &offset, | ||
254 | sizeof(offset)); | ||
255 | if (ret) { | ||
256 | vfree(wq); | ||
257 | goto bail; | ||
258 | } | ||
259 | } | ||
260 | |||
261 | spin_lock_irq(&srq->rq.lock); | ||
262 | /* | ||
263 | * validate head pointer value and compute | ||
264 | * the number of remaining WQEs. | ||
265 | */ | ||
266 | owq = srq->rq.wq; | ||
267 | head = owq->head; | ||
268 | if (head >= srq->rq.size) | ||
269 | head = 0; | ||
270 | tail = owq->tail; | ||
271 | if (tail >= srq->rq.size) | ||
272 | tail = 0; | ||
273 | n = head; | ||
274 | if (n < tail) | ||
275 | n += srq->rq.size - tail; | ||
228 | else | 276 | else |
229 | n = srq->rq.head - srq->rq.tail; | 277 | n -= tail; |
230 | if (size <= n || size <= srq->limit) { | 278 | if (size <= n) { |
231 | spin_unlock_irqrestore(&srq->rq.lock, flags); | 279 | spin_unlock_irq(&srq->rq.lock); |
232 | vfree(wq); | 280 | vfree(wq); |
233 | ret = -EINVAL; | 281 | ret = -EINVAL; |
234 | goto bail; | 282 | goto bail; |
235 | } | 283 | } |
236 | n = 0; | 284 | n = 0; |
237 | p = wq; | 285 | p = wq->wq; |
238 | while (srq->rq.tail != srq->rq.head) { | 286 | while (tail != head) { |
239 | struct ipath_rwqe *wqe; | 287 | struct ipath_rwqe *wqe; |
240 | int i; | 288 | int i; |
241 | 289 | ||
242 | wqe = get_rwqe_ptr(&srq->rq, srq->rq.tail); | 290 | wqe = get_rwqe_ptr(&srq->rq, tail); |
243 | p->wr_id = wqe->wr_id; | 291 | p->wr_id = wqe->wr_id; |
244 | p->length = wqe->length; | ||
245 | p->num_sge = wqe->num_sge; | 292 | p->num_sge = wqe->num_sge; |
246 | for (i = 0; i < wqe->num_sge; i++) | 293 | for (i = 0; i < wqe->num_sge; i++) |
247 | p->sg_list[i] = wqe->sg_list[i]; | 294 | p->sg_list[i] = wqe->sg_list[i]; |
248 | n++; | 295 | n++; |
249 | p = (struct ipath_rwqe *)((char *) p + sz); | 296 | p = (struct ipath_rwqe *)((char *) p + sz); |
250 | if (++srq->rq.tail >= srq->rq.size) | 297 | if (++tail >= srq->rq.size) |
251 | srq->rq.tail = 0; | 298 | tail = 0; |
252 | } | 299 | } |
253 | vfree(srq->rq.wq); | ||
254 | srq->rq.wq = wq; | 300 | srq->rq.wq = wq; |
255 | srq->rq.size = size; | 301 | srq->rq.size = size; |
256 | srq->rq.head = n; | 302 | wq->head = n; |
257 | srq->rq.tail = 0; | 303 | wq->tail = 0; |
258 | srq->rq.max_sge = attr->max_sge; | 304 | if (attr_mask & IB_SRQ_LIMIT) |
259 | spin_unlock_irqrestore(&srq->rq.lock, flags); | 305 | srq->limit = attr->srq_limit; |
260 | } | 306 | spin_unlock_irq(&srq->rq.lock); |
261 | 307 | ||
262 | if (attr_mask & IB_SRQ_LIMIT) { | 308 | vfree(owq); |
263 | spin_lock_irqsave(&srq->rq.lock, flags); | 309 | |
264 | srq->limit = attr->srq_limit; | 310 | if (srq->ip) { |
265 | spin_unlock_irqrestore(&srq->rq.lock, flags); | 311 | struct ipath_mmap_info *ip = srq->ip; |
312 | struct ipath_ibdev *dev = to_idev(srq->ibsrq.device); | ||
313 | |||
314 | ip->obj = wq; | ||
315 | ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) + | ||
316 | size * sz); | ||
317 | spin_lock_irq(&dev->pending_lock); | ||
318 | ip->next = dev->pending_mmaps; | ||
319 | dev->pending_mmaps = ip; | ||
320 | spin_unlock_irq(&dev->pending_lock); | ||
321 | } | ||
322 | } else if (attr_mask & IB_SRQ_LIMIT) { | ||
323 | spin_lock_irq(&srq->rq.lock); | ||
324 | if (attr->srq_limit >= srq->rq.size) | ||
325 | ret = -EINVAL; | ||
326 | else | ||
327 | srq->limit = attr->srq_limit; | ||
328 | spin_unlock_irq(&srq->rq.lock); | ||
266 | } | 329 | } |
267 | ret = 0; | ||
268 | 330 | ||
269 | bail: | 331 | bail: |
270 | return ret; | 332 | return ret; |
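The resize path in ipath_modify_srq() reads head and tail from the (possibly user-mapped) RWQ, clamps out-of-range values, and counts the WQEs still queued before deciding whether they fit in the new ring. A compact restatement of that arithmetic, in plain C for illustration only:

    #include <stdint.h>

    /* Number of WQEs currently queued in a ring of 'size' slots
     * (one slot is always left empty to distinguish full from empty). */
    static uint32_t rwq_count(uint32_t head, uint32_t tail, uint32_t size)
    {
        if (head >= size)       /* clamp untrusted, user-visible indices */
            head = 0;
        if (tail >= size)
            tail = 0;
        return head >= tail ? head - tail : size - tail + head;
    }

A resize request is rejected with -EINVAL when the new size (max_wr + 1 slots) is not strictly larger than rwq_count(), i.e. the "size <= n" test above.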
diff --git a/drivers/infiniband/hw/ipath/ipath_stats.c b/drivers/infiniband/hw/ipath/ipath_stats.c index 70351b7e35c0..30a825928fcf 100644 --- a/drivers/infiniband/hw/ipath/ipath_stats.c +++ b/drivers/infiniband/hw/ipath/ipath_stats.c | |||
@@ -271,33 +271,6 @@ void ipath_get_faststats(unsigned long opaque) | |||
271 | } | 271 | } |
272 | } | 272 | } |
273 | 273 | ||
274 | if (dd->ipath_nosma_bufs) { | ||
275 | dd->ipath_nosma_secs += 5; | ||
276 | if (dd->ipath_nosma_secs >= 30) { | ||
277 | ipath_cdbg(SMA, "No SMA bufs avail %u seconds; " | ||
278 | "cancelling pending sends\n", | ||
279 | dd->ipath_nosma_secs); | ||
280 | /* | ||
281 | * issue an abort as well, in case we have a packet | ||
282 | * stuck in launch fifo. This could corrupt an | ||
283 | * outgoing user packet in the worst case, | ||
284 | * but this is a pretty catastrophic, anyway. | ||
285 | */ | ||
286 | ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, | ||
287 | INFINIPATH_S_ABORT); | ||
288 | ipath_disarm_piobufs(dd, dd->ipath_lastport_piobuf, | ||
289 | dd->ipath_piobcnt2k + | ||
290 | dd->ipath_piobcnt4k - | ||
291 | dd->ipath_lastport_piobuf); | ||
292 | /* start again, if necessary */ | ||
293 | dd->ipath_nosma_secs = 0; | ||
294 | } else | ||
295 | ipath_cdbg(SMA, "No SMA bufs avail %u tries, " | ||
296 | "after %u seconds\n", | ||
297 | dd->ipath_nosma_bufs, | ||
298 | dd->ipath_nosma_secs); | ||
299 | } | ||
300 | |||
301 | done: | 274 | done: |
302 | mod_timer(&dd->ipath_stats_timer, jiffies + HZ * 5); | 275 | mod_timer(&dd->ipath_stats_timer, jiffies + HZ * 5); |
303 | } | 276 | } |
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c index b98821d7801d..e299148c4b68 100644 --- a/drivers/infiniband/hw/ipath/ipath_sysfs.c +++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c | |||
@@ -35,7 +35,6 @@ | |||
35 | #include <linux/pci.h> | 35 | #include <linux/pci.h> |
36 | 36 | ||
37 | #include "ipath_kernel.h" | 37 | #include "ipath_kernel.h" |
38 | #include "ipath_layer.h" | ||
39 | #include "ipath_common.h" | 38 | #include "ipath_common.h" |
40 | 39 | ||
41 | /** | 40 | /** |
@@ -76,7 +75,7 @@ bail: | |||
76 | static ssize_t show_version(struct device_driver *dev, char *buf) | 75 | static ssize_t show_version(struct device_driver *dev, char *buf) |
77 | { | 76 | { |
78 | /* The string printed here is already newline-terminated. */ | 77 | /* The string printed here is already newline-terminated. */ |
79 | return scnprintf(buf, PAGE_SIZE, "%s", ipath_core_version); | 78 | return scnprintf(buf, PAGE_SIZE, "%s", ib_ipath_version); |
80 | } | 79 | } |
81 | 80 | ||
82 | static ssize_t show_num_units(struct device_driver *dev, char *buf) | 81 | static ssize_t show_num_units(struct device_driver *dev, char *buf) |
@@ -108,8 +107,8 @@ static const char *ipath_status_str[] = { | |||
108 | "Initted", | 107 | "Initted", |
109 | "Disabled", | 108 | "Disabled", |
110 | "Admin_Disabled", | 109 | "Admin_Disabled", |
111 | "OIB_SMA", | 110 | "", /* This used to be the old "OIB_SMA" status. */ |
112 | "SMA", | 111 | "", /* This used to be the old "SMA" status. */ |
113 | "Present", | 112 | "Present", |
114 | "IB_link_up", | 113 | "IB_link_up", |
115 | "IB_configured", | 114 | "IB_configured", |
@@ -227,7 +226,6 @@ static ssize_t store_mlid(struct device *dev, | |||
227 | unit = dd->ipath_unit; | 226 | unit = dd->ipath_unit; |
228 | 227 | ||
229 | dd->ipath_mlid = mlid; | 228 | dd->ipath_mlid = mlid; |
230 | ipath_layer_intr(dd, IPATH_LAYER_INT_BCAST); | ||
231 | 229 | ||
232 | goto bail; | 230 | goto bail; |
233 | invalid: | 231 | invalid: |
@@ -467,7 +465,7 @@ static ssize_t store_link_state(struct device *dev, | |||
467 | if (ret < 0) | 465 | if (ret < 0) |
468 | goto invalid; | 466 | goto invalid; |
469 | 467 | ||
470 | r = ipath_layer_set_linkstate(dd, state); | 468 | r = ipath_set_linkstate(dd, state); |
471 | if (r < 0) { | 469 | if (r < 0) { |
472 | ret = r; | 470 | ret = r; |
473 | goto bail; | 471 | goto bail; |
@@ -502,7 +500,7 @@ static ssize_t store_mtu(struct device *dev, | |||
502 | if (ret < 0) | 500 | if (ret < 0) |
503 | goto invalid; | 501 | goto invalid; |
504 | 502 | ||
505 | r = ipath_layer_set_mtu(dd, mtu); | 503 | r = ipath_set_mtu(dd, mtu); |
506 | if (r < 0) | 504 | if (r < 0) |
507 | ret = r; | 505 | ret = r; |
508 | 506 | ||
@@ -563,6 +561,33 @@ bail: | |||
563 | return ret; | 561 | return ret; |
564 | } | 562 | } |
565 | 563 | ||
564 | static ssize_t store_rx_pol_inv(struct device *dev, | ||
565 | struct device_attribute *attr, | ||
566 | const char *buf, | ||
567 | size_t count) | ||
568 | { | ||
569 | struct ipath_devdata *dd = dev_get_drvdata(dev); | ||
570 | int ret, r; | ||
571 | u16 val; | ||
572 | |||
573 | ret = ipath_parse_ushort(buf, &val); | ||
574 | if (ret < 0) | ||
575 | goto invalid; | ||
576 | |||
577 | r = ipath_set_rx_pol_inv(dd, val); | ||
578 | if (r < 0) { | ||
579 | ret = r; | ||
580 | goto bail; | ||
581 | } | ||
582 | |||
583 | goto bail; | ||
584 | invalid: | ||
585 | ipath_dev_err(dd, "attempt to set invalid Rx Polarity invert\n"); | ||
586 | bail: | ||
587 | return ret; | ||
588 | } | ||
589 | |||
590 | |||
566 | static DRIVER_ATTR(num_units, S_IRUGO, show_num_units, NULL); | 591 | static DRIVER_ATTR(num_units, S_IRUGO, show_num_units, NULL); |
567 | static DRIVER_ATTR(version, S_IRUGO, show_version, NULL); | 592 | static DRIVER_ATTR(version, S_IRUGO, show_version, NULL); |
568 | 593 | ||
@@ -589,6 +614,7 @@ static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); | |||
589 | static DEVICE_ATTR(status_str, S_IRUGO, show_status_str, NULL); | 614 | static DEVICE_ATTR(status_str, S_IRUGO, show_status_str, NULL); |
590 | static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL); | 615 | static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL); |
591 | static DEVICE_ATTR(unit, S_IRUGO, show_unit, NULL); | 616 | static DEVICE_ATTR(unit, S_IRUGO, show_unit, NULL); |
617 | static DEVICE_ATTR(rx_pol_inv, S_IWUSR, NULL, store_rx_pol_inv); | ||
592 | 618 | ||
593 | static struct attribute *dev_attributes[] = { | 619 | static struct attribute *dev_attributes[] = { |
594 | &dev_attr_guid.attr, | 620 | &dev_attr_guid.attr, |
@@ -603,6 +629,7 @@ static struct attribute *dev_attributes[] = { | |||
603 | &dev_attr_boardversion.attr, | 629 | &dev_attr_boardversion.attr, |
604 | &dev_attr_unit.attr, | 630 | &dev_attr_unit.attr, |
605 | &dev_attr_enabled.attr, | 631 | &dev_attr_enabled.attr, |
632 | &dev_attr_rx_pol_inv.attr, | ||
606 | NULL | 633 | NULL |
607 | }; | 634 | }; |
608 | 635 | ||
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c index c33abea2d5a7..0fd3cded16ba 100644 --- a/drivers/infiniband/hw/ipath/ipath_uc.c +++ b/drivers/infiniband/hw/ipath/ipath_uc.c | |||
@@ -32,7 +32,7 @@ | |||
32 | */ | 32 | */ |
33 | 33 | ||
34 | #include "ipath_verbs.h" | 34 | #include "ipath_verbs.h" |
35 | #include "ipath_common.h" | 35 | #include "ipath_kernel.h" |
36 | 36 | ||
37 | /* cut down ridiculously long IB macro names */ | 37 | /* cut down ridiculously long IB macro names */ |
38 | #define OP(x) IB_OPCODE_UC_##x | 38 | #define OP(x) IB_OPCODE_UC_##x |
@@ -261,8 +261,7 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, | |||
261 | * size to 56 bytes so the last 4 bytes of | 261 | * size to 56 bytes so the last 4 bytes of |
262 | * the BTH header (PSN) is in the data buffer. | 262 | * the BTH header (PSN) is in the data buffer. |
263 | */ | 263 | */ |
264 | header_in_data = | 264 | header_in_data = dev->dd->ipath_rcvhdrentsize == 16; |
265 | ipath_layer_get_rcvhdrentsize(dev->dd) == 16; | ||
266 | if (header_in_data) { | 265 | if (header_in_data) { |
267 | psn = be32_to_cpu(((__be32 *) data)[0]); | 266 | psn = be32_to_cpu(((__be32 *) data)[0]); |
268 | data += sizeof(__be32); | 267 | data += sizeof(__be32); |
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c index 3466129af804..6991d1d74e3c 100644 --- a/drivers/infiniband/hw/ipath/ipath_ud.c +++ b/drivers/infiniband/hw/ipath/ipath_ud.c | |||
@@ -34,7 +34,54 @@ | |||
34 | #include <rdma/ib_smi.h> | 34 | #include <rdma/ib_smi.h> |
35 | 35 | ||
36 | #include "ipath_verbs.h" | 36 | #include "ipath_verbs.h" |
37 | #include "ipath_common.h" | 37 | #include "ipath_kernel.h" |
38 | |||
39 | static int init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe, | ||
40 | u32 *lengthp, struct ipath_sge_state *ss) | ||
41 | { | ||
42 | struct ipath_ibdev *dev = to_idev(qp->ibqp.device); | ||
43 | int user = to_ipd(qp->ibqp.pd)->user; | ||
44 | int i, j, ret; | ||
45 | struct ib_wc wc; | ||
46 | |||
47 | *lengthp = 0; | ||
48 | for (i = j = 0; i < wqe->num_sge; i++) { | ||
49 | if (wqe->sg_list[i].length == 0) | ||
50 | continue; | ||
51 | /* Check LKEY */ | ||
52 | if ((user && wqe->sg_list[i].lkey == 0) || | ||
53 | !ipath_lkey_ok(&dev->lk_table, | ||
54 | j ? &ss->sg_list[j - 1] : &ss->sge, | ||
55 | &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE)) | ||
56 | goto bad_lkey; | ||
57 | *lengthp += wqe->sg_list[i].length; | ||
58 | j++; | ||
59 | } | ||
60 | ss->num_sge = j; | ||
61 | ret = 1; | ||
62 | goto bail; | ||
63 | |||
64 | bad_lkey: | ||
65 | wc.wr_id = wqe->wr_id; | ||
66 | wc.status = IB_WC_LOC_PROT_ERR; | ||
67 | wc.opcode = IB_WC_RECV; | ||
68 | wc.vendor_err = 0; | ||
69 | wc.byte_len = 0; | ||
70 | wc.imm_data = 0; | ||
71 | wc.qp_num = qp->ibqp.qp_num; | ||
72 | wc.src_qp = 0; | ||
73 | wc.wc_flags = 0; | ||
74 | wc.pkey_index = 0; | ||
75 | wc.slid = 0; | ||
76 | wc.sl = 0; | ||
77 | wc.dlid_path_bits = 0; | ||
78 | wc.port_num = 0; | ||
79 | /* Signal solicited completion event. */ | ||
80 | ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); | ||
81 | ret = 0; | ||
82 | bail: | ||
83 | return ret; | ||
84 | } | ||
38 | 85 | ||
39 | /** | 86 | /** |
40 | * ipath_ud_loopback - handle send on loopback QPs | 87 | * ipath_ud_loopback - handle send on loopback QPs |
@@ -46,6 +93,8 @@ | |||
46 | * | 93 | * |
47 | * This is called from ipath_post_ud_send() to forward a WQE addressed | 94 | * This is called from ipath_post_ud_send() to forward a WQE addressed |
48 | * to the same HCA. | 95 | * to the same HCA. |
96 | * Note that the receive interrupt handler may be calling ipath_ud_rcv() | ||
97 | * while this is being called. | ||
49 | */ | 98 | */ |
50 | static void ipath_ud_loopback(struct ipath_qp *sqp, | 99 | static void ipath_ud_loopback(struct ipath_qp *sqp, |
51 | struct ipath_sge_state *ss, | 100 | struct ipath_sge_state *ss, |
@@ -60,7 +109,11 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, | |||
60 | struct ipath_srq *srq; | 109 | struct ipath_srq *srq; |
61 | struct ipath_sge_state rsge; | 110 | struct ipath_sge_state rsge; |
62 | struct ipath_sge *sge; | 111 | struct ipath_sge *sge; |
112 | struct ipath_rwq *wq; | ||
63 | struct ipath_rwqe *wqe; | 113 | struct ipath_rwqe *wqe; |
114 | void (*handler)(struct ib_event *, void *); | ||
115 | u32 tail; | ||
116 | u32 rlen; | ||
64 | 117 | ||
65 | qp = ipath_lookup_qpn(&dev->qp_table, wr->wr.ud.remote_qpn); | 118 | qp = ipath_lookup_qpn(&dev->qp_table, wr->wr.ud.remote_qpn); |
66 | if (!qp) | 119 | if (!qp) |
@@ -94,6 +147,13 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, | |||
94 | wc->imm_data = 0; | 147 | wc->imm_data = 0; |
95 | } | 148 | } |
96 | 149 | ||
150 | if (wr->num_sge > 1) { | ||
151 | rsge.sg_list = kmalloc((wr->num_sge - 1) * | ||
152 | sizeof(struct ipath_sge), | ||
153 | GFP_ATOMIC); | ||
154 | } else | ||
155 | rsge.sg_list = NULL; | ||
156 | |||
97 | /* | 157 | /* |
98 | * Get the next work request entry to find where to put the data. | 158 | * Get the next work request entry to find where to put the data. |
99 | * Note that it is safe to drop the lock after changing rq->tail | 159 | * Note that it is safe to drop the lock after changing rq->tail |
@@ -101,37 +161,52 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, | |||
101 | */ | 161 | */ |
102 | if (qp->ibqp.srq) { | 162 | if (qp->ibqp.srq) { |
103 | srq = to_isrq(qp->ibqp.srq); | 163 | srq = to_isrq(qp->ibqp.srq); |
164 | handler = srq->ibsrq.event_handler; | ||
104 | rq = &srq->rq; | 165 | rq = &srq->rq; |
105 | } else { | 166 | } else { |
106 | srq = NULL; | 167 | srq = NULL; |
168 | handler = NULL; | ||
107 | rq = &qp->r_rq; | 169 | rq = &qp->r_rq; |
108 | } | 170 | } |
171 | |||
109 | spin_lock_irqsave(&rq->lock, flags); | 172 | spin_lock_irqsave(&rq->lock, flags); |
110 | if (rq->tail == rq->head) { | 173 | wq = rq->wq; |
111 | spin_unlock_irqrestore(&rq->lock, flags); | 174 | tail = wq->tail; |
112 | dev->n_pkt_drops++; | 175 | while (1) { |
113 | goto done; | 176 | if (unlikely(tail == wq->head)) { |
177 | spin_unlock_irqrestore(&rq->lock, flags); | ||
178 | dev->n_pkt_drops++; | ||
179 | goto bail_sge; | ||
180 | } | ||
181 | wqe = get_rwqe_ptr(rq, tail); | ||
182 | if (++tail >= rq->size) | ||
183 | tail = 0; | ||
184 | if (init_sge(qp, wqe, &rlen, &rsge)) | ||
185 | break; | ||
186 | wq->tail = tail; | ||
114 | } | 187 | } |
115 | /* Silently drop packets which are too big. */ | 188 | /* Silently drop packets which are too big. */ |
116 | wqe = get_rwqe_ptr(rq, rq->tail); | 189 | if (wc->byte_len > rlen) { |
117 | if (wc->byte_len > wqe->length) { | ||
118 | spin_unlock_irqrestore(&rq->lock, flags); | 190 | spin_unlock_irqrestore(&rq->lock, flags); |
119 | dev->n_pkt_drops++; | 191 | dev->n_pkt_drops++; |
120 | goto done; | 192 | goto bail_sge; |
121 | } | 193 | } |
194 | wq->tail = tail; | ||
122 | wc->wr_id = wqe->wr_id; | 195 | wc->wr_id = wqe->wr_id; |
123 | rsge.sge = wqe->sg_list[0]; | 196 | if (handler) { |
124 | rsge.sg_list = wqe->sg_list + 1; | ||
125 | rsge.num_sge = wqe->num_sge; | ||
126 | if (++rq->tail >= rq->size) | ||
127 | rq->tail = 0; | ||
128 | if (srq && srq->ibsrq.event_handler) { | ||
129 | u32 n; | 197 | u32 n; |
130 | 198 | ||
131 | if (rq->head < rq->tail) | 199 | /* |
132 | n = rq->size + rq->head - rq->tail; | 200 | * validate head pointer value and compute |
201 | * the number of remaining WQEs. | ||
202 | */ | ||
203 | n = wq->head; | ||
204 | if (n >= rq->size) | ||
205 | n = 0; | ||
206 | if (n < tail) | ||
207 | n += rq->size - tail; | ||
133 | else | 208 | else |
134 | n = rq->head - rq->tail; | 209 | n -= tail; |
135 | if (n < srq->limit) { | 210 | if (n < srq->limit) { |
136 | struct ib_event ev; | 211 | struct ib_event ev; |
137 | 212 | ||
@@ -140,12 +215,12 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, | |||
140 | ev.device = qp->ibqp.device; | 215 | ev.device = qp->ibqp.device; |
141 | ev.element.srq = qp->ibqp.srq; | 216 | ev.element.srq = qp->ibqp.srq; |
142 | ev.event = IB_EVENT_SRQ_LIMIT_REACHED; | 217 | ev.event = IB_EVENT_SRQ_LIMIT_REACHED; |
143 | srq->ibsrq.event_handler(&ev, | 218 | handler(&ev, srq->ibsrq.srq_context); |
144 | srq->ibsrq.srq_context); | ||
145 | } else | 219 | } else |
146 | spin_unlock_irqrestore(&rq->lock, flags); | 220 | spin_unlock_irqrestore(&rq->lock, flags); |
147 | } else | 221 | } else |
148 | spin_unlock_irqrestore(&rq->lock, flags); | 222 | spin_unlock_irqrestore(&rq->lock, flags); |
223 | |||
149 | ah_attr = &to_iah(wr->wr.ud.ah)->attr; | 224 | ah_attr = &to_iah(wr->wr.ud.ah)->attr; |
150 | if (ah_attr->ah_flags & IB_AH_GRH) { | 225 | if (ah_attr->ah_flags & IB_AH_GRH) { |
151 | ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh)); | 226 | ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh)); |
@@ -186,7 +261,7 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, | |||
186 | wc->src_qp = sqp->ibqp.qp_num; | 261 | wc->src_qp = sqp->ibqp.qp_num; |
187 | /* XXX do we know which pkey matched? Only needed for GSI. */ | 262 | /* XXX do we know which pkey matched? Only needed for GSI. */ |
188 | wc->pkey_index = 0; | 263 | wc->pkey_index = 0; |
189 | wc->slid = ipath_layer_get_lid(dev->dd) | | 264 | wc->slid = dev->dd->ipath_lid | |
190 | (ah_attr->src_path_bits & | 265 | (ah_attr->src_path_bits & |
191 | ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1)); | 266 | ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1)); |
192 | wc->sl = ah_attr->sl; | 267 | wc->sl = ah_attr->sl; |
@@ -196,6 +271,8 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, | |||
196 | ipath_cq_enter(to_icq(qp->ibqp.recv_cq), wc, | 271 | ipath_cq_enter(to_icq(qp->ibqp.recv_cq), wc, |
197 | wr->send_flags & IB_SEND_SOLICITED); | 272 | wr->send_flags & IB_SEND_SOLICITED); |
198 | 273 | ||
274 | bail_sge: | ||
275 | kfree(rsge.sg_list); | ||
199 | done: | 276 | done: |
200 | if (atomic_dec_and_test(&qp->refcount)) | 277 | if (atomic_dec_and_test(&qp->refcount)) |
201 | wake_up(&qp->wait); | 278 | wake_up(&qp->wait); |
@@ -276,7 +353,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr) | |||
276 | ss.num_sge++; | 353 | ss.num_sge++; |
277 | } | 354 | } |
278 | /* Check for invalid packet size. */ | 355 | /* Check for invalid packet size. */ |
279 | if (len > ipath_layer_get_ibmtu(dev->dd)) { | 356 | if (len > dev->dd->ipath_ibmtu) { |
280 | ret = -EINVAL; | 357 | ret = -EINVAL; |
281 | goto bail; | 358 | goto bail; |
282 | } | 359 | } |
@@ -298,7 +375,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr) | |||
298 | dev->n_unicast_xmit++; | 375 | dev->n_unicast_xmit++; |
299 | lid = ah_attr->dlid & | 376 | lid = ah_attr->dlid & |
300 | ~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1); | 377 | ~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1); |
301 | if (unlikely(lid == ipath_layer_get_lid(dev->dd))) { | 378 | if (unlikely(lid == dev->dd->ipath_lid)) { |
302 | /* | 379 | /* |
303 | * Pass in an uninitialized ib_wc to save stack | 380 | * Pass in an uninitialized ib_wc to save stack |
304 | * space. | 381 | * space. |
@@ -327,7 +404,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr) | |||
327 | qp->s_hdr.u.l.grh.sgid.global.subnet_prefix = | 404 | qp->s_hdr.u.l.grh.sgid.global.subnet_prefix = |
328 | dev->gid_prefix; | 405 | dev->gid_prefix; |
329 | qp->s_hdr.u.l.grh.sgid.global.interface_id = | 406 | qp->s_hdr.u.l.grh.sgid.global.interface_id = |
330 | ipath_layer_get_guid(dev->dd); | 407 | dev->dd->ipath_guid; |
331 | qp->s_hdr.u.l.grh.dgid = ah_attr->grh.dgid; | 408 | qp->s_hdr.u.l.grh.dgid = ah_attr->grh.dgid; |
332 | /* | 409 | /* |
333 | * Don't worry about sending to locally attached multicast | 410 | * Don't worry about sending to locally attached multicast |
@@ -357,7 +434,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr) | |||
357 | qp->s_hdr.lrh[0] = cpu_to_be16(lrh0); | 434 | qp->s_hdr.lrh[0] = cpu_to_be16(lrh0); |
358 | qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */ | 435 | qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */ |
359 | qp->s_hdr.lrh[2] = cpu_to_be16(hwords + nwords + SIZE_OF_CRC); | 436 | qp->s_hdr.lrh[2] = cpu_to_be16(hwords + nwords + SIZE_OF_CRC); |
360 | lid = ipath_layer_get_lid(dev->dd); | 437 | lid = dev->dd->ipath_lid; |
361 | if (lid) { | 438 | if (lid) { |
362 | lid |= ah_attr->src_path_bits & | 439 | lid |= ah_attr->src_path_bits & |
363 | ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1); | 440 | ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1); |
@@ -368,7 +445,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr) | |||
368 | bth0 |= 1 << 23; | 445 | bth0 |= 1 << 23; |
369 | bth0 |= extra_bytes << 20; | 446 | bth0 |= extra_bytes << 20; |
370 | bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPATH_DEFAULT_P_KEY : | 447 | bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPATH_DEFAULT_P_KEY : |
371 | ipath_layer_get_pkey(dev->dd, qp->s_pkey_index); | 448 | ipath_get_pkey(dev->dd, qp->s_pkey_index); |
372 | ohdr->bth[0] = cpu_to_be32(bth0); | 449 | ohdr->bth[0] = cpu_to_be32(bth0); |
373 | /* | 450 | /* |
374 | * Use the multicast QP if the destination LID is a multicast LID. | 451 | * Use the multicast QP if the destination LID is a multicast LID. |
@@ -433,13 +510,9 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, | |||
433 | int opcode; | 510 | int opcode; |
434 | u32 hdrsize; | 511 | u32 hdrsize; |
435 | u32 pad; | 512 | u32 pad; |
436 | unsigned long flags; | ||
437 | struct ib_wc wc; | 513 | struct ib_wc wc; |
438 | u32 qkey; | 514 | u32 qkey; |
439 | u32 src_qp; | 515 | u32 src_qp; |
440 | struct ipath_rq *rq; | ||
441 | struct ipath_srq *srq; | ||
442 | struct ipath_rwqe *wqe; | ||
443 | u16 dlid; | 516 | u16 dlid; |
444 | int header_in_data; | 517 | int header_in_data; |
445 | 518 | ||
@@ -458,8 +531,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, | |||
458 | * the eager header buffer size to 56 bytes so the last 12 | 531 | * the eager header buffer size to 56 bytes so the last 12 |
459 | * bytes of the IB header is in the data buffer. | 532 | * bytes of the IB header is in the data buffer. |
460 | */ | 533 | */ |
461 | header_in_data = | 534 | header_in_data = dev->dd->ipath_rcvhdrentsize == 16; |
462 | ipath_layer_get_rcvhdrentsize(dev->dd) == 16; | ||
463 | if (header_in_data) { | 535 | if (header_in_data) { |
464 | qkey = be32_to_cpu(((__be32 *) data)[1]); | 536 | qkey = be32_to_cpu(((__be32 *) data)[1]); |
465 | src_qp = be32_to_cpu(((__be32 *) data)[2]); | 537 | src_qp = be32_to_cpu(((__be32 *) data)[2]); |
@@ -547,19 +619,10 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, | |||
547 | 619 | ||
548 | /* | 620 | /* |
549 | * Get the next work request entry to find where to put the data. | 621 | * Get the next work request entry to find where to put the data. |
550 | * Note that it is safe to drop the lock after changing rq->tail | ||
551 | * since ipath_post_receive() won't fill the empty slot. | ||
552 | */ | 622 | */ |
553 | if (qp->ibqp.srq) { | 623 | if (qp->r_reuse_sge) |
554 | srq = to_isrq(qp->ibqp.srq); | 624 | qp->r_reuse_sge = 0; |
555 | rq = &srq->rq; | 625 | else if (!ipath_get_rwqe(qp, 0)) { |
556 | } else { | ||
557 | srq = NULL; | ||
558 | rq = &qp->r_rq; | ||
559 | } | ||
560 | spin_lock_irqsave(&rq->lock, flags); | ||
561 | if (rq->tail == rq->head) { | ||
562 | spin_unlock_irqrestore(&rq->lock, flags); | ||
563 | /* | 626 | /* |
564 | * Count VL15 packets dropped due to no receive buffer. | 627 | * Count VL15 packets dropped due to no receive buffer. |
565 | * Otherwise, count them as buffer overruns since usually, | 628 | * Otherwise, count them as buffer overruns since usually, |
@@ -573,39 +636,11 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, | |||
573 | goto bail; | 636 | goto bail; |
574 | } | 637 | } |
575 | /* Silently drop packets which are too big. */ | 638 | /* Silently drop packets which are too big. */ |
576 | wqe = get_rwqe_ptr(rq, rq->tail); | 639 | if (wc.byte_len > qp->r_len) { |
577 | if (wc.byte_len > wqe->length) { | 640 | qp->r_reuse_sge = 1; |
578 | spin_unlock_irqrestore(&rq->lock, flags); | ||
579 | dev->n_pkt_drops++; | 641 | dev->n_pkt_drops++; |
580 | goto bail; | 642 | goto bail; |
581 | } | 643 | } |
582 | wc.wr_id = wqe->wr_id; | ||
583 | qp->r_sge.sge = wqe->sg_list[0]; | ||
584 | qp->r_sge.sg_list = wqe->sg_list + 1; | ||
585 | qp->r_sge.num_sge = wqe->num_sge; | ||
586 | if (++rq->tail >= rq->size) | ||
587 | rq->tail = 0; | ||
588 | if (srq && srq->ibsrq.event_handler) { | ||
589 | u32 n; | ||
590 | |||
591 | if (rq->head < rq->tail) | ||
592 | n = rq->size + rq->head - rq->tail; | ||
593 | else | ||
594 | n = rq->head - rq->tail; | ||
595 | if (n < srq->limit) { | ||
596 | struct ib_event ev; | ||
597 | |||
598 | srq->limit = 0; | ||
599 | spin_unlock_irqrestore(&rq->lock, flags); | ||
600 | ev.device = qp->ibqp.device; | ||
601 | ev.element.srq = qp->ibqp.srq; | ||
602 | ev.event = IB_EVENT_SRQ_LIMIT_REACHED; | ||
603 | srq->ibsrq.event_handler(&ev, | ||
604 | srq->ibsrq.srq_context); | ||
605 | } else | ||
606 | spin_unlock_irqrestore(&rq->lock, flags); | ||
607 | } else | ||
608 | spin_unlock_irqrestore(&rq->lock, flags); | ||
609 | if (has_grh) { | 644 | if (has_grh) { |
610 | ipath_copy_sge(&qp->r_sge, &hdr->u.l.grh, | 645 | ipath_copy_sge(&qp->r_sge, &hdr->u.l.grh, |
611 | sizeof(struct ib_grh)); | 646 | sizeof(struct ib_grh)); |
@@ -614,6 +649,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, | |||
614 | ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh)); | 649 | ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh)); |
615 | ipath_copy_sge(&qp->r_sge, data, | 650 | ipath_copy_sge(&qp->r_sge, data, |
616 | wc.byte_len - sizeof(struct ib_grh)); | 651 | wc.byte_len - sizeof(struct ib_grh)); |
652 | wc.wr_id = qp->r_wr_id; | ||
617 | wc.status = IB_WC_SUCCESS; | 653 | wc.status = IB_WC_SUCCESS; |
618 | wc.opcode = IB_WC_RECV; | 654 | wc.opcode = IB_WC_RECV; |
619 | wc.vendor_err = 0; | 655 | wc.vendor_err = 0; |
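The loopback path above raises IB_EVENT_SRQ_LIMIT_REACHED when the number of WQEs left on an SRQ drops below the armed limit, clearing the limit as it does so. From userspace the same mechanism is driven through the standard verbs API; a brief, hedged example using libibverbs:

    #include <stdint.h>
    #include <infiniband/verbs.h>

    /* Arm the limit: ask for IBV_EVENT_SRQ_LIMIT_REACHED once fewer than
     * 'limit' receive WQEs remain queued on the SRQ. */
    static int arm_srq_limit(struct ibv_srq *srq, uint32_t limit)
    {
        struct ibv_srq_attr attr = { .srq_limit = limit };

        return ibv_modify_srq(srq, &attr, IBV_SRQ_LIMIT);
    }

    /* In the application's async-event loop: repost receives and re-arm,
     * since the limit is one-shot. */
    static void drain_async_events(struct ibv_context *ctx)
    {
        struct ibv_async_event ev;

        if (ibv_get_async_event(ctx, &ev))
            return;
        if (ev.event_type == IBV_EVENT_SRQ_LIMIT_REACHED) {
            /* post more receive WQEs here, then arm_srq_limit() again */
        }
        ibv_ack_async_event(&ev);
    }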
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c index d70a9b6b5239..b8381c5e72bd 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/drivers/infiniband/hw/ipath/ipath_verbs.c | |||
@@ -33,15 +33,13 @@ | |||
33 | 33 | ||
34 | #include <rdma/ib_mad.h> | 34 | #include <rdma/ib_mad.h> |
35 | #include <rdma/ib_user_verbs.h> | 35 | #include <rdma/ib_user_verbs.h> |
36 | #include <linux/io.h> | ||
36 | #include <linux/utsname.h> | 37 | #include <linux/utsname.h> |
37 | 38 | ||
38 | #include "ipath_kernel.h" | 39 | #include "ipath_kernel.h" |
39 | #include "ipath_verbs.h" | 40 | #include "ipath_verbs.h" |
40 | #include "ipath_common.h" | 41 | #include "ipath_common.h" |
41 | 42 | ||
42 | /* Not static, because we don't want the compiler removing it */ | ||
43 | const char ipath_verbs_version[] = "ipath_verbs " IPATH_IDSTR; | ||
44 | |||
45 | static unsigned int ib_ipath_qp_table_size = 251; | 43 | static unsigned int ib_ipath_qp_table_size = 251; |
46 | module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO); | 44 | module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO); |
47 | MODULE_PARM_DESC(qp_table_size, "QP table size"); | 45 | MODULE_PARM_DESC(qp_table_size, "QP table size"); |
@@ -52,10 +50,6 @@ module_param_named(lkey_table_size, ib_ipath_lkey_table_size, uint, | |||
52 | MODULE_PARM_DESC(lkey_table_size, | 50 | MODULE_PARM_DESC(lkey_table_size, |
53 | "LKEY table size in bits (2^n, 1 <= n <= 23)"); | 51 | "LKEY table size in bits (2^n, 1 <= n <= 23)"); |
54 | 52 | ||
55 | unsigned int ib_ipath_debug; /* debug mask */ | ||
56 | module_param_named(debug, ib_ipath_debug, uint, S_IWUSR | S_IRUGO); | ||
57 | MODULE_PARM_DESC(debug, "Verbs debug mask"); | ||
58 | |||
59 | static unsigned int ib_ipath_max_pds = 0xFFFF; | 53 | static unsigned int ib_ipath_max_pds = 0xFFFF; |
60 | module_param_named(max_pds, ib_ipath_max_pds, uint, S_IWUSR | S_IRUGO); | 54 | module_param_named(max_pds, ib_ipath_max_pds, uint, S_IWUSR | S_IRUGO); |
61 | MODULE_PARM_DESC(max_pds, | 55 | MODULE_PARM_DESC(max_pds, |
@@ -79,6 +73,10 @@ module_param_named(max_qp_wrs, ib_ipath_max_qp_wrs, uint, | |||
79 | S_IWUSR | S_IRUGO); | 73 | S_IWUSR | S_IRUGO); |
80 | MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support"); | 74 | MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support"); |
81 | 75 | ||
76 | unsigned int ib_ipath_max_qps = 16384; | ||
77 | module_param_named(max_qps, ib_ipath_max_qps, uint, S_IWUSR | S_IRUGO); | ||
78 | MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support"); | ||
79 | |||
82 | unsigned int ib_ipath_max_sges = 0x60; | 80 | unsigned int ib_ipath_max_sges = 0x60; |
83 | module_param_named(max_sges, ib_ipath_max_sges, uint, S_IWUSR | S_IRUGO); | 81 | module_param_named(max_sges, ib_ipath_max_sges, uint, S_IWUSR | S_IRUGO); |
84 | MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support"); | 82 | MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support"); |
@@ -109,9 +107,9 @@ module_param_named(max_srq_wrs, ib_ipath_max_srq_wrs, | |||
109 | uint, S_IWUSR | S_IRUGO); | 107 | uint, S_IWUSR | S_IRUGO); |
110 | MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs support"); | 108 | MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs support"); |
111 | 109 | ||
112 | MODULE_LICENSE("GPL"); | 110 | static unsigned int ib_ipath_disable_sma; |
113 | MODULE_AUTHOR("QLogic <support@pathscale.com>"); | 111 | module_param_named(disable_sma, ib_ipath_disable_sma, uint, S_IWUSR | S_IRUGO); |
114 | MODULE_DESCRIPTION("QLogic InfiniPath driver"); | 112 | MODULE_PARM_DESC(ib_ipath_disable_sma, "Disable the SMA"); |
115 | 113 | ||
116 | const int ib_ipath_state_ops[IB_QPS_ERR + 1] = { | 114 | const int ib_ipath_state_ops[IB_QPS_ERR + 1] = { |
117 | [IB_QPS_RESET] = 0, | 115 | [IB_QPS_RESET] = 0, |
@@ -125,6 +123,16 @@ const int ib_ipath_state_ops[IB_QPS_ERR + 1] = { | |||
125 | [IB_QPS_ERR] = 0, | 123 | [IB_QPS_ERR] = 0, |
126 | }; | 124 | }; |
127 | 125 | ||
126 | struct ipath_ucontext { | ||
127 | struct ib_ucontext ibucontext; | ||
128 | }; | ||
129 | |||
130 | static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext | ||
131 | *ibucontext) | ||
132 | { | ||
133 | return container_of(ibucontext, struct ipath_ucontext, ibucontext); | ||
134 | } | ||
135 | |||
128 | /* | 136 | /* |
129 | * Translate ib_wr_opcode into ib_wc_opcode. | 137 | * Translate ib_wr_opcode into ib_wc_opcode. |
130 | */ | 138 | */ |
@@ -277,11 +285,12 @@ static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
277 | struct ib_recv_wr **bad_wr) | 285 | struct ib_recv_wr **bad_wr) |
278 | { | 286 | { |
279 | struct ipath_qp *qp = to_iqp(ibqp); | 287 | struct ipath_qp *qp = to_iqp(ibqp); |
288 | struct ipath_rwq *wq = qp->r_rq.wq; | ||
280 | unsigned long flags; | 289 | unsigned long flags; |
281 | int ret; | 290 | int ret; |
282 | 291 | ||
283 | /* Check that state is OK to post receive. */ | 292 | /* Check that state is OK to post receive. */ |
284 | if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK)) { | 293 | if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK) || !wq) { |
285 | *bad_wr = wr; | 294 | *bad_wr = wr; |
286 | ret = -EINVAL; | 295 | ret = -EINVAL; |
287 | goto bail; | 296 | goto bail; |
@@ -290,59 +299,31 @@ static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
290 | for (; wr; wr = wr->next) { | 299 | for (; wr; wr = wr->next) { |
291 | struct ipath_rwqe *wqe; | 300 | struct ipath_rwqe *wqe; |
292 | u32 next; | 301 | u32 next; |
293 | int i, j; | 302 | int i; |
294 | 303 | ||
295 | if (wr->num_sge > qp->r_rq.max_sge) { | 304 | if ((unsigned) wr->num_sge > qp->r_rq.max_sge) { |
296 | *bad_wr = wr; | 305 | *bad_wr = wr; |
297 | ret = -ENOMEM; | 306 | ret = -ENOMEM; |
298 | goto bail; | 307 | goto bail; |
299 | } | 308 | } |
300 | 309 | ||
301 | spin_lock_irqsave(&qp->r_rq.lock, flags); | 310 | spin_lock_irqsave(&qp->r_rq.lock, flags); |
302 | next = qp->r_rq.head + 1; | 311 | next = wq->head + 1; |
303 | if (next >= qp->r_rq.size) | 312 | if (next >= qp->r_rq.size) |
304 | next = 0; | 313 | next = 0; |
305 | if (next == qp->r_rq.tail) { | 314 | if (next == wq->tail) { |
306 | spin_unlock_irqrestore(&qp->r_rq.lock, flags); | 315 | spin_unlock_irqrestore(&qp->r_rq.lock, flags); |
307 | *bad_wr = wr; | 316 | *bad_wr = wr; |
308 | ret = -ENOMEM; | 317 | ret = -ENOMEM; |
309 | goto bail; | 318 | goto bail; |
310 | } | 319 | } |
311 | 320 | ||
312 | wqe = get_rwqe_ptr(&qp->r_rq, qp->r_rq.head); | 321 | wqe = get_rwqe_ptr(&qp->r_rq, wq->head); |
313 | wqe->wr_id = wr->wr_id; | 322 | wqe->wr_id = wr->wr_id; |
314 | wqe->sg_list[0].mr = NULL; | 323 | wqe->num_sge = wr->num_sge; |
315 | wqe->sg_list[0].vaddr = NULL; | 324 | for (i = 0; i < wr->num_sge; i++) |
316 | wqe->sg_list[0].length = 0; | 325 | wqe->sg_list[i] = wr->sg_list[i]; |
317 | wqe->sg_list[0].sge_length = 0; | 326 | wq->head = next; |
318 | wqe->length = 0; | ||
319 | for (i = 0, j = 0; i < wr->num_sge; i++) { | ||
320 | /* Check LKEY */ | ||
321 | if (to_ipd(qp->ibqp.pd)->user && | ||
322 | wr->sg_list[i].lkey == 0) { | ||
323 | spin_unlock_irqrestore(&qp->r_rq.lock, | ||
324 | flags); | ||
325 | *bad_wr = wr; | ||
326 | ret = -EINVAL; | ||
327 | goto bail; | ||
328 | } | ||
329 | if (wr->sg_list[i].length == 0) | ||
330 | continue; | ||
331 | if (!ipath_lkey_ok( | ||
332 | &to_idev(qp->ibqp.device)->lk_table, | ||
333 | &wqe->sg_list[j], &wr->sg_list[i], | ||
334 | IB_ACCESS_LOCAL_WRITE)) { | ||
335 | spin_unlock_irqrestore(&qp->r_rq.lock, | ||
336 | flags); | ||
337 | *bad_wr = wr; | ||
338 | ret = -EINVAL; | ||
339 | goto bail; | ||
340 | } | ||
341 | wqe->length += wr->sg_list[i].length; | ||
342 | j++; | ||
343 | } | ||
344 | wqe->num_sge = j; | ||
345 | qp->r_rq.head = next; | ||
346 | spin_unlock_irqrestore(&qp->r_rq.lock, flags); | 327 | spin_unlock_irqrestore(&qp->r_rq.lock, flags); |
347 | } | 328 | } |
348 | ret = 0; | 329 | ret = 0; |
@@ -377,6 +358,9 @@ static void ipath_qp_rcv(struct ipath_ibdev *dev, | |||
377 | switch (qp->ibqp.qp_type) { | 358 | switch (qp->ibqp.qp_type) { |
378 | case IB_QPT_SMI: | 359 | case IB_QPT_SMI: |
379 | case IB_QPT_GSI: | 360 | case IB_QPT_GSI: |
361 | if (ib_ipath_disable_sma) | ||
362 | break; | ||
363 | /* FALLTHROUGH */ | ||
380 | case IB_QPT_UD: | 364 | case IB_QPT_UD: |
381 | ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp); | 365 | ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp); |
382 | break; | 366 | break; |
@@ -395,7 +379,7 @@ static void ipath_qp_rcv(struct ipath_ibdev *dev, | |||
395 | } | 379 | } |
396 | 380 | ||
397 | /** | 381 | /** |
398 | * ipath_ib_rcv - process and incoming packet | 382 | * ipath_ib_rcv - process an incoming packet |
399 | * @arg: the device pointer | 383 | * @arg: the device pointer |
400 | * @rhdr: the header of the packet | 384 | * @rhdr: the header of the packet |
401 | * @data: the packet data | 385 | * @data: the packet data |
@@ -404,9 +388,9 @@ static void ipath_qp_rcv(struct ipath_ibdev *dev, | |||
404 | * This is called from ipath_kreceive() to process an incoming packet at | 388 | * This is called from ipath_kreceive() to process an incoming packet at |
405 | * interrupt level. Tlen is the length of the header + data + CRC in bytes. | 389 | * interrupt level. Tlen is the length of the header + data + CRC in bytes. |
406 | */ | 390 | */ |
407 | static void ipath_ib_rcv(void *arg, void *rhdr, void *data, u32 tlen) | 391 | void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data, |
392 | u32 tlen) | ||
408 | { | 393 | { |
409 | struct ipath_ibdev *dev = (struct ipath_ibdev *) arg; | ||
410 | struct ipath_ib_header *hdr = rhdr; | 394 | struct ipath_ib_header *hdr = rhdr; |
411 | struct ipath_other_headers *ohdr; | 395 | struct ipath_other_headers *ohdr; |
412 | struct ipath_qp *qp; | 396 | struct ipath_qp *qp; |
@@ -427,7 +411,7 @@ static void ipath_ib_rcv(void *arg, void *rhdr, void *data, u32 tlen) | |||
427 | lid = be16_to_cpu(hdr->lrh[1]); | 411 | lid = be16_to_cpu(hdr->lrh[1]); |
428 | if (lid < IPATH_MULTICAST_LID_BASE) { | 412 | if (lid < IPATH_MULTICAST_LID_BASE) { |
429 | lid &= ~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1); | 413 | lid &= ~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1); |
430 | if (unlikely(lid != ipath_layer_get_lid(dev->dd))) { | 414 | if (unlikely(lid != dev->dd->ipath_lid)) { |
431 | dev->rcv_errors++; | 415 | dev->rcv_errors++; |
432 | goto bail; | 416 | goto bail; |
433 | } | 417 | } |
@@ -495,9 +479,8 @@ bail:; | |||
495 | * This is called from ipath_do_rcv_timer() at interrupt level to check for | 479 | * This is called from ipath_do_rcv_timer() at interrupt level to check for |
496 | * QPs which need retransmits and to collect performance numbers. | 480 | * QPs which need retransmits and to collect performance numbers. |
497 | */ | 481 | */ |
498 | static void ipath_ib_timer(void *arg) | 482 | void ipath_ib_timer(struct ipath_ibdev *dev) |
499 | { | 483 | { |
500 | struct ipath_ibdev *dev = (struct ipath_ibdev *) arg; | ||
501 | struct ipath_qp *resend = NULL; | 484 | struct ipath_qp *resend = NULL; |
502 | struct list_head *last; | 485 | struct list_head *last; |
503 | struct ipath_qp *qp; | 486 | struct ipath_qp *qp; |
@@ -539,19 +522,19 @@ static void ipath_ib_timer(void *arg) | |||
539 | if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED && | 522 | if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED && |
540 | --dev->pma_sample_start == 0) { | 523 | --dev->pma_sample_start == 0) { |
541 | dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING; | 524 | dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING; |
542 | ipath_layer_snapshot_counters(dev->dd, &dev->ipath_sword, | 525 | ipath_snapshot_counters(dev->dd, &dev->ipath_sword, |
543 | &dev->ipath_rword, | 526 | &dev->ipath_rword, |
544 | &dev->ipath_spkts, | 527 | &dev->ipath_spkts, |
545 | &dev->ipath_rpkts, | 528 | &dev->ipath_rpkts, |
546 | &dev->ipath_xmit_wait); | 529 | &dev->ipath_xmit_wait); |
547 | } | 530 | } |
548 | if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) { | 531 | if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) { |
549 | if (dev->pma_sample_interval == 0) { | 532 | if (dev->pma_sample_interval == 0) { |
550 | u64 ta, tb, tc, td, te; | 533 | u64 ta, tb, tc, td, te; |
551 | 534 | ||
552 | dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE; | 535 | dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE; |
553 | ipath_layer_snapshot_counters(dev->dd, &ta, &tb, | 536 | ipath_snapshot_counters(dev->dd, &ta, &tb, |
554 | &tc, &td, &te); | 537 | &tc, &td, &te); |
555 | 538 | ||
556 | dev->ipath_sword = ta - dev->ipath_sword; | 539 | dev->ipath_sword = ta - dev->ipath_sword; |
557 | dev->ipath_rword = tb - dev->ipath_rword; | 540 | dev->ipath_rword = tb - dev->ipath_rword; |
@@ -581,6 +564,362 @@ static void ipath_ib_timer(void *arg) | |||
581 | } | 564 | } |
582 | } | 565 | } |
583 | 566 | ||
567 | static void update_sge(struct ipath_sge_state *ss, u32 length) | ||
568 | { | ||
569 | struct ipath_sge *sge = &ss->sge; | ||
570 | |||
571 | sge->vaddr += length; | ||
572 | sge->length -= length; | ||
573 | sge->sge_length -= length; | ||
574 | if (sge->sge_length == 0) { | ||
575 | if (--ss->num_sge) | ||
576 | *sge = *ss->sg_list++; | ||
577 | } else if (sge->length == 0 && sge->mr != NULL) { | ||
578 | if (++sge->n >= IPATH_SEGSZ) { | ||
579 | if (++sge->m >= sge->mr->mapsz) | ||
580 | return; | ||
581 | sge->n = 0; | ||
582 | } | ||
583 | sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; | ||
584 | sge->length = sge->mr->map[sge->m]->segs[sge->n].length; | ||
585 | } | ||
586 | } | ||
587 | |||
588 | #ifdef __LITTLE_ENDIAN | ||
589 | static inline u32 get_upper_bits(u32 data, u32 shift) | ||
590 | { | ||
591 | return data >> shift; | ||
592 | } | ||
593 | |||
594 | static inline u32 set_upper_bits(u32 data, u32 shift) | ||
595 | { | ||
596 | return data << shift; | ||
597 | } | ||
598 | |||
599 | static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off) | ||
600 | { | ||
601 | data <<= ((sizeof(u32) - n) * BITS_PER_BYTE); | ||
602 | data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE); | ||
603 | return data; | ||
604 | } | ||
605 | #else | ||
606 | static inline u32 get_upper_bits(u32 data, u32 shift) | ||
607 | { | ||
608 | return data << shift; | ||
609 | } | ||
610 | |||
611 | static inline u32 set_upper_bits(u32 data, u32 shift) | ||
612 | { | ||
613 | return data >> shift; | ||
614 | } | ||
615 | |||
616 | static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off) | ||
617 | { | ||
618 | data >>= ((sizeof(u32) - n) * BITS_PER_BYTE); | ||
619 | data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE); | ||
620 | return data; | ||
621 | } | ||
622 | #endif | ||
623 | |||
624 | static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss, | ||
625 | u32 length) | ||
626 | { | ||
627 | u32 extra = 0; | ||
628 | u32 data = 0; | ||
629 | u32 last; | ||
630 | |||
631 | while (1) { | ||
632 | u32 len = ss->sge.length; | ||
633 | u32 off; | ||
634 | |||
635 | BUG_ON(len == 0); | ||
636 | if (len > length) | ||
637 | len = length; | ||
638 | if (len > ss->sge.sge_length) | ||
639 | len = ss->sge.sge_length; | ||
640 | /* If the source address is not aligned, try to align it. */ | ||
641 | off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1); | ||
642 | if (off) { | ||
643 | u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr & | ||
644 | ~(sizeof(u32) - 1)); | ||
645 | u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE); | ||
646 | u32 y; | ||
647 | |||
648 | y = sizeof(u32) - off; | ||
649 | if (len > y) | ||
650 | len = y; | ||
651 | if (len + extra >= sizeof(u32)) { | ||
652 | data |= set_upper_bits(v, extra * | ||
653 | BITS_PER_BYTE); | ||
654 | len = sizeof(u32) - extra; | ||
655 | if (len == length) { | ||
656 | last = data; | ||
657 | break; | ||
658 | } | ||
659 | __raw_writel(data, piobuf); | ||
660 | piobuf++; | ||
661 | extra = 0; | ||
662 | data = 0; | ||
663 | } else { | ||
664 | /* Clear unused upper bytes */ | ||
665 | data |= clear_upper_bytes(v, len, extra); | ||
666 | if (len == length) { | ||
667 | last = data; | ||
668 | break; | ||
669 | } | ||
670 | extra += len; | ||
671 | } | ||
672 | } else if (extra) { | ||
673 | /* Source address is aligned. */ | ||
674 | u32 *addr = (u32 *) ss->sge.vaddr; | ||
675 | int shift = extra * BITS_PER_BYTE; | ||
676 | int ushift = 32 - shift; | ||
677 | u32 l = len; | ||
678 | |||
679 | while (l >= sizeof(u32)) { | ||
680 | u32 v = *addr; | ||
681 | |||
682 | data |= set_upper_bits(v, shift); | ||
683 | __raw_writel(data, piobuf); | ||
684 | data = get_upper_bits(v, ushift); | ||
685 | piobuf++; | ||
686 | addr++; | ||
687 | l -= sizeof(u32); | ||
688 | } | ||
689 | /* | ||
690 | * We still have 'extra' number of bytes leftover. | ||
691 | */ | ||
692 | if (l) { | ||
693 | u32 v = *addr; | ||
694 | |||
695 | if (l + extra >= sizeof(u32)) { | ||
696 | data |= set_upper_bits(v, shift); | ||
697 | len -= l + extra - sizeof(u32); | ||
698 | if (len == length) { | ||
699 | last = data; | ||
700 | break; | ||
701 | } | ||
702 | __raw_writel(data, piobuf); | ||
703 | piobuf++; | ||
704 | extra = 0; | ||
705 | data = 0; | ||
706 | } else { | ||
707 | /* Clear unused upper bytes */ | ||
708 | data |= clear_upper_bytes(v, l, | ||
709 | extra); | ||
710 | if (len == length) { | ||
711 | last = data; | ||
712 | break; | ||
713 | } | ||
714 | extra += l; | ||
715 | } | ||
716 | } else if (len == length) { | ||
717 | last = data; | ||
718 | break; | ||
719 | } | ||
720 | } else if (len == length) { | ||
721 | u32 w; | ||
722 | |||
723 | /* | ||
724 | * Need to round up for the last dword in the | ||
725 | * packet. | ||
726 | */ | ||
727 | w = (len + 3) >> 2; | ||
728 | __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1); | ||
729 | piobuf += w - 1; | ||
730 | last = ((u32 *) ss->sge.vaddr)[w - 1]; | ||
731 | break; | ||
732 | } else { | ||
733 | u32 w = len >> 2; | ||
734 | |||
735 | __iowrite32_copy(piobuf, ss->sge.vaddr, w); | ||
736 | piobuf += w; | ||
737 | |||
738 | extra = len & (sizeof(u32) - 1); | ||
739 | if (extra) { | ||
740 | u32 v = ((u32 *) ss->sge.vaddr)[w]; | ||
741 | |||
742 | /* Clear unused upper bytes */ | ||
743 | data = clear_upper_bytes(v, extra, 0); | ||
744 | } | ||
745 | } | ||
746 | update_sge(ss, len); | ||
747 | length -= len; | ||
748 | } | ||
749 | /* Update address before sending packet. */ | ||
750 | update_sge(ss, length); | ||
751 | /* must flush early everything before trigger word */ | ||
752 | ipath_flush_wc(); | ||
753 | __raw_writel(last, piobuf); | ||
754 | /* be sure trigger word is written */ | ||
755 | ipath_flush_wc(); | ||
756 | } | ||
757 | |||
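The unaligned path in copy_io() above stages source bytes in a 32-bit word before each __raw_writel(): on a little-endian host, set_upper_bits() shifts new bytes above the `extra` bytes already held in `data`, and clear_upper_bytes() masks off the lanes that are not yet filled. The following is a standalone user-space sketch of that staging-word invariant, not driver code; the byte values and printf() calls are purely illustrative.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Little-endian illustration of the copy_io() staging word: "data" holds
 * "extra" valid bytes in its low-order lanes, and a word is emitted (the
 * driver's __raw_writel) once all four lanes are full.
 */
static void pack_words(const uint8_t *src, size_t len)
{
	uint32_t data = 0;
	unsigned extra = 0;		/* valid bytes currently staged in data */

	for (size_t i = 0; i < len; i++) {
		data |= (uint32_t)src[i] << (extra * 8);  /* set_upper_bits() on LE */
		if (++extra == sizeof(uint32_t)) {
			printf("word 0x%08x\n", data);	  /* __raw_writel(data, piobuf++) */
			data = 0;
			extra = 0;
		}
	}
	if (extra)
		printf("last 0x%08x\n", data);	/* partial word kept for the trigger write */
}

int main(void)
{
	const uint8_t payload[] = { 1, 2, 3, 4, 5, 6 };

	pack_words(payload, sizeof(payload));	/* prints 0x04030201, then 0x00000605 */
	return 0;
}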
758 | /** | ||
759 | * ipath_verbs_send - send a packet | ||
760 | * @dd: the infinipath device | ||
761 | * @hdrwords: the number of words in the header | ||
762 | * @hdr: the packet header | ||
763 | * @len: the length of the packet in bytes | ||
764 | * @ss: the SGE to send | ||
765 | */ | ||
766 | int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords, | ||
767 | u32 *hdr, u32 len, struct ipath_sge_state *ss) | ||
768 | { | ||
769 | u32 __iomem *piobuf; | ||
770 | u32 plen; | ||
771 | int ret; | ||
772 | |||
773 | /* +1 is for the qword padding of pbc */ | ||
774 | plen = hdrwords + ((len + 3) >> 2) + 1; | ||
775 | if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) { | ||
776 | ipath_dbg("packet len 0x%x too long, failing\n", plen); | ||
777 | ret = -EINVAL; | ||
778 | goto bail; | ||
779 | } | ||
780 | |||
781 | /* Get a PIO buffer to use. */ | ||
782 | piobuf = ipath_getpiobuf(dd, NULL); | ||
783 | if (unlikely(piobuf == NULL)) { | ||
784 | ret = -EBUSY; | ||
785 | goto bail; | ||
786 | } | ||
787 | |||
788 | /* | ||
789 | * Write len to control qword, no flags. | ||
790 | * We have to flush after the PBC for correctness on some cpus | ||
791 | * or WC buffer can be written out of order. | ||
792 | */ | ||
793 | writeq(plen, piobuf); | ||
794 | ipath_flush_wc(); | ||
795 | piobuf += 2; | ||
796 | if (len == 0) { | ||
797 | /* | ||
798 | * If there is just the header portion, must flush before | ||
799 | * writing last word of header for correctness, and after | ||
800 | * the last header word (trigger word). | ||
801 | */ | ||
802 | __iowrite32_copy(piobuf, hdr, hdrwords - 1); | ||
803 | ipath_flush_wc(); | ||
804 | __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1); | ||
805 | ipath_flush_wc(); | ||
806 | ret = 0; | ||
807 | goto bail; | ||
808 | } | ||
809 | |||
810 | __iowrite32_copy(piobuf, hdr, hdrwords); | ||
811 | piobuf += hdrwords; | ||
812 | |||
813 | /* The common case is aligned and contained in one segment. */ | ||
814 | if (likely(ss->num_sge == 1 && len <= ss->sge.length && | ||
815 | !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) { | ||
816 | u32 w; | ||
817 | u32 *addr = (u32 *) ss->sge.vaddr; | ||
818 | |||
819 | /* Update address before sending packet. */ | ||
820 | update_sge(ss, len); | ||
821 | /* Need to round up for the last dword in the packet. */ | ||
822 | w = (len + 3) >> 2; | ||
823 | __iowrite32_copy(piobuf, addr, w - 1); | ||
824 | /* must flush early everything before trigger word */ | ||
825 | ipath_flush_wc(); | ||
826 | __raw_writel(addr[w - 1], piobuf + w - 1); | ||
827 | /* be sure trigger word is written */ | ||
828 | ipath_flush_wc(); | ||
829 | ret = 0; | ||
830 | goto bail; | ||
831 | } | ||
832 | copy_io(piobuf, ss, len); | ||
833 | ret = 0; | ||
834 | |||
835 | bail: | ||
836 | return ret; | ||
837 | } | ||
838 | |||
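The length programmed by ipath_verbs_send() is in 32-bit words: the header words, the payload rounded up to a whole dword, plus one extra word for the qword-padded PBC (per the driver's own comment). A small worked example with hypothetical sizes, reusing the function's local names; this is arithmetic illustration only, not part of the driver.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hdrwords = 10;	/* e.g. a 40-byte LRH/BTH/DETH header */
	uint32_t len = 19;	/* payload bytes */

	/* Same arithmetic as ipath_verbs_send(): +1 for the PBC qword padding. */
	uint32_t plen = hdrwords + ((len + 3) >> 2) + 1;	/* 10 + 5 + 1 = 16 */

	/* The byte count (plen << 2) is what is checked against ipath_ibmaxlen. */
	printf("plen = %u dwords (%u bytes)\n", plen, plen << 2);
	return 0;
}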
839 | int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords, | ||
840 | u64 *rwords, u64 *spkts, u64 *rpkts, | ||
841 | u64 *xmit_wait) | ||
842 | { | ||
843 | int ret; | ||
844 | |||
845 | if (!(dd->ipath_flags & IPATH_INITTED)) { | ||
846 | /* no hardware, freeze, etc. */ | ||
847 | ipath_dbg("unit %u not usable\n", dd->ipath_unit); | ||
848 | ret = -EINVAL; | ||
849 | goto bail; | ||
850 | } | ||
851 | *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt); | ||
852 | *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt); | ||
853 | *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt); | ||
854 | *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt); | ||
855 | *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt); | ||
856 | |||
857 | ret = 0; | ||
858 | |||
859 | bail: | ||
860 | return ret; | ||
861 | } | ||
862 | |||
863 | /** | ||
864 | * ipath_get_counters - get various chip counters | ||
865 | * @dd: the infinipath device | ||
866 | * @cntrs: counters are placed here | ||
867 | * | ||
868 | * Return the counters needed by recv_pma_get_portcounters(). | ||
869 | */ | ||
870 | int ipath_get_counters(struct ipath_devdata *dd, | ||
871 | struct ipath_verbs_counters *cntrs) | ||
872 | { | ||
873 | int ret; | ||
874 | |||
875 | if (!(dd->ipath_flags & IPATH_INITTED)) { | ||
876 | /* no hardware, freeze, etc. */ | ||
877 | ipath_dbg("unit %u not usable\n", dd->ipath_unit); | ||
878 | ret = -EINVAL; | ||
879 | goto bail; | ||
880 | } | ||
881 | cntrs->symbol_error_counter = | ||
882 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt); | ||
883 | cntrs->link_error_recovery_counter = | ||
884 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt); | ||
885 | /* | ||
886 | * The link downed counter counts when the other side downs the | ||
887 | * connection. We add in the number of times we downed the link | ||
888 | * due to local link integrity errors to compensate. | ||
889 | */ | ||
890 | cntrs->link_downed_counter = | ||
891 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt); | ||
892 | cntrs->port_rcv_errors = | ||
893 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) + | ||
894 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) + | ||
895 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) + | ||
896 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) + | ||
897 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) + | ||
898 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) + | ||
899 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) + | ||
900 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) + | ||
901 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt); | ||
902 | cntrs->port_rcv_remphys_errors = | ||
903 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt); | ||
904 | cntrs->port_xmit_discards = | ||
905 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt); | ||
906 | cntrs->port_xmit_data = | ||
907 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt); | ||
908 | cntrs->port_rcv_data = | ||
909 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt); | ||
910 | cntrs->port_xmit_packets = | ||
911 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt); | ||
912 | cntrs->port_rcv_packets = | ||
913 | ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt); | ||
914 | cntrs->local_link_integrity_errors = dd->ipath_lli_errors; | ||
915 | cntrs->excessive_buffer_overrun_errors = 0; /* XXX */ | ||
916 | |||
917 | ret = 0; | ||
918 | |||
919 | bail: | ||
920 | return ret; | ||
921 | } | ||
922 | |||
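ipath_get_counters() returns raw chip counters; as the registration path later in this file shows, the verbs layer snapshots them once into z_* baseline fields so that subsequent reads can be reported relative to that snapshot. A minimal standalone sketch of that baseline subtraction follows; the values are hypothetical and hardware counter wrap is not modelled.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t z_symbol_error_counter = 17;	/* snapshot taken at registration time */
	uint64_t symbol_error_counter = 42;	/* current ipath_get_counters() reading */

	/* Report the delta since the baseline was captured. */
	printf("reported symbol errors: %llu\n",
	       (unsigned long long)(symbol_error_counter - z_symbol_error_counter));
	return 0;
}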
584 | /** | 923 | /** |
585 | * ipath_ib_piobufavail - callback when a PIO buffer is available | 924 | * ipath_ib_piobufavail - callback when a PIO buffer is available |
586 | * @arg: the device pointer | 925 | * @arg: the device pointer |
@@ -591,9 +930,8 @@ static void ipath_ib_timer(void *arg) | |||
591 | * QPs waiting for buffers (for now, just do a tasklet_hi_schedule and | 930 | * QPs waiting for buffers (for now, just do a tasklet_hi_schedule and |
592 | * return zero). | 931 | * return zero). |
593 | */ | 932 | */ |
594 | static int ipath_ib_piobufavail(void *arg) | 933 | int ipath_ib_piobufavail(struct ipath_ibdev *dev) |
595 | { | 934 | { |
596 | struct ipath_ibdev *dev = (struct ipath_ibdev *) arg; | ||
597 | struct ipath_qp *qp; | 935 | struct ipath_qp *qp; |
598 | unsigned long flags; | 936 | unsigned long flags; |
599 | 937 | ||
@@ -624,14 +962,14 @@ static int ipath_query_device(struct ib_device *ibdev, | |||
624 | IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT | | 962 | IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT | |
625 | IB_DEVICE_SYS_IMAGE_GUID; | 963 | IB_DEVICE_SYS_IMAGE_GUID; |
626 | props->page_size_cap = PAGE_SIZE; | 964 | props->page_size_cap = PAGE_SIZE; |
627 | props->vendor_id = ipath_layer_get_vendorid(dev->dd); | 965 | props->vendor_id = dev->dd->ipath_vendorid; |
628 | props->vendor_part_id = ipath_layer_get_deviceid(dev->dd); | 966 | props->vendor_part_id = dev->dd->ipath_deviceid; |
629 | props->hw_ver = ipath_layer_get_pcirev(dev->dd); | 967 | props->hw_ver = dev->dd->ipath_pcirev; |
630 | 968 | ||
631 | props->sys_image_guid = dev->sys_image_guid; | 969 | props->sys_image_guid = dev->sys_image_guid; |
632 | 970 | ||
633 | props->max_mr_size = ~0ull; | 971 | props->max_mr_size = ~0ull; |
634 | props->max_qp = dev->qp_table.max; | 972 | props->max_qp = ib_ipath_max_qps; |
635 | props->max_qp_wr = ib_ipath_max_qp_wrs; | 973 | props->max_qp_wr = ib_ipath_max_qp_wrs; |
636 | props->max_sge = ib_ipath_max_sges; | 974 | props->max_sge = ib_ipath_max_sges; |
637 | props->max_cq = ib_ipath_max_cqs; | 975 | props->max_cq = ib_ipath_max_cqs; |
@@ -647,7 +985,7 @@ static int ipath_query_device(struct ib_device *ibdev, | |||
647 | props->max_srq_sge = ib_ipath_max_srq_sges; | 985 | props->max_srq_sge = ib_ipath_max_srq_sges; |
648 | /* props->local_ca_ack_delay */ | 986 | /* props->local_ca_ack_delay */ |
649 | props->atomic_cap = IB_ATOMIC_HCA; | 987 | props->atomic_cap = IB_ATOMIC_HCA; |
650 | props->max_pkeys = ipath_layer_get_npkeys(dev->dd); | 988 | props->max_pkeys = ipath_get_npkeys(dev->dd); |
651 | props->max_mcast_grp = ib_ipath_max_mcast_grps; | 989 | props->max_mcast_grp = ib_ipath_max_mcast_grps; |
652 | props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached; | 990 | props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached; |
653 | props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * | 991 | props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * |
@@ -672,12 +1010,17 @@ const u8 ipath_cvt_physportstate[16] = { | |||
672 | [INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] = 6, | 1010 | [INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] = 6, |
673 | }; | 1011 | }; |
674 | 1012 | ||
1013 | u32 ipath_get_cr_errpkey(struct ipath_devdata *dd) | ||
1014 | { | ||
1015 | return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey); | ||
1016 | } | ||
1017 | |||
675 | static int ipath_query_port(struct ib_device *ibdev, | 1018 | static int ipath_query_port(struct ib_device *ibdev, |
676 | u8 port, struct ib_port_attr *props) | 1019 | u8 port, struct ib_port_attr *props) |
677 | { | 1020 | { |
678 | struct ipath_ibdev *dev = to_idev(ibdev); | 1021 | struct ipath_ibdev *dev = to_idev(ibdev); |
679 | enum ib_mtu mtu; | 1022 | enum ib_mtu mtu; |
680 | u16 lid = ipath_layer_get_lid(dev->dd); | 1023 | u16 lid = dev->dd->ipath_lid; |
681 | u64 ibcstat; | 1024 | u64 ibcstat; |
682 | 1025 | ||
683 | memset(props, 0, sizeof(*props)); | 1026 | memset(props, 0, sizeof(*props)); |
@@ -685,16 +1028,16 @@ static int ipath_query_port(struct ib_device *ibdev, | |||
685 | props->lmc = dev->mkeyprot_resv_lmc & 7; | 1028 | props->lmc = dev->mkeyprot_resv_lmc & 7; |
686 | props->sm_lid = dev->sm_lid; | 1029 | props->sm_lid = dev->sm_lid; |
687 | props->sm_sl = dev->sm_sl; | 1030 | props->sm_sl = dev->sm_sl; |
688 | ibcstat = ipath_layer_get_lastibcstat(dev->dd); | 1031 | ibcstat = dev->dd->ipath_lastibcstat; |
689 | props->state = ((ibcstat >> 4) & 0x3) + 1; | 1032 | props->state = ((ibcstat >> 4) & 0x3) + 1; |
690 | /* See phys_state_show() */ | 1033 | /* See phys_state_show() */ |
691 | props->phys_state = ipath_cvt_physportstate[ | 1034 | props->phys_state = ipath_cvt_physportstate[ |
692 | ipath_layer_get_lastibcstat(dev->dd) & 0xf]; | 1035 | dev->dd->ipath_lastibcstat & 0xf]; |
693 | props->port_cap_flags = dev->port_cap_flags; | 1036 | props->port_cap_flags = dev->port_cap_flags; |
694 | props->gid_tbl_len = 1; | 1037 | props->gid_tbl_len = 1; |
695 | props->max_msg_sz = 0x80000000; | 1038 | props->max_msg_sz = 0x80000000; |
696 | props->pkey_tbl_len = ipath_layer_get_npkeys(dev->dd); | 1039 | props->pkey_tbl_len = ipath_get_npkeys(dev->dd); |
697 | props->bad_pkey_cntr = ipath_layer_get_cr_errpkey(dev->dd) - | 1040 | props->bad_pkey_cntr = ipath_get_cr_errpkey(dev->dd) - |
698 | dev->z_pkey_violations; | 1041 | dev->z_pkey_violations; |
699 | props->qkey_viol_cntr = dev->qkey_violations; | 1042 | props->qkey_viol_cntr = dev->qkey_violations; |
700 | props->active_width = IB_WIDTH_4X; | 1043 | props->active_width = IB_WIDTH_4X; |
@@ -704,7 +1047,7 @@ static int ipath_query_port(struct ib_device *ibdev, | |||
704 | props->init_type_reply = 0; | 1047 | props->init_type_reply = 0; |
705 | 1048 | ||
706 | props->max_mtu = IB_MTU_4096; | 1049 | props->max_mtu = IB_MTU_4096; |
707 | switch (ipath_layer_get_ibmtu(dev->dd)) { | 1050 | switch (dev->dd->ipath_ibmtu) { |
708 | case 4096: | 1051 | case 4096: |
709 | mtu = IB_MTU_4096; | 1052 | mtu = IB_MTU_4096; |
710 | break; | 1053 | break; |
@@ -763,7 +1106,7 @@ static int ipath_modify_port(struct ib_device *ibdev, | |||
763 | dev->port_cap_flags |= props->set_port_cap_mask; | 1106 | dev->port_cap_flags |= props->set_port_cap_mask; |
764 | dev->port_cap_flags &= ~props->clr_port_cap_mask; | 1107 | dev->port_cap_flags &= ~props->clr_port_cap_mask; |
765 | if (port_modify_mask & IB_PORT_SHUTDOWN) | 1108 | if (port_modify_mask & IB_PORT_SHUTDOWN) |
766 | ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKDOWN); | 1109 | ipath_set_linkstate(dev->dd, IPATH_IB_LINKDOWN); |
767 | if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR) | 1110 | if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR) |
768 | dev->qkey_violations = 0; | 1111 | dev->qkey_violations = 0; |
769 | return 0; | 1112 | return 0; |
@@ -780,7 +1123,7 @@ static int ipath_query_gid(struct ib_device *ibdev, u8 port, | |||
780 | goto bail; | 1123 | goto bail; |
781 | } | 1124 | } |
782 | gid->global.subnet_prefix = dev->gid_prefix; | 1125 | gid->global.subnet_prefix = dev->gid_prefix; |
783 | gid->global.interface_id = ipath_layer_get_guid(dev->dd); | 1126 | gid->global.interface_id = dev->dd->ipath_guid; |
784 | 1127 | ||
785 | ret = 0; | 1128 | ret = 0; |
786 | 1129 | ||
@@ -803,18 +1146,22 @@ static struct ib_pd *ipath_alloc_pd(struct ib_device *ibdev, | |||
803 | * we allow allocations of more than we report for this value. | 1146 | * we allow allocations of more than we report for this value. |
804 | */ | 1147 | */ |
805 | 1148 | ||
806 | if (dev->n_pds_allocated == ib_ipath_max_pds) { | 1149 | pd = kmalloc(sizeof *pd, GFP_KERNEL); |
1150 | if (!pd) { | ||
807 | ret = ERR_PTR(-ENOMEM); | 1151 | ret = ERR_PTR(-ENOMEM); |
808 | goto bail; | 1152 | goto bail; |
809 | } | 1153 | } |
810 | 1154 | ||
811 | pd = kmalloc(sizeof *pd, GFP_KERNEL); | 1155 | spin_lock(&dev->n_pds_lock); |
812 | if (!pd) { | 1156 | if (dev->n_pds_allocated == ib_ipath_max_pds) { |
1157 | spin_unlock(&dev->n_pds_lock); | ||
1158 | kfree(pd); | ||
813 | ret = ERR_PTR(-ENOMEM); | 1159 | ret = ERR_PTR(-ENOMEM); |
814 | goto bail; | 1160 | goto bail; |
815 | } | 1161 | } |
816 | 1162 | ||
817 | dev->n_pds_allocated++; | 1163 | dev->n_pds_allocated++; |
1164 | spin_unlock(&dev->n_pds_lock); | ||
818 | 1165 | ||
819 | /* ib_alloc_pd() will initialize pd->ibpd. */ | 1166 | /* ib_alloc_pd() will initialize pd->ibpd. */ |
820 | pd->user = udata != NULL; | 1167 | pd->user = udata != NULL; |
@@ -830,7 +1177,9 @@ static int ipath_dealloc_pd(struct ib_pd *ibpd) | |||
830 | struct ipath_pd *pd = to_ipd(ibpd); | 1177 | struct ipath_pd *pd = to_ipd(ibpd); |
831 | struct ipath_ibdev *dev = to_idev(ibpd->device); | 1178 | struct ipath_ibdev *dev = to_idev(ibpd->device); |
832 | 1179 | ||
1180 | spin_lock(&dev->n_pds_lock); | ||
833 | dev->n_pds_allocated--; | 1181 | dev->n_pds_allocated--; |
1182 | spin_unlock(&dev->n_pds_lock); | ||
834 | 1183 | ||
835 | kfree(pd); | 1184 | kfree(pd); |
836 | 1185 | ||
@@ -851,11 +1200,6 @@ static struct ib_ah *ipath_create_ah(struct ib_pd *pd, | |||
851 | struct ib_ah *ret; | 1200 | struct ib_ah *ret; |
852 | struct ipath_ibdev *dev = to_idev(pd->device); | 1201 | struct ipath_ibdev *dev = to_idev(pd->device); |
853 | 1202 | ||
854 | if (dev->n_ahs_allocated == ib_ipath_max_ahs) { | ||
855 | ret = ERR_PTR(-ENOMEM); | ||
856 | goto bail; | ||
857 | } | ||
858 | |||
859 | /* A multicast address requires a GRH (see ch. 8.4.1). */ | 1203 | /* A multicast address requires a GRH (see ch. 8.4.1). */ |
860 | if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE && | 1204 | if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE && |
861 | ah_attr->dlid != IPATH_PERMISSIVE_LID && | 1205 | ah_attr->dlid != IPATH_PERMISSIVE_LID && |
@@ -881,7 +1225,16 @@ static struct ib_ah *ipath_create_ah(struct ib_pd *pd, | |||
881 | goto bail; | 1225 | goto bail; |
882 | } | 1226 | } |
883 | 1227 | ||
1228 | spin_lock(&dev->n_ahs_lock); | ||
1229 | if (dev->n_ahs_allocated == ib_ipath_max_ahs) { | ||
1230 | spin_unlock(&dev->n_ahs_lock); | ||
1231 | kfree(ah); | ||
1232 | ret = ERR_PTR(-ENOMEM); | ||
1233 | goto bail; | ||
1234 | } | ||
1235 | |||
884 | dev->n_ahs_allocated++; | 1236 | dev->n_ahs_allocated++; |
1237 | spin_unlock(&dev->n_ahs_lock); | ||
885 | 1238 | ||
886 | /* ib_create_ah() will initialize ah->ibah. */ | 1239 | /* ib_create_ah() will initialize ah->ibah. */ |
887 | ah->attr = *ah_attr; | 1240 | ah->attr = *ah_attr; |
@@ -903,7 +1256,9 @@ static int ipath_destroy_ah(struct ib_ah *ibah) | |||
903 | struct ipath_ibdev *dev = to_idev(ibah->device); | 1256 | struct ipath_ibdev *dev = to_idev(ibah->device); |
904 | struct ipath_ah *ah = to_iah(ibah); | 1257 | struct ipath_ah *ah = to_iah(ibah); |
905 | 1258 | ||
1259 | spin_lock(&dev->n_ahs_lock); | ||
906 | dev->n_ahs_allocated--; | 1260 | dev->n_ahs_allocated--; |
1261 | spin_unlock(&dev->n_ahs_lock); | ||
907 | 1262 | ||
908 | kfree(ah); | 1263 | kfree(ah); |
909 | 1264 | ||
@@ -919,25 +1274,50 @@ static int ipath_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr) | |||
919 | return 0; | 1274 | return 0; |
920 | } | 1275 | } |
921 | 1276 | ||
1277 | /** | ||
1278 | * ipath_get_npkeys - return the size of the PKEY table for port 0 | ||
1279 | * @dd: the infinipath device | ||
1280 | */ | ||
1281 | unsigned ipath_get_npkeys(struct ipath_devdata *dd) | ||
1282 | { | ||
1283 | return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys); | ||
1284 | } | ||
1285 | |||
1286 | /** | ||
1287 | * ipath_get_pkey - return the indexed PKEY from the port 0 PKEY table | ||
1288 | * @dd: the infinipath device | ||
1289 | * @index: the PKEY index | ||
1290 | */ | ||
1291 | unsigned ipath_get_pkey(struct ipath_devdata *dd, unsigned index) | ||
1292 | { | ||
1293 | unsigned ret; | ||
1294 | |||
1295 | if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys)) | ||
1296 | ret = 0; | ||
1297 | else | ||
1298 | ret = dd->ipath_pd[0]->port_pkeys[index]; | ||
1299 | |||
1300 | return ret; | ||
1301 | } | ||
1302 | |||
922 | static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index, | 1303 | static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index, |
923 | u16 *pkey) | 1304 | u16 *pkey) |
924 | { | 1305 | { |
925 | struct ipath_ibdev *dev = to_idev(ibdev); | 1306 | struct ipath_ibdev *dev = to_idev(ibdev); |
926 | int ret; | 1307 | int ret; |
927 | 1308 | ||
928 | if (index >= ipath_layer_get_npkeys(dev->dd)) { | 1309 | if (index >= ipath_get_npkeys(dev->dd)) { |
929 | ret = -EINVAL; | 1310 | ret = -EINVAL; |
930 | goto bail; | 1311 | goto bail; |
931 | } | 1312 | } |
932 | 1313 | ||
933 | *pkey = ipath_layer_get_pkey(dev->dd, index); | 1314 | *pkey = ipath_get_pkey(dev->dd, index); |
934 | ret = 0; | 1315 | ret = 0; |
935 | 1316 | ||
936 | bail: | 1317 | bail: |
937 | return ret; | 1318 | return ret; |
938 | } | 1319 | } |
939 | 1320 | ||
940 | |||
941 | /** | 1321 | /** |
942 | * ipath_alloc_ucontext - allocate a ucontext | 1322 | * ipath_alloc_ucontext - allocate a ucontext |
943 | * @ibdev: the infiniband device | 1323 | * @ibdev: the infiniband device |
@@ -970,26 +1350,91 @@ static int ipath_dealloc_ucontext(struct ib_ucontext *context) | |||
970 | 1350 | ||
971 | static int ipath_verbs_register_sysfs(struct ib_device *dev); | 1351 | static int ipath_verbs_register_sysfs(struct ib_device *dev); |
972 | 1352 | ||
1353 | static void __verbs_timer(unsigned long arg) | ||
1354 | { | ||
1355 | struct ipath_devdata *dd = (struct ipath_devdata *) arg; | ||
1356 | |||
1357 | /* | ||
1358 | * If port 0 receive packet interrupts are not available, or | ||
1359 | * can be missed, poll the receive queue | ||
1360 | */ | ||
1361 | if (dd->ipath_flags & IPATH_POLL_RX_INTR) | ||
1362 | ipath_kreceive(dd); | ||
1363 | |||
1364 | /* Handle verbs layer timeouts. */ | ||
1365 | ipath_ib_timer(dd->verbs_dev); | ||
1366 | |||
1367 | mod_timer(&dd->verbs_timer, jiffies + 1); | ||
1368 | } | ||
1369 | |||
1370 | static int enable_timer(struct ipath_devdata *dd) | ||
1371 | { | ||
1372 | /* | ||
1373 | * Early chips had a design flaw where the chip and kernel idea | ||
1374 | * of the tail register don't always agree, and therefore we won't | ||
1375 | * get an interrupt on the next packet received. | ||
1376 | * If the board supports per packet receive interrupts, use it. | ||
1377 | * Otherwise, the timer function periodically checks for packets | ||
1378 | * to cover this case. | ||
1379 | * Either way, the timer is needed for verbs layer related | ||
1380 | * processing. | ||
1381 | */ | ||
1382 | if (dd->ipath_flags & IPATH_GPIO_INTR) { | ||
1383 | ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect, | ||
1384 | 0x2074076542310ULL); | ||
1385 | /* Enable GPIO bit 2 interrupt */ | ||
1386 | ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, | ||
1387 | (u64) (1 << 2)); | ||
1388 | } | ||
1389 | |||
1390 | init_timer(&dd->verbs_timer); | ||
1391 | dd->verbs_timer.function = __verbs_timer; | ||
1392 | dd->verbs_timer.data = (unsigned long)dd; | ||
1393 | dd->verbs_timer.expires = jiffies + 1; | ||
1394 | add_timer(&dd->verbs_timer); | ||
1395 | |||
1396 | return 0; | ||
1397 | } | ||
1398 | |||
1399 | static int disable_timer(struct ipath_devdata *dd) | ||
1400 | { | ||
1401 | /* Disable GPIO bit 2 interrupt */ | ||
1402 | if (dd->ipath_flags & IPATH_GPIO_INTR) | ||
1403 | ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0); | ||
1404 | |||
1405 | del_timer_sync(&dd->verbs_timer); | ||
1406 | |||
1407 | return 0; | ||
1408 | } | ||
1409 | |||
973 | /** | 1410 | /** |
974 | * ipath_register_ib_device - register our device with the infiniband core | 1411 | * ipath_register_ib_device - register our device with the infiniband core |
975 | * @unit: the device number to register | ||
976 | * @dd: the device data structure | 1412 | * @dd: the device data structure |
977 | * Return the allocated ipath_ibdev pointer or NULL on error. | 1413 | * Return the allocated ipath_ibdev pointer or NULL on error. |
978 | */ | 1414 | */ |
979 | static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd) | 1415 | int ipath_register_ib_device(struct ipath_devdata *dd) |
980 | { | 1416 | { |
981 | struct ipath_layer_counters cntrs; | 1417 | struct ipath_verbs_counters cntrs; |
982 | struct ipath_ibdev *idev; | 1418 | struct ipath_ibdev *idev; |
983 | struct ib_device *dev; | 1419 | struct ib_device *dev; |
984 | int ret; | 1420 | int ret; |
985 | 1421 | ||
986 | idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev); | 1422 | idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev); |
987 | if (idev == NULL) | 1423 | if (idev == NULL) { |
1424 | ret = -ENOMEM; | ||
988 | goto bail; | 1425 | goto bail; |
1426 | } | ||
989 | 1427 | ||
990 | dev = &idev->ibdev; | 1428 | dev = &idev->ibdev; |
991 | 1429 | ||
992 | /* Only need to initialize non-zero fields. */ | 1430 | /* Only need to initialize non-zero fields. */ |
1431 | spin_lock_init(&idev->n_pds_lock); | ||
1432 | spin_lock_init(&idev->n_ahs_lock); | ||
1433 | spin_lock_init(&idev->n_cqs_lock); | ||
1434 | spin_lock_init(&idev->n_qps_lock); | ||
1435 | spin_lock_init(&idev->n_srqs_lock); | ||
1436 | spin_lock_init(&idev->n_mcast_grps_lock); | ||
1437 | |||
993 | spin_lock_init(&idev->qp_table.lock); | 1438 | spin_lock_init(&idev->qp_table.lock); |
994 | spin_lock_init(&idev->lk_table.lock); | 1439 | spin_lock_init(&idev->lk_table.lock); |
995 | idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE); | 1440 | idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE); |
@@ -1030,7 +1475,7 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd) | |||
1030 | idev->link_width_enabled = 3; /* 1x or 4x */ | 1475 | idev->link_width_enabled = 3; /* 1x or 4x */ |
1031 | 1476 | ||
1032 | /* Snapshot current HW counters to "clear" them. */ | 1477 | /* Snapshot current HW counters to "clear" them. */ |
1033 | ipath_layer_get_counters(dd, &cntrs); | 1478 | ipath_get_counters(dd, &cntrs); |
1034 | idev->z_symbol_error_counter = cntrs.symbol_error_counter; | 1479 | idev->z_symbol_error_counter = cntrs.symbol_error_counter; |
1035 | idev->z_link_error_recovery_counter = | 1480 | idev->z_link_error_recovery_counter = |
1036 | cntrs.link_error_recovery_counter; | 1481 | cntrs.link_error_recovery_counter; |
@@ -1054,14 +1499,14 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd) | |||
1054 | * device types in the system, we can't be sure this is unique. | 1499 | * device types in the system, we can't be sure this is unique. |
1055 | */ | 1500 | */ |
1056 | if (!sys_image_guid) | 1501 | if (!sys_image_guid) |
1057 | sys_image_guid = ipath_layer_get_guid(dd); | 1502 | sys_image_guid = dd->ipath_guid; |
1058 | idev->sys_image_guid = sys_image_guid; | 1503 | idev->sys_image_guid = sys_image_guid; |
1059 | idev->ib_unit = unit; | 1504 | idev->ib_unit = dd->ipath_unit; |
1060 | idev->dd = dd; | 1505 | idev->dd = dd; |
1061 | 1506 | ||
1062 | strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX); | 1507 | strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX); |
1063 | dev->owner = THIS_MODULE; | 1508 | dev->owner = THIS_MODULE; |
1064 | dev->node_guid = ipath_layer_get_guid(dd); | 1509 | dev->node_guid = dd->ipath_guid; |
1065 | dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION; | 1510 | dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION; |
1066 | dev->uverbs_cmd_mask = | 1511 | dev->uverbs_cmd_mask = |
1067 | (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | | 1512 | (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | |
@@ -1093,9 +1538,9 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd) | |||
1093 | (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | | 1538 | (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | |
1094 | (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | | 1539 | (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | |
1095 | (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV); | 1540 | (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV); |
1096 | dev->node_type = IB_NODE_CA; | 1541 | dev->node_type = RDMA_NODE_IB_CA; |
1097 | dev->phys_port_cnt = 1; | 1542 | dev->phys_port_cnt = 1; |
1098 | dev->dma_device = ipath_layer_get_device(dd); | 1543 | dev->dma_device = &dd->pcidev->dev; |
1099 | dev->class_dev.dev = dev->dma_device; | 1544 | dev->class_dev.dev = dev->dma_device; |
1100 | dev->query_device = ipath_query_device; | 1545 | dev->query_device = ipath_query_device; |
1101 | dev->modify_device = ipath_modify_device; | 1546 | dev->modify_device = ipath_modify_device; |
@@ -1137,9 +1582,10 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd) | |||
1137 | dev->attach_mcast = ipath_multicast_attach; | 1582 | dev->attach_mcast = ipath_multicast_attach; |
1138 | dev->detach_mcast = ipath_multicast_detach; | 1583 | dev->detach_mcast = ipath_multicast_detach; |
1139 | dev->process_mad = ipath_process_mad; | 1584 | dev->process_mad = ipath_process_mad; |
1585 | dev->mmap = ipath_mmap; | ||
1140 | 1586 | ||
1141 | snprintf(dev->node_desc, sizeof(dev->node_desc), | 1587 | snprintf(dev->node_desc, sizeof(dev->node_desc), |
1142 | IPATH_IDSTR " %s kernel_SMA", system_utsname.nodename); | 1588 | IPATH_IDSTR " %s", system_utsname.nodename); |
1143 | 1589 | ||
1144 | ret = ib_register_device(dev); | 1590 | ret = ib_register_device(dev); |
1145 | if (ret) | 1591 | if (ret) |
@@ -1148,7 +1594,7 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd) | |||
1148 | if (ipath_verbs_register_sysfs(dev)) | 1594 | if (ipath_verbs_register_sysfs(dev)) |
1149 | goto err_class; | 1595 | goto err_class; |
1150 | 1596 | ||
1151 | ipath_layer_enable_timer(dd); | 1597 | enable_timer(dd); |
1152 | 1598 | ||
1153 | goto bail; | 1599 | goto bail; |
1154 | 1600 | ||
@@ -1160,37 +1606,32 @@ err_lk: | |||
1160 | kfree(idev->qp_table.table); | 1606 | kfree(idev->qp_table.table); |
1161 | err_qp: | 1607 | err_qp: |
1162 | ib_dealloc_device(dev); | 1608 | ib_dealloc_device(dev); |
1163 | _VERBS_ERROR("ib_ipath%d cannot register verbs (%d)!\n", | 1609 | ipath_dev_err(dd, "cannot register verbs: %d!\n", -ret); |
1164 | unit, -ret); | ||
1165 | idev = NULL; | 1610 | idev = NULL; |
1166 | 1611 | ||
1167 | bail: | 1612 | bail: |
1168 | return idev; | 1613 | dd->verbs_dev = idev; |
1614 | return ret; | ||
1169 | } | 1615 | } |
1170 | 1616 | ||
1171 | static void ipath_unregister_ib_device(void *arg) | 1617 | void ipath_unregister_ib_device(struct ipath_ibdev *dev) |
1172 | { | 1618 | { |
1173 | struct ipath_ibdev *dev = (struct ipath_ibdev *) arg; | ||
1174 | struct ib_device *ibdev = &dev->ibdev; | 1619 | struct ib_device *ibdev = &dev->ibdev; |
1175 | 1620 | ||
1176 | ipath_layer_disable_timer(dev->dd); | 1621 | disable_timer(dev->dd); |
1177 | 1622 | ||
1178 | ib_unregister_device(ibdev); | 1623 | ib_unregister_device(ibdev); |
1179 | 1624 | ||
1180 | if (!list_empty(&dev->pending[0]) || | 1625 | if (!list_empty(&dev->pending[0]) || |
1181 | !list_empty(&dev->pending[1]) || | 1626 | !list_empty(&dev->pending[1]) || |
1182 | !list_empty(&dev->pending[2])) | 1627 | !list_empty(&dev->pending[2])) |
1183 | _VERBS_ERROR("ipath%d pending list not empty!\n", | 1628 | ipath_dev_err(dev->dd, "pending list not empty!\n"); |
1184 | dev->ib_unit); | ||
1185 | if (!list_empty(&dev->piowait)) | 1629 | if (!list_empty(&dev->piowait)) |
1186 | _VERBS_ERROR("ipath%d piowait list not empty!\n", | 1630 | ipath_dev_err(dev->dd, "piowait list not empty!\n"); |
1187 | dev->ib_unit); | ||
1188 | if (!list_empty(&dev->rnrwait)) | 1631 | if (!list_empty(&dev->rnrwait)) |
1189 | _VERBS_ERROR("ipath%d rnrwait list not empty!\n", | 1632 | ipath_dev_err(dev->dd, "rnrwait list not empty!\n"); |
1190 | dev->ib_unit); | ||
1191 | if (!ipath_mcast_tree_empty()) | 1633 | if (!ipath_mcast_tree_empty()) |
1192 | _VERBS_ERROR("ipath%d multicast table memory leak!\n", | 1634 | ipath_dev_err(dev->dd, "multicast table memory leak!\n"); |
1193 | dev->ib_unit); | ||
1194 | /* | 1635 | /* |
1195 | * Note that ipath_unregister_ib_device() can be called before all | 1636 | * Note that ipath_unregister_ib_device() can be called before all |
1196 | * the QPs are destroyed! | 1637 | * the QPs are destroyed! |
@@ -1201,25 +1642,12 @@ static void ipath_unregister_ib_device(void *arg) | |||
1201 | ib_dealloc_device(ibdev); | 1642 | ib_dealloc_device(ibdev); |
1202 | } | 1643 | } |
1203 | 1644 | ||
1204 | static int __init ipath_verbs_init(void) | ||
1205 | { | ||
1206 | return ipath_verbs_register(ipath_register_ib_device, | ||
1207 | ipath_unregister_ib_device, | ||
1208 | ipath_ib_piobufavail, ipath_ib_rcv, | ||
1209 | ipath_ib_timer); | ||
1210 | } | ||
1211 | |||
1212 | static void __exit ipath_verbs_cleanup(void) | ||
1213 | { | ||
1214 | ipath_verbs_unregister(); | ||
1215 | } | ||
1216 | |||
1217 | static ssize_t show_rev(struct class_device *cdev, char *buf) | 1645 | static ssize_t show_rev(struct class_device *cdev, char *buf) |
1218 | { | 1646 | { |
1219 | struct ipath_ibdev *dev = | 1647 | struct ipath_ibdev *dev = |
1220 | container_of(cdev, struct ipath_ibdev, ibdev.class_dev); | 1648 | container_of(cdev, struct ipath_ibdev, ibdev.class_dev); |
1221 | 1649 | ||
1222 | return sprintf(buf, "%x\n", ipath_layer_get_pcirev(dev->dd)); | 1650 | return sprintf(buf, "%x\n", dev->dd->ipath_pcirev); |
1223 | } | 1651 | } |
1224 | 1652 | ||
1225 | static ssize_t show_hca(struct class_device *cdev, char *buf) | 1653 | static ssize_t show_hca(struct class_device *cdev, char *buf) |
@@ -1228,7 +1656,7 @@ static ssize_t show_hca(struct class_device *cdev, char *buf) | |||
1228 | container_of(cdev, struct ipath_ibdev, ibdev.class_dev); | 1656 | container_of(cdev, struct ipath_ibdev, ibdev.class_dev); |
1229 | int ret; | 1657 | int ret; |
1230 | 1658 | ||
1231 | ret = ipath_layer_get_boardname(dev->dd, buf, 128); | 1659 | ret = dev->dd->ipath_f_get_boardname(dev->dd, buf, 128); |
1232 | if (ret < 0) | 1660 | if (ret < 0) |
1233 | goto bail; | 1661 | goto bail; |
1234 | strcat(buf, "\n"); | 1662 | strcat(buf, "\n"); |
@@ -1305,6 +1733,3 @@ static int ipath_verbs_register_sysfs(struct ib_device *dev) | |||
1305 | bail: | 1733 | bail: |
1306 | return ret; | 1734 | return ret; |
1307 | } | 1735 | } |
1308 | |||
1309 | module_init(ipath_verbs_init); | ||
1310 | module_exit(ipath_verbs_cleanup); | ||
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h index 2df684727dc1..09bbb3f9a217 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.h +++ b/drivers/infiniband/hw/ipath/ipath_verbs.h | |||
@@ -38,10 +38,10 @@ | |||
38 | #include <linux/spinlock.h> | 38 | #include <linux/spinlock.h> |
39 | #include <linux/kernel.h> | 39 | #include <linux/kernel.h> |
40 | #include <linux/interrupt.h> | 40 | #include <linux/interrupt.h> |
41 | #include <linux/kref.h> | ||
41 | #include <rdma/ib_pack.h> | 42 | #include <rdma/ib_pack.h> |
42 | 43 | ||
43 | #include "ipath_layer.h" | 44 | #include "ipath_layer.h" |
44 | #include "verbs_debug.h" | ||
45 | 45 | ||
46 | #define QPN_MAX (1 << 24) | 46 | #define QPN_MAX (1 << 24) |
47 | #define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE) | 47 | #define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE) |
@@ -50,7 +50,7 @@ | |||
50 | * Increment this value if any changes that break userspace ABI | 50 | * Increment this value if any changes that break userspace ABI |
51 | * compatibility are made. | 51 | * compatibility are made. |
52 | */ | 52 | */ |
53 | #define IPATH_UVERBS_ABI_VERSION 1 | 53 | #define IPATH_UVERBS_ABI_VERSION 2 |
54 | 54 | ||
55 | /* | 55 | /* |
56 | * Define an ib_cq_notify value that is not valid so we know when CQ | 56 | * Define an ib_cq_notify value that is not valid so we know when CQ |
@@ -152,19 +152,6 @@ struct ipath_mcast { | |||
152 | int n_attached; | 152 | int n_attached; |
153 | }; | 153 | }; |
154 | 154 | ||
155 | /* Memory region */ | ||
156 | struct ipath_mr { | ||
157 | struct ib_mr ibmr; | ||
158 | struct ipath_mregion mr; /* must be last */ | ||
159 | }; | ||
160 | |||
161 | /* Fast memory region */ | ||
162 | struct ipath_fmr { | ||
163 | struct ib_fmr ibfmr; | ||
164 | u8 page_shift; | ||
165 | struct ipath_mregion mr; /* must be last */ | ||
166 | }; | ||
167 | |||
168 | /* Protection domain */ | 155 | /* Protection domain */ |
169 | struct ipath_pd { | 156 | struct ipath_pd { |
170 | struct ib_pd ibpd; | 157 | struct ib_pd ibpd; |
@@ -178,58 +165,89 @@ struct ipath_ah { | |||
178 | }; | 165 | }; |
179 | 166 | ||
180 | /* | 167 | /* |
181 | * Quick description of our CQ/QP locking scheme: | 168 | * This structure is used by ipath_mmap() to validate an offset |
182 | * | 169 | * when an mmap() request is made. The vm_area_struct then uses |
183 | * We have one global lock that protects dev->cq/qp_table. Each | 170 | * this as its vm_private_data. |
184 | * struct ipath_cq/qp also has its own lock. An individual qp lock | 171 | */ |
185 | * may be taken inside of an individual cq lock. Both cqs attached to | 172 | struct ipath_mmap_info { |
186 | * a qp may be locked, with the send cq locked first. No other | 173 | struct ipath_mmap_info *next; |
187 | * nesting should be done. | 174 | struct ib_ucontext *context; |
188 | * | 175 | void *obj; |
189 | * Each struct ipath_cq/qp also has an atomic_t ref count. The | 176 | struct kref ref; |
190 | * pointer from the cq/qp_table to the struct counts as one reference. | 177 | unsigned size; |
191 | * This reference also is good for access through the consumer API, so | 178 | unsigned mmap_cnt; |
192 | * modifying the CQ/QP etc doesn't need to take another reference. | 179 | }; |
193 | * Access because of a completion being polled does need a reference. | 180 | |
194 | * | 181 | /* |
195 | * Finally, each struct ipath_cq/qp has a wait_queue_head_t for the | 182 | * This structure is used to contain the head pointer, tail pointer, |
196 | * destroy function to sleep on. | 183 | * and completion queue entries as a single memory allocation so |
197 | * | 184 | * it can be mmap'ed into user space. |
198 | * This means that access from the consumer API requires nothing but | ||
199 | * taking the struct's lock. | ||
200 | * | ||
201 | * Access because of a completion event should go as follows: | ||
202 | * - lock cq/qp_table and look up struct | ||
203 | * - increment ref count in struct | ||
204 | * - drop cq/qp_table lock | ||
205 | * - lock struct, do your thing, and unlock struct | ||
206 | * - decrement ref count; if zero, wake up waiters | ||
207 | * | ||
208 | * To destroy a CQ/QP, we can do the following: | ||
209 | * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock | ||
210 | * - decrement ref count | ||
211 | * - wait_event until ref count is zero | ||
212 | * | ||
213 | * It is the consumer's responsibilty to make sure that no QP | ||
214 | * operations (WQE posting or state modification) are pending when the | ||
215 | * QP is destroyed. Also, the consumer must make sure that calls to | ||
216 | * qp_modify are serialized. | ||
217 | * | ||
218 | * Possible optimizations (wait for profile data to see if/where we | ||
219 | * have locks bouncing between CPUs): | ||
220 | * - split cq/qp table lock into n separate (cache-aligned) locks, | ||
221 | * indexed (say) by the page in the table | ||
222 | */ | 185 | */ |
186 | struct ipath_cq_wc { | ||
187 | u32 head; /* index of next entry to fill */ | ||
188 | u32 tail; /* index of next ib_poll_cq() entry */ | ||
189 | struct ib_wc queue[1]; /* this is actually size ibcq.cqe + 1 */ | ||
190 | }; | ||
223 | 191 | ||
192 | /* | ||
193 | * The completion queue structure. | ||
194 | */ | ||
224 | struct ipath_cq { | 195 | struct ipath_cq { |
225 | struct ib_cq ibcq; | 196 | struct ib_cq ibcq; |
226 | struct tasklet_struct comptask; | 197 | struct tasklet_struct comptask; |
227 | spinlock_t lock; | 198 | spinlock_t lock; |
228 | u8 notify; | 199 | u8 notify; |
229 | u8 triggered; | 200 | u8 triggered; |
230 | u32 head; /* new records added to the head */ | 201 | struct ipath_cq_wc *queue; |
231 | u32 tail; /* poll_cq() reads from here. */ | 202 | struct ipath_mmap_info *ip; |
232 | struct ib_wc *queue; /* this is actually ibcq.cqe + 1 */ | 203 | }; |
204 | |||
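The ipath_cq_wc layout above exists so the head index, tail index and the completion entries live in a single allocation that can be handed to ipath_mmap(). Since queue[1] really stands for ibcq.cqe + 1 entries, the allocation size works out as sketched below with stub types; the driver's actual allocation lives in ipath_cq.c, which is not part of this hunk.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel types, sized only for illustration. */
struct ib_wc_stub { uint64_t wr_id; uint32_t status, opcode, byte_len, qp_num; };

struct ipath_cq_wc_stub {
	uint32_t head;			/* index of next entry to fill */
	uint32_t tail;			/* index of next poll entry */
	struct ib_wc_stub queue[1];	/* really cqe + 1 entries */
};

int main(void)
{
	unsigned cqe = 128;	/* hypothetical CQ size requested by the consumer */

	/* queue[1] already accounts for the one extra ring slot. */
	size_t sz = sizeof(struct ipath_cq_wc_stub)
		+ sizeof(struct ib_wc_stub) * cqe;

	printf("mmap-able CQ allocation: %zu bytes\n", sz);
	return 0;
}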
205 | /* | ||
206 | * A segment is a linear region of low physical memory. | ||
207 | * XXX Maybe we should use phys addr here and kmap()/kunmap(). | ||
208 | * Used by the verbs layer. | ||
209 | */ | ||
210 | struct ipath_seg { | ||
211 | void *vaddr; | ||
212 | size_t length; | ||
213 | }; | ||
214 | |||
215 | /* The number of ipath_segs that fit in a page. */ | ||
216 | #define IPATH_SEGSZ (PAGE_SIZE / sizeof (struct ipath_seg)) | ||
217 | |||
218 | struct ipath_segarray { | ||
219 | struct ipath_seg segs[IPATH_SEGSZ]; | ||
220 | }; | ||
221 | |||
222 | struct ipath_mregion { | ||
223 | u64 user_base; /* User's address for this region */ | ||
224 | u64 iova; /* IB start address of this region */ | ||
225 | size_t length; | ||
226 | u32 lkey; | ||
227 | u32 offset; /* offset (bytes) to start of region */ | ||
228 | int access_flags; | ||
229 | u32 max_segs; /* number of ipath_segs in all the arrays */ | ||
230 | u32 mapsz; /* size of the map array */ | ||
231 | struct ipath_segarray *map[0]; /* the segments */ | ||
232 | }; | ||
233 | |||
234 | /* | ||
235 | * These keep track of the copy progress within a memory region. | ||
236 | * Used by the verbs layer. | ||
237 | */ | ||
238 | struct ipath_sge { | ||
239 | struct ipath_mregion *mr; | ||
240 | void *vaddr; /* current pointer into the segment */ | ||
241 | u32 sge_length; /* length of the SGE */ | ||
242 | u32 length; /* remaining length of the segment */ | ||
243 | u16 m; /* current index: mr->map[m] */ | ||
244 | u16 n; /* current index: mr->map[m]->segs[n] */ | ||
245 | }; | ||
246 | |||
247 | /* Memory region */ | ||
248 | struct ipath_mr { | ||
249 | struct ib_mr ibmr; | ||
250 | struct ipath_mregion mr; /* must be last */ | ||
233 | }; | 251 | }; |
234 | 252 | ||
235 | /* | 253 | /* |
@@ -248,32 +266,50 @@ struct ipath_swqe { | |||
248 | 266 | ||
249 | /* | 267 | /* |
250 | * Receive work request queue entry. | 268 | * Receive work request queue entry. |
251 | * The size of the sg_list is determined when the QP is created and stored | 269 | * The size of the sg_list is determined when the QP (or SRQ) is created |
252 | * in qp->r_max_sge. | 270 | * and stored in qp->r_rq.max_sge (or srq->rq.max_sge). |
253 | */ | 271 | */ |
254 | struct ipath_rwqe { | 272 | struct ipath_rwqe { |
255 | u64 wr_id; | 273 | u64 wr_id; |
256 | u32 length; /* total length of data in sg_list */ | ||
257 | u8 num_sge; | 274 | u8 num_sge; |
258 | struct ipath_sge sg_list[0]; | 275 | struct ib_sge sg_list[0]; |
259 | }; | 276 | }; |
260 | 277 | ||
261 | struct ipath_rq { | 278 | /* |
262 | spinlock_t lock; | 279 | * This structure is used to contain the head pointer, tail pointer, |
280 | * and receive work queue entries as a single memory allocation so | ||
281 | * it can be mmap'ed into user space. | ||
282 | * Note that the wq array elements are variable size so you can't | ||
283 | * just index into the array to get the N'th element; | ||
284 | * use get_rwqe_ptr() instead. | ||
285 | */ | ||
286 | struct ipath_rwq { | ||
263 | u32 head; /* new work requests posted to the head */ | 287 | u32 head; /* new work requests posted to the head */ |
264 | u32 tail; /* receives pull requests from here. */ | 288 | u32 tail; /* receives pull requests from here. */ |
289 | struct ipath_rwqe wq[0]; | ||
290 | }; | ||
291 | |||
292 | struct ipath_rq { | ||
293 | struct ipath_rwq *wq; | ||
294 | spinlock_t lock; | ||
265 | u32 size; /* size of RWQE array */ | 295 | u32 size; /* size of RWQE array */ |
266 | u8 max_sge; | 296 | u8 max_sge; |
267 | struct ipath_rwqe *wq; /* RWQE array */ | ||
268 | }; | 297 | }; |
269 | 298 | ||
270 | struct ipath_srq { | 299 | struct ipath_srq { |
271 | struct ib_srq ibsrq; | 300 | struct ib_srq ibsrq; |
272 | struct ipath_rq rq; | 301 | struct ipath_rq rq; |
302 | struct ipath_mmap_info *ip; | ||
273 | /* send signal when number of RWQEs < limit */ | 303 | /* send signal when number of RWQEs < limit */ |
274 | u32 limit; | 304 | u32 limit; |
275 | }; | 305 | }; |
276 | 306 | ||
307 | struct ipath_sge_state { | ||
308 | struct ipath_sge *sg_list; /* next SGE to be used if any */ | ||
309 | struct ipath_sge sge; /* progress state for the current SGE */ | ||
310 | u8 num_sge; | ||
311 | }; | ||
312 | |||
277 | /* | 313 | /* |
278 | * Variables prefixed with s_ are for the requester (sender). | 314 | * Variables prefixed with s_ are for the requester (sender). |
279 | * Variables prefixed with r_ are for the responder (receiver). | 315 | * Variables prefixed with r_ are for the responder (receiver). |
@@ -293,6 +329,7 @@ struct ipath_qp { | |||
293 | atomic_t refcount; | 329 | atomic_t refcount; |
294 | wait_queue_head_t wait; | 330 | wait_queue_head_t wait; |
295 | struct tasklet_struct s_task; | 331 | struct tasklet_struct s_task; |
332 | struct ipath_mmap_info *ip; | ||
296 | struct ipath_sge_state *s_cur_sge; | 333 | struct ipath_sge_state *s_cur_sge; |
297 | struct ipath_sge_state s_sge; /* current send request data */ | 334 | struct ipath_sge_state s_sge; /* current send request data */ |
298 | /* current RDMA read send data */ | 335 | /* current RDMA read send data */ |
@@ -334,6 +371,7 @@ struct ipath_qp { | |||
334 | u8 s_retry; /* requester retry counter */ | 371 | u8 s_retry; /* requester retry counter */ |
335 | u8 s_rnr_retry; /* requester RNR retry counter */ | 372 | u8 s_rnr_retry; /* requester RNR retry counter */ |
336 | u8 s_pkey_index; /* PKEY index to use */ | 373 | u8 s_pkey_index; /* PKEY index to use */ |
374 | u8 timeout; /* Timeout for this QP */ | ||
337 | enum ib_mtu path_mtu; | 375 | enum ib_mtu path_mtu; |
338 | u32 remote_qpn; | 376 | u32 remote_qpn; |
339 | u32 qkey; /* QKEY for this QP (for UD or RD) */ | 377 | u32 qkey; /* QKEY for this QP (for UD or RD) */ |
@@ -345,7 +383,8 @@ struct ipath_qp { | |||
345 | u32 s_ssn; /* SSN of tail entry */ | 383 | u32 s_ssn; /* SSN of tail entry */ |
346 | u32 s_lsn; /* limit sequence number (credit) */ | 384 | u32 s_lsn; /* limit sequence number (credit) */ |
347 | struct ipath_swqe *s_wq; /* send work queue */ | 385 | struct ipath_swqe *s_wq; /* send work queue */ |
348 | struct ipath_rq r_rq; /* receive work queue */ | 386 | struct ipath_rq r_rq; /* receive work queue */ |
387 | struct ipath_sge r_sg_list[0]; /* verified SGEs */ | ||
349 | }; | 388 | }; |
350 | 389 | ||
351 | /* | 390 | /* |
@@ -369,15 +408,15 @@ static inline struct ipath_swqe *get_swqe_ptr(struct ipath_qp *qp, | |||
369 | 408 | ||
370 | /* | 409 | /* |
371 | * Since struct ipath_rwqe is not a fixed size, we can't simply index into | 410 | * Since struct ipath_rwqe is not a fixed size, we can't simply index into |
372 | * struct ipath_rq.wq. This function does the array index computation. | 411 | * struct ipath_rwq.wq. This function does the array index computation. |
373 | */ | 412 | */ |
374 | static inline struct ipath_rwqe *get_rwqe_ptr(struct ipath_rq *rq, | 413 | static inline struct ipath_rwqe *get_rwqe_ptr(struct ipath_rq *rq, |
375 | unsigned n) | 414 | unsigned n) |
376 | { | 415 | { |
377 | return (struct ipath_rwqe *) | 416 | return (struct ipath_rwqe *) |
378 | ((char *) rq->wq + | 417 | ((char *) rq->wq->wq + |
379 | (sizeof(struct ipath_rwqe) + | 418 | (sizeof(struct ipath_rwqe) + |
380 | rq->max_sge * sizeof(struct ipath_sge)) * n); | 419 | rq->max_sge * sizeof(struct ib_sge)) * n); |
381 | } | 420 | } |
382 | 421 | ||
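Because struct ipath_rwqe ends in a flexible sg_list[], entries in the shared ipath_rwq cannot be indexed as a plain C array; get_rwqe_ptr() steps through the queue by a per-queue stride instead. A standalone sketch of that stride computation, using stub types and illustrative sizes only:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins mirroring the layout above: a fixed header followed by
 * max_sge scatter/gather entries per receive work request.
 */
struct ib_sge_stub { uint64_t addr; uint32_t length, lkey; };

struct ipath_rwqe_stub {
	uint64_t wr_id;
	uint8_t num_sge;
	struct ib_sge_stub sg_list[];	/* max_sge entries follow */
};

int main(void)
{
	unsigned max_sge = 4;	/* hypothetical rq->max_sge */
	unsigned n = 7;		/* entry index */

	size_t stride = sizeof(struct ipath_rwqe_stub)
		+ max_sge * sizeof(struct ib_sge_stub);

	/* get_rwqe_ptr() returns (char *)rq->wq->wq + stride * n. */
	printf("entry %u starts %zu bytes into the wq array\n", n, stride * n);
	return 0;
}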
383 | /* | 422 | /* |
@@ -417,6 +456,7 @@ struct ipath_ibdev { | |||
417 | struct ib_device ibdev; | 456 | struct ib_device ibdev; |
418 | struct list_head dev_list; | 457 | struct list_head dev_list; |
419 | struct ipath_devdata *dd; | 458 | struct ipath_devdata *dd; |
459 | struct ipath_mmap_info *pending_mmaps; | ||
420 | int ib_unit; /* This is the device number */ | 460 | int ib_unit; /* This is the device number */ |
421 | u16 sm_lid; /* in host order */ | 461 | u16 sm_lid; /* in host order */ |
422 | u8 sm_sl; | 462 | u8 sm_sl; |
@@ -435,11 +475,20 @@ struct ipath_ibdev { | |||
435 | __be64 sys_image_guid; /* in network order */ | 475 | __be64 sys_image_guid; /* in network order */ |
436 | __be64 gid_prefix; /* in network order */ | 476 | __be64 gid_prefix; /* in network order */ |
437 | __be64 mkey; | 477 | __be64 mkey; |
478 | |||
438 | u32 n_pds_allocated; /* number of PDs allocated for device */ | 479 | u32 n_pds_allocated; /* number of PDs allocated for device */ |
480 | spinlock_t n_pds_lock; | ||
439 | u32 n_ahs_allocated; /* number of AHs allocated for device */ | 481 | u32 n_ahs_allocated; /* number of AHs allocated for device */ |
482 | spinlock_t n_ahs_lock; | ||
440 | u32 n_cqs_allocated; /* number of CQs allocated for device */ | 483 | u32 n_cqs_allocated; /* number of CQs allocated for device */ |
484 | spinlock_t n_cqs_lock; | ||
485 | u32 n_qps_allocated; /* number of QPs allocated for device */ | ||
486 | spinlock_t n_qps_lock; | ||
441 | u32 n_srqs_allocated; /* number of SRQs allocated for device */ | 487 | u32 n_srqs_allocated; /* number of SRQs allocated for device */ |
488 | spinlock_t n_srqs_lock; | ||
442 | u32 n_mcast_grps_allocated; /* number of mcast groups allocated */ | 489 | u32 n_mcast_grps_allocated; /* number of mcast groups allocated */ |
490 | spinlock_t n_mcast_grps_lock; | ||
491 | |||
443 | u64 ipath_sword; /* total dwords sent (sample result) */ | 492 | u64 ipath_sword; /* total dwords sent (sample result) */ |
444 | u64 ipath_rword; /* total dwords received (sample result) */ | 493 | u64 ipath_rword; /* total dwords received (sample result) */ |
445 | u64 ipath_spkts; /* total packets sent (sample result) */ | 494 | u64 ipath_spkts; /* total packets sent (sample result) */ |
@@ -494,8 +543,19 @@ struct ipath_ibdev { | |||
494 | struct ipath_opcode_stats opstats[128]; | 543 | struct ipath_opcode_stats opstats[128]; |
495 | }; | 544 | }; |
496 | 545 | ||
497 | struct ipath_ucontext { | 546 | struct ipath_verbs_counters { |
498 | struct ib_ucontext ibucontext; | 547 | u64 symbol_error_counter; |
548 | u64 link_error_recovery_counter; | ||
549 | u64 link_downed_counter; | ||
550 | u64 port_rcv_errors; | ||
551 | u64 port_rcv_remphys_errors; | ||
552 | u64 port_xmit_discards; | ||
553 | u64 port_xmit_data; | ||
554 | u64 port_rcv_data; | ||
555 | u64 port_xmit_packets; | ||
556 | u64 port_rcv_packets; | ||
557 | u32 local_link_integrity_errors; | ||
558 | u32 excessive_buffer_overrun_errors; | ||
499 | }; | 559 | }; |
500 | 560 | ||
501 | static inline struct ipath_mr *to_imr(struct ib_mr *ibmr) | 561 | static inline struct ipath_mr *to_imr(struct ib_mr *ibmr) |
@@ -503,11 +563,6 @@ static inline struct ipath_mr *to_imr(struct ib_mr *ibmr) | |||
503 | return container_of(ibmr, struct ipath_mr, ibmr); | 563 | return container_of(ibmr, struct ipath_mr, ibmr); |
504 | } | 564 | } |
505 | 565 | ||
506 | static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr) | ||
507 | { | ||
508 | return container_of(ibfmr, struct ipath_fmr, ibfmr); | ||
509 | } | ||
510 | |||
511 | static inline struct ipath_pd *to_ipd(struct ib_pd *ibpd) | 566 | static inline struct ipath_pd *to_ipd(struct ib_pd *ibpd) |
512 | { | 567 | { |
513 | return container_of(ibpd, struct ipath_pd, ibpd); | 568 | return container_of(ibpd, struct ipath_pd, ibpd); |
@@ -545,12 +600,6 @@ int ipath_process_mad(struct ib_device *ibdev, | |||
545 | struct ib_grh *in_grh, | 600 | struct ib_grh *in_grh, |
546 | struct ib_mad *in_mad, struct ib_mad *out_mad); | 601 | struct ib_mad *in_mad, struct ib_mad *out_mad); |
547 | 602 | ||
548 | static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext | ||
549 | *ibucontext) | ||
550 | { | ||
551 | return container_of(ibucontext, struct ipath_ucontext, ibucontext); | ||
552 | } | ||
553 | |||
554 | /* | 603 | /* |
555 | * Compare the lower 24 bits of the two values. | 604 | * Compare the lower 24 bits of the two values. |
556 | * Returns an integer <, ==, or > than zero. | 605 | * Returns an integer <, ==, or > than zero. |
@@ -562,6 +611,13 @@ static inline int ipath_cmp24(u32 a, u32 b) | |||
562 | 611 | ||
563 | struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid); | 612 | struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid); |
564 | 613 | ||
614 | int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords, | ||
615 | u64 *rwords, u64 *spkts, u64 *rpkts, | ||
616 | u64 *xmit_wait); | ||
617 | |||
618 | int ipath_get_counters(struct ipath_devdata *dd, | ||
619 | struct ipath_verbs_counters *cntrs); | ||
620 | |||
565 | int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); | 621 | int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); |
566 | 622 | ||
567 | int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); | 623 | int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); |
@@ -579,7 +635,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, | |||
579 | int ipath_destroy_qp(struct ib_qp *ibqp); | 635 | int ipath_destroy_qp(struct ib_qp *ibqp); |
580 | 636 | ||
581 | int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | 637 | int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, |
582 | int attr_mask); | 638 | int attr_mask, struct ib_udata *udata); |
583 | 639 | ||
584 | int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | 640 | int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, |
585 | int attr_mask, struct ib_qp_init_attr *init_attr); | 641 | int attr_mask, struct ib_qp_init_attr *init_attr); |
@@ -592,6 +648,9 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc); | |||
592 | 648 | ||
593 | void ipath_get_credit(struct ipath_qp *qp, u32 aeth); | 649 | void ipath_get_credit(struct ipath_qp *qp, u32 aeth); |
594 | 650 | ||
651 | int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords, | ||
652 | u32 *hdr, u32 len, struct ipath_sge_state *ss); | ||
653 | |||
595 | void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig); | 654 | void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig); |
596 | 655 | ||
597 | int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss, | 656 | int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss, |
@@ -638,7 +697,8 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd, | |||
638 | struct ib_udata *udata); | 697 | struct ib_udata *udata); |
639 | 698 | ||
640 | int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, | 699 | int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, |
641 | enum ib_srq_attr_mask attr_mask); | 700 | enum ib_srq_attr_mask attr_mask, |
701 | struct ib_udata *udata); | ||
642 | 702 | ||
643 | int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr); | 703 | int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr); |
644 | 704 | ||
@@ -680,6 +740,10 @@ int ipath_unmap_fmr(struct list_head *fmr_list); | |||
680 | 740 | ||
681 | int ipath_dealloc_fmr(struct ib_fmr *ibfmr); | 741 | int ipath_dealloc_fmr(struct ib_fmr *ibfmr); |
682 | 742 | ||
743 | void ipath_release_mmap_info(struct kref *ref); | ||
744 | |||
745 | int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); | ||
746 | |||
683 | void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev); | 747 | void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev); |
684 | 748 | ||
685 | void ipath_insert_rnr_queue(struct ipath_qp *qp); | 749 | void ipath_insert_rnr_queue(struct ipath_qp *qp); |
@@ -700,6 +764,22 @@ int ipath_make_rc_req(struct ipath_qp *qp, struct ipath_other_headers *ohdr, | |||
700 | int ipath_make_uc_req(struct ipath_qp *qp, struct ipath_other_headers *ohdr, | 764 | int ipath_make_uc_req(struct ipath_qp *qp, struct ipath_other_headers *ohdr, |
701 | u32 pmtu, u32 *bth0p, u32 *bth2p); | 765 | u32 pmtu, u32 *bth0p, u32 *bth2p); |
702 | 766 | ||
767 | int ipath_register_ib_device(struct ipath_devdata *); | ||
768 | |||
769 | void ipath_unregister_ib_device(struct ipath_ibdev *); | ||
770 | |||
771 | void ipath_ib_rcv(struct ipath_ibdev *, void *, void *, u32); | ||
772 | |||
773 | int ipath_ib_piobufavail(struct ipath_ibdev *); | ||
774 | |||
775 | void ipath_ib_timer(struct ipath_ibdev *); | ||
776 | |||
777 | unsigned ipath_get_npkeys(struct ipath_devdata *); | ||
778 | |||
779 | u32 ipath_get_cr_errpkey(struct ipath_devdata *); | ||
780 | |||
781 | unsigned ipath_get_pkey(struct ipath_devdata *, unsigned); | ||
782 | |||
703 | extern const enum ib_wc_opcode ib_ipath_wc_opcode[]; | 783 | extern const enum ib_wc_opcode ib_ipath_wc_opcode[]; |
704 | 784 | ||
705 | extern const u8 ipath_cvt_physportstate[]; | 785 | extern const u8 ipath_cvt_physportstate[]; |
@@ -714,6 +794,8 @@ extern unsigned int ib_ipath_max_cqs; | |||
714 | 794 | ||
715 | extern unsigned int ib_ipath_max_qp_wrs; | 795 | extern unsigned int ib_ipath_max_qp_wrs; |
716 | 796 | ||
797 | extern unsigned int ib_ipath_max_qps; | ||
798 | |||
717 | extern unsigned int ib_ipath_max_sges; | 799 | extern unsigned int ib_ipath_max_sges; |
718 | 800 | ||
719 | extern unsigned int ib_ipath_max_mcast_grps; | 801 | extern unsigned int ib_ipath_max_mcast_grps; |
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c b/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c index ee0e1d96d723..085e28b939ec 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c +++ b/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c | |||
@@ -207,12 +207,17 @@ static int ipath_mcast_add(struct ipath_ibdev *dev, | |||
207 | goto bail; | 207 | goto bail; |
208 | } | 208 | } |
209 | 209 | ||
210 | spin_lock(&dev->n_mcast_grps_lock); | ||
210 | if (dev->n_mcast_grps_allocated == ib_ipath_max_mcast_grps) { | 211 | if (dev->n_mcast_grps_allocated == ib_ipath_max_mcast_grps) { |
212 | spin_unlock(&dev->n_mcast_grps_lock); | ||
211 | ret = ENOMEM; | 213 | ret = ENOMEM; |
212 | goto bail; | 214 | goto bail; |
213 | } | 215 | } |
214 | 216 | ||
215 | dev->n_mcast_grps_allocated++; | 217 | dev->n_mcast_grps_allocated++; |
218 | spin_unlock(&dev->n_mcast_grps_lock); | ||
219 | |||
220 | mcast->n_attached++; | ||
216 | 221 | ||
217 | list_add_tail_rcu(&mqp->list, &mcast->qp_list); | 222 | list_add_tail_rcu(&mqp->list, &mcast->qp_list); |
218 | 223 | ||
@@ -343,7 +348,9 @@ int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) | |||
343 | atomic_dec(&mcast->refcount); | 348 | atomic_dec(&mcast->refcount); |
344 | wait_event(mcast->wait, !atomic_read(&mcast->refcount)); | 349 | wait_event(mcast->wait, !atomic_read(&mcast->refcount)); |
345 | ipath_mcast_free(mcast); | 350 | ipath_mcast_free(mcast); |
351 | spin_lock(&dev->n_mcast_grps_lock); | ||
346 | dev->n_mcast_grps_allocated--; | 352 | dev->n_mcast_grps_allocated--; |
353 | spin_unlock(&dev->n_mcast_grps_lock); | ||
347 | } | 354 | } |
348 | 355 | ||
349 | ret = 0; | 356 | ret = 0; |
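
The hunk above closes a race on the multicast group counter: the limit check and the increment (and the later decrement) now happen under n_mcast_grps_lock, so two concurrent attaches can no longer both pass the check and push the count past ib_ipath_max_mcast_grps. As a hedged illustration of the same check-then-increment-under-lock pattern, here is a self-contained userspace analogue; a pthread mutex stands in for the kernel spinlock, and the names max_grps and grp_counter are invented for illustration only, not the driver's:

/*
 * Userspace sketch of the locking pattern introduced above: the limit
 * check and the counter update must sit under one lock.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t grp_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int grp_counter;
static const unsigned int max_grps = 16384;

static int grp_counter_inc(void)
{
	int ret = 0;

	pthread_mutex_lock(&grp_lock);
	if (grp_counter == max_grps)
		ret = -ENOMEM;	/* limit reached, reject the attach */
	else
		grp_counter++;
	pthread_mutex_unlock(&grp_lock);

	return ret;
}

static void grp_counter_dec(void)
{
	pthread_mutex_lock(&grp_lock);
	grp_counter--;
	pthread_mutex_unlock(&grp_lock);
}

int main(void)
{
	if (grp_counter_inc() == 0) {
		printf("attached, %u group(s) allocated\n", grp_counter);
		grp_counter_dec();
	}
	return 0;
}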
diff --git a/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c b/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c new file mode 100644 index 000000000000..036fde662aa9 --- /dev/null +++ b/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c | |||
@@ -0,0 +1,52 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006 QLogic, Inc. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | /* | ||
34 | * This file is conditionally built on PowerPC only. Otherwise weak symbol | ||
35 | * versions of the functions exported from here are used. | ||
36 | */ | ||
37 | |||
38 | #include "ipath_kernel.h" | ||
39 | |||
40 | /** | ||
41 | * ipath_unordered_wc - indicate whether write combining is ordered | ||
42 | * | ||
43 | * PowerPC systems (at least those in the 970 processor family) | ||
44 | * write partially filled store buffers in address order, but will write | ||
45 | * completely filled store buffers in "random" order, and therefore must | ||
46 | * have serialization for correctness with current InfiniPath chips. | ||
47 | * | ||
48 | */ | ||
49 | int ipath_unordered_wc(void) | ||
50 | { | ||
51 | return 1; | ||
52 | } | ||
diff --git a/drivers/infiniband/hw/ipath/verbs_debug.h b/drivers/infiniband/hw/ipath/verbs_debug.h deleted file mode 100644 index 6186676f2a16..000000000000 --- a/drivers/infiniband/hw/ipath/verbs_debug.h +++ /dev/null | |||
@@ -1,108 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006 QLogic, Inc. All rights reserved. | ||
3 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | |||
34 | #ifndef _VERBS_DEBUG_H | ||
35 | #define _VERBS_DEBUG_H | ||
36 | |||
37 | /* | ||
38 | * This file contains tracing code for the ib_ipath kernel module. | ||
39 | */ | ||
40 | #ifndef _VERBS_DEBUGGING /* tracing enabled or not */ | ||
41 | #define _VERBS_DEBUGGING 1 | ||
42 | #endif | ||
43 | |||
44 | extern unsigned ib_ipath_debug; | ||
45 | |||
46 | #define _VERBS_ERROR(fmt,...) \ | ||
47 | do { \ | ||
48 | printk(KERN_ERR "%s: " fmt, "ib_ipath", ##__VA_ARGS__); \ | ||
49 | } while(0) | ||
50 | |||
51 | #define _VERBS_UNIT_ERROR(unit,fmt,...) \ | ||
52 | do { \ | ||
53 | printk(KERN_ERR "%s: " fmt, "ib_ipath", ##__VA_ARGS__); \ | ||
54 | } while(0) | ||
55 | |||
56 | #if _VERBS_DEBUGGING | ||
57 | |||
58 | /* | ||
59 | * Mask values for debugging. The scheme allows us to compile out any | ||
60 | * of the debug tracing stuff, and if compiled in, to enable or | ||
61 | * disable dynamically. | ||
62 | * This can be set at modprobe time also: | ||
63 | * modprobe ib_ipath ib_ipath_debug=3 | ||
64 | */ | ||
65 | |||
66 | #define __VERBS_INFO 0x1 /* generic low verbosity stuff */ | ||
67 | #define __VERBS_DBG 0x2 /* generic debug */ | ||
68 | #define __VERBS_VDBG 0x4 /* verbose debug */ | ||
69 | #define __VERBS_SMADBG 0x8000 /* sma packet debug */ | ||
70 | |||
71 | #define _VERBS_INFO(fmt,...) \ | ||
72 | do { \ | ||
73 | if (unlikely(ib_ipath_debug&__VERBS_INFO)) \ | ||
74 | printk(KERN_INFO "%s: " fmt,"ib_ipath", \ | ||
75 | ##__VA_ARGS__); \ | ||
76 | } while(0) | ||
77 | |||
78 | #define _VERBS_DBG(fmt,...) \ | ||
79 | do { \ | ||
80 | if (unlikely(ib_ipath_debug&__VERBS_DBG)) \ | ||
81 | printk(KERN_DEBUG "%s: " fmt, __func__, \ | ||
82 | ##__VA_ARGS__); \ | ||
83 | } while(0) | ||
84 | |||
85 | #define _VERBS_VDBG(fmt,...) \ | ||
86 | do { \ | ||
87 | if (unlikely(ib_ipath_debug&__VERBS_VDBG)) \ | ||
88 | printk(KERN_DEBUG "%s: " fmt, __func__, \ | ||
89 | ##__VA_ARGS__); \ | ||
90 | } while(0) | ||
91 | |||
92 | #define _VERBS_SMADBG(fmt,...) \ | ||
93 | do { \ | ||
94 | if (unlikely(ib_ipath_debug&__VERBS_SMADBG)) \ | ||
95 | printk(KERN_DEBUG "%s: " fmt, __func__, \ | ||
96 | ##__VA_ARGS__); \ | ||
97 | } while(0) | ||
98 | |||
99 | #else /* ! _VERBS_DEBUGGING */ | ||
100 | |||
101 | #define _VERBS_INFO(fmt,...) | ||
102 | #define _VERBS_DBG(fmt,...) | ||
103 | #define _VERBS_VDBG(fmt,...) | ||
104 | #define _VERBS_SMADBG(fmt,...) | ||
105 | |||
106 | #endif /* _VERBS_DEBUGGING */ | ||
107 | |||
108 | #endif /* _VERBS_DEBUG_H */ | ||
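
The deleted header implemented a mask-per-class tracing scheme: each message class gets a bit, the whole facility can be compiled out, and the mask can be tuned at module load time via ib_ipath_debug. For reference, a self-contained userspace sketch of the same pattern is shown below; the mask bits mirror the header, while the plain global and fprintf plumbing are simplifications for illustration rather than the module's actual code:

#include <stdio.h>

/* Compile-time switch, mirroring _VERBS_DEBUGGING in the deleted header. */
#ifndef VERBS_DEBUGGING
#define VERBS_DEBUGGING 1
#endif

/* One bit per message class, as in the deleted header. */
#define VERBS_INFO_BIT   0x1
#define VERBS_DBG_BIT    0x2
#define VERBS_VDBG_BIT   0x4
#define VERBS_SMADBG_BIT 0x8000

/* In the kernel module this is the ib_ipath_debug module parameter;
 * here it is just a global set for illustration. */
static unsigned verbs_debug_mask = VERBS_INFO_BIT | VERBS_DBG_BIT;

#if VERBS_DEBUGGING
#define VERBS_DBG(fmt, ...)                                         \
	do {                                                         \
		if (verbs_debug_mask & VERBS_DBG_BIT)                \
			fprintf(stderr, "%s: " fmt, __func__,        \
				##__VA_ARGS__);                      \
	} while (0)
#else
#define VERBS_DBG(fmt, ...)
#endif

int main(void)
{
	VERBS_DBG("mask is 0x%x\n", verbs_debug_mask);
	return 0;
}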