26 files changed, 9021 insertions, 0 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 830bec779d47..b08c537018de 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -298,6 +298,14 @@ L: info-linux@geode.amd.com
 W: http://www.amd.com/us-en/ConnectivitySolutions/TechnicalResources/0,,50_2334_2452_11363,00.html
 S: Supported
 
+AMSO1100 RNIC DRIVER
+P: Tom Tucker
+M: tom@opengridcomputing.com
+P: Steve Wise
+M: swise@opengridcomputing.com
+L: openib-general@openib.org
+S: Maintained
+
 AOA (Apple Onboard Audio) ALSA DRIVER
 P: Johannes Berg
 M: johannes@sipsolutions.net
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 9a329b2c108c..9edfacee7d84 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -37,6 +37,7 @@ config INFINIBAND_ADDR_TRANS
 source "drivers/infiniband/hw/mthca/Kconfig"
 source "drivers/infiniband/hw/ipath/Kconfig"
 source "drivers/infiniband/hw/ehca/Kconfig"
+source "drivers/infiniband/hw/amso1100/Kconfig"
 
 source "drivers/infiniband/ulp/ipoib/Kconfig"
 
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index 08cff32d900e..2b5d1098ef45 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_INFINIBAND) += core/
 obj-$(CONFIG_INFINIBAND_MTHCA)    += hw/mthca/
 obj-$(CONFIG_INFINIBAND_IPATH)    += hw/ipath/
 obj-$(CONFIG_INFINIBAND_EHCA)     += hw/ehca/
+obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/
 obj-$(CONFIG_INFINIBAND_IPOIB)    += ulp/ipoib/
 obj-$(CONFIG_INFINIBAND_SRP)      += ulp/srp/
 obj-$(CONFIG_INFINIBAND_ISER)     += ulp/iser/
diff --git a/drivers/infiniband/hw/amso1100/Kbuild b/drivers/infiniband/hw/amso1100/Kbuild
new file mode 100644
index 000000000000..06964c4af849
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/Kbuild
@@ -0,0 +1,8 @@
+ifdef CONFIG_INFINIBAND_AMSO1100_DEBUG
+EXTRA_CFLAGS += -DDEBUG
+endif
+
+obj-$(CONFIG_INFINIBAND_AMSO1100) += iw_c2.o
+
+iw_c2-y := c2.o c2_provider.o c2_rnic.o c2_alloc.o c2_mq.o c2_ae.o c2_vq.o \
+	c2_intr.o c2_cq.o c2_qp.o c2_cm.o c2_mm.o c2_pd.o
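
The -DDEBUG flag wired up above is what arms the pr_debug() calls used
throughout this driver. For orientation, the stock pr_debug() definition
of that era in <linux/kernel.h> behaves like this (shown for reference,
not part of the patch):

	#ifdef DEBUG
	#define pr_debug(fmt, arg...)	printk(KERN_DEBUG fmt, ##arg)
	#else
	#define pr_debug(fmt, arg...)	do { } while (0)
	#endif

So building with CONFIG_INFINIBAND_AMSO1100_DEBUG=y compiles the messages
in; without it the calls expand to nothing.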
diff --git a/drivers/infiniband/hw/amso1100/Kconfig b/drivers/infiniband/hw/amso1100/Kconfig
new file mode 100644
index 000000000000..809cb14ac6de
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/Kconfig
@@ -0,0 +1,15 @@
+config INFINIBAND_AMSO1100
+	tristate "Ammasso 1100 HCA support"
+	depends on PCI && INET && INFINIBAND
+	---help---
+	  This is a low-level driver for the Ammasso 1100 host
+	  channel adapter (HCA).
+
+config INFINIBAND_AMSO1100_DEBUG
+	bool "Verbose debugging output"
+	depends on INFINIBAND_AMSO1100
+	default n
+	---help---
+	  This option causes the amso1100 driver to produce a bunch of
+	  debug messages.  Select this if you are developing the driver
+	  or trying to diagnose a problem.
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
new file mode 100644
index 000000000000..9e9120f36019
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -0,0 +1,1255 @@
+/*
+ * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/crc32.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+
+#include <rdma/ib_smi.h>
+#include "c2.h"
+#include "c2_provider.h"
+
+MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
+MODULE_DESCRIPTION("Ammasso AMSO1100 Low-level iWARP Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
+	| NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
+
+static int debug = -1;	/* defaults above */
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
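
The debug parameter above is consumed by netif_msg_init() in c2_devinit()
below. For reference, the stock <linux/netdevice.h> helper behaves like
this (not part of the patch):

	static inline u32 netif_msg_init(int debug_value,
					 int default_msg_enable_bits)
	{
		/* use default */
		if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
			return default_msg_enable_bits;
		if (debug_value == 0)	/* no output */
			return 0;
		/* set low N bits */
		return (1 << debug_value) - 1;
	}

So the default of -1 keeps the driver's default_msg bits, while e.g.
"modprobe iw_c2 debug=16" enables the first sixteen NETIF_MSG_* classes.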
+
+static int c2_up(struct net_device *netdev);
+static int c2_down(struct net_device *netdev);
+static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+static void c2_tx_interrupt(struct net_device *netdev);
+static void c2_rx_interrupt(struct net_device *netdev);
+static irqreturn_t c2_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void c2_tx_timeout(struct net_device *netdev);
+static int c2_change_mtu(struct net_device *netdev, int new_mtu);
+static void c2_reset(struct c2_port *c2_port);
+static struct net_device_stats *c2_get_stats(struct net_device *netdev);
+
+static struct pci_device_id c2_pci_table[] = {
+	{ PCI_DEVICE(0x18b8, 0xb001) },
+	{ 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, c2_pci_table);
+
+static void c2_print_macaddr(struct net_device *netdev)
+{
+	pr_debug("%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, "
+		 "IRQ %u\n", netdev->name,
+		 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
+		 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5],
+		 netdev->irq);
+}
+
+static void c2_set_rxbufsize(struct c2_port *c2_port)
+{
+	struct net_device *netdev = c2_port->netdev;
+
+	if (netdev->mtu > RX_BUF_SIZE)
+		c2_port->rx_buf_size =
+		    netdev->mtu + ETH_HLEN + sizeof(struct c2_rxp_hdr) +
+		    NET_IP_ALIGN;
+	else
+		c2_port->rx_buf_size = sizeof(struct c2_rxp_hdr) + RX_BUF_SIZE;
+}
+
+/*
+ * Allocate TX ring elements and chain them together.
+ * One-to-one association of adapter descriptors with ring elements.
+ */
+static int c2_tx_ring_alloc(struct c2_ring *tx_ring, void *vaddr,
+			    dma_addr_t base, void __iomem * mmio_txp_ring)
+{
+	struct c2_tx_desc *tx_desc;
+	struct c2_txp_desc __iomem *txp_desc;
+	struct c2_element *elem;
+	int i;
+
+	tx_ring->start = kmalloc(sizeof(*elem) * tx_ring->count, GFP_KERNEL);
+	if (!tx_ring->start)
+		return -ENOMEM;
+
+	elem = tx_ring->start;
+	tx_desc = vaddr;
+	txp_desc = mmio_txp_ring;
+	for (i = 0; i < tx_ring->count; i++, elem++, tx_desc++, txp_desc++) {
+		tx_desc->len = 0;
+		tx_desc->status = 0;
+
+		/* Set TXP_HTXD_UNINIT */
+		__raw_writeq(cpu_to_be64(0x1122334455667788ULL),
+			     (void __iomem *) txp_desc + C2_TXP_ADDR);
+		__raw_writew(0, (void __iomem *) txp_desc + C2_TXP_LEN);
+		__raw_writew(cpu_to_be16(TXP_HTXD_UNINIT),
+			     (void __iomem *) txp_desc + C2_TXP_FLAGS);
+
+		elem->skb = NULL;
+		elem->ht_desc = tx_desc;
+		elem->hw_desc = txp_desc;
+
+		if (i == tx_ring->count - 1) {
+			elem->next = tx_ring->start;
+			tx_desc->next_offset = base;
+		} else {
+			elem->next = elem + 1;
+			tx_desc->next_offset =
+			    base + (i + 1) * sizeof(*tx_desc);
+		}
+	}
+
+	tx_ring->to_use = tx_ring->to_clean = tx_ring->start;
+
+	return 0;
+}
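
A note on the MMIO idiom above, which recurs throughout this file: the
adapter expects its descriptor fields in big-endian byte order, so every
store pairs an explicit cpu_to_be*() with __raw_write*() (which performs
no byte swapping of its own), and reads pair readw() with be16_to_cpu().
Schematically (desc_mmio, value and FIELD_OFFSET are placeholder names):

	/* hand a 16-bit field to the big-endian adapter */
	__raw_writew(cpu_to_be16(value), desc_mmio + FIELD_OFFSET);

	/* read it back into host byte order */
	value = be16_to_cpu(readw(desc_mmio + FIELD_OFFSET));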
+
+/*
+ * Allocate RX ring elements and chain them together.
+ * One-to-one association of adapter descriptors with ring elements.
+ */
+static int c2_rx_ring_alloc(struct c2_ring *rx_ring, void *vaddr,
+			    dma_addr_t base, void __iomem * mmio_rxp_ring)
+{
+	struct c2_rx_desc *rx_desc;
+	struct c2_rxp_desc __iomem *rxp_desc;
+	struct c2_element *elem;
+	int i;
+
+	rx_ring->start = kmalloc(sizeof(*elem) * rx_ring->count, GFP_KERNEL);
+	if (!rx_ring->start)
+		return -ENOMEM;
+
+	elem = rx_ring->start;
+	rx_desc = vaddr;
+	rxp_desc = mmio_rxp_ring;
+	for (i = 0; i < rx_ring->count; i++, elem++, rx_desc++, rxp_desc++) {
+		rx_desc->len = 0;
+		rx_desc->status = 0;
+
+		/* Set RXP_HRXD_UNINIT */
+		__raw_writew(cpu_to_be16(RXP_HRXD_OK),
+			     (void __iomem *) rxp_desc + C2_RXP_STATUS);
+		__raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_COUNT);
+		__raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_LEN);
+		__raw_writeq(cpu_to_be64(0x99aabbccddeeffULL),
+			     (void __iomem *) rxp_desc + C2_RXP_ADDR);
+		__raw_writew(cpu_to_be16(RXP_HRXD_UNINIT),
+			     (void __iomem *) rxp_desc + C2_RXP_FLAGS);
+
+		elem->skb = NULL;
+		elem->ht_desc = rx_desc;
+		elem->hw_desc = rxp_desc;
+
+		if (i == rx_ring->count - 1) {
+			elem->next = rx_ring->start;
+			rx_desc->next_offset = base;
+		} else {
+			elem->next = elem + 1;
+			rx_desc->next_offset =
+			    base + (i + 1) * sizeof(*rx_desc);
+		}
+	}
+
+	rx_ring->to_use = rx_ring->to_clean = rx_ring->start;
+
+	return 0;
+}
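
Both ring allocators build the same shape: a circular, singly linked list
of host elements, each bound one-to-one to an adapter descriptor in MMIO
space, with the last element's next pointer closing the loop. The clean
and fill routines below all rely on the traversal idiom this enables; a
hypothetical helper (not in the patch) makes it explicit:

	/* Visit every element of a circular ring exactly once. */
	static void c2_ring_for_each(struct c2_ring *ring,
				     void (*fn)(struct c2_element *elem))
	{
		struct c2_element *elem = ring->start;

		do {
			fn(elem);
		} while ((elem = elem->next) != ring->start);
	}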
+
+/* Setup buffer for receiving */
+static inline int c2_rx_alloc(struct c2_port *c2_port, struct c2_element *elem)
+{
+	struct c2_dev *c2dev = c2_port->c2dev;
+	struct c2_rx_desc *rx_desc = elem->ht_desc;
+	struct sk_buff *skb;
+	dma_addr_t mapaddr;
+	u32 maplen;
+	struct c2_rxp_hdr *rxp_hdr;
+
+	skb = dev_alloc_skb(c2_port->rx_buf_size);
+	if (unlikely(!skb)) {
+		pr_debug("%s: out of memory for receive\n",
+			 c2_port->netdev->name);
+		return -ENOMEM;
+	}
+
+	/* Zero out the rxp hdr in the sk_buff */
+	memset(skb->data, 0, sizeof(*rxp_hdr));
+
+	skb->dev = c2_port->netdev;
+
+	maplen = c2_port->rx_buf_size;
+	mapaddr =
+	    pci_map_single(c2dev->pcidev, skb->data, maplen,
+			   PCI_DMA_FROMDEVICE);
+
+	/* Set the sk_buff RXP_header to RXP_HRXD_READY */
+	rxp_hdr = (struct c2_rxp_hdr *) skb->data;
+	rxp_hdr->flags = RXP_HRXD_READY;
+
+	__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
+	__raw_writew(cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)),
+		     elem->hw_desc + C2_RXP_LEN);
+	__raw_writeq(cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR);
+	__raw_writew(cpu_to_be16(RXP_HRXD_READY), elem->hw_desc + C2_RXP_FLAGS);
+
+	elem->skb = skb;
+	elem->mapaddr = mapaddr;
+	elem->maplen = maplen;
+	rx_desc->len = maplen;
+
+	return 0;
+}
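
The receive handshake set up here: the host posts a buffer whose first
eight bytes are a struct c2_rxp_hdr, marks it RXP_HRXD_READY in both the
skb and the MMIO descriptor, and the adapter flips the header to
RXP_HRXD_DONE once it has DMA'd a frame in behind it:

	/*
	 *   skb->data
	 *   |
	 *   v
	 *   +-----------------+--------------+-------------+
	 *   | c2_rxp_hdr (8B) | Ethernet hdr | payload ... |
	 *   +-----------------+--------------+-------------+
	 *
	 * rxp_hdr->len counts only the frame, not the 8-byte header,
	 * which is why c2_rx_interrupt() below advances skb->data past
	 * the header before calling netif_rx().
	 */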
+
+/*
+ * Allocate buffers for the Rx ring
+ * For receive:  rx_ring.to_clean is next received frame
+ */
+static int c2_rx_fill(struct c2_port *c2_port)
+{
+	struct c2_ring *rx_ring = &c2_port->rx_ring;
+	struct c2_element *elem;
+	int ret = 0;
+
+	elem = rx_ring->start;
+	do {
+		if (c2_rx_alloc(c2_port, elem)) {
+			ret = 1;
+			break;
+		}
+	} while ((elem = elem->next) != rx_ring->start);
+
+	rx_ring->to_clean = rx_ring->start;
+	return ret;
+}
+
+/* Free all buffers in RX ring, assumes receiver stopped */
+static void c2_rx_clean(struct c2_port *c2_port)
+{
+	struct c2_dev *c2dev = c2_port->c2dev;
+	struct c2_ring *rx_ring = &c2_port->rx_ring;
+	struct c2_element *elem;
+	struct c2_rx_desc *rx_desc;
+
+	elem = rx_ring->start;
+	do {
+		rx_desc = elem->ht_desc;
+		rx_desc->len = 0;
+
+		__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
+		__raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
+		__raw_writew(0, elem->hw_desc + C2_RXP_LEN);
+		__raw_writeq(cpu_to_be64(0x99aabbccddeeffULL),
+			     elem->hw_desc + C2_RXP_ADDR);
+		__raw_writew(cpu_to_be16(RXP_HRXD_UNINIT),
+			     elem->hw_desc + C2_RXP_FLAGS);
+
+		if (elem->skb) {
+			pci_unmap_single(c2dev->pcidev, elem->mapaddr,
+					 elem->maplen, PCI_DMA_FROMDEVICE);
+			dev_kfree_skb(elem->skb);
+			elem->skb = NULL;
+		}
+	} while ((elem = elem->next) != rx_ring->start);
+}
+
+static inline int c2_tx_free(struct c2_dev *c2dev, struct c2_element *elem)
+{
+	struct c2_tx_desc *tx_desc = elem->ht_desc;
+
+	tx_desc->len = 0;
+
+	pci_unmap_single(c2dev->pcidev, elem->mapaddr, elem->maplen,
+			 PCI_DMA_TODEVICE);
+
+	if (elem->skb) {
+		dev_kfree_skb_any(elem->skb);
+		elem->skb = NULL;
+	}
+
+	return 0;
+}
+
+/* Free all buffers in TX ring, assumes transmitter stopped */
+static void c2_tx_clean(struct c2_port *c2_port)
+{
+	struct c2_ring *tx_ring = &c2_port->tx_ring;
+	struct c2_element *elem;
+	struct c2_txp_desc txp_htxd;
+	int retry;
+	unsigned long flags;
+
+	spin_lock_irqsave(&c2_port->tx_lock, flags);
+
+	elem = tx_ring->start;
+
+	do {
+		retry = 0;
+		do {
+			txp_htxd.flags =
+			    readw(elem->hw_desc + C2_TXP_FLAGS);
+
+			if (txp_htxd.flags == TXP_HTXD_READY) {
+				retry = 1;
+				__raw_writew(0,
+					     elem->hw_desc + C2_TXP_LEN);
+				__raw_writeq(0,
+					     elem->hw_desc + C2_TXP_ADDR);
+				__raw_writew(cpu_to_be16(TXP_HTXD_DONE),
+					     elem->hw_desc + C2_TXP_FLAGS);
+				c2_port->netstats.tx_dropped++;
+				break;
+			} else {
+				__raw_writew(0,
+					     elem->hw_desc + C2_TXP_LEN);
+				__raw_writeq(cpu_to_be64(0x1122334455667788ULL),
+					     elem->hw_desc + C2_TXP_ADDR);
+				__raw_writew(cpu_to_be16(TXP_HTXD_UNINIT),
+					     elem->hw_desc + C2_TXP_FLAGS);
+			}
+
+			c2_tx_free(c2_port->c2dev, elem);
+
+		} while ((elem = elem->next) != tx_ring->start);
+	} while (retry);
+
+	c2_port->tx_avail = c2_port->tx_ring.count - 1;
+	c2_port->c2dev->cur_tx = tx_ring->to_use - tx_ring->start;
+
+	if (c2_port->tx_avail > MAX_SKB_FRAGS + 1)
+		netif_wake_queue(c2_port->netdev);
+
+	spin_unlock_irqrestore(&c2_port->tx_lock, flags);
+}
+
+/*
+ * Process transmit descriptors marked 'DONE' by the firmware,
+ * freeing up their unneeded sk_buffs.
+ */
+static void c2_tx_interrupt(struct net_device *netdev)
+{
+	struct c2_port *c2_port = netdev_priv(netdev);
+	struct c2_dev *c2dev = c2_port->c2dev;
+	struct c2_ring *tx_ring = &c2_port->tx_ring;
+	struct c2_element *elem;
+	struct c2_txp_desc txp_htxd;
+
+	spin_lock(&c2_port->tx_lock);
+
+	for (elem = tx_ring->to_clean; elem != tx_ring->to_use;
+	     elem = elem->next) {
+		txp_htxd.flags =
+		    be16_to_cpu(readw(elem->hw_desc + C2_TXP_FLAGS));
+
+		if (txp_htxd.flags != TXP_HTXD_DONE)
+			break;
+
+		if (netif_msg_tx_done(c2_port)) {
+			/* PCI reads are expensive in fast path */
+			txp_htxd.len =
+			    be16_to_cpu(readw(elem->hw_desc + C2_TXP_LEN));
+			pr_debug("%s: tx done slot %3Zu status 0x%x len "
+				 "%5u bytes\n",
+				 netdev->name, elem - tx_ring->start,
+				 txp_htxd.flags, txp_htxd.len);
+		}
+
+		c2_tx_free(c2dev, elem);
+		++(c2_port->tx_avail);
+	}
+
+	tx_ring->to_clean = elem;
+
+	if (netif_queue_stopped(netdev)
+	    && c2_port->tx_avail > MAX_SKB_FRAGS + 1)
+		netif_wake_queue(netdev);
+
+	spin_unlock(&c2_port->tx_lock);
+}
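
The MAX_SKB_FRAGS + 1 threshold in the wake-queue checks mirrors the
consumption side in c2_xmit_frame() below: a single skb can occupy up to
one descriptor for its linear head plus one per page fragment. Giving the
bound a (hypothetical) name:

	/* Worst-case descriptors consumed by one transmitted skb. */
	#define C2_TX_DESC_PER_SKB	(MAX_SKB_FRAGS + 1)

The queue is stopped when tx_avail can no longer fit a maximal skb, and
only woken once at least that many slots have been reclaimed.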
+
+static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem)
+{
+	struct c2_rx_desc *rx_desc = elem->ht_desc;
+	struct c2_rxp_hdr *rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
+
+	if (rxp_hdr->status != RXP_HRXD_OK ||
+	    rxp_hdr->len > (rx_desc->len - sizeof(*rxp_hdr))) {
+		pr_debug("BAD RXP_HRXD\n");
+		pr_debug("  rx_desc : %p\n", rx_desc);
+		pr_debug("    index : %Zu\n",
+			 elem - c2_port->rx_ring.start);
+		pr_debug("    len   : %u\n", rx_desc->len);
+		pr_debug("  rxp_hdr : %p [PA %p]\n", rxp_hdr,
+			 (void *) __pa((unsigned long) rxp_hdr));
+		pr_debug("    flags : 0x%x\n", rxp_hdr->flags);
+		pr_debug("    status: 0x%x\n", rxp_hdr->status);
+		pr_debug("    len   : %u\n", rxp_hdr->len);
+		pr_debug("    rsvd  : 0x%x\n", rxp_hdr->rsvd);
+	}
+
+	/* Setup the skb for reuse since we're dropping this pkt */
+	elem->skb->tail = elem->skb->data = elem->skb->head;
+
+	/* Zero out the rxp hdr in the sk_buff */
+	memset(elem->skb->data, 0, sizeof(*rxp_hdr));
+
+	/* Write the descriptor to the adapter's rx ring */
+	__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
+	__raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
+	__raw_writew(cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)),
+		     elem->hw_desc + C2_RXP_LEN);
+	__raw_writeq(cpu_to_be64(elem->mapaddr), elem->hw_desc + C2_RXP_ADDR);
+	__raw_writew(cpu_to_be16(RXP_HRXD_READY), elem->hw_desc + C2_RXP_FLAGS);
+
+	pr_debug("packet dropped\n");
+	c2_port->netstats.rx_dropped++;
+}
+
+static void c2_rx_interrupt(struct net_device *netdev)
+{
+	struct c2_port *c2_port = netdev_priv(netdev);
+	struct c2_dev *c2dev = c2_port->c2dev;
+	struct c2_ring *rx_ring = &c2_port->rx_ring;
+	struct c2_element *elem;
+	struct c2_rx_desc *rx_desc;
+	struct c2_rxp_hdr *rxp_hdr;
+	struct sk_buff *skb;
+	dma_addr_t mapaddr;
+	u32 maplen, buflen;
+	unsigned long flags;
+
+	spin_lock_irqsave(&c2dev->lock, flags);
+
+	/* Begin where we left off */
+	rx_ring->to_clean = rx_ring->start + c2dev->cur_rx;
+
+	for (elem = rx_ring->to_clean; elem->next != rx_ring->to_clean;
+	     elem = elem->next) {
+		rx_desc = elem->ht_desc;
+		mapaddr = elem->mapaddr;
+		maplen = elem->maplen;
+		skb = elem->skb;
+		rxp_hdr = (struct c2_rxp_hdr *) skb->data;
+
+		if (rxp_hdr->flags != RXP_HRXD_DONE)
+			break;
+		buflen = rxp_hdr->len;
+
+		/* Sanity check the RXP header */
+		if (rxp_hdr->status != RXP_HRXD_OK ||
+		    buflen > (rx_desc->len - sizeof(*rxp_hdr))) {
+			c2_rx_error(c2_port, elem);
+			continue;
+		}
+
+		/*
+		 * Allocate and map a new skb for replenishing the host
+		 * RX desc
+		 */
+		if (c2_rx_alloc(c2_port, elem)) {
+			c2_rx_error(c2_port, elem);
+			continue;
+		}
+
+		/* Unmap the old skb */
+		pci_unmap_single(c2dev->pcidev, mapaddr, maplen,
+				 PCI_DMA_FROMDEVICE);
+
+		prefetch(skb->data);
+
+		/*
+		 * Skip past the leading 8 bytes of the "struct c2_rxp_hdr"
+		 * that the adapter prepends to the usual Ethernet header
+		 * ("struct ethhdr"), to get to the start of the raw
+		 * Ethernet packet.
+		 *
+		 * Fix up the various fields in the sk_buff before
+		 * passing it up to netif_rx().  The transfer size
+		 * (in bytes) specified by the adapter len field of
+		 * the "struct rxp_hdr_t" does NOT include the
+		 * "sizeof(struct c2_rxp_hdr)".
+		 */
+		skb->data += sizeof(*rxp_hdr);
+		skb->tail = skb->data + buflen;
+		skb->len = buflen;
+		skb->dev = netdev;
+		skb->protocol = eth_type_trans(skb, netdev);
+
+		netif_rx(skb);
+
+		netdev->last_rx = jiffies;
+		c2_port->netstats.rx_packets++;
+		c2_port->netstats.rx_bytes += buflen;
+	}
+
+	/* Save where we left off */
+	rx_ring->to_clean = elem;
+	c2dev->cur_rx = elem - rx_ring->start;
+	C2_SET_CUR_RX(c2dev, c2dev->cur_rx);
+
+	spin_unlock_irqrestore(&c2dev->lock, flags);
+}
+
+/*
+ * Handle netisr0 TX & RX interrupts.
+ */
+static irqreturn_t c2_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+	unsigned int netisr0, dmaisr;
+	int handled = 0;
+	struct c2_dev *c2dev = (struct c2_dev *) dev_id;
+
+	/* Process CCILNET interrupts */
+	netisr0 = readl(c2dev->regs + C2_NISR0);
+	if (netisr0) {
+
+		/*
+		 * There is an issue with the firmware that always
+		 * provides the status of RX for both TX & RX
+		 * interrupts.  So process both queues here.
+		 */
+		c2_rx_interrupt(c2dev->netdev);
+		c2_tx_interrupt(c2dev->netdev);
+
+		/* Clear the interrupt */
+		writel(netisr0, c2dev->regs + C2_NISR0);
+		handled++;
+	}
+
+	/* Process RNIC interrupts */
+	dmaisr = readl(c2dev->regs + C2_DISR);
+	if (dmaisr) {
+		writel(dmaisr, c2dev->regs + C2_DISR);
+		c2_rnic_interrupt(c2dev);
+		handled++;
+	}
+
+	if (handled) {
+		return IRQ_HANDLED;
+	} else {
+		return IRQ_NONE;
+	}
+}
+
+static int c2_up(struct net_device *netdev)
+{
+	struct c2_port *c2_port = netdev_priv(netdev);
+	struct c2_dev *c2dev = c2_port->c2dev;
+	struct c2_element *elem;
+	struct c2_rxp_hdr *rxp_hdr;
+	struct in_device *in_dev;
+	size_t rx_size, tx_size;
+	int ret, i;
+	unsigned int netimr0;
+
+	if (netif_msg_ifup(c2_port))
+		pr_debug("%s: enabling interface\n", netdev->name);
+
+	/* Set the Rx buffer size based on MTU */
+	c2_set_rxbufsize(c2_port);
+
+	/* Allocate DMA'able memory for Tx/Rx host descriptor rings */
+	rx_size = c2_port->rx_ring.count * sizeof(struct c2_rx_desc);
+	tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc);
+
+	c2_port->mem_size = tx_size + rx_size;
+	c2_port->mem = pci_alloc_consistent(c2dev->pcidev, c2_port->mem_size,
+					    &c2_port->dma);
+	if (c2_port->mem == NULL) {
+		pr_debug("Unable to allocate memory for "
+			 "host descriptor rings\n");
+		return -ENOMEM;
+	}
+
+	memset(c2_port->mem, 0, c2_port->mem_size);
+
+	/* Create the Rx host descriptor ring */
+	if ((ret =
+	     c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma,
+			      c2dev->mmio_rxp_ring))) {
+		pr_debug("Unable to create RX ring\n");
+		goto bail0;
+	}
+
+	/* Allocate Rx buffers for the host descriptor ring */
+	if (c2_rx_fill(c2_port)) {
+		pr_debug("Unable to fill RX ring\n");
+		goto bail1;
+	}
+
+	/* Create the Tx host descriptor ring */
+	if ((ret = c2_tx_ring_alloc(&c2_port->tx_ring, c2_port->mem + rx_size,
+				    c2_port->dma + rx_size,
+				    c2dev->mmio_txp_ring))) {
+		pr_debug("Unable to create TX ring\n");
+		goto bail1;
+	}
+
+	/* Set the TX pointer to where we left off */
+	c2_port->tx_avail = c2_port->tx_ring.count - 1;
+	c2_port->tx_ring.to_use = c2_port->tx_ring.to_clean =
+	    c2_port->tx_ring.start + c2dev->cur_tx;
+
+	/* missing: Initialize MAC */
+
+	BUG_ON(c2_port->tx_ring.to_use != c2_port->tx_ring.to_clean);
+
+	/* Reset the adapter, ensures the driver is in sync with the RXP */
+	c2_reset(c2_port);
+
+	/* Reset the READY bit in the sk_buff RXP headers & adapter HRXDQ */
+	for (i = 0, elem = c2_port->rx_ring.start; i < c2_port->rx_ring.count;
+	     i++, elem++) {
+		rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
+		rxp_hdr->flags = 0;
+		__raw_writew(cpu_to_be16(RXP_HRXD_READY),
+			     elem->hw_desc + C2_RXP_FLAGS);
+	}
+
+	/* Enable network packets */
+	netif_start_queue(netdev);
+
+	/* Enable IRQ */
+	writel(0, c2dev->regs + C2_IDIS);
+	netimr0 = readl(c2dev->regs + C2_NIMR0);
+	netimr0 &= ~(C2_PCI_HTX_INT | C2_PCI_HRX_INT);
+	writel(netimr0, c2dev->regs + C2_NIMR0);
+
+	/* Tell the stack to ignore arp requests for ipaddrs bound to
+	 * other interfaces.  This is needed to prevent the host stack
+	 * from responding to arp requests to the ipaddr bound on the
+	 * rdma interface.
+	 */
+	in_dev = in_dev_get(netdev);
+	in_dev->cnf.arp_ignore = 1;
+	in_dev_put(in_dev);
+
+	return 0;
+
+bail1:
+	c2_rx_clean(c2_port);
+	kfree(c2_port->rx_ring.start);
+
+bail0:
+	pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
+			    c2_port->dma);
+
+	return ret;
+}
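
The in_dev fixup at the end of c2_up() is the in-kernel equivalent of the
per-device arp_ignore sysctl; from userspace the same effect would be
"sysctl -w net.ipv4.conf.<ifname>.arp_ignore=1". Its semantics, for
reference:

	/* arp_ignore == 1: reply to an ARP request only if the target
	 * IP address is configured on the interface the request arrived
	 * on; requests for addresses bound to other interfaces (here,
	 * the address bound on the rdma interface) are ignored. */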
+
+static int c2_down(struct net_device *netdev)
+{
+	struct c2_port *c2_port = netdev_priv(netdev);
+	struct c2_dev *c2dev = c2_port->c2dev;
+
+	if (netif_msg_ifdown(c2_port))
+		pr_debug("%s: disabling interface\n",
+			 netdev->name);
+
+	/* Wait for all the queued packets to get sent */
+	c2_tx_interrupt(netdev);
+
+	/* Disable network packets */
+	netif_stop_queue(netdev);
+
+	/* Disable IRQs by clearing the interrupt mask */
+	writel(1, c2dev->regs + C2_IDIS);
+	writel(0, c2dev->regs + C2_NIMR0);
+
+	/* missing: Stop transmitter */
+
+	/* missing: Stop receiver */
+
+	/* Reset the adapter, ensures the driver is in sync with the RXP */
+	c2_reset(c2_port);
+
+	/* missing: Turn off LEDs here */
+
+	/* Free all buffers in the host descriptor rings */
+	c2_tx_clean(c2_port);
+	c2_rx_clean(c2_port);
+
+	/* Free the host descriptor rings */
+	kfree(c2_port->rx_ring.start);
+	kfree(c2_port->tx_ring.start);
+	pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
+			    c2_port->dma);
+
+	return 0;
+}
+
+static void c2_reset(struct c2_port *c2_port)
+{
+	struct c2_dev *c2dev = c2_port->c2dev;
+	unsigned int cur_rx = c2dev->cur_rx;
+
+	/* Tell the hardware to quiesce */
+	C2_SET_CUR_RX(c2dev, cur_rx | C2_PCI_HRX_QUI);
+
+	/*
+	 * The hardware will reset the C2_PCI_HRX_QUI bit once
+	 * the RXP is quiesced.  Wait 2 seconds for this.
+	 */
+	ssleep(2);
+
+	cur_rx = C2_GET_CUR_RX(c2dev);
+
+	if (cur_rx & C2_PCI_HRX_QUI)
+		pr_debug("c2_reset: failed to quiesce the hardware!\n");
+
+	cur_rx &= ~C2_PCI_HRX_QUI;
+
+	c2dev->cur_rx = cur_rx;
+
+	pr_debug("Current RX: %u\n", c2dev->cur_rx);
+}
+
+static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct c2_port *c2_port = netdev_priv(netdev);
+	struct c2_dev *c2dev = c2_port->c2dev;
+	struct c2_ring *tx_ring = &c2_port->tx_ring;
+	struct c2_element *elem;
+	dma_addr_t mapaddr;
+	u32 maplen;
+	unsigned long flags;
+	unsigned int i;
+
+	spin_lock_irqsave(&c2_port->tx_lock, flags);
+
+	if (unlikely(c2_port->tx_avail < (skb_shinfo(skb)->nr_frags + 1))) {
+		netif_stop_queue(netdev);
+		spin_unlock_irqrestore(&c2_port->tx_lock, flags);
+
+		pr_debug("%s: Tx ring full when queue awake!\n",
+			 netdev->name);
+		return NETDEV_TX_BUSY;
+	}
+
+	maplen = skb_headlen(skb);
+	mapaddr =
+	    pci_map_single(c2dev->pcidev, skb->data, maplen, PCI_DMA_TODEVICE);
+
+	elem = tx_ring->to_use;
+	elem->skb = skb;
+	elem->mapaddr = mapaddr;
+	elem->maplen = maplen;
+
+	/* Tell HW to xmit */
+	__raw_writeq(cpu_to_be64(mapaddr), elem->hw_desc + C2_TXP_ADDR);
+	__raw_writew(cpu_to_be16(maplen), elem->hw_desc + C2_TXP_LEN);
+	__raw_writew(cpu_to_be16(TXP_HTXD_READY), elem->hw_desc + C2_TXP_FLAGS);
+
+	c2_port->netstats.tx_packets++;
+	c2_port->netstats.tx_bytes += maplen;
+
+	/* Loop thru additional data fragments and queue them */
+	if (skb_shinfo(skb)->nr_frags) {
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+			maplen = frag->size;
+			mapaddr =
+			    pci_map_page(c2dev->pcidev, frag->page,
+					 frag->page_offset, maplen,
+					 PCI_DMA_TODEVICE);
+
+			elem = elem->next;
+			elem->skb = NULL;
+			elem->mapaddr = mapaddr;
+			elem->maplen = maplen;
+
+			/* Tell HW to xmit */
+			__raw_writeq(cpu_to_be64(mapaddr),
+				     elem->hw_desc + C2_TXP_ADDR);
+			__raw_writew(cpu_to_be16(maplen),
+				     elem->hw_desc + C2_TXP_LEN);
+			__raw_writew(cpu_to_be16(TXP_HTXD_READY),
+				     elem->hw_desc + C2_TXP_FLAGS);
+
+			c2_port->netstats.tx_packets++;
+			c2_port->netstats.tx_bytes += maplen;
+		}
+	}
+
+	tx_ring->to_use = elem->next;
+	c2_port->tx_avail -= (skb_shinfo(skb)->nr_frags + 1);
+
+	if (c2_port->tx_avail <= MAX_SKB_FRAGS + 1) {
+		netif_stop_queue(netdev);
+		if (netif_msg_tx_queued(c2_port))
+			pr_debug("%s: transmit queue full\n",
+				 netdev->name);
+	}
+
+	spin_unlock_irqrestore(&c2_port->tx_lock, flags);
+
+	netdev->trans_start = jiffies;
+
+	return NETDEV_TX_OK;
+}
+
+static struct net_device_stats *c2_get_stats(struct net_device *netdev)
+{
+	struct c2_port *c2_port = netdev_priv(netdev);
+
+	return &c2_port->netstats;
+}
+
+static void c2_tx_timeout(struct net_device *netdev)
+{
+	struct c2_port *c2_port = netdev_priv(netdev);
+
+	if (netif_msg_timer(c2_port))
+		pr_debug("%s: tx timeout\n", netdev->name);
+
+	c2_tx_clean(c2_port);
+}
+
+static int c2_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	int ret = 0;
+
+	if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
+		return -EINVAL;
+
+	netdev->mtu = new_mtu;
+
+	if (netif_running(netdev)) {
+		c2_down(netdev);
+
+		c2_up(netdev);
+	}
+
+	return ret;
+}
+
+/* Initialize network device */
+static struct net_device *c2_devinit(struct c2_dev *c2dev,
+				     void __iomem * mmio_addr)
+{
+	struct c2_port *c2_port = NULL;
+	struct net_device *netdev = alloc_etherdev(sizeof(*c2_port));
+
+	if (!netdev) {
+		pr_debug("c2_port etherdev alloc failed");
+		return NULL;
+	}
+
+	SET_MODULE_OWNER(netdev);
+	SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev);
+
+	netdev->open = c2_up;
+	netdev->stop = c2_down;
+	netdev->hard_start_xmit = c2_xmit_frame;
+	netdev->get_stats = c2_get_stats;
+	netdev->tx_timeout = c2_tx_timeout;
+	netdev->change_mtu = c2_change_mtu;
+	netdev->watchdog_timeo = C2_TX_TIMEOUT;
+	netdev->irq = c2dev->pcidev->irq;
+
+	c2_port = netdev_priv(netdev);
+	c2_port->netdev = netdev;
+	c2_port->c2dev = c2dev;
+	c2_port->msg_enable = netif_msg_init(debug, default_msg);
+	c2_port->tx_ring.count = C2_NUM_TX_DESC;
+	c2_port->rx_ring.count = C2_NUM_RX_DESC;
+
+	spin_lock_init(&c2_port->tx_lock);
+
+	/* Copy our 48-bit ethernet hardware address */
+	memcpy_fromio(netdev->dev_addr, mmio_addr + C2_REGS_ENADDR, 6);
+
+	/* Validate the MAC address */
+	if (!is_valid_ether_addr(netdev->dev_addr)) {
+		pr_debug("Invalid MAC Address\n");
+		c2_print_macaddr(netdev);
+		free_netdev(netdev);
+		return NULL;
+	}
+
+	c2dev->netdev = netdev;
+
+	return netdev;
+}
+
+static int __devinit c2_probe(struct pci_dev *pcidev,
+			      const struct pci_device_id *ent)
+{
+	int ret = 0, i;
+	unsigned long reg0_start, reg0_flags, reg0_len;
+	unsigned long reg2_start, reg2_flags, reg2_len;
+	unsigned long reg4_start, reg4_flags, reg4_len;
+	unsigned kva_map_size;
+	struct net_device *netdev = NULL;
+	struct c2_dev *c2dev = NULL;
+	void __iomem *mmio_regs = NULL;
+
+	printk(KERN_INFO PFX "AMSO1100 Gigabit Ethernet driver v%s loaded\n",
+	       DRV_VERSION);
+
+	/* Enable PCI device */
+	ret = pci_enable_device(pcidev);
+	if (ret) {
+		printk(KERN_ERR PFX "%s: Unable to enable PCI device\n",
+		       pci_name(pcidev));
+		goto bail0;
+	}
+
+	reg0_start = pci_resource_start(pcidev, BAR_0);
+	reg0_len = pci_resource_len(pcidev, BAR_0);
+	reg0_flags = pci_resource_flags(pcidev, BAR_0);
+
+	reg2_start = pci_resource_start(pcidev, BAR_2);
+	reg2_len = pci_resource_len(pcidev, BAR_2);
+	reg2_flags = pci_resource_flags(pcidev, BAR_2);
+
+	reg4_start = pci_resource_start(pcidev, BAR_4);
+	reg4_len = pci_resource_len(pcidev, BAR_4);
+	reg4_flags = pci_resource_flags(pcidev, BAR_4);
+
+	pr_debug("BAR0 size = 0x%lX bytes\n", reg0_len);
+	pr_debug("BAR2 size = 0x%lX bytes\n", reg2_len);
+	pr_debug("BAR4 size = 0x%lX bytes\n", reg4_len);
+
+	/* Make sure PCI base addr are MMIO */
+	if (!(reg0_flags & IORESOURCE_MEM) ||
+	    !(reg2_flags & IORESOURCE_MEM) || !(reg4_flags & IORESOURCE_MEM)) {
+		printk(KERN_ERR PFX "PCI regions not an MMIO resource\n");
+		ret = -ENODEV;
+		goto bail1;
+	}
+
+	/* Check for weird/broken PCI region reporting */
+	if ((reg0_len < C2_REG0_SIZE) ||
+	    (reg2_len < C2_REG2_SIZE) || (reg4_len < C2_REG4_SIZE)) {
+		printk(KERN_ERR PFX "Invalid PCI region sizes\n");
+		ret = -ENODEV;
+		goto bail1;
+	}
+
+	/* Reserve PCI I/O and memory resources */
+	ret = pci_request_regions(pcidev, DRV_NAME);
+	if (ret) {
+		printk(KERN_ERR PFX "%s: Unable to request regions\n",
+		       pci_name(pcidev));
+		goto bail1;
+	}
+
+	if ((sizeof(dma_addr_t) > 4)) {
+		ret = pci_set_dma_mask(pcidev, DMA_64BIT_MASK);
+		if (ret < 0) {
+			printk(KERN_ERR PFX "64b DMA configuration failed\n");
+			goto bail2;
+		}
+	} else {
+		ret = pci_set_dma_mask(pcidev, DMA_32BIT_MASK);
+		if (ret < 0) {
+			printk(KERN_ERR PFX "32b DMA configuration failed\n");
+			goto bail2;
+		}
+	}
+
+	/* Enables bus-mastering on the device */
+	pci_set_master(pcidev);
+
+	/* Remap the adapter PCI registers in BAR4 */
+	mmio_regs = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
+				    sizeof(struct c2_adapter_pci_regs));
+	if (mmio_regs == 0UL) {
+		printk(KERN_ERR PFX
+		       "Unable to remap adapter PCI registers in BAR4\n");
+		ret = -EIO;
+		goto bail2;
+	}
+
+	/* Validate PCI regs magic */
+	for (i = 0; i < sizeof(c2_magic); i++) {
+		if (c2_magic[i] != readb(mmio_regs + C2_REGS_MAGIC + i)) {
+			printk(KERN_ERR PFX "Downlevel Firmware boot loader "
+			       "[%d/%Zd: got 0x%x, exp 0x%x]. Use the cc_flash "
+			       "utility to update your boot loader\n",
+			       i + 1, sizeof(c2_magic),
+			       readb(mmio_regs + C2_REGS_MAGIC + i),
+			       c2_magic[i]);
+			printk(KERN_ERR PFX "Adapter not claimed\n");
+			iounmap(mmio_regs);
+			ret = -EIO;
+			goto bail2;
+		}
+	}
+
+	/* Validate the adapter version */
+	if (be32_to_cpu(readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) {
+		printk(KERN_ERR PFX "Version mismatch "
+		       "[fw=%u, c2=%u], Adapter not claimed\n",
+		       be32_to_cpu(readl(mmio_regs + C2_REGS_VERS)),
+		       C2_VERSION);
+		ret = -EINVAL;
+		iounmap(mmio_regs);
+		goto bail2;
+	}
+
+	/* Validate the adapter IVN */
+	if (be32_to_cpu(readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) {
+		printk(KERN_ERR PFX "Downlevel firmware level.  You should be using "
+		       "the OpenIB device support kit. "
+		       "[fw=0x%x, c2=0x%x], Adapter not claimed\n",
+		       be32_to_cpu(readl(mmio_regs + C2_REGS_IVN)),
+		       C2_IVN);
+		ret = -EINVAL;
+		iounmap(mmio_regs);
+		goto bail2;
+	}
+
+	/* Allocate hardware structure */
+	c2dev = (struct c2_dev *) ib_alloc_device(sizeof(*c2dev));
+	if (!c2dev) {
+		printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n",
+		       pci_name(pcidev));
+		ret = -ENOMEM;
+		iounmap(mmio_regs);
+		goto bail2;
+	}
+
+	memset(c2dev, 0, sizeof(*c2dev));
+	spin_lock_init(&c2dev->lock);
+	c2dev->pcidev = pcidev;
+	c2dev->cur_tx = 0;
+
+	/* Get the last RX index */
+	c2dev->cur_rx =
+	    (be32_to_cpu(readl(mmio_regs + C2_REGS_HRX_CUR)) -
+	     0xffffc000) / sizeof(struct c2_rxp_desc);
+
+	/* Request an interrupt line for the driver */
+	ret = request_irq(pcidev->irq, c2_interrupt, SA_SHIRQ, DRV_NAME, c2dev);
+	if (ret) {
+		printk(KERN_ERR PFX "%s: requested IRQ %u is busy\n",
+		       pci_name(pcidev), pcidev->irq);
+		iounmap(mmio_regs);
+		goto bail3;
+	}
+
+	/* Set driver specific data */
+	pci_set_drvdata(pcidev, c2dev);
+
+	/* Initialize network device */
+	if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {
+		iounmap(mmio_regs);
+		goto bail4;
+	}
+
+	/* Save off the actual size prior to unmapping mmio_regs */
+	kva_map_size = be32_to_cpu(readl(mmio_regs + C2_REGS_PCI_WINSIZE));
+
+	/* Unmap the adapter PCI registers in BAR4 */
+	iounmap(mmio_regs);
+
+	/* Register network device */
+	ret = register_netdev(netdev);
+	if (ret) {
+		printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n",
+		       ret);
+		goto bail5;
+	}
+
+	/* Disable network packets */
+	netif_stop_queue(netdev);
+
+	/* Remap the adapter HRXDQ PA space to kernel VA space */
+	c2dev->mmio_rxp_ring = ioremap_nocache(reg4_start + C2_RXP_HRXDQ_OFFSET,
+					       C2_RXP_HRXDQ_SIZE);
+	if (c2dev->mmio_rxp_ring == 0UL) {
+		printk(KERN_ERR PFX "Unable to remap MMIO HRXDQ region\n");
+		ret = -EIO;
+		goto bail6;
+	}
+
+	/* Remap the adapter HTXDQ PA space to kernel VA space */
+	c2dev->mmio_txp_ring = ioremap_nocache(reg4_start + C2_TXP_HTXDQ_OFFSET,
+					       C2_TXP_HTXDQ_SIZE);
+	if (c2dev->mmio_txp_ring == 0UL) {
+		printk(KERN_ERR PFX "Unable to remap MMIO HTXDQ region\n");
+		ret = -EIO;
+		goto bail7;
+	}
+
+	/* Save off the current RX index in the last 4 bytes of the TXP Ring */
+	C2_SET_CUR_RX(c2dev, c2dev->cur_rx);
+
+	/* Remap the PCI registers in adapter BAR0 to kernel VA space */
+	c2dev->regs = ioremap_nocache(reg0_start, reg0_len);
+	if (c2dev->regs == 0UL) {
+		printk(KERN_ERR PFX "Unable to remap BAR0\n");
+		ret = -EIO;
+		goto bail8;
+	}
+
+	/* Remap the PCI registers in adapter BAR4 to kernel VA space */
+	c2dev->pa = reg4_start + C2_PCI_REGS_OFFSET;
+	c2dev->kva = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
+				     kva_map_size);
+	if (c2dev->kva == 0UL) {
+		printk(KERN_ERR PFX "Unable to remap BAR4\n");
+		ret = -EIO;
+		goto bail9;
+	}
+
+	/* Print out the MAC address */
+	c2_print_macaddr(netdev);
+
+	ret = c2_rnic_init(c2dev);
+	if (ret) {
+		printk(KERN_ERR PFX "c2_rnic_init failed: %d\n", ret);
+		goto bail10;
+	}
+
+	c2_register_device(c2dev);
+
+	return 0;
+
+bail10:
+	iounmap(c2dev->kva);
+
+bail9:
+	iounmap(c2dev->regs);
+
+bail8:
+	iounmap(c2dev->mmio_txp_ring);
+
+bail7:
+	iounmap(c2dev->mmio_rxp_ring);
+
+bail6:
+	unregister_netdev(netdev);
+
+bail5:
+	free_netdev(netdev);
+
+bail4:
+	free_irq(pcidev->irq, c2dev);
+
+bail3:
+	ib_dealloc_device(&c2dev->ibdev);
+
+bail2:
+	pci_release_regions(pcidev);
+
+bail1:
+	pci_disable_device(pcidev);
+
+bail0:
+	return ret;
+}
+
+static void __devexit c2_remove(struct pci_dev *pcidev)
+{
+	struct c2_dev *c2dev = pci_get_drvdata(pcidev);
+	struct net_device *netdev = c2dev->netdev;
+
+	/* Unregister with OpenIB */
+	c2_unregister_device(c2dev);
+
+	/* Clean up the RNIC resources */
+	c2_rnic_term(c2dev);
+
+	/* Remove network device from the kernel */
+	unregister_netdev(netdev);
+
+	/* Free network device */
+	free_netdev(netdev);
+
+	/* Free the interrupt line */
+	free_irq(pcidev->irq, c2dev);
+
+	/* missing: Turn LEDs off here */
+
+	/* Unmap adapter PA space */
+	iounmap(c2dev->kva);
+	iounmap(c2dev->regs);
+	iounmap(c2dev->mmio_txp_ring);
+	iounmap(c2dev->mmio_rxp_ring);
+
+	/* Free the hardware structure */
+	ib_dealloc_device(&c2dev->ibdev);
+
+	/* Release reserved PCI I/O and memory resources */
+	pci_release_regions(pcidev);
+
+	/* Disable PCI device */
+	pci_disable_device(pcidev);
+
+	/* Clear driver specific data */
+	pci_set_drvdata(pcidev, NULL);
+}
+
+static struct pci_driver c2_pci_driver = {
+	.name = DRV_NAME,
+	.id_table = c2_pci_table,
+	.probe = c2_probe,
+	.remove = __devexit_p(c2_remove),
+};
+
+static int __init c2_init_module(void)
+{
+	return pci_module_init(&c2_pci_driver);
+}
+
+static void __exit c2_exit_module(void)
+{
+	pci_unregister_driver(&c2_pci_driver);
+}
+
+module_init(c2_init_module);
+module_exit(c2_exit_module);
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h
new file mode 100644
index 000000000000..1b17dcdd0505
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2.h
@@ -0,0 +1,551 @@
+/*
+ * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __C2_H
+#define __C2_H
+
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <asm/semaphore.h>
+
+#include "c2_provider.h"
+#include "c2_mq.h"
+#include "c2_status.h"
+
+#define DRV_NAME    "c2"
+#define DRV_VERSION "1.1"
+#define PFX         DRV_NAME ": "
+
+#define BAR_0 0
+#define BAR_2 2
+#define BAR_4 4
+
+#define RX_BUF_SIZE         (1536 + 8)
+#define ETH_JUMBO_MTU       9000
+#define C2_MAGIC            "CEPHEUS"
+#define C2_VERSION          4
+#define C2_IVN              (18 & 0x7fffffff)
+
+#define C2_REG0_SIZE        (16 * 1024)
+#define C2_REG2_SIZE        (2 * 1024 * 1024)
+#define C2_REG4_SIZE        (256 * 1024 * 1024)
+#define C2_NUM_TX_DESC      341
+#define C2_NUM_RX_DESC      256
+#define C2_PCI_REGS_OFFSET  (0x10000)
+#define C2_RXP_HRXDQ_OFFSET (((C2_REG4_SIZE)/2))
+#define C2_RXP_HRXDQ_SIZE   (4096)
+#define C2_TXP_HTXDQ_OFFSET (((C2_REG4_SIZE)/2) + C2_RXP_HRXDQ_SIZE)
+#define C2_TXP_HTXDQ_SIZE   (4096)
+#define C2_TX_TIMEOUT       (6*HZ)
+
+/* CEPHEUS */
+static const u8 c2_magic[] = {
+	0x43, 0x45, 0x50, 0x48, 0x45, 0x55, 0x53
+};
+
+enum adapter_pci_regs {
+	C2_REGS_MAGIC = 0x0000,
+	C2_REGS_VERS = 0x0008,
+	C2_REGS_IVN = 0x000C,
+	C2_REGS_PCI_WINSIZE = 0x0010,
+	C2_REGS_Q0_QSIZE = 0x0014,
+	C2_REGS_Q0_MSGSIZE = 0x0018,
+	C2_REGS_Q0_POOLSTART = 0x001C,
+	C2_REGS_Q0_SHARED = 0x0020,
+	C2_REGS_Q1_QSIZE = 0x0024,
+	C2_REGS_Q1_MSGSIZE = 0x0028,
+	C2_REGS_Q1_SHARED = 0x0030,
+	C2_REGS_Q2_QSIZE = 0x0034,
+	C2_REGS_Q2_MSGSIZE = 0x0038,
+	C2_REGS_Q2_SHARED = 0x0040,
+	C2_REGS_ENADDR = 0x004C,
+	C2_REGS_RDMA_ENADDR = 0x0054,
+	C2_REGS_HRX_CUR = 0x006C,
+};
+
+struct c2_adapter_pci_regs {
+	char reg_magic[8];
+	u32 version;
+	u32 ivn;
+	u32 pci_window_size;
+	u32 q0_q_size;
+	u32 q0_msg_size;
+	u32 q0_pool_start;
+	u32 q0_shared;
+	u32 q1_q_size;
+	u32 q1_msg_size;
+	u32 q1_pool_start;
+	u32 q1_shared;
+	u32 q2_q_size;
+	u32 q2_msg_size;
+	u32 q2_pool_start;
+	u32 q2_shared;
+	u32 log_start;
+	u32 log_size;
+	u8 host_enaddr[8];
+	u8 rdma_enaddr[8];
+	u32 crash_entry;
+	u32 crash_ready[2];
+	u32 fw_txd_cur;
+	u32 fw_hrxd_cur;
+	u32 fw_rxd_cur;
+};
+
+enum pci_regs {
+	C2_HISR = 0x0000,
+	C2_DISR = 0x0004,
+	C2_HIMR = 0x0008,
+	C2_DIMR = 0x000C,
+	C2_NISR0 = 0x0010,
+	C2_NISR1 = 0x0014,
+	C2_NIMR0 = 0x0018,
+	C2_NIMR1 = 0x001C,
+	C2_IDIS = 0x0020,
+};
+
+enum {
+	C2_PCI_HRX_INT = 1 << 8,
+	C2_PCI_HTX_INT = 1 << 17,
+	C2_PCI_HRX_QUI = 1 << 31,
+};
+
+/*
+ * Cepheus registers in BAR0.
+ */
+struct c2_pci_regs {
+	u32 hostisr;
+	u32 dmaisr;
+	u32 hostimr;
+	u32 dmaimr;
+	u32 netisr0;
+	u32 netisr1;
+	u32 netimr0;
+	u32 netimr1;
+	u32 int_disable;
+};
+
+/* TXP flags */
+enum c2_txp_flags {
+	TXP_HTXD_DONE = 0,
+	TXP_HTXD_READY = 1 << 0,
+	TXP_HTXD_UNINIT = 1 << 1,
+};
+
+/* RXP flags */
+enum c2_rxp_flags {
+	RXP_HRXD_UNINIT = 0,
+	RXP_HRXD_READY = 1 << 0,
+	RXP_HRXD_DONE = 1 << 1,
+};
+
+/* RXP status */
+enum c2_rxp_status {
+	RXP_HRXD_ZERO = 0,
+	RXP_HRXD_OK = 1 << 0,
+	RXP_HRXD_BUF_OV = 1 << 1,
+};
+
+/* TXP descriptor fields */
+enum txp_desc {
+	C2_TXP_FLAGS = 0x0000,
+	C2_TXP_LEN = 0x0002,
+	C2_TXP_ADDR = 0x0004,
+};
+
+/* RXP descriptor fields */
+enum rxp_desc {
+	C2_RXP_FLAGS = 0x0000,
+	C2_RXP_STATUS = 0x0002,
+	C2_RXP_COUNT = 0x0004,
+	C2_RXP_LEN = 0x0006,
+	C2_RXP_ADDR = 0x0008,
+};
+
+struct c2_txp_desc {
+	u16 flags;
+	u16 len;
+	u64 addr;
+} __attribute__ ((packed));
+
+struct c2_rxp_desc {
+	u16 flags;
+	u16 status;
+	u16 count;
+	u16 len;
+	u64 addr;
+} __attribute__ ((packed));
+
+struct c2_rxp_hdr {
+	u16 flags;
+	u16 status;
+	u16 len;
+	u16 rsvd;
+} __attribute__ ((packed));
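
The txp_desc and rxp_desc enums above are byte offsets into these packed
structures. A compile-time cross-check one could add (hypothetical helper,
not in the patch):

	static inline void c2_desc_layout_check(void)
	{
		BUILD_BUG_ON(offsetof(struct c2_txp_desc, len) != C2_TXP_LEN);
		BUILD_BUG_ON(offsetof(struct c2_txp_desc, addr) != C2_TXP_ADDR);
		BUILD_BUG_ON(offsetof(struct c2_rxp_desc, status) != C2_RXP_STATUS);
		BUILD_BUG_ON(offsetof(struct c2_rxp_desc, addr) != C2_RXP_ADDR);
	}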
218 | |||
219 | struct c2_tx_desc { | ||
220 | u32 len; | ||
221 | u32 status; | ||
222 | dma_addr_t next_offset; | ||
223 | }; | ||
224 | |||
225 | struct c2_rx_desc { | ||
226 | u32 len; | ||
227 | u32 status; | ||
228 | dma_addr_t next_offset; | ||
229 | }; | ||
230 | |||
231 | struct c2_alloc { | ||
232 | u32 last; | ||
233 | u32 max; | ||
234 | spinlock_t lock; | ||
235 | unsigned long *table; | ||
236 | }; | ||
237 | |||
238 | struct c2_array { | ||
239 | struct { | ||
240 | void **page; | ||
241 | int used; | ||
242 | } *page_list; | ||
243 | }; | ||
244 | |||
245 | /* | ||
246 | * The MQ shared pointer pool is organized as a linked list of | ||
247 | * chunks. Each chunk contains a linked list of free shared pointers | ||
248 | * that can be allocated to a given user mode client. | ||
249 | * | ||
250 | */ | ||
251 | struct sp_chunk { | ||
252 | struct sp_chunk *next; | ||
253 | dma_addr_t dma_addr; | ||
254 | DECLARE_PCI_UNMAP_ADDR(mapping); | ||
255 | u16 head; | ||
256 | u16 shared_ptr[0]; | ||
257 | }; | ||
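The structure above is the chunk header; its shared_ptr[] array doubles as the free list sketched in the comment: head holds the index of the first free slot, each free slot holds the index of the next, and 0xFFFF terminates the chain. A minimal standalone illustration of that walk (C99 stdint types stand in for the kernel's u16; dump_free_slots is illustrative, not a driver function):

#include <stdint.h>
#include <stdio.h>

/* Walk one chunk's free list: 'head' is the first free index,
 * each free slot stores the next index, 0xFFFF ends the chain. */
static void dump_free_slots(const uint16_t *shared_ptr, uint16_t head)
{
	for (uint16_t idx = head; idx != 0xFFFF; idx = shared_ptr[idx])
		printf("free slot %u\n", (unsigned) idx);
}

int main(void)
{
	/* Three slots linked 0 -> 1 -> 2 -> end, exactly as
	 * c2_alloc_mqsp_chunk() builds them */
	uint16_t slots[3] = { 1, 2, 0xFFFF };
	dump_free_slots(slots, 0);
	return 0;
}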
258 | |||
259 | struct c2_pd_table { | ||
260 | u32 last; | ||
261 | u32 max; | ||
262 | spinlock_t lock; | ||
263 | unsigned long *table; | ||
264 | }; | ||
265 | |||
266 | struct c2_qp_table { | ||
267 | struct idr idr; | ||
268 | spinlock_t lock; | ||
269 | int last; | ||
270 | }; | ||
271 | |||
272 | struct c2_element { | ||
273 | struct c2_element *next; | ||
274 | void *ht_desc; /* host descriptor */ | ||
275 | void __iomem *hw_desc; /* hardware descriptor */ | ||
276 | struct sk_buff *skb; | ||
277 | dma_addr_t mapaddr; | ||
278 | u32 maplen; | ||
279 | }; | ||
280 | |||
281 | struct c2_ring { | ||
282 | struct c2_element *to_clean; | ||
283 | struct c2_element *to_use; | ||
284 | struct c2_element *start; | ||
285 | unsigned long count; | ||
286 | }; | ||
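In this ring, to_use points at the next element to hand to the hardware and to_clean at the oldest element the hardware still owns; both advance along the circular next links. A hedged sketch of the produce/reclaim discipline this implies (ring_produce/ring_clean are illustrative names; the READY/DONE handshake through each element's hardware descriptor is elided):

/* Hand the next free element to the hardware (sketch). */
static struct c2_element *ring_produce(struct c2_ring *ring)
{
	struct c2_element *elem = ring->to_use;

	ring->to_use = elem->next;	/* 'next' wraps back to ring->start */
	return elem;			/* caller fills skb/mapaddr and flags it ready */
}

/* Reclaim the oldest element the hardware has finished with (sketch). */
static struct c2_element *ring_clean(struct c2_ring *ring)
{
	struct c2_element *elem = ring->to_clean;

	ring->to_clean = elem->next;
	return elem;			/* caller unmaps and frees the skb */
}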
287 | |||
288 | struct c2_dev { | ||
289 | struct ib_device ibdev; | ||
290 | void __iomem *regs; | ||
291 | void __iomem *mmio_txp_ring; /* remapped adapter memory for hw rings */ | ||
292 | void __iomem *mmio_rxp_ring; | ||
293 | spinlock_t lock; | ||
294 | struct pci_dev *pcidev; | ||
295 | struct net_device *netdev; | ||
296 | struct net_device *pseudo_netdev; | ||
297 | unsigned int cur_tx; | ||
298 | unsigned int cur_rx; | ||
299 | u32 adapter_handle; | ||
300 | int device_cap_flags; | ||
301 | void __iomem *kva; /* KVA device memory */ | ||
302 | unsigned long pa; /* PA device memory */ | ||
303 | void **qptr_array; | ||
304 | |||
305 | kmem_cache_t *host_msg_cache; | ||
306 | |||
307 | struct list_head cca_link; /* adapter list */ | ||
308 | struct list_head eh_wakeup_list; /* event wakeup list */ | ||
309 | wait_queue_head_t req_vq_wo; | ||
310 | |||
311 | /* Cached RNIC properties */ | ||
312 | struct ib_device_attr props; | ||
313 | |||
314 | struct c2_pd_table pd_table; | ||
315 | struct c2_qp_table qp_table; | ||
316 | int ports; /* num of GigE ports */ | ||
317 | int devnum; | ||
318 | spinlock_t vqlock; /* sync vbs req MQ */ | ||
319 | |||
320 | /* Verbs Queues */ | ||
321 | struct c2_mq req_vq; /* Verbs Request MQ */ | ||
322 | struct c2_mq rep_vq; /* Verbs Reply MQ */ | ||
323 | struct c2_mq aeq; /* Async Events MQ */ | ||
324 | |||
325 | /* Kernel client MQs */ | ||
326 | struct sp_chunk *kern_mqsp_pool; | ||
327 | |||
328 | /* Device updates these values when posting messages to a host | ||
329 | * target queue */ | ||
330 | u16 req_vq_shared; | ||
331 | u16 rep_vq_shared; | ||
332 | u16 aeq_shared; | ||
333 | u16 irq_claimed; | ||
334 | |||
335 | /* | ||
336 | * Shared host target pages for user-accessible MQs. | ||
337 | */ | ||
338 | int hthead; /* index of first free entry */ | ||
339 | void *htpages; /* kernel vaddr */ | ||
340 | int htlen; /* length of htpages memory */ | ||
341 | void *htuva; /* user mapped vaddr */ | ||
342 | spinlock_t htlock; /* serialize allocation */ | ||
343 | |||
344 | u64 adapter_hint_uva; /* access to the activity FIFO */ | ||
345 | |||
348 | |||
349 | u16 *hint_count; | ||
350 | dma_addr_t hint_count_dma; | ||
351 | u16 hints_read; | ||
352 | |||
353 | int init; /* TRUE if it's ready */ | ||
354 | char ae_cache_name[16]; | ||
355 | char vq_cache_name[16]; | ||
356 | }; | ||
357 | |||
358 | struct c2_port { | ||
359 | u32 msg_enable; | ||
360 | struct c2_dev *c2dev; | ||
361 | struct net_device *netdev; | ||
362 | |||
363 | spinlock_t tx_lock; | ||
364 | u32 tx_avail; | ||
365 | struct c2_ring tx_ring; | ||
366 | struct c2_ring rx_ring; | ||
367 | |||
368 | void *mem; /* PCI memory for host rings */ | ||
369 | dma_addr_t dma; | ||
370 | unsigned long mem_size; | ||
371 | |||
372 | u32 rx_buf_size; | ||
373 | |||
374 | struct net_device_stats netstats; | ||
375 | }; | ||
376 | |||
377 | /* | ||
378 | * Activity FIFO registers in BAR0. | ||
379 | */ | ||
380 | #define PCI_BAR0_HOST_HINT 0x100 | ||
381 | #define PCI_BAR0_ADAPTER_HINT 0x2000 | ||
382 | |||
383 | /* | ||
384 | * CQ arming flags, written to the CQ's shared "armed" byte. | ||
385 | */ | ||
386 | #define CQ_ARMED 0x01 | ||
387 | #define CQ_WAIT_FOR_DMA 0x80 | ||
388 | |||
389 | /* | ||
390 | * The format of a hint is as follows: | ||
391 | * Lower 16 bits are the count of hints for the queue. | ||
392 | * Next 15 bits are the qp_index. | ||
393 | * Uppermost bit depends on who reads it: | ||
394 | * If read by producer, then it means Full (1) or Not-Full (0). | ||
395 | * If read by consumer, then it means Empty (1) or Not-Empty (0). | ||
396 | */ | ||
397 | #define C2_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | (hint_count)) | ||
398 | #define C2_HINT_GET_INDEX(hint) (((hint) & 0x7FFF0000) >> 16) | ||
399 | #define C2_HINT_GET_COUNT(hint) ((hint) & 0x0000FFFF) | ||
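For example, a hint word for queue index 5 carrying 3 pending messages encodes and decodes like this (a sketch using only the macros above):

	u32 hint = C2_HINT_MAKE(5, 3);		/* 0x00050003 */
	BUG_ON(C2_HINT_GET_INDEX(hint) != 5);
	BUG_ON(C2_HINT_GET_COUNT(hint) != 3);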
400 | |||
401 | |||
402 | /* | ||
403 | * The following defines the offset in SDRAM for the c2_adapter_pci_regs_t | ||
404 | * struct. | ||
405 | */ | ||
406 | #define C2_ADAPTER_PCI_REGS_OFFSET 0x10000 | ||
407 | |||
408 | #ifndef readq | ||
409 | static inline u64 readq(const void __iomem * addr) | ||
410 | { | ||
411 | u64 ret = readl(addr + 4); | ||
412 | ret <<= 32; | ||
413 | ret |= readl(addr); | ||
414 | |||
415 | return ret; | ||
416 | } | ||
417 | #endif | ||
418 | |||
419 | #ifndef writeq | ||
420 | static inline void __raw_writeq(u64 val, void __iomem * addr) | ||
421 | { | ||
422 | __raw_writel((u32) (val), addr); | ||
423 | __raw_writel((u32) (val >> 32), (addr + 4)); | ||
424 | } | ||
425 | #endif | ||
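These fallbacks split a 64-bit MMIO access into two 32-bit halves (low word at addr, high word at addr + 4), so a value the adapter updates concurrently can be observed torn. A minimal sketch of one way a caller could cope, assuming a little-endian host and a monotonically updated register (sample_counter is illustrative, not part of this driver):

static u64 sample_counter(void __iomem *reg)
{
	u64 a, b;

	do {
		a = readq(reg);
		b = readq(reg);
	} while (a != b);	/* retry if the halves tore between the readl()s */

	return a;
}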
426 | |||
427 | #define C2_SET_CUR_RX(c2dev, cur_rx) \ | ||
428 | __raw_writel(cpu_to_be32(cur_rx), c2dev->mmio_txp_ring + 4092) | ||
429 | |||
430 | #define C2_GET_CUR_RX(c2dev) \ | ||
431 | be32_to_cpu(readl(c2dev->mmio_txp_ring + 4092)) | ||
432 | |||
433 | static inline struct c2_dev *to_c2dev(struct ib_device *ibdev) | ||
434 | { | ||
435 | return container_of(ibdev, struct c2_dev, ibdev); | ||
436 | } | ||
437 | |||
438 | static inline int c2_errno(void *reply) | ||
439 | { | ||
440 | switch (c2_wr_get_result(reply)) { | ||
441 | case C2_OK: | ||
442 | return 0; | ||
443 | case CCERR_NO_BUFS: | ||
444 | case CCERR_INSUFFICIENT_RESOURCES: | ||
445 | case CCERR_ZERO_RDMA_READ_RESOURCES: | ||
446 | return -ENOMEM; | ||
447 | case CCERR_MR_IN_USE: | ||
448 | case CCERR_QP_IN_USE: | ||
449 | return -EBUSY; | ||
450 | case CCERR_ADDR_IN_USE: | ||
451 | return -EADDRINUSE; | ||
452 | case CCERR_ADDR_NOT_AVAIL: | ||
453 | return -EADDRNOTAVAIL; | ||
454 | case CCERR_CONN_RESET: | ||
455 | return -ECONNRESET; | ||
456 | case CCERR_NOT_IMPLEMENTED: | ||
457 | case CCERR_INVALID_WQE: | ||
458 | return -ENOSYS; | ||
459 | case CCERR_QP_NOT_PRIVILEGED: | ||
460 | return -EPERM; | ||
461 | case CCERR_STACK_ERROR: | ||
462 | return -EPROTO; | ||
463 | case CCERR_ACCESS_VIOLATION: | ||
464 | case CCERR_BASE_AND_BOUNDS_VIOLATION: | ||
465 | return -EFAULT; | ||
466 | case CCERR_STAG_STATE_NOT_INVALID: | ||
467 | case CCERR_INVALID_ADDRESS: | ||
468 | case CCERR_INVALID_CQ: | ||
469 | case CCERR_INVALID_EP: | ||
470 | case CCERR_INVALID_MODIFIER: | ||
471 | case CCERR_INVALID_MTU: | ||
472 | case CCERR_INVALID_PD_ID: | ||
473 | case CCERR_INVALID_QP: | ||
474 | case CCERR_INVALID_RNIC: | ||
475 | case CCERR_INVALID_STAG: | ||
476 | return -EINVAL; | ||
477 | default: | ||
478 | return -EAGAIN; | ||
479 | } | ||
480 | } | ||
481 | |||
482 | /* Device */ | ||
483 | extern int c2_register_device(struct c2_dev *c2dev); | ||
484 | extern void c2_unregister_device(struct c2_dev *c2dev); | ||
485 | extern int c2_rnic_init(struct c2_dev *c2dev); | ||
486 | extern void c2_rnic_term(struct c2_dev *c2dev); | ||
487 | extern void c2_rnic_interrupt(struct c2_dev *c2dev); | ||
488 | extern int c2_del_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask); | ||
489 | extern int c2_add_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask); | ||
490 | |||
491 | /* QPs */ | ||
492 | extern int c2_alloc_qp(struct c2_dev *c2dev, struct c2_pd *pd, | ||
493 | struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp); | ||
494 | extern void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp); | ||
495 | extern struct ib_qp *c2_get_qp(struct ib_device *device, int qpn); | ||
496 | extern int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp, | ||
497 | struct ib_qp_attr *attr, int attr_mask); | ||
498 | extern int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp, | ||
499 | int ord, int ird); | ||
500 | extern int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr, | ||
501 | struct ib_send_wr **bad_wr); | ||
502 | extern int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr, | ||
503 | struct ib_recv_wr **bad_wr); | ||
504 | extern void __devinit c2_init_qp_table(struct c2_dev *c2dev); | ||
505 | extern void __devexit c2_cleanup_qp_table(struct c2_dev *c2dev); | ||
506 | extern void c2_set_qp_state(struct c2_qp *, int); | ||
507 | extern struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn); | ||
508 | |||
509 | /* PDs */ | ||
510 | extern int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd); | ||
511 | extern void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd); | ||
512 | extern int __devinit c2_init_pd_table(struct c2_dev *c2dev); | ||
513 | extern void __devexit c2_cleanup_pd_table(struct c2_dev *c2dev); | ||
514 | |||
515 | /* CQs */ | ||
516 | extern int c2_init_cq(struct c2_dev *c2dev, int entries, | ||
517 | struct c2_ucontext *ctx, struct c2_cq *cq); | ||
518 | extern void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq); | ||
519 | extern void c2_cq_event(struct c2_dev *c2dev, u32 mq_index); | ||
520 | extern void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index); | ||
521 | extern int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry); | ||
522 | extern int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify); | ||
523 | |||
524 | /* CM */ | ||
525 | extern int c2_llp_connect(struct iw_cm_id *cm_id, | ||
526 | struct iw_cm_conn_param *iw_param); | ||
527 | extern int c2_llp_accept(struct iw_cm_id *cm_id, | ||
528 | struct iw_cm_conn_param *iw_param); | ||
529 | extern int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata, | ||
530 | u8 pdata_len); | ||
531 | extern int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog); | ||
532 | extern int c2_llp_service_destroy(struct iw_cm_id *cm_id); | ||
533 | |||
534 | /* MM */ | ||
535 | extern int c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list, | ||
536 | int page_size, int pbl_depth, u32 length, | ||
537 | u32 off, u64 *va, enum c2_acf acf, | ||
538 | struct c2_mr *mr); | ||
539 | extern int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index); | ||
540 | |||
541 | /* AE */ | ||
542 | extern void c2_ae_event(struct c2_dev *c2dev, u32 mq_index); | ||
543 | |||
544 | /* MQSP Allocator */ | ||
545 | extern int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask, | ||
546 | struct sp_chunk **root); | ||
547 | extern void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root); | ||
548 | extern u16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head, | ||
549 | dma_addr_t *dma_addr, gfp_t gfp_mask); | ||
550 | extern void c2_free_mqsp(u16 * mqsp); | ||
551 | #endif | ||
diff --git a/drivers/infiniband/hw/amso1100/c2_ae.c b/drivers/infiniband/hw/amso1100/c2_ae.c new file mode 100644 index 000000000000..08f46c83a3a4 --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_ae.c | |||
@@ -0,0 +1,321 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005 Ammasso, Inc. All rights reserved. | ||
3 | * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | #include "c2.h" | ||
34 | #include <rdma/iw_cm.h> | ||
35 | #include "c2_status.h" | ||
36 | #include "c2_ae.h" | ||
37 | |||
38 | static int c2_convert_cm_status(u32 c2_status) | ||
39 | { | ||
40 | switch (c2_status) { | ||
41 | case C2_CONN_STATUS_SUCCESS: | ||
42 | return 0; | ||
43 | case C2_CONN_STATUS_REJECTED: | ||
44 | return -ENETRESET; | ||
45 | case C2_CONN_STATUS_REFUSED: | ||
46 | return -ECONNREFUSED; | ||
47 | case C2_CONN_STATUS_TIMEDOUT: | ||
48 | return -ETIMEDOUT; | ||
49 | case C2_CONN_STATUS_NETUNREACH: | ||
50 | return -ENETUNREACH; | ||
51 | case C2_CONN_STATUS_HOSTUNREACH: | ||
52 | return -EHOSTUNREACH; | ||
53 | case C2_CONN_STATUS_INVALID_RNIC: | ||
54 | return -EINVAL; | ||
55 | case C2_CONN_STATUS_INVALID_QP: | ||
56 | return -EINVAL; | ||
57 | case C2_CONN_STATUS_INVALID_QP_STATE: | ||
58 | return -EINVAL; | ||
59 | case C2_CONN_STATUS_ADDR_NOT_AVAIL: | ||
60 | return -EADDRNOTAVAIL; | ||
61 | default: | ||
62 | printk(KERN_ERR PFX | ||
63 | "%s - Unable to convert CM status: %d\n", | ||
64 | __FUNCTION__, c2_status); | ||
65 | return -EIO; | ||
66 | } | ||
67 | } | ||
68 | |||
69 | #ifdef DEBUG | ||
70 | static const char *to_event_str(int event) | ||
71 | { | ||
72 | static const char *event_str[] = { | ||
73 | "CCAE_REMOTE_SHUTDOWN", | ||
74 | "CCAE_ACTIVE_CONNECT_RESULTS", | ||
75 | "CCAE_CONNECTION_REQUEST", | ||
76 | "CCAE_LLP_CLOSE_COMPLETE", | ||
77 | "CCAE_TERMINATE_MESSAGE_RECEIVED", | ||
78 | "CCAE_LLP_CONNECTION_RESET", | ||
79 | "CCAE_LLP_CONNECTION_LOST", | ||
80 | "CCAE_LLP_SEGMENT_SIZE_INVALID", | ||
81 | "CCAE_LLP_INVALID_CRC", | ||
82 | "CCAE_LLP_BAD_FPDU", | ||
83 | "CCAE_INVALID_DDP_VERSION", | ||
84 | "CCAE_INVALID_RDMA_VERSION", | ||
85 | "CCAE_UNEXPECTED_OPCODE", | ||
86 | "CCAE_INVALID_DDP_QUEUE_NUMBER", | ||
87 | "CCAE_RDMA_READ_NOT_ENABLED", | ||
88 | "CCAE_RDMA_WRITE_NOT_ENABLED", | ||
89 | "CCAE_RDMA_READ_TOO_SMALL", | ||
90 | "CCAE_NO_L_BIT", | ||
91 | "CCAE_TAGGED_INVALID_STAG", | ||
92 | "CCAE_TAGGED_BASE_BOUNDS_VIOLATION", | ||
93 | "CCAE_TAGGED_ACCESS_RIGHTS_VIOLATION", | ||
94 | "CCAE_TAGGED_INVALID_PD", | ||
95 | "CCAE_WRAP_ERROR", | ||
96 | "CCAE_BAD_CLOSE", | ||
97 | "CCAE_BAD_LLP_CLOSE", | ||
98 | "CCAE_INVALID_MSN_RANGE", | ||
99 | "CCAE_INVALID_MSN_GAP", | ||
100 | "CCAE_IRRQ_OVERFLOW", | ||
101 | "CCAE_IRRQ_MSN_GAP", | ||
102 | "CCAE_IRRQ_MSN_RANGE", | ||
103 | "CCAE_IRRQ_INVALID_STAG", | ||
104 | "CCAE_IRRQ_BASE_BOUNDS_VIOLATION", | ||
105 | "CCAE_IRRQ_ACCESS_RIGHTS_VIOLATION", | ||
106 | "CCAE_IRRQ_INVALID_PD", | ||
107 | "CCAE_IRRQ_WRAP_ERROR", | ||
108 | "CCAE_CQ_SQ_COMPLETION_OVERFLOW", | ||
109 | "CCAE_CQ_RQ_COMPLETION_ERROR", | ||
110 | "CCAE_QP_SRQ_WQE_ERROR", | ||
111 | "CCAE_QP_LOCAL_CATASTROPHIC_ERROR", | ||
112 | "CCAE_CQ_OVERFLOW", | ||
113 | "CCAE_CQ_OPERATION_ERROR", | ||
114 | "CCAE_SRQ_LIMIT_REACHED", | ||
115 | "CCAE_QP_RQ_LIMIT_REACHED", | ||
116 | "CCAE_SRQ_CATASTROPHIC_ERROR", | ||
117 | "CCAE_RNIC_CATASTROPHIC_ERROR" | ||
118 | }; | ||
119 | |||
120 | if (event < CCAE_REMOTE_SHUTDOWN || | ||
121 | event > CCAE_RNIC_CATASTROPHIC_ERROR) | ||
122 | return "<invalid event>"; | ||
123 | |||
124 | event -= CCAE_REMOTE_SHUTDOWN; | ||
125 | return event_str[event]; | ||
126 | } | ||
127 | |||
128 | static const char *to_qp_state_str(int state) | ||
129 | { | ||
130 | switch (state) { | ||
131 | case C2_QP_STATE_IDLE: | ||
132 | return "C2_QP_STATE_IDLE"; | ||
133 | case C2_QP_STATE_CONNECTING: | ||
134 | return "C2_QP_STATE_CONNECTING"; | ||
135 | case C2_QP_STATE_RTS: | ||
136 | return "C2_QP_STATE_RTS"; | ||
137 | case C2_QP_STATE_CLOSING: | ||
138 | return "C2_QP_STATE_CLOSING"; | ||
139 | case C2_QP_STATE_TERMINATE: | ||
140 | return "C2_QP_STATE_TERMINATE"; | ||
141 | case C2_QP_STATE_ERROR: | ||
142 | return "C2_QP_STATE_ERROR"; | ||
143 | default: | ||
144 | return "<invalid QP state>"; | ||
145 | } | ||
146 | } | ||
147 | #endif | ||
148 | |||
149 | void c2_ae_event(struct c2_dev *c2dev, u32 mq_index) | ||
150 | { | ||
151 | struct c2_mq *mq = c2dev->qptr_array[mq_index]; | ||
152 | union c2wr *wr; | ||
153 | void *resource_user_context; | ||
154 | struct iw_cm_event cm_event; | ||
155 | struct ib_event ib_event; | ||
156 | enum c2_resource_indicator resource_indicator; | ||
157 | enum c2_event_id event_id; | ||
158 | unsigned long flags; | ||
159 | int status; | ||
160 | |||
161 | /* | ||
162 | * retrieve the message | ||
163 | */ | ||
164 | wr = c2_mq_consume(mq); | ||
165 | if (!wr) | ||
166 | return; | ||
167 | |||
168 | memset(&ib_event, 0, sizeof(ib_event)); | ||
169 | memset(&cm_event, 0, sizeof(cm_event)); | ||
170 | |||
171 | event_id = c2_wr_get_id(wr); | ||
172 | resource_indicator = be32_to_cpu(wr->ae.ae_generic.resource_type); | ||
173 | resource_user_context = | ||
174 | (void *) (unsigned long) wr->ae.ae_generic.user_context; | ||
175 | |||
176 | status = cm_event.status = c2_convert_cm_status(c2_wr_get_result(wr)); | ||
177 | |||
178 | pr_debug("event received c2_dev=%p, event_id=%d, " | ||
179 | "resource_indicator=%d, user_context=%p, status = %d\n", | ||
180 | c2dev, event_id, resource_indicator, resource_user_context, | ||
181 | status); | ||
182 | |||
183 | switch (resource_indicator) { | ||
184 | case C2_RES_IND_QP:{ | ||
185 | |||
186 | struct c2_qp *qp = (struct c2_qp *)resource_user_context; | ||
187 | struct iw_cm_id *cm_id = qp->cm_id; | ||
188 | struct c2wr_ae_active_connect_results *res; | ||
189 | |||
190 | if (!cm_id) { | ||
191 | pr_debug("event received, but cm_id is <nul>, qp=%p!\n", | ||
192 | qp); | ||
193 | goto ignore_it; | ||
194 | } | ||
195 | pr_debug("%s: event = %s, user_context=%llx, " | ||
196 | "resource_type=%x, " | ||
197 | "resource=%x, qp_state=%s\n", | ||
198 | __FUNCTION__, | ||
199 | to_event_str(event_id), | ||
200 | be64_to_cpu(wr->ae.ae_generic.user_context), | ||
201 | be32_to_cpu(wr->ae.ae_generic.resource_type), | ||
202 | be32_to_cpu(wr->ae.ae_generic.resource), | ||
203 | to_qp_state_str(be32_to_cpu(wr->ae.ae_generic.qp_state))); | ||
204 | |||
205 | c2_set_qp_state(qp, be32_to_cpu(wr->ae.ae_generic.qp_state)); | ||
206 | |||
207 | switch (event_id) { | ||
208 | case CCAE_ACTIVE_CONNECT_RESULTS: | ||
209 | res = &wr->ae.ae_active_connect_results; | ||
210 | cm_event.event = IW_CM_EVENT_CONNECT_REPLY; | ||
211 | cm_event.local_addr.sin_addr.s_addr = res->laddr; | ||
212 | cm_event.remote_addr.sin_addr.s_addr = res->raddr; | ||
213 | cm_event.local_addr.sin_port = res->lport; | ||
214 | cm_event.remote_addr.sin_port = res->rport; | ||
215 | if (status == 0) { | ||
216 | cm_event.private_data_len = | ||
217 | be32_to_cpu(res->private_data_length); | ||
218 | cm_event.private_data = res->private_data; | ||
219 | } else { | ||
220 | spin_lock_irqsave(&qp->lock, flags); | ||
221 | if (qp->cm_id) { | ||
222 | qp->cm_id->rem_ref(qp->cm_id); | ||
223 | qp->cm_id = NULL; | ||
224 | } | ||
225 | spin_unlock_irqrestore(&qp->lock, flags); | ||
226 | cm_event.private_data_len = 0; | ||
227 | cm_event.private_data = NULL; | ||
228 | } | ||
229 | if (cm_id->event_handler) | ||
230 | cm_id->event_handler(cm_id, &cm_event); | ||
231 | break; | ||
232 | case CCAE_TERMINATE_MESSAGE_RECEIVED: | ||
233 | case CCAE_CQ_SQ_COMPLETION_OVERFLOW: | ||
234 | ib_event.device = &c2dev->ibdev; | ||
235 | ib_event.element.qp = &qp->ibqp; | ||
236 | ib_event.event = IB_EVENT_QP_REQ_ERR; | ||
237 | |||
238 | if (qp->ibqp.event_handler) | ||
239 | qp->ibqp.event_handler(&ib_event, | ||
240 | qp->ibqp.qp_context); | ||
242 | break; | ||
243 | case CCAE_BAD_CLOSE: | ||
244 | case CCAE_LLP_CLOSE_COMPLETE: | ||
245 | case CCAE_LLP_CONNECTION_RESET: | ||
246 | case CCAE_LLP_CONNECTION_LOST: | ||
247 | BUG_ON(cm_id->event_handler == (void *) 0x6b6b6b6b); /* 0x6b = slab POISON_FREE */ | ||
248 | |||
249 | spin_lock_irqsave(&qp->lock, flags); | ||
250 | if (qp->cm_id) { | ||
251 | qp->cm_id->rem_ref(qp->cm_id); | ||
252 | qp->cm_id = NULL; | ||
253 | } | ||
254 | spin_unlock_irqrestore(&qp->lock, flags); | ||
255 | cm_event.event = IW_CM_EVENT_CLOSE; | ||
256 | cm_event.status = 0; | ||
257 | if (cm_id->event_handler) | ||
258 | cm_id->event_handler(cm_id, &cm_event); | ||
259 | break; | ||
260 | default: | ||
261 | pr_debug("%s:%d Unexpected event_id=%d on QP=%p, " | ||
262 | "CM_ID=%p\n", | ||
263 | __FUNCTION__, __LINE__, | ||
264 | event_id, qp, cm_id); | ||
265 | BUG_ON(1); /* log before crashing, not after */ | ||
266 | break; | ||
267 | } | ||
268 | break; | ||
269 | } | ||
270 | |||
271 | case C2_RES_IND_EP:{ | ||
272 | |||
273 | struct c2wr_ae_connection_request *req = | ||
274 | &wr->ae.ae_connection_request; | ||
275 | struct iw_cm_id *cm_id = | ||
276 | (struct iw_cm_id *)resource_user_context; | ||
277 | |||
278 | pr_debug("C2_RES_IND_EP event_id=%d\n", event_id); | ||
279 | if (event_id != CCAE_CONNECTION_REQUEST) { | ||
280 | pr_debug("%s: Invalid event_id: %d\n", | ||
281 | __FUNCTION__, event_id); | ||
282 | break; | ||
283 | } | ||
284 | cm_event.event = IW_CM_EVENT_CONNECT_REQUEST; | ||
285 | cm_event.provider_data = (void*)(unsigned long)req->cr_handle; | ||
286 | cm_event.local_addr.sin_addr.s_addr = req->laddr; | ||
287 | cm_event.remote_addr.sin_addr.s_addr = req->raddr; | ||
288 | cm_event.local_addr.sin_port = req->lport; | ||
289 | cm_event.remote_addr.sin_port = req->rport; | ||
290 | cm_event.private_data_len = | ||
291 | be32_to_cpu(req->private_data_length); | ||
292 | cm_event.private_data = req->private_data; | ||
293 | |||
294 | if (cm_id->event_handler) | ||
295 | cm_id->event_handler(cm_id, &cm_event); | ||
296 | break; | ||
297 | } | ||
298 | |||
299 | case C2_RES_IND_CQ:{ | ||
300 | struct c2_cq *cq = | ||
301 | (struct c2_cq *) resource_user_context; | ||
302 | |||
303 | pr_debug("IB_EVENT_CQ_ERR\n"); | ||
304 | ib_event.device = &c2dev->ibdev; | ||
305 | ib_event.element.cq = &cq->ibcq; | ||
306 | ib_event.event = IB_EVENT_CQ_ERR; | ||
307 | |||
308 | if (cq->ibcq.event_handler) | ||
309 | cq->ibcq.event_handler(&ib_event, | ||
310 | cq->ibcq.cq_context); | ||
311 | break; | ||
312 | } | ||
313 | |||
314 | default: | ||
315 | printk(KERN_ERR PFX "Bad resource indicator = %d\n", | ||
316 | resource_indicator); | ||
317 | break; | ||
318 | } | ||
319 | |||
320 | ignore_it: | ||
321 | c2_mq_free(mq); | ||
322 | } | ||
diff --git a/drivers/infiniband/hw/amso1100/c2_ae.h b/drivers/infiniband/hw/amso1100/c2_ae.h new file mode 100644 index 000000000000..3a065c33b83b --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_ae.h | |||
@@ -0,0 +1,108 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005 Ammasso, Inc. All rights reserved. | ||
3 | * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | #ifndef _C2_AE_H_ | ||
34 | #define _C2_AE_H_ | ||
35 | |||
36 | /* | ||
37 | * WARNING: If you change this file, also bump C2_IVN_BASE | ||
38 | * in common/include/clustercore/c2_ivn.h. | ||
39 | */ | ||
40 | |||
41 | /* | ||
42 | * Asynchronous Event Identifiers | ||
43 | * | ||
44 | * These start at 0x80 only so it's obvious from inspection that | ||
45 | * they are not work-request statuses. This isn't critical. | ||
46 | * | ||
47 | * NOTE: these event id's must fit in eight bits. | ||
48 | */ | ||
49 | enum c2_event_id { | ||
50 | CCAE_REMOTE_SHUTDOWN = 0x80, | ||
51 | CCAE_ACTIVE_CONNECT_RESULTS, | ||
52 | CCAE_CONNECTION_REQUEST, | ||
53 | CCAE_LLP_CLOSE_COMPLETE, | ||
54 | CCAE_TERMINATE_MESSAGE_RECEIVED, | ||
55 | CCAE_LLP_CONNECTION_RESET, | ||
56 | CCAE_LLP_CONNECTION_LOST, | ||
57 | CCAE_LLP_SEGMENT_SIZE_INVALID, | ||
58 | CCAE_LLP_INVALID_CRC, | ||
59 | CCAE_LLP_BAD_FPDU, | ||
60 | CCAE_INVALID_DDP_VERSION, | ||
61 | CCAE_INVALID_RDMA_VERSION, | ||
62 | CCAE_UNEXPECTED_OPCODE, | ||
63 | CCAE_INVALID_DDP_QUEUE_NUMBER, | ||
64 | CCAE_RDMA_READ_NOT_ENABLED, | ||
65 | CCAE_RDMA_WRITE_NOT_ENABLED, | ||
66 | CCAE_RDMA_READ_TOO_SMALL, | ||
67 | CCAE_NO_L_BIT, | ||
68 | CCAE_TAGGED_INVALID_STAG, | ||
69 | CCAE_TAGGED_BASE_BOUNDS_VIOLATION, | ||
70 | CCAE_TAGGED_ACCESS_RIGHTS_VIOLATION, | ||
71 | CCAE_TAGGED_INVALID_PD, | ||
72 | CCAE_WRAP_ERROR, | ||
73 | CCAE_BAD_CLOSE, | ||
74 | CCAE_BAD_LLP_CLOSE, | ||
75 | CCAE_INVALID_MSN_RANGE, | ||
76 | CCAE_INVALID_MSN_GAP, | ||
77 | CCAE_IRRQ_OVERFLOW, | ||
78 | CCAE_IRRQ_MSN_GAP, | ||
79 | CCAE_IRRQ_MSN_RANGE, | ||
80 | CCAE_IRRQ_INVALID_STAG, | ||
81 | CCAE_IRRQ_BASE_BOUNDS_VIOLATION, | ||
82 | CCAE_IRRQ_ACCESS_RIGHTS_VIOLATION, | ||
83 | CCAE_IRRQ_INVALID_PD, | ||
84 | CCAE_IRRQ_WRAP_ERROR, | ||
85 | CCAE_CQ_SQ_COMPLETION_OVERFLOW, | ||
86 | CCAE_CQ_RQ_COMPLETION_ERROR, | ||
87 | CCAE_QP_SRQ_WQE_ERROR, | ||
88 | CCAE_QP_LOCAL_CATASTROPHIC_ERROR, | ||
89 | CCAE_CQ_OVERFLOW, | ||
90 | CCAE_CQ_OPERATION_ERROR, | ||
91 | CCAE_SRQ_LIMIT_REACHED, | ||
92 | CCAE_QP_RQ_LIMIT_REACHED, | ||
93 | CCAE_SRQ_CATASTROPHIC_ERROR, | ||
94 | CCAE_RNIC_CATASTROPHIC_ERROR | ||
95 | /* WARNING: If you add more IDs, make sure their values fit in eight bits. */ | ||
96 | }; | ||
97 | |||
98 | /* | ||
99 | * Resource Indicators and Identifiers | ||
100 | */ | ||
101 | enum c2_resource_indicator { | ||
102 | C2_RES_IND_QP = 1, | ||
103 | C2_RES_IND_EP, | ||
104 | C2_RES_IND_CQ, | ||
105 | C2_RES_IND_SRQ, | ||
106 | }; | ||
107 | |||
108 | #endif /* _C2_AE_H_ */ | ||
diff --git a/drivers/infiniband/hw/amso1100/c2_alloc.c b/drivers/infiniband/hw/amso1100/c2_alloc.c new file mode 100644 index 000000000000..1d2529992c0c --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_alloc.c | |||
@@ -0,0 +1,144 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2004 Topspin Communications. All rights reserved. | ||
3 | * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | |||
34 | #include <linux/errno.h> | ||
35 | #include <linux/slab.h> | ||
36 | #include <linux/bitmap.h> | ||
37 | |||
38 | #include "c2.h" | ||
39 | |||
40 | static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask, | ||
41 | struct sp_chunk **head) | ||
42 | { | ||
43 | int i; | ||
44 | struct sp_chunk *new_head; | ||
45 | |||
46 | new_head = (struct sp_chunk *) __get_free_page(gfp_mask); | ||
47 | if (new_head == NULL) | ||
48 | return -ENOMEM; | ||
49 | |||
50 | new_head->dma_addr = dma_map_single(c2dev->ibdev.dma_device, new_head, | ||
51 | PAGE_SIZE, DMA_FROM_DEVICE); | ||
52 | pci_unmap_addr_set(new_head, mapping, new_head->dma_addr); | ||
53 | |||
54 | new_head->next = NULL; | ||
55 | new_head->head = 0; | ||
56 | |||
57 | /* build list where each index is the next free slot */ | ||
58 | for (i = 0; | ||
59 | i < (PAGE_SIZE - sizeof(struct sp_chunk) - | ||
60 | sizeof(u16)) / sizeof(u16) - 1; | ||
61 | i++) { | ||
62 | new_head->shared_ptr[i] = i + 1; | ||
63 | } | ||
64 | /* terminate list */ | ||
65 | new_head->shared_ptr[i] = 0xFFFF; | ||
66 | |||
67 | *head = new_head; | ||
68 | return 0; | ||
69 | } | ||
70 | |||
71 | int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask, | ||
72 | struct sp_chunk **root) | ||
73 | { | ||
74 | return c2_alloc_mqsp_chunk(c2dev, gfp_mask, root); | ||
75 | } | ||
76 | |||
77 | void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root) | ||
78 | { | ||
79 | struct sp_chunk *next; | ||
80 | |||
81 | while (root) { | ||
82 | next = root->next; | ||
83 | dma_unmap_single(c2dev->ibdev.dma_device, | ||
84 | pci_unmap_addr(root, mapping), PAGE_SIZE, | ||
85 | DMA_FROM_DEVICE); | ||
86 | __free_page((struct page *) root); | ||
87 | root = next; | ||
88 | } | ||
89 | } | ||
90 | |||
91 | u16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head, | ||
92 | dma_addr_t *dma_addr, gfp_t gfp_mask) | ||
93 | { | ||
94 | u16 mqsp; | ||
95 | |||
96 | while (head) { | ||
97 | mqsp = head->head; | ||
98 | if (mqsp != 0xFFFF) { | ||
99 | head->head = head->shared_ptr[mqsp]; | ||
100 | break; | ||
101 | } else if (head->next == NULL) { | ||
102 | if (c2_alloc_mqsp_chunk(c2dev, gfp_mask, &head->next) == | ||
103 | 0) { | ||
104 | head = head->next; | ||
105 | mqsp = head->head; | ||
106 | head->head = head->shared_ptr[mqsp]; | ||
107 | break; | ||
108 | } else | ||
109 | return NULL; | ||
110 | } else | ||
111 | head = head->next; | ||
112 | } | ||
113 | if (head) { | ||
114 | *dma_addr = head->dma_addr + | ||
115 | ((unsigned long) &(head->shared_ptr[mqsp]) - | ||
116 | (unsigned long) head); | ||
117 | pr_debug("%s addr %p dma_addr %llx\n", __FUNCTION__, | ||
118 | &(head->shared_ptr[mqsp]), (u64)*dma_addr); | ||
119 | return &(head->shared_ptr[mqsp]); | ||
120 | } | ||
121 | return NULL; | ||
122 | } | ||
123 | |||
124 | void c2_free_mqsp(u16 * mqsp) | ||
125 | { | ||
126 | struct sp_chunk *head; | ||
127 | u16 idx; | ||
128 | |||
129 | /* The chunk containing this ptr begins at the page boundary */ | ||
130 | head = (struct sp_chunk *) ((unsigned long) mqsp & PAGE_MASK); | ||
131 | |||
132 | /* Compute this slot's shared_ptr index */ | ||
133 | idx = ((unsigned long) mqsp & ~PAGE_MASK) >> 1; | ||
134 | idx -= (unsigned long) &(((struct sp_chunk *) 0)->shared_ptr[0]) >> 1; | ||
135 | |||
136 | /* Chain the freed slot to the old head (mqsp == &head->shared_ptr[idx]) */ | ||
137 | head->shared_ptr[idx] = head->head; | ||
138 | |||
139 | /* Make the freed slot the new head */ | ||
140 | head->head = idx; | ||
144 | } | ||
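Taken together, the pool lifecycle is: create the root chunk once per device, hand out u16 slots (plus their DMA addresses) to clients, and return every slot before tearing the pool down. A condensed usage sketch built from the functions above (error handling trimmed; mqsp_lifecycle_sketch is illustrative and assumes an already-probed c2dev):

static int mqsp_lifecycle_sketch(struct c2_dev *c2dev)
{
	struct sp_chunk *pool;
	dma_addr_t dma;
	u16 *sp;

	if (c2_init_mqsp_pool(c2dev, GFP_KERNEL, &pool))
		return -ENOMEM;

	sp = c2_alloc_mqsp(c2dev, pool, &dma, GFP_KERNEL);
	if (sp) {
		*sp = 0;		/* host word the adapter will DMA into */
		c2_free_mqsp(sp);	/* slot goes back on its chunk's free list */
	}

	c2_free_mqsp_pool(c2dev, pool);
	return 0;
}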
diff --git a/drivers/infiniband/hw/amso1100/c2_cm.c b/drivers/infiniband/hw/amso1100/c2_cm.c new file mode 100644 index 000000000000..485254efdd1e --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_cm.c | |||
@@ -0,0 +1,452 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005 Ammasso, Inc. All rights reserved. | ||
3 | * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | * | ||
33 | */ | ||
34 | #include "c2.h" | ||
35 | #include "c2_wr.h" | ||
36 | #include "c2_vq.h" | ||
37 | #include <rdma/iw_cm.h> | ||
38 | |||
39 | int c2_llp_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param) | ||
40 | { | ||
41 | struct c2_dev *c2dev = to_c2dev(cm_id->device); | ||
42 | struct ib_qp *ibqp; | ||
43 | struct c2_qp *qp; | ||
44 | struct c2wr_qp_connect_req *wr; /* variable size needs a malloc. */ | ||
45 | struct c2_vq_req *vq_req; | ||
46 | int err; | ||
47 | |||
48 | ibqp = c2_get_qp(cm_id->device, iw_param->qpn); | ||
49 | if (!ibqp) | ||
50 | return -EINVAL; | ||
51 | qp = to_c2qp(ibqp); | ||
52 | |||
53 | /* Associate QP <--> CM_ID */ | ||
54 | cm_id->provider_data = qp; | ||
55 | cm_id->add_ref(cm_id); | ||
56 | qp->cm_id = cm_id; | ||
57 | |||
58 | /* | ||
59 | * only support the max private_data length | ||
60 | */ | ||
61 | if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) { | ||
62 | err = -EINVAL; | ||
63 | goto bail0; | ||
64 | } | ||
65 | /* | ||
66 | * Set the rdma read limits | ||
67 | */ | ||
68 | err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird); | ||
69 | if (err) | ||
70 | goto bail0; | ||
71 | |||
72 | /* | ||
73 | * Create and send a WR_QP_CONNECT... | ||
74 | */ | ||
75 | wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL); | ||
76 | if (!wr) { | ||
77 | err = -ENOMEM; | ||
78 | goto bail0; | ||
79 | } | ||
80 | |||
81 | vq_req = vq_req_alloc(c2dev); | ||
82 | if (!vq_req) { | ||
83 | err = -ENOMEM; | ||
84 | goto bail1; | ||
85 | } | ||
86 | |||
87 | c2_wr_set_id(wr, CCWR_QP_CONNECT); | ||
88 | wr->hdr.context = 0; | ||
89 | wr->rnic_handle = c2dev->adapter_handle; | ||
90 | wr->qp_handle = qp->adapter_handle; | ||
91 | |||
92 | wr->remote_addr = cm_id->remote_addr.sin_addr.s_addr; | ||
93 | wr->remote_port = cm_id->remote_addr.sin_port; | ||
94 | |||
95 | /* | ||
96 | * Move any private data from the caller's buffer into | ||
97 | * the WR. | ||
98 | */ | ||
99 | if (iw_param->private_data) { | ||
100 | wr->private_data_length = | ||
101 | cpu_to_be32(iw_param->private_data_len); | ||
102 | memcpy(&wr->private_data[0], iw_param->private_data, | ||
103 | iw_param->private_data_len); | ||
104 | } else | ||
105 | wr->private_data_length = 0; | ||
106 | |||
107 | /* | ||
108 | * Send WR to adapter. NOTE: There is no synch reply from | ||
109 | * the adapter. | ||
110 | */ | ||
111 | err = vq_send_wr(c2dev, (union c2wr *) wr); | ||
112 | vq_req_free(c2dev, vq_req); | ||
113 | |||
114 | bail1: | ||
115 | kfree(wr); | ||
116 | bail0: | ||
117 | if (err) { | ||
118 | /* | ||
119 | * If we fail, release reference on QP and | ||
120 | * disassociate QP from CM_ID | ||
121 | */ | ||
122 | cm_id->provider_data = NULL; | ||
123 | qp->cm_id = NULL; | ||
124 | cm_id->rem_ref(cm_id); | ||
125 | } | ||
126 | return err; | ||
127 | } | ||
128 | |||
129 | int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog) | ||
130 | { | ||
131 | struct c2_dev *c2dev; | ||
132 | struct c2wr_ep_listen_create_req wr; | ||
133 | struct c2wr_ep_listen_create_rep *reply; | ||
134 | struct c2_vq_req *vq_req; | ||
135 | int err; | ||
136 | |||
137 | c2dev = to_c2dev(cm_id->device); | ||
138 | if (c2dev == NULL) | ||
139 | return -EINVAL; | ||
140 | |||
141 | /* | ||
142 | * Allocate verbs request. | ||
143 | */ | ||
144 | vq_req = vq_req_alloc(c2dev); | ||
145 | if (!vq_req) | ||
146 | return -ENOMEM; | ||
147 | |||
148 | /* | ||
149 | * Build the WR | ||
150 | */ | ||
151 | c2_wr_set_id(&wr, CCWR_EP_LISTEN_CREATE); | ||
152 | wr.hdr.context = (u64) (unsigned long) vq_req; | ||
153 | wr.rnic_handle = c2dev->adapter_handle; | ||
154 | wr.local_addr = cm_id->local_addr.sin_addr.s_addr; | ||
155 | wr.local_port = cm_id->local_addr.sin_port; | ||
156 | wr.backlog = cpu_to_be32(backlog); | ||
157 | wr.user_context = (u64) (unsigned long) cm_id; | ||
158 | |||
159 | /* | ||
160 | * Reference the request struct. Dereferenced in the int handler. | ||
161 | */ | ||
162 | vq_req_get(c2dev, vq_req); | ||
163 | |||
164 | /* | ||
165 | * Send WR to adapter | ||
166 | */ | ||
167 | err = vq_send_wr(c2dev, (union c2wr *) &wr); | ||
168 | if (err) { | ||
169 | vq_req_put(c2dev, vq_req); | ||
170 | goto bail0; | ||
171 | } | ||
172 | |||
173 | /* | ||
174 | * Wait for reply from adapter | ||
175 | */ | ||
176 | err = vq_wait_for_reply(c2dev, vq_req); | ||
177 | if (err) | ||
178 | goto bail0; | ||
179 | |||
180 | /* | ||
181 | * Process reply | ||
182 | */ | ||
183 | reply = | ||
184 | (struct c2wr_ep_listen_create_rep *) (unsigned long) vq_req->reply_msg; | ||
185 | if (!reply) { | ||
186 | err = -ENOMEM; | ||
187 | goto bail1; | ||
188 | } | ||
189 | |||
190 | if ((err = c2_errno(reply)) != 0) | ||
191 | goto bail1; | ||
192 | |||
193 | /* | ||
194 | * Keep the adapter handle. Used in subsequent destroy | ||
195 | */ | ||
196 | cm_id->provider_data = (void*)(unsigned long) reply->ep_handle; | ||
197 | |||
198 | /* | ||
199 | * free vq stuff | ||
200 | */ | ||
201 | vq_repbuf_free(c2dev, reply); | ||
202 | vq_req_free(c2dev, vq_req); | ||
203 | |||
204 | return 0; | ||
205 | |||
206 | bail1: | ||
207 | vq_repbuf_free(c2dev, reply); | ||
208 | bail0: | ||
209 | vq_req_free(c2dev, vq_req); | ||
210 | return err; | ||
211 | } | ||
212 | |||
213 | |||
214 | int c2_llp_service_destroy(struct iw_cm_id *cm_id) | ||
215 | { | ||
216 | |||
217 | struct c2_dev *c2dev; | ||
218 | struct c2wr_ep_listen_destroy_req wr; | ||
219 | struct c2wr_ep_listen_destroy_rep *reply; | ||
220 | struct c2_vq_req *vq_req; | ||
221 | int err; | ||
222 | |||
223 | c2dev = to_c2dev(cm_id->device); | ||
224 | if (c2dev == NULL) | ||
225 | return -EINVAL; | ||
226 | |||
227 | /* | ||
228 | * Allocate verbs request. | ||
229 | */ | ||
230 | vq_req = vq_req_alloc(c2dev); | ||
231 | if (!vq_req) | ||
232 | return -ENOMEM; | ||
233 | |||
234 | /* | ||
235 | * Build the WR | ||
236 | */ | ||
237 | c2_wr_set_id(&wr, CCWR_EP_LISTEN_DESTROY); | ||
238 | wr.hdr.context = (unsigned long) vq_req; | ||
239 | wr.rnic_handle = c2dev->adapter_handle; | ||
240 | wr.ep_handle = (u32)(unsigned long)cm_id->provider_data; | ||
241 | |||
242 | /* | ||
243 | * reference the request struct. dereferenced in the int handler. | ||
244 | */ | ||
245 | vq_req_get(c2dev, vq_req); | ||
246 | |||
247 | /* | ||
248 | * Send WR to adapter | ||
249 | */ | ||
250 | err = vq_send_wr(c2dev, (union c2wr *) &wr); | ||
251 | if (err) { | ||
252 | vq_req_put(c2dev, vq_req); | ||
253 | goto bail0; | ||
254 | } | ||
255 | |||
256 | /* | ||
257 | * Wait for reply from adapter | ||
258 | */ | ||
259 | err = vq_wait_for_reply(c2dev, vq_req); | ||
260 | if (err) | ||
261 | goto bail0; | ||
262 | |||
263 | /* | ||
264 | * Process reply | ||
265 | */ | ||
266 | reply = (struct c2wr_ep_listen_destroy_rep *) (unsigned long) vq_req->reply_msg; | ||
267 | if (!reply) { | ||
268 | err = -ENOMEM; | ||
269 | goto bail0; | ||
270 | } | ||
271 | err = c2_errno(reply); | ||
273 | |||
274 | bail1: | ||
275 | vq_repbuf_free(c2dev, reply); | ||
276 | bail0: | ||
277 | vq_req_free(c2dev, vq_req); | ||
278 | return err; | ||
279 | } | ||
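Each control-path entry point in this file repeats the same verbs-queue round trip: allocate a vq_req, point the WR's hdr.context back at it, hold a reference across the send, wait for the adapter's reply, and translate the result with c2_errno(). A skeleton of that pattern with the WR type left abstract (c2wr_some_req/c2wr_some_rep and CCWR_SOME_ID are placeholders, not real driver symbols):

static int c2_vq_roundtrip_sketch(struct c2_dev *c2dev)
{
	struct c2wr_some_req wr;	/* placeholder fixed-size WR */
	struct c2wr_some_rep *reply;
	struct c2_vq_req *vq_req;
	int err;

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	c2_wr_set_id(&wr, CCWR_SOME_ID);	/* placeholder opcode */
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;

	vq_req_get(c2dev, vq_req);	/* dropped by the interrupt handler */
	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto out;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto out;

	reply = (struct c2wr_some_rep *) (unsigned long) vq_req->reply_msg;
	err = reply ? c2_errno(reply) : -ENOMEM;
	if (reply)
		vq_repbuf_free(c2dev, reply);
out:
	vq_req_free(c2dev, vq_req);
	return err;
}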
280 | |||
281 | int c2_llp_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param) | ||
282 | { | ||
283 | struct c2_dev *c2dev = to_c2dev(cm_id->device); | ||
284 | struct c2_qp *qp; | ||
285 | struct ib_qp *ibqp; | ||
286 | struct c2wr_cr_accept_req *wr; /* variable length WR */ | ||
287 | struct c2_vq_req *vq_req; | ||
288 | struct c2wr_cr_accept_rep *reply; /* VQ Reply msg ptr. */ | ||
289 | int err; | ||
290 | |||
291 | ibqp = c2_get_qp(cm_id->device, iw_param->qpn); | ||
292 | if (!ibqp) | ||
293 | return -EINVAL; | ||
294 | qp = to_c2qp(ibqp); | ||
295 | |||
296 | /* Set the RDMA read limits */ | ||
297 | err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird); | ||
298 | if (err) | ||
299 | goto bail0; | ||
300 | |||
301 | /* Allocate verbs request. */ | ||
302 | vq_req = vq_req_alloc(c2dev); | ||
303 | if (!vq_req) { | ||
304 | err = -ENOMEM; | ||
305 | goto bail1; | ||
306 | } | ||
307 | vq_req->qp = qp; | ||
308 | vq_req->cm_id = cm_id; | ||
309 | vq_req->event = IW_CM_EVENT_ESTABLISHED; | ||
310 | |||
311 | wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL); | ||
312 | if (!wr) { | ||
313 | err = -ENOMEM; | ||
314 | goto bail2; | ||
315 | } | ||
316 | |||
317 | /* Build the WR */ | ||
318 | c2_wr_set_id(wr, CCWR_CR_ACCEPT); | ||
319 | wr->hdr.context = (unsigned long) vq_req; | ||
320 | wr->rnic_handle = c2dev->adapter_handle; | ||
321 | wr->ep_handle = (u32) (unsigned long) cm_id->provider_data; | ||
322 | wr->qp_handle = qp->adapter_handle; | ||
323 | |||
324 | /* Replace the cr_handle with the QP after accept */ | ||
325 | cm_id->provider_data = qp; | ||
326 | cm_id->add_ref(cm_id); | ||
327 | qp->cm_id = cm_id; | ||
330 | |||
331 | /* Validate private_data length */ | ||
332 | if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) { | ||
333 | err = -EINVAL; | ||
334 | goto bail2; | ||
335 | } | ||
336 | |||
337 | if (iw_param->private_data) { | ||
338 | wr->private_data_length = cpu_to_be32(iw_param->private_data_len); | ||
339 | memcpy(&wr->private_data[0], | ||
340 | iw_param->private_data, iw_param->private_data_len); | ||
341 | } else | ||
342 | wr->private_data_length = 0; | ||
343 | |||
344 | /* Reference the request struct. Dereferenced in the int handler. */ | ||
345 | vq_req_get(c2dev, vq_req); | ||
346 | |||
347 | /* Send WR to adapter */ | ||
348 | err = vq_send_wr(c2dev, (union c2wr *) wr); | ||
349 | if (err) { | ||
350 | vq_req_put(c2dev, vq_req); | ||
351 | goto bail2; | ||
352 | } | ||
353 | |||
354 | /* Wait for reply from adapter */ | ||
355 | err = vq_wait_for_reply(c2dev, vq_req); | ||
356 | if (err) | ||
357 | goto bail2; | ||
358 | |||
359 | /* Check that reply is present */ | ||
360 | reply = (struct c2wr_cr_accept_rep *) (unsigned long) vq_req->reply_msg; | ||
361 | if (!reply) { | ||
362 | err = -ENOMEM; | ||
363 | goto bail2; | ||
364 | } | ||
365 | |||
366 | err = c2_errno(reply); | ||
367 | vq_repbuf_free(c2dev, reply); | ||
368 | |||
369 | if (!err) | ||
370 | c2_set_qp_state(qp, C2_QP_STATE_RTS); | ||
371 | bail2: | ||
372 | kfree(wr); | ||
373 | bail1: | ||
374 | vq_req_free(c2dev, vq_req); | ||
375 | bail0: | ||
376 | if (err) { | ||
377 | /* | ||
378 | * If we fail, release reference on QP and | ||
379 | * disassociate QP from CM_ID | ||
380 | */ | ||
381 | cm_id->provider_data = NULL; | ||
382 | qp->cm_id = NULL; | ||
383 | cm_id->rem_ref(cm_id); | ||
384 | } | ||
385 | return err; | ||
386 | } | ||
387 | |||
388 | int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) | ||
389 | { | ||
390 | struct c2_dev *c2dev; | ||
391 | struct c2wr_cr_reject_req wr; | ||
392 | struct c2_vq_req *vq_req; | ||
393 | struct c2wr_cr_reject_rep *reply; | ||
394 | int err; | ||
395 | |||
396 | c2dev = to_c2dev(cm_id->device); | ||
397 | |||
398 | /* | ||
399 | * Allocate verbs request. | ||
400 | */ | ||
401 | vq_req = vq_req_alloc(c2dev); | ||
402 | if (!vq_req) | ||
403 | return -ENOMEM; | ||
404 | |||
405 | /* | ||
406 | * Build the WR | ||
407 | */ | ||
408 | c2_wr_set_id(&wr, CCWR_CR_REJECT); | ||
409 | wr.hdr.context = (unsigned long) vq_req; | ||
410 | wr.rnic_handle = c2dev->adapter_handle; | ||
411 | wr.ep_handle = (u32) (unsigned long) cm_id->provider_data; | ||
412 | |||
413 | /* | ||
414 | * reference the request struct. dereferenced in the int handler. | ||
415 | */ | ||
416 | vq_req_get(c2dev, vq_req); | ||
417 | |||
418 | /* | ||
419 | * Send WR to adapter | ||
420 | */ | ||
421 | err = vq_send_wr(c2dev, (union c2wr *) &wr); | ||
422 | if (err) { | ||
423 | vq_req_put(c2dev, vq_req); | ||
424 | goto bail0; | ||
425 | } | ||
426 | |||
427 | /* | ||
428 | * Wait for reply from adapter | ||
429 | */ | ||
430 | err = vq_wait_for_reply(c2dev, vq_req); | ||
431 | if (err) | ||
432 | goto bail0; | ||
433 | |||
434 | /* | ||
435 | * Process reply | ||
436 | */ | ||
437 | reply = (struct c2wr_cr_reject_rep *) (unsigned long) | ||
438 | vq_req->reply_msg; | ||
439 | if (!reply) { | ||
440 | err = -ENOMEM; | ||
441 | goto bail0; | ||
442 | } | ||
443 | err = c2_errno(reply); | ||
444 | /* | ||
445 | * free vq stuff | ||
446 | */ | ||
447 | vq_repbuf_free(c2dev, reply); | ||
448 | |||
449 | bail0: | ||
450 | vq_req_free(c2dev, vq_req); | ||
451 | return err; | ||
452 | } | ||
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c new file mode 100644 index 000000000000..9d7bcc5ade93 --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_cq.c | |||
@@ -0,0 +1,433 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. | ||
3 | * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. | ||
4 | * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved. | ||
5 | * Copyright (c) 2005 Mellanox Technologies. All rights reserved. | ||
6 | * Copyright (c) 2004 Voltaire, Inc. All rights reserved. | ||
7 | * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. | ||
8 | * | ||
9 | * This software is available to you under a choice of one of two | ||
10 | * licenses. You may choose to be licensed under the terms of the GNU | ||
11 | * General Public License (GPL) Version 2, available from the file | ||
12 | * COPYING in the main directory of this source tree, or the | ||
13 | * OpenIB.org BSD license below: | ||
14 | * | ||
15 | * Redistribution and use in source and binary forms, with or | ||
16 | * without modification, are permitted provided that the following | ||
17 | * conditions are met: | ||
18 | * | ||
19 | * - Redistributions of source code must retain the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer. | ||
22 | * | ||
23 | * - Redistributions in binary form must reproduce the above | ||
24 | * copyright notice, this list of conditions and the following | ||
25 | * disclaimer in the documentation and/or other materials | ||
26 | * provided with the distribution. | ||
27 | * | ||
28 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
29 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
30 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
31 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
32 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
33 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
34 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
35 | * SOFTWARE. | ||
36 | * | ||
37 | */ | ||
38 | #include "c2.h" | ||
39 | #include "c2_vq.h" | ||
40 | #include "c2_status.h" | ||
41 | |||
42 | #define C2_CQ_MSG_SIZE ((sizeof(struct c2wr_ce) + 32-1) & ~(32-1)) | ||
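The macro above is the usual round-up-to-alignment idiom, padding each completion message to a 32-byte boundary; spelled generically:

#define ROUND_UP_32(x)	(((x) + 31) & ~31)
/* C2_CQ_MSG_SIZE == ROUND_UP_32(sizeof(struct c2wr_ce)) */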
43 | |||
44 | static struct c2_cq *c2_cq_get(struct c2_dev *c2dev, int cqn) | ||
45 | { | ||
46 | struct c2_cq *cq; | ||
47 | unsigned long flags; | ||
48 | |||
49 | spin_lock_irqsave(&c2dev->lock, flags); | ||
50 | cq = c2dev->qptr_array[cqn]; | ||
51 | if (!cq) { | ||
52 | spin_unlock_irqrestore(&c2dev->lock, flags); | ||
53 | return NULL; | ||
54 | } | ||
55 | atomic_inc(&cq->refcount); | ||
56 | spin_unlock_irqrestore(&c2dev->lock, flags); | ||
57 | return cq; | ||
58 | } | ||
59 | |||
60 | static void c2_cq_put(struct c2_cq *cq) | ||
61 | { | ||
62 | if (atomic_dec_and_test(&cq->refcount)) | ||
63 | wake_up(&cq->wait); | ||
64 | } | ||
65 | |||
66 | void c2_cq_event(struct c2_dev *c2dev, u32 mq_index) | ||
67 | { | ||
68 | struct c2_cq *cq; | ||
69 | |||
70 | cq = c2_cq_get(c2dev, mq_index); | ||
71 | if (!cq) { | ||
72 | printk("discarding events on destroyed CQN=%d\n", mq_index); | ||
73 | return; | ||
74 | } | ||
75 | |||
76 | (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); | ||
77 | c2_cq_put(cq); | ||
78 | } | ||
79 | |||
80 | void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index) | ||
81 | { | ||
82 | struct c2_cq *cq; | ||
83 | struct c2_mq *q; | ||
84 | |||
85 | cq = c2_cq_get(c2dev, mq_index); | ||
86 | if (!cq) | ||
87 | return; | ||
88 | |||
89 | spin_lock_irq(&cq->lock); | ||
90 | q = &cq->mq; | ||
91 | if (!c2_mq_empty(q)) { | ||
92 | u16 priv = q->priv; | ||
93 | struct c2wr_ce *msg; | ||
94 | |||
95 | while (priv != be16_to_cpu(*q->shared)) { | ||
96 | msg = (struct c2wr_ce *) | ||
97 | (q->msg_pool.host + priv * q->msg_size); | ||
98 | if (msg->qp_user_context == (u64) (unsigned long) qp) { | ||
99 | msg->qp_user_context = (u64) 0; | ||
100 | } | ||
101 | priv = (priv + 1) % q->q_size; | ||
102 | } | ||
103 | } | ||
104 | spin_unlock_irq(&cq->lock); | ||
105 | c2_cq_put(cq); | ||
106 | } | ||
107 | |||
108 | static inline enum ib_wc_status c2_cqe_status_to_openib(u8 status) | ||
109 | { | ||
110 | switch (status) { | ||
111 | case C2_OK: | ||
112 | return IB_WC_SUCCESS; | ||
113 | case CCERR_FLUSHED: | ||
114 | return IB_WC_WR_FLUSH_ERR; | ||
115 | case CCERR_BASE_AND_BOUNDS_VIOLATION: | ||
116 | return IB_WC_LOC_PROT_ERR; | ||
117 | case CCERR_ACCESS_VIOLATION: | ||
118 | return IB_WC_LOC_ACCESS_ERR; | ||
119 | case CCERR_TOTAL_LENGTH_TOO_BIG: | ||
120 | return IB_WC_LOC_LEN_ERR; | ||
121 | case CCERR_INVALID_WINDOW: | ||
122 | return IB_WC_MW_BIND_ERR; | ||
123 | default: | ||
124 | return IB_WC_GENERAL_ERR; | ||
125 | } | ||
126 | } | ||
127 | |||
128 | |||
129 | static inline int c2_poll_one(struct c2_dev *c2dev, | ||
130 | struct c2_cq *cq, struct ib_wc *entry) | ||
131 | { | ||
132 | struct c2wr_ce *ce; | ||
133 | struct c2_qp *qp; | ||
134 | int is_recv = 0; | ||
135 | |||
136 | ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq); | ||
137 | if (!ce) { | ||
138 | return -EAGAIN; | ||
139 | } | ||
140 | |||
141 | /* | ||
142 | * If the QP returned is NULL then the QP has already | ||
143 | * been freed and we cannot process the completion. | ||
144 | * Try pulling the next message. | ||
145 | */ | ||
146 | while ((qp = | ||
147 | (struct c2_qp *) (unsigned long) ce->qp_user_context) == NULL) { | ||
148 | c2_mq_free(&cq->mq); | ||
149 | ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq); | ||
150 | if (!ce) | ||
151 | return -EAGAIN; | ||
152 | } | ||
153 | |||
154 | entry->status = c2_cqe_status_to_openib(c2_wr_get_result(ce)); | ||
155 | entry->wr_id = ce->hdr.context; | ||
156 | entry->qp_num = ce->handle; | ||
157 | entry->wc_flags = 0; | ||
158 | entry->slid = 0; | ||
159 | entry->sl = 0; | ||
160 | entry->src_qp = 0; | ||
161 | entry->dlid_path_bits = 0; | ||
162 | entry->pkey_index = 0; | ||
163 | |||
164 | switch (c2_wr_get_id(ce)) { | ||
165 | case C2_WR_TYPE_SEND: | ||
166 | entry->opcode = IB_WC_SEND; | ||
167 | break; | ||
168 | case C2_WR_TYPE_RDMA_WRITE: | ||
169 | entry->opcode = IB_WC_RDMA_WRITE; | ||
170 | break; | ||
171 | case C2_WR_TYPE_RDMA_READ: | ||
172 | entry->opcode = IB_WC_RDMA_READ; | ||
173 | break; | ||
174 | case C2_WR_TYPE_BIND_MW: | ||
175 | entry->opcode = IB_WC_BIND_MW; | ||
176 | break; | ||
177 | case C2_WR_TYPE_RECV: | ||
178 | entry->byte_len = be32_to_cpu(ce->bytes_rcvd); | ||
179 | entry->opcode = IB_WC_RECV; | ||
180 | is_recv = 1; | ||
181 | break; | ||
182 | default: | ||
183 | break; | ||
184 | } | ||
185 | |||
186 | /* consume the WQEs */ | ||
187 | if (is_recv) | ||
188 | c2_mq_lconsume(&qp->rq_mq, 1); | ||
189 | else | ||
190 | c2_mq_lconsume(&qp->sq_mq, | ||
191 | be32_to_cpu(c2_wr_get_wqe_count(ce)) + 1); | ||
192 | |||
193 | /* free the message */ | ||
194 | c2_mq_free(&cq->mq); | ||
195 | |||
196 | return 0; | ||
197 | } | ||
198 | |||
199 | int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) | ||
200 | { | ||
201 | struct c2_dev *c2dev = to_c2dev(ibcq->device); | ||
202 | struct c2_cq *cq = to_c2cq(ibcq); | ||
203 | unsigned long flags; | ||
204 | int npolled, err; | ||
205 | |||
206 | spin_lock_irqsave(&cq->lock, flags); | ||
207 | |||
208 | for (npolled = 0; npolled < num_entries; ++npolled) { | ||
209 | |||
210 | err = c2_poll_one(c2dev, cq, entry + npolled); | ||
211 | if (err) | ||
212 | break; | ||
213 | } | ||
214 | |||
215 | spin_unlock_irqrestore(&cq->lock, flags); | ||
216 | |||
217 | return npolled; | ||
218 | } | ||
219 | |||
220 | int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) | ||
221 | { | ||
222 | struct c2_mq_shared __iomem *shared; | ||
223 | struct c2_cq *cq; | ||
224 | |||
225 | cq = to_c2cq(ibcq); | ||
226 | shared = cq->mq.peer; | ||
227 | |||
228 | if (notify == IB_CQ_NEXT_COMP) | ||
229 | writeb(C2_CQ_NOTIFICATION_TYPE_NEXT, &shared->notification_type); | ||
230 | else if (notify == IB_CQ_SOLICITED) | ||
231 | writeb(C2_CQ_NOTIFICATION_TYPE_NEXT_SE, &shared->notification_type); | ||
232 | else | ||
233 | return -EINVAL; | ||
234 | |||
235 | writeb(CQ_WAIT_FOR_DMA | CQ_ARMED, &shared->armed); | ||
236 | |||
237 | /* | ||
238 | * Now read back shared->armed to make the PCI | ||
239 | * write synchronous. This is necessary for | ||
240 | * correct cq notification semantics. | ||
241 | */ | ||
242 | readb(&shared->armed); | ||
243 | |||
244 | return 0; | ||
245 | } | ||
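[Editor's note] c2_arm_cq() gives edge-triggered notification; the readb() above flushes the arming write so it is ordered before the verb returns. Consumers therefore need the usual arm-then-repoll idiom to avoid missing completions that arrive just before arming. A hedged sketch, with wait_for_more() an assumed name:

/* Assumed consumer-side idiom pairing with c2_arm_cq(). */
static void wait_for_more(struct ib_cq *cq)
{
	struct ib_wc wc;

	while (ib_poll_cq(cq, 1, &wc) > 0)
		;	/* process wc */

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

	/* Re-poll: a completion may have slipped in before arming. */
	while (ib_poll_cq(cq, 1, &wc) > 0)
		;	/* process wc */
}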
246 | |||
247 | static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq) | ||
248 | { | ||
250 | dma_unmap_single(c2dev->ibdev.dma_device, pci_unmap_addr(mq, mapping), | ||
251 | mq->q_size * mq->msg_size, DMA_FROM_DEVICE); | ||
252 | free_pages((unsigned long) mq->msg_pool.host, | ||
253 | get_order(mq->q_size * mq->msg_size)); | ||
254 | } | ||
255 | |||
256 | static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size, | ||
257 | int msg_size) | ||
258 | { | ||
259 | unsigned long pool_start; | ||
260 | |||
261 | pool_start = __get_free_pages(GFP_KERNEL, | ||
262 | get_order(q_size * msg_size)); | ||
263 | if (!pool_start) | ||
264 | return -ENOMEM; | ||
265 | |||
266 | c2_mq_rep_init(mq, | ||
267 | 0, /* index (currently unknown) */ | ||
268 | q_size, | ||
269 | msg_size, | ||
270 | (u8 *) pool_start, | ||
271 | NULL, /* peer (currently unknown) */ | ||
272 | C2_MQ_HOST_TARGET); | ||
273 | |||
274 | mq->host_dma = dma_map_single(c2dev->ibdev.dma_device, | ||
275 | (void *)pool_start, | ||
276 | q_size * msg_size, DMA_FROM_DEVICE); | ||
277 | pci_unmap_addr_set(mq, mapping, mq->host_dma); | ||
278 | |||
279 | return 0; | ||
280 | } | ||
281 | |||
282 | int c2_init_cq(struct c2_dev *c2dev, int entries, | ||
283 | struct c2_ucontext *ctx, struct c2_cq *cq) | ||
284 | { | ||
285 | struct c2wr_cq_create_req wr; | ||
286 | struct c2wr_cq_create_rep *reply; | ||
287 | unsigned long peer_pa; | ||
288 | struct c2_vq_req *vq_req; | ||
289 | int err; | ||
290 | |||
291 | might_sleep(); | ||
292 | |||
293 | cq->ibcq.cqe = entries - 1; | ||
294 | cq->is_kernel = !ctx; | ||
295 | |||
296 | /* Allocate a shared pointer */ | ||
297 | cq->mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool, | ||
298 | &cq->mq.shared_dma, GFP_KERNEL); | ||
299 | if (!cq->mq.shared) | ||
300 | return -ENOMEM; | ||
301 | |||
302 | /* Allocate pages for the message pool */ | ||
303 | err = c2_alloc_cq_buf(c2dev, &cq->mq, entries + 1, C2_CQ_MSG_SIZE); | ||
304 | if (err) | ||
305 | goto bail0; | ||
306 | |||
307 | vq_req = vq_req_alloc(c2dev); | ||
308 | if (!vq_req) { | ||
309 | err = -ENOMEM; | ||
310 | goto bail1; | ||
311 | } | ||
312 | |||
313 | memset(&wr, 0, sizeof(wr)); | ||
314 | c2_wr_set_id(&wr, CCWR_CQ_CREATE); | ||
315 | wr.hdr.context = (unsigned long) vq_req; | ||
316 | wr.rnic_handle = c2dev->adapter_handle; | ||
317 | wr.msg_size = cpu_to_be32(cq->mq.msg_size); | ||
318 | wr.depth = cpu_to_be32(cq->mq.q_size); | ||
319 | wr.shared_ht = cpu_to_be64(cq->mq.shared_dma); | ||
320 | wr.msg_pool = cpu_to_be64(cq->mq.host_dma); | ||
321 | wr.user_context = (u64) (unsigned long) (cq); | ||
322 | |||
323 | vq_req_get(c2dev, vq_req); | ||
324 | |||
325 | err = vq_send_wr(c2dev, (union c2wr *) &wr); | ||
326 | if (err) { | ||
327 | vq_req_put(c2dev, vq_req); | ||
328 | goto bail2; | ||
329 | } | ||
330 | |||
331 | err = vq_wait_for_reply(c2dev, vq_req); | ||
332 | if (err) | ||
333 | goto bail2; | ||
334 | |||
335 | reply = (struct c2wr_cq_create_rep *) (unsigned long) (vq_req->reply_msg); | ||
336 | if (!reply) { | ||
337 | err = -ENOMEM; | ||
338 | goto bail2; | ||
339 | } | ||
340 | |||
341 | if ((err = c2_errno(reply)) != 0) | ||
342 | goto bail3; | ||
343 | |||
344 | cq->adapter_handle = reply->cq_handle; | ||
345 | cq->mq.index = be32_to_cpu(reply->mq_index); | ||
346 | |||
347 | peer_pa = c2dev->pa + be32_to_cpu(reply->adapter_shared); | ||
348 | cq->mq.peer = ioremap_nocache(peer_pa, PAGE_SIZE); | ||
349 | if (!cq->mq.peer) { | ||
350 | err = -ENOMEM; | ||
351 | goto bail3; | ||
352 | } | ||
353 | |||
354 | vq_repbuf_free(c2dev, reply); | ||
355 | vq_req_free(c2dev, vq_req); | ||
356 | |||
357 | spin_lock_init(&cq->lock); | ||
358 | atomic_set(&cq->refcount, 1); | ||
359 | init_waitqueue_head(&cq->wait); | ||
360 | |||
361 | /* | ||
362 | * Use the MQ index allocated by the adapter to | ||
363 | * store the CQ in the qptr_array | ||
364 | */ | ||
365 | cq->cqn = cq->mq.index; | ||
366 | c2dev->qptr_array[cq->cqn] = cq; | ||
367 | |||
368 | return 0; | ||
369 | |||
370 | bail3: | ||
371 | vq_repbuf_free(c2dev, reply); | ||
372 | bail2: | ||
373 | vq_req_free(c2dev, vq_req); | ||
374 | bail1: | ||
375 | c2_free_cq_buf(c2dev, &cq->mq); | ||
376 | bail0: | ||
377 | c2_free_mqsp(cq->mq.shared); | ||
378 | |||
379 | return err; | ||
380 | } | ||
381 | |||
382 | void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq) | ||
383 | { | ||
384 | int err; | ||
385 | struct c2_vq_req *vq_req; | ||
386 | struct c2wr_cq_destroy_req wr; | ||
387 | struct c2wr_cq_destroy_rep *reply; | ||
388 | |||
389 | might_sleep(); | ||
390 | |||
391 | /* Clear CQ from the qptr array */ | ||
392 | spin_lock_irq(&c2dev->lock); | ||
393 | c2dev->qptr_array[cq->mq.index] = NULL; | ||
394 | atomic_dec(&cq->refcount); | ||
395 | spin_unlock_irq(&c2dev->lock); | ||
396 | |||
397 | wait_event(cq->wait, !atomic_read(&cq->refcount)); | ||
398 | |||
399 | vq_req = vq_req_alloc(c2dev); | ||
400 | if (!vq_req) { | ||
401 | goto bail0; | ||
402 | } | ||
403 | |||
404 | memset(&wr, 0, sizeof(wr)); | ||
405 | c2_wr_set_id(&wr, CCWR_CQ_DESTROY); | ||
406 | wr.hdr.context = (unsigned long) vq_req; | ||
407 | wr.rnic_handle = c2dev->adapter_handle; | ||
408 | wr.cq_handle = cq->adapter_handle; | ||
409 | |||
410 | vq_req_get(c2dev, vq_req); | ||
411 | |||
412 | err = vq_send_wr(c2dev, (union c2wr *) &wr); | ||
413 | if (err) { | ||
414 | vq_req_put(c2dev, vq_req); | ||
415 | goto bail1; | ||
416 | } | ||
417 | |||
418 | err = vq_wait_for_reply(c2dev, vq_req); | ||
419 | if (err) | ||
420 | goto bail1; | ||
421 | |||
422 | reply = (struct c2wr_cq_destroy_rep *) (unsigned long) (vq_req->reply_msg); | ||
423 | |||
424 | vq_repbuf_free(c2dev, reply); | ||
425 | bail1: | ||
426 | vq_req_free(c2dev, vq_req); | ||
427 | bail0: | ||
428 | if (cq->is_kernel) { | ||
429 | c2_free_cq_buf(c2dev, &cq->mq); | ||
430 | } | ||
431 | |||
432 | return; | ||
433 | } | ||
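[Editor's note] c2_free_cq() relies on a reference-count handshake: c2_init_cq() starts the CQ at refcount 1, destroy removes the qptr_array entry under the lock, drops that initial reference, and sleeps until in-flight event handlers drop theirs. The event side is not in this hunk; a plausible sketch of what it presumably does (an assumption, not quoted from c2_cq_event()):

/* Assumed shape of the event-side pairing (not shown in this hunk). */
static void example_cq_event_ref(struct c2_cq *cq)
{
	atomic_inc(&cq->refcount);	/* taken while holding c2dev->lock */
	/* ... deliver the completion event to the consumer ... */
	if (atomic_dec_and_test(&cq->refcount))
		wake_up(&cq->wait);	/* lets c2_free_cq() proceed */
}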
diff --git a/drivers/infiniband/hw/amso1100/c2_intr.c b/drivers/infiniband/hw/amso1100/c2_intr.c new file mode 100644 index 000000000000..0d0bc33ca30a --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_intr.c | |||
@@ -0,0 +1,209 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005 Ammasso, Inc. All rights reserved. | ||
3 | * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | #include "c2.h" | ||
34 | #include <rdma/iw_cm.h> | ||
35 | #include "c2_vq.h" | ||
36 | |||
37 | static void handle_mq(struct c2_dev *c2dev, u32 index); | ||
38 | static void handle_vq(struct c2_dev *c2dev, u32 mq_index); | ||
39 | |||
40 | /* | ||
41 | * Handle RNIC interrupts | ||
42 | */ | ||
43 | void c2_rnic_interrupt(struct c2_dev *c2dev) | ||
44 | { | ||
45 | unsigned int mq_index; | ||
46 | |||
47 | while (c2dev->hints_read != be16_to_cpu(*c2dev->hint_count)) { | ||
48 | mq_index = readl(c2dev->regs + PCI_BAR0_HOST_HINT); | ||
49 | if (mq_index & 0x80000000) { | ||
50 | break; | ||
51 | } | ||
52 | |||
53 | c2dev->hints_read++; | ||
54 | handle_mq(c2dev, mq_index); | ||
55 | } | ||
56 | |||
57 | } | ||
58 | |||
59 | /* | ||
60 | * Top level MQ handler | ||
61 | */ | ||
62 | static void handle_mq(struct c2_dev *c2dev, u32 mq_index) | ||
63 | { | ||
64 | if (c2dev->qptr_array[mq_index] == NULL) { | ||
65 | pr_debug("handle_mq: stray activity for mq_index=%d\n", | ||
66 | mq_index); | ||
67 | return; | ||
68 | } | ||
69 | |||
70 | switch (mq_index) { | ||
71 | case 0: | ||
72 | /* | ||
73 | * An index of 0 in the activity queue | ||
74 | * indicates the req vq now has messages | ||
75 | * available... | ||
76 | * | ||
77 | * Wake up any waiters waiting on req VQ | ||
78 | * message availability. | ||
79 | */ | ||
80 | wake_up(&c2dev->req_vq_wo); | ||
81 | break; | ||
82 | case 1: | ||
83 | handle_vq(c2dev, mq_index); | ||
84 | break; | ||
85 | case 2: | ||
86 | /* We have to purge the VQ in case there are pending | ||
87 | * accept reply requests that would result in the | ||
88 | * generation of an ESTABLISHED event. If we don't | ||
89 | * generate these first, a CLOSE event could end up | ||
90 | * being delivered before the ESTABLISHED event. | ||
91 | */ | ||
92 | handle_vq(c2dev, 1); | ||
93 | |||
94 | c2_ae_event(c2dev, mq_index); | ||
95 | break; | ||
96 | default: | ||
97 | /* There is no event synchronization between CQ events | ||
98 | * and AE or CM events. In fact, CQE could be | ||
99 | * delivered for all of the I/O up to and including the | ||
100 | * FLUSH for a peer disconnect prior to the ESTABLISHED | ||
101 | * event being delivered to the app. The reason for this | ||
102 | * is that CM events are delivered on a thread, while AE | ||
103 | * and CQ events are delivered in interrupt context. | ||
104 | */ | ||
105 | c2_cq_event(c2dev, mq_index); | ||
106 | break; | ||
107 | } | ||
108 | |||
109 | return; | ||
110 | } | ||
111 | |||
112 | /* | ||
113 | * Handles verbs WR replies. | ||
114 | */ | ||
115 | static void handle_vq(struct c2_dev *c2dev, u32 mq_index) | ||
116 | { | ||
117 | void *adapter_msg, *reply_msg; | ||
118 | struct c2wr_hdr *host_msg; | ||
119 | struct c2wr_hdr tmp; | ||
120 | struct c2_mq *reply_vq; | ||
121 | struct c2_vq_req *req; | ||
122 | struct iw_cm_event cm_event; | ||
123 | int err; | ||
124 | |||
125 | reply_vq = (struct c2_mq *) c2dev->qptr_array[mq_index]; | ||
126 | |||
127 | /* | ||
128 | * get next msg from mq_index into adapter_msg. | ||
129 | * don't free it yet. | ||
130 | */ | ||
131 | adapter_msg = c2_mq_consume(reply_vq); | ||
132 | if (adapter_msg == NULL) { | ||
133 | return; | ||
134 | } | ||
135 | |||
136 | host_msg = vq_repbuf_alloc(c2dev); | ||
137 | |||
138 | /* | ||
139 | * If we can't get a host buffer, then we'll still | ||
140 | * wake up the waiter; we just won't give it the msg. | ||
141 | * It is assumed the waiter will deal with this... | ||
142 | */ | ||
143 | if (!host_msg) { | ||
144 | pr_debug("handle_vq: no repbufs!\n"); | ||
145 | |||
146 | /* | ||
147 | * just copy the WR header into a local variable. | ||
148 | * this allows us to still demux on the context | ||
149 | */ | ||
150 | host_msg = &tmp; | ||
151 | memcpy(host_msg, adapter_msg, sizeof(tmp)); | ||
152 | reply_msg = NULL; | ||
153 | } else { | ||
154 | memcpy(host_msg, adapter_msg, reply_vq->msg_size); | ||
155 | reply_msg = host_msg; | ||
156 | } | ||
157 | |||
158 | /* | ||
159 | * consume the msg from the MQ | ||
160 | */ | ||
161 | c2_mq_free(reply_vq); | ||
162 | |||
163 | /* | ||
164 | * wakeup the waiter. | ||
165 | */ | ||
166 | req = (struct c2_vq_req *) (unsigned long) host_msg->context; | ||
167 | if (req == NULL) { | ||
168 | /* | ||
169 | * We should never get here, as the adapter should | ||
170 | * never send us a reply that we're not expecting. | ||
171 | */ | ||
172 | vq_repbuf_free(c2dev, host_msg); | ||
173 | pr_debug("handle_vq: UNEXPECTEDLY got NULL req\n"); | ||
174 | return; | ||
175 | } | ||
176 | |||
177 | err = c2_errno(reply_msg); | ||
178 | if (!err) switch (req->event) { | ||
179 | case IW_CM_EVENT_ESTABLISHED: | ||
180 | /* | ||
181 | * Move the QP to RTS on an ESTABLISHED | ||
182 | * event, then fall through to generate | ||
183 | * the CM event for the consumer. | ||
184 | */ | ||
185 | c2_set_qp_state(req->qp, C2_QP_STATE_RTS); | ||
186 | /* fall through */ | ||
187 | case IW_CM_EVENT_CLOSE: | ||
188 | cm_event.event = req->event; | ||
189 | cm_event.status = 0; | ||
190 | cm_event.local_addr = req->cm_id->local_addr; | ||
191 | cm_event.remote_addr = req->cm_id->remote_addr; | ||
192 | cm_event.private_data = NULL; | ||
193 | cm_event.private_data_len = 0; | ||
194 | req->cm_id->event_handler(req->cm_id, &cm_event); | ||
195 | break; | ||
196 | default: | ||
197 | break; | ||
198 | } | ||
199 | |||
200 | req->reply_msg = (u64) (unsigned long) (reply_msg); | ||
201 | atomic_set(&req->reply_ready, 1); | ||
202 | wake_up(&req->wait_object); | ||
203 | |||
204 | /* | ||
205 | * If the request was cancelled, then this put will | ||
206 | * free the vq_req memory...and reply_msg!!! | ||
207 | */ | ||
208 | vq_req_put(c2dev, req); | ||
209 | } | ||
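[Editor's note] handle_vq() is the waker half of a request/reply rendezvous: the verbs caller stashes its vq_req pointer in the WR header context, sends the WR, and blocks until this interrupt path sets reply_ready and wakes wait_object. The waiter half lives in c2_vq.c and is not in this hunk; a hedged sketch using only the fields visible above:

/* Illustrative waiter; the patch's actual vq_wait_for_reply() may differ. */
static int example_wait_for_reply(struct c2_vq_req *req)
{
	/* Sleep until handle_vq() publishes the reply and wakes us. */
	wait_event(req->wait_object, atomic_read(&req->reply_ready));

	/* reply_msg may be 0 if no host reply buffer was available. */
	return req->reply_msg ? 0 : -ENOMEM;
}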
diff --git a/drivers/infiniband/hw/amso1100/c2_mm.c b/drivers/infiniband/hw/amso1100/c2_mm.c new file mode 100644 index 000000000000..1e4f46493fcb --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_mm.c | |||
@@ -0,0 +1,375 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005 Ammasso, Inc. All rights reserved. | ||
3 | * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | #include "c2.h" | ||
34 | #include "c2_vq.h" | ||
35 | |||
36 | #define PBL_VIRT 1 | ||
37 | #define PBL_PHYS 2 | ||
38 | |||
39 | /* | ||
40 | * Send all the PBL messages to convey the remainder of the PBL | ||
41 | * Wait for the adapter's reply on the last one. | ||
42 | * This is indicated by setting the MEM_PBL_COMPLETE in the flags. | ||
43 | * | ||
44 | * NOTE: vq_req is _not_ freed by this function. The VQ Host | ||
45 | * Reply buffer _is_ freed by this function. | ||
46 | */ | ||
47 | static int | ||
48 | send_pbl_messages(struct c2_dev *c2dev, u32 stag_index, | ||
49 | unsigned long va, u32 pbl_depth, | ||
50 | struct c2_vq_req *vq_req, int pbl_type) | ||
51 | { | ||
52 | u32 pbe_count; /* amt that fits in a PBL msg */ | ||
53 | u32 count; /* amt in this PBL MSG. */ | ||
54 | struct c2wr_nsmr_pbl_req *wr; /* PBL WR ptr */ | ||
55 | struct c2wr_nsmr_pbl_rep *reply; /* reply ptr */ | ||
56 | int err, pbl_virt, pbl_index, i; | ||
57 | |||
58 | switch (pbl_type) { | ||
59 | case PBL_VIRT: | ||
60 | pbl_virt = 1; | ||
61 | break; | ||
62 | case PBL_PHYS: | ||
63 | pbl_virt = 0; | ||
64 | break; | ||
65 | default: | ||
66 | return -EINVAL; | ||
68 | } | ||
69 | |||
70 | pbe_count = (c2dev->req_vq.msg_size - | ||
71 | sizeof(struct c2wr_nsmr_pbl_req)) / sizeof(u64); | ||
72 | wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL); | ||
73 | if (!wr) { | ||
74 | return -ENOMEM; | ||
75 | } | ||
76 | c2_wr_set_id(wr, CCWR_NSMR_PBL); | ||
77 | |||
78 | /* | ||
79 | * Only the last PBL message will generate a reply from the verbs, | ||
80 | * so we set the context to 0 indicating there is no kernel verbs | ||
81 | * handler blocked awaiting this reply. | ||
82 | */ | ||
83 | wr->hdr.context = 0; | ||
84 | wr->rnic_handle = c2dev->adapter_handle; | ||
85 | wr->stag_index = stag_index; /* already swapped */ | ||
86 | wr->flags = 0; | ||
87 | pbl_index = 0; | ||
88 | while (pbl_depth) { | ||
89 | count = min(pbe_count, pbl_depth); | ||
90 | wr->addrs_length = cpu_to_be32(count); | ||
91 | |||
92 | /* | ||
93 | * If this is the last message, then reference the | ||
94 | * vq request struct because we will wait for a reply. | ||
95 | * Also mark this PBL msg as the last one. | ||
96 | */ | ||
97 | if (count == pbl_depth) { | ||
98 | /* | ||
99 | * reference the request struct. dereferenced in the | ||
100 | * int handler. | ||
101 | */ | ||
102 | vq_req_get(c2dev, vq_req); | ||
103 | wr->flags = cpu_to_be32(MEM_PBL_COMPLETE); | ||
104 | |||
105 | /* | ||
106 | * This is the last PBL message. | ||
107 | * Set the context to our VQ Request Object so we can | ||
108 | * wait for the reply. | ||
109 | */ | ||
110 | wr->hdr.context = (unsigned long) vq_req; | ||
111 | } | ||
112 | |||
113 | /* | ||
114 | * If pbl_virt is set then va is a virtual address | ||
115 | * that describes a virtually contiguous memory | ||
116 | * allocation. The wr needs the start of each virtual page | ||
117 | * to be converted to the corresponding physical address | ||
118 | * of the page. If pbl_virt is not set then va is an array | ||
119 | * of physical addresses and there is no conversion to do. | ||
120 | * Just fill in the wr with what is in the array. | ||
121 | */ | ||
122 | for (i = 0; i < count; i++) { | ||
123 | if (pbl_virt) { | ||
124 | va += PAGE_SIZE; | ||
125 | } else { | ||
126 | wr->paddrs[i] = | ||
127 | cpu_to_be64(((u64 *)va)[pbl_index + i]); | ||
128 | } | ||
129 | } | ||
130 | |||
131 | /* | ||
132 | * Send WR to adapter | ||
133 | */ | ||
134 | err = vq_send_wr(c2dev, (union c2wr *) wr); | ||
135 | if (err) { | ||
136 | if (count == pbl_depth) { /* ref only taken for last msg */ | ||
137 | vq_req_put(c2dev, vq_req); | ||
138 | } | ||
139 | goto bail0; | ||
140 | } | ||
141 | pbl_depth -= count; | ||
142 | pbl_index += count; | ||
143 | } | ||
144 | |||
145 | /* | ||
146 | * Now wait for the reply... | ||
147 | */ | ||
148 | err = vq_wait_for_reply(c2dev, vq_req); | ||
149 | if (err) { | ||
150 | goto bail0; | ||
151 | } | ||
152 | |||
153 | /* | ||
154 | * Process reply | ||
155 | */ | ||
156 | reply = (struct c2wr_nsmr_pbl_rep *) (unsigned long) vq_req->reply_msg; | ||
157 | if (!reply) { | ||
158 | err = -ENOMEM; | ||
159 | goto bail0; | ||
160 | } | ||
161 | |||
162 | err = c2_errno(reply); | ||
163 | |||
164 | vq_repbuf_free(c2dev, reply); | ||
165 | bail0: | ||
166 | kfree(wr); | ||
167 | return err; | ||
168 | } | ||
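[Editor's note] A worked example of the capacity arithmetic above, using assumed numbers: each PBL message carries pbe_count = (msg_size - sizeof(struct c2wr_nsmr_pbl_req)) / sizeof(u64) page addresses. If, hypothetically, msg_size were 512 bytes and the header 32 bytes, pbe_count would be (512 - 32) / 8 = 60, so a 1000-entry PBL needs (1000 + 59) / 60 = 17 messages, and only the 17th sets MEM_PBL_COMPLETE and draws a reply.

/* Hypothetical helper: number of PBL messages for a given depth. */
static u32 pbl_msg_count(u32 pbl_depth, u32 pbe_count)
{
	return (pbl_depth + pbe_count - 1) / pbe_count;	/* ceiling divide */
}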
169 | |||
170 | #define C2_PBL_MAX_DEPTH 131072 | ||
171 | int | ||
172 | c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list, | ||
173 | int page_size, int pbl_depth, u32 length, | ||
174 | u32 offset, u64 *va, enum c2_acf acf, | ||
175 | struct c2_mr *mr) | ||
176 | { | ||
177 | struct c2_vq_req *vq_req; | ||
178 | struct c2wr_nsmr_register_req *wr; | ||
179 | struct c2wr_nsmr_register_rep *reply; | ||
180 | u16 flags; | ||
181 | int i, pbe_count, count; | ||
182 | int err; | ||
183 | |||
184 | if (!va || !length || !addr_list || !pbl_depth) | ||
185 | return -EINVAL; | ||
186 | |||
187 | /* | ||
188 | * Verify PBL depth is within rnic max | ||
189 | */ | ||
190 | if (pbl_depth > C2_PBL_MAX_DEPTH) { | ||
191 | return -EINVAL; | ||
192 | } | ||
193 | |||
194 | /* | ||
195 | * allocate verbs request object | ||
196 | */ | ||
197 | vq_req = vq_req_alloc(c2dev); | ||
198 | if (!vq_req) | ||
199 | return -ENOMEM; | ||
200 | |||
201 | wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL); | ||
202 | if (!wr) { | ||
203 | err = -ENOMEM; | ||
204 | goto bail0; | ||
205 | } | ||
206 | |||
207 | /* | ||
208 | * build the WR | ||
209 | */ | ||
210 | c2_wr_set_id(wr, CCWR_NSMR_REGISTER); | ||
211 | wr->hdr.context = (unsigned long) vq_req; | ||
212 | wr->rnic_handle = c2dev->adapter_handle; | ||
213 | |||
214 | flags = (acf | MEM_VA_BASED | MEM_REMOTE); | ||
215 | |||
216 | /* | ||
217 | * compute how many pbes can fit in the message | ||
218 | */ | ||
219 | pbe_count = (c2dev->req_vq.msg_size - | ||
220 | sizeof(struct c2wr_nsmr_register_req)) / sizeof(u64); | ||
221 | |||
222 | if (pbl_depth <= pbe_count) { | ||
223 | flags |= MEM_PBL_COMPLETE; | ||
224 | } | ||
225 | wr->flags = cpu_to_be16(flags); | ||
226 | wr->stag_key = 0; | ||
227 | wr->va = cpu_to_be64(*va); | ||
228 | wr->pd_id = mr->pd->pd_id; | ||
229 | wr->pbe_size = cpu_to_be32(page_size); | ||
230 | wr->length = cpu_to_be32(length); | ||
231 | wr->pbl_depth = cpu_to_be32(pbl_depth); | ||
232 | wr->fbo = cpu_to_be32(offset); | ||
233 | count = min(pbl_depth, pbe_count); | ||
234 | wr->addrs_length = cpu_to_be32(count); | ||
235 | |||
236 | /* | ||
237 | * fill out the PBL for this message | ||
238 | */ | ||
239 | for (i = 0; i < count; i++) { | ||
240 | wr->paddrs[i] = cpu_to_be64(addr_list[i]); | ||
241 | } | ||
242 | |||
243 | /* | ||
244 | * reference the request struct | ||
245 | */ | ||
246 | vq_req_get(c2dev, vq_req); | ||
247 | |||
248 | /* | ||
249 | * send the WR to the adapter | ||
250 | */ | ||
251 | err = vq_send_wr(c2dev, (union c2wr *) wr); | ||
252 | if (err) { | ||
253 | vq_req_put(c2dev, vq_req); | ||
254 | goto bail1; | ||
255 | } | ||
256 | |||
257 | /* | ||
258 | * wait for reply from adapter | ||
259 | */ | ||
260 | err = vq_wait_for_reply(c2dev, vq_req); | ||
261 | if (err) { | ||
262 | goto bail1; | ||
263 | } | ||
264 | |||
265 | /* | ||
266 | * process reply | ||
267 | */ | ||
268 | reply = | ||
269 | (struct c2wr_nsmr_register_rep *) (unsigned long) (vq_req->reply_msg); | ||
270 | if (!reply) { | ||
271 | err = -ENOMEM; | ||
272 | goto bail1; | ||
273 | } | ||
274 | if ((err = c2_errno(reply))) { | ||
275 | goto bail2; | ||
276 | } | ||
278 | mr->ibmr.lkey = mr->ibmr.rkey = be32_to_cpu(reply->stag_index); | ||
279 | vq_repbuf_free(c2dev, reply); | ||
280 | |||
281 | /* | ||
282 | * if there are still more PBEs we need to send them to | ||
283 | * the adapter and wait for a reply on the final one. | ||
284 | * reuse vq_req for this purpose. | ||
285 | */ | ||
286 | pbl_depth -= count; | ||
287 | if (pbl_depth) { | ||
288 | |||
289 | vq_req->reply_msg = (unsigned long) NULL; | ||
290 | atomic_set(&vq_req->reply_ready, 0); | ||
291 | err = send_pbl_messages(c2dev, | ||
292 | cpu_to_be32(mr->ibmr.lkey), | ||
293 | (unsigned long) &addr_list[i], | ||
294 | pbl_depth, vq_req, PBL_PHYS); | ||
295 | if (err) { | ||
296 | goto bail1; | ||
297 | } | ||
298 | } | ||
299 | |||
300 | vq_req_free(c2dev, vq_req); | ||
301 | kfree(wr); | ||
302 | |||
303 | return err; | ||
304 | |||
305 | bail2: | ||
306 | vq_repbuf_free(c2dev, reply); | ||
307 | bail1: | ||
308 | kfree(wr); | ||
309 | bail0: | ||
310 | vq_req_free(c2dev, vq_req); | ||
311 | return err; | ||
312 | } | ||
313 | |||
314 | int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index) | ||
315 | { | ||
316 | struct c2_vq_req *vq_req; /* verbs request object */ | ||
317 | struct c2wr_stag_dealloc_req wr; /* work request */ | ||
318 | struct c2wr_stag_dealloc_rep *reply; /* WR reply */ | ||
319 | int err; | ||
320 | |||
322 | /* | ||
323 | * allocate verbs request object | ||
324 | */ | ||
325 | vq_req = vq_req_alloc(c2dev); | ||
326 | if (!vq_req) { | ||
327 | return -ENOMEM; | ||
328 | } | ||
329 | |||
330 | /* | ||
331 | * Build the WR | ||
332 | */ | ||
333 | c2_wr_set_id(&wr, CCWR_STAG_DEALLOC); | ||
334 | wr.hdr.context = (u64) (unsigned long) vq_req; | ||
335 | wr.rnic_handle = c2dev->adapter_handle; | ||
336 | wr.stag_index = cpu_to_be32(stag_index); | ||
337 | |||
338 | /* | ||
339 | * reference the request struct. dereferenced in the int handler. | ||
340 | */ | ||
341 | vq_req_get(c2dev, vq_req); | ||
342 | |||
343 | /* | ||
344 | * Send WR to adapter | ||
345 | */ | ||
346 | err = vq_send_wr(c2dev, (union c2wr *) &wr); | ||
347 | if (err) { | ||
348 | vq_req_put(c2dev, vq_req); | ||
349 | goto bail0; | ||
350 | } | ||
351 | |||
352 | /* | ||
353 | * Wait for reply from adapter | ||
354 | */ | ||
355 | err = vq_wait_for_reply(c2dev, vq_req); | ||
356 | if (err) { | ||
357 | goto bail0; | ||
358 | } | ||
359 | |||
360 | /* | ||
361 | * Process reply | ||
362 | */ | ||
363 | reply = (struct c2wr_stag_dealloc_rep *) (unsigned long) vq_req->reply_msg; | ||
364 | if (!reply) { | ||
365 | err = -ENOMEM; | ||
366 | goto bail0; | ||
367 | } | ||
368 | |||
369 | err = c2_errno(reply); | ||
370 | |||
371 | vq_repbuf_free(c2dev, reply); | ||
372 | bail0: | ||
373 | vq_req_free(c2dev, vq_req); | ||
374 | return err; | ||
375 | } | ||
diff --git a/drivers/infiniband/hw/amso1100/c2_mq.c b/drivers/infiniband/hw/amso1100/c2_mq.c new file mode 100644 index 000000000000..b88a75592102 --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_mq.c | |||
@@ -0,0 +1,174 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005 Ammasso, Inc. All rights reserved. | ||
3 | * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | #include "c2.h" | ||
34 | #include "c2_mq.h" | ||
35 | |||
36 | void *c2_mq_alloc(struct c2_mq *q) | ||
37 | { | ||
38 | BUG_ON(q->magic != C2_MQ_MAGIC); | ||
39 | BUG_ON(q->type != C2_MQ_ADAPTER_TARGET); | ||
40 | |||
41 | if (c2_mq_full(q)) { | ||
42 | return NULL; | ||
43 | } else { | ||
44 | #ifdef DEBUG | ||
45 | struct c2wr_hdr *m = | ||
46 | (struct c2wr_hdr *) (q->msg_pool.host + q->priv * q->msg_size); | ||
47 | #ifdef CCMSGMAGIC | ||
48 | BUG_ON(m->magic != be32_to_cpu(~CCWR_MAGIC)); | ||
49 | m->magic = cpu_to_be32(CCWR_MAGIC); | ||
50 | #endif | ||
51 | return m; | ||
52 | #else | ||
53 | return q->msg_pool.host + q->priv * q->msg_size; | ||
54 | #endif | ||
55 | } | ||
56 | } | ||
57 | |||
58 | void c2_mq_produce(struct c2_mq *q) | ||
59 | { | ||
60 | BUG_ON(q->magic != C2_MQ_MAGIC); | ||
61 | BUG_ON(q->type != C2_MQ_ADAPTER_TARGET); | ||
62 | |||
63 | if (!c2_mq_full(q)) { | ||
64 | q->priv = (q->priv + 1) % q->q_size; | ||
65 | q->hint_count++; | ||
66 | /* Update peer's offset. */ | ||
67 | __raw_writew(cpu_to_be16(q->priv), &q->peer->shared); | ||
68 | } | ||
69 | } | ||
70 | |||
71 | void *c2_mq_consume(struct c2_mq *q) | ||
72 | { | ||
73 | BUG_ON(q->magic != C2_MQ_MAGIC); | ||
74 | BUG_ON(q->type != C2_MQ_HOST_TARGET); | ||
75 | |||
76 | if (c2_mq_empty(q)) { | ||
77 | return NULL; | ||
78 | } else { | ||
79 | #ifdef DEBUG | ||
80 | struct c2wr_hdr *m = (struct c2wr_hdr *) | ||
81 | (q->msg_pool.host + q->priv * q->msg_size); | ||
82 | #ifdef CCMSGMAGIC | ||
83 | BUG_ON(m->magic != be32_to_cpu(CCWR_MAGIC)); | ||
84 | #endif | ||
85 | return m; | ||
86 | #else | ||
87 | return q->msg_pool.host + q->priv * q->msg_size; | ||
88 | #endif | ||
89 | } | ||
90 | } | ||
91 | |||
92 | void c2_mq_free(struct c2_mq *q) | ||
93 | { | ||
94 | BUG_ON(q->magic != C2_MQ_MAGIC); | ||
95 | BUG_ON(q->type != C2_MQ_HOST_TARGET); | ||
96 | |||
97 | if (!c2_mq_empty(q)) { | ||
98 | |||
99 | #ifdef CCMSGMAGIC | ||
100 | { | ||
101 | struct c2wr_hdr __iomem *m = (struct c2wr_hdr __iomem *) | ||
102 | (q->msg_pool.adapter + q->priv * q->msg_size); | ||
103 | __raw_writel(cpu_to_be32(~CCWR_MAGIC), &m->magic); | ||
104 | } | ||
105 | #endif | ||
106 | q->priv = (q->priv + 1) % q->q_size; | ||
107 | /* Update peer's offset. */ | ||
108 | __raw_writew(cpu_to_be16(q->priv), &q->peer->shared); | ||
109 | } | ||
110 | } | ||
111 | |||
112 | |||
113 | void c2_mq_lconsume(struct c2_mq *q, u32 wqe_count) | ||
114 | { | ||
115 | BUG_ON(q->magic != C2_MQ_MAGIC); | ||
116 | BUG_ON(q->type != C2_MQ_ADAPTER_TARGET); | ||
117 | |||
118 | while (wqe_count--) { | ||
119 | BUG_ON(c2_mq_empty(q)); | ||
120 | *q->shared = cpu_to_be16((be16_to_cpu(*q->shared)+1) % q->q_size); | ||
121 | } | ||
122 | } | ||
123 | |||
124 | #if 0 | ||
125 | u32 c2_mq_count(struct c2_mq *q) | ||
126 | { | ||
127 | s32 count; | ||
128 | |||
129 | if (q->type == C2_MQ_HOST_TARGET) | ||
130 | count = be16_to_cpu(*q->shared) - q->priv; | ||
131 | else | ||
132 | count = q->priv - be16_to_cpu(*q->shared); | ||
133 | |||
134 | if (count < 0) | ||
135 | count += q->q_size; | ||
136 | |||
137 | return (u32) count; | ||
138 | } | ||
139 | #endif /* 0 */ | ||
140 | |||
141 | void c2_mq_req_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size, | ||
142 | u8 __iomem *pool_start, u16 __iomem *peer, u32 type) | ||
143 | { | ||
144 | BUG_ON(!q->shared); | ||
145 | |||
146 | /* This code assumes the byte swapping has already been done! */ | ||
147 | q->index = index; | ||
148 | q->q_size = q_size; | ||
149 | q->msg_size = msg_size; | ||
150 | q->msg_pool.adapter = pool_start; | ||
151 | q->peer = (struct c2_mq_shared __iomem *) peer; | ||
152 | q->magic = C2_MQ_MAGIC; | ||
153 | q->type = type; | ||
154 | q->priv = 0; | ||
155 | q->hint_count = 0; | ||
156 | return; | ||
157 | } | ||
158 | void c2_mq_rep_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size, | ||
159 | u8 *pool_start, u16 __iomem *peer, u32 type) | ||
160 | { | ||
161 | BUG_ON(!q->shared); | ||
162 | |||
163 | /* This code assumes the byte swapping has already been done! */ | ||
164 | q->index = index; | ||
165 | q->q_size = q_size; | ||
166 | q->msg_size = msg_size; | ||
167 | q->msg_pool.host = pool_start; | ||
168 | q->peer = (struct c2_mq_shared __iomem *) peer; | ||
169 | q->magic = C2_MQ_MAGIC; | ||
170 | q->type = type; | ||
171 | q->priv = 0; | ||
172 | q->hint_count = 0; | ||
173 | return; | ||
174 | } | ||
diff --git a/drivers/infiniband/hw/amso1100/c2_mq.h b/drivers/infiniband/hw/amso1100/c2_mq.h new file mode 100644 index 000000000000..9185bbb21658 --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_mq.h | |||
@@ -0,0 +1,106 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005 Ammasso, Inc. All rights reserved. | ||
3 | * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | |||
34 | #ifndef _C2_MQ_H_ | ||
35 | #define _C2_MQ_H_ | ||
36 | #include <linux/kernel.h> | ||
37 | #include <linux/dma-mapping.h> | ||
38 | #include "c2_wr.h" | ||
39 | |||
40 | enum c2_shared_regs { | ||
41 | |||
42 | C2_SHARED_ARMED = 0x10, | ||
43 | C2_SHARED_NOTIFY = 0x18, | ||
44 | C2_SHARED_SHARED = 0x40, | ||
45 | }; | ||
46 | |||
47 | struct c2_mq_shared { | ||
48 | u16 unused1; | ||
49 | u8 armed; | ||
50 | u8 notification_type; | ||
51 | u32 unused2; | ||
52 | u16 shared; | ||
53 | /* Pad to 64 bytes. */ | ||
54 | u8 pad[64 - sizeof(u16) - 2 * sizeof(u8) - sizeof(u32) - sizeof(u16)]; | ||
55 | }; | ||
56 | |||
57 | enum c2_mq_type { | ||
58 | C2_MQ_HOST_TARGET = 1, | ||
59 | C2_MQ_ADAPTER_TARGET = 2, | ||
60 | }; | ||
61 | |||
62 | /* | ||
63 | * c2_mq_t is for kernel-mode MQs like the VQs and the AEQ. | ||
64 | * c2_user_mq_t (which is the same format) is for user-mode MQs... | ||
65 | */ | ||
66 | #define C2_MQ_MAGIC 0x4d512020 /* 'MQ ' */ | ||
67 | struct c2_mq { | ||
68 | u32 magic; | ||
69 | union { | ||
70 | u8 *host; | ||
71 | u8 __iomem *adapter; | ||
72 | } msg_pool; | ||
73 | dma_addr_t host_dma; | ||
74 | DECLARE_PCI_UNMAP_ADDR(mapping); | ||
75 | u16 hint_count; | ||
76 | u16 priv; | ||
77 | struct c2_mq_shared __iomem *peer; | ||
78 | u16 *shared; | ||
79 | dma_addr_t shared_dma; | ||
80 | u32 q_size; | ||
81 | u32 msg_size; | ||
82 | u32 index; | ||
83 | enum c2_mq_type type; | ||
84 | }; | ||
85 | |||
86 | static inline int c2_mq_empty(struct c2_mq *q) | ||
87 | { | ||
88 | return q->priv == be16_to_cpu(*q->shared); | ||
89 | } | ||
90 | |||
91 | static inline int c2_mq_full(struct c2_mq *q) | ||
92 | { | ||
93 | return q->priv == (be16_to_cpu(*q->shared) + q->q_size - 1) % q->q_size; | ||
94 | } | ||
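[Editor's note] The tests above implement the classic one-slot-open ring: with an assumed q_size == 4, the queue is empty when priv == *shared and full when priv == (*shared + 3) % 4, so one slot always stays unused to keep full and empty distinguishable, and at most q_size - 1 messages can be outstanding. A hypothetical helper making that invariant explicit:

static inline u32 c2_mq_capacity(struct c2_mq *q)	/* illustrative only */
{
	return q->q_size - 1;	/* one slot reserved to tell full from empty */
}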
95 | |||
96 | extern void c2_mq_lconsume(struct c2_mq *q, u32 wqe_count); | ||
97 | extern void *c2_mq_alloc(struct c2_mq *q); | ||
98 | extern void c2_mq_produce(struct c2_mq *q); | ||
99 | extern void *c2_mq_consume(struct c2_mq *q); | ||
100 | extern void c2_mq_free(struct c2_mq *q); | ||
101 | extern void c2_mq_req_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size, | ||
102 | u8 __iomem *pool_start, u16 __iomem *peer, u32 type); | ||
103 | extern void c2_mq_rep_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size, | ||
104 | u8 *pool_start, u16 __iomem *peer, u32 type); | ||
105 | |||
106 | #endif /* _C2_MQ_H_ */ | ||
diff --git a/drivers/infiniband/hw/amso1100/c2_pd.c b/drivers/infiniband/hw/amso1100/c2_pd.c new file mode 100644 index 000000000000..00c709926c8d --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_pd.c | |||
@@ -0,0 +1,89 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2004 Topspin Communications. All rights reserved. | ||
3 | * Copyright (c) 2005 Cisco Systems. All rights reserved. | ||
4 | * Copyright (c) 2005 Mellanox Technologies. All rights reserved. | ||
5 | * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. | ||
6 | * | ||
7 | * This software is available to you under a choice of one of two | ||
8 | * licenses. You may choose to be licensed under the terms of the GNU | ||
9 | * General Public License (GPL) Version 2, available from the file | ||
10 | * COPYING in the main directory of this source tree, or the | ||
11 | * OpenIB.org BSD license below: | ||
12 | * | ||
13 | * Redistribution and use in source and binary forms, with or | ||
14 | * without modification, are permitted provided that the following | ||
15 | * conditions are met: | ||
16 | * | ||
17 | * - Redistributions of source code must retain the above | ||
18 | * copyright notice, this list of conditions and the following | ||
19 | * disclaimer. | ||
20 | * | ||
21 | * - Redistributions in binary form must reproduce the above | ||
22 | * copyright notice, this list of conditions and the following | ||
23 | * disclaimer in the documentation and/or other materials | ||
24 | * provided with the distribution. | ||
25 | * | ||
26 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
27 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
28 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
29 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
30 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
31 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
32 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
33 | * SOFTWARE. | ||
34 | */ | ||
35 | |||
36 | #include <linux/init.h> | ||
37 | #include <linux/errno.h> | ||
38 | |||
39 | #include "c2.h" | ||
40 | #include "c2_provider.h" | ||
41 | |||
42 | int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd) | ||
43 | { | ||
44 | u32 obj; | ||
45 | int ret = 0; | ||
46 | |||
47 | spin_lock(&c2dev->pd_table.lock); | ||
48 | obj = find_next_zero_bit(c2dev->pd_table.table, c2dev->pd_table.max, | ||
49 | c2dev->pd_table.last); | ||
50 | if (obj >= c2dev->pd_table.max) | ||
51 | obj = find_first_zero_bit(c2dev->pd_table.table, | ||
52 | c2dev->pd_table.max); | ||
53 | if (obj < c2dev->pd_table.max) { | ||
54 | pd->pd_id = obj; | ||
55 | __set_bit(obj, c2dev->pd_table.table); | ||
56 | c2dev->pd_table.last = obj+1; | ||
57 | if (c2dev->pd_table.last >= c2dev->pd_table.max) | ||
58 | c2dev->pd_table.last = 0; | ||
59 | } else | ||
60 | ret = -ENOMEM; | ||
61 | spin_unlock(&c2dev->pd_table.lock); | ||
62 | return ret; | ||
63 | } | ||
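[Editor's note] The allocator above is a round-robin bitmap scan: it searches from pd_table.last, wraps once via find_first_zero_bit(), and advances last so PD ids are spread out rather than immediately reused. A hedged usage sketch built only from the functions shown here:

/* Assumed usage: reserve a PD id, use it, release it. */
static int example_pd_cycle(struct c2_dev *c2dev)
{
	struct c2_pd pd;
	int err;

	err = c2_pd_alloc(c2dev, 1, &pd);
	if (err)
		return err;	/* -ENOMEM once the bitmap is exhausted */
	/* ... pd.pd_id is now reserved in pd_table ... */
	c2_pd_free(c2dev, &pd);
	return 0;
}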
64 | |||
65 | void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd) | ||
66 | { | ||
67 | spin_lock(&c2dev->pd_table.lock); | ||
68 | __clear_bit(pd->pd_id, c2dev->pd_table.table); | ||
69 | spin_unlock(&c2dev->pd_table.lock); | ||
70 | } | ||
71 | |||
72 | int __devinit c2_init_pd_table(struct c2_dev *c2dev) | ||
73 | { | ||
75 | c2dev->pd_table.last = 0; | ||
76 | c2dev->pd_table.max = c2dev->props.max_pd; | ||
77 | spin_lock_init(&c2dev->pd_table.lock); | ||
78 | c2dev->pd_table.table = kmalloc(BITS_TO_LONGS(c2dev->props.max_pd) * | ||
79 | sizeof(long), GFP_KERNEL); | ||
80 | if (!c2dev->pd_table.table) | ||
81 | return -ENOMEM; | ||
82 | bitmap_zero(c2dev->pd_table.table, c2dev->props.max_pd); | ||
83 | return 0; | ||
84 | } | ||
85 | |||
86 | void __devexit c2_cleanup_pd_table(struct c2_dev *c2dev) | ||
87 | { | ||
88 | kfree(c2dev->pd_table.table); | ||
89 | } | ||
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c new file mode 100644 index 000000000000..8fddc8cccdf3 --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_provider.c | |||
@@ -0,0 +1,869 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005 Ammasso, Inc. All rights reserved. | ||
3 | * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | * | ||
33 | */ | ||
34 | |||
35 | #include <linux/module.h> | ||
36 | #include <linux/moduleparam.h> | ||
37 | #include <linux/pci.h> | ||
38 | #include <linux/netdevice.h> | ||
39 | #include <linux/etherdevice.h> | ||
40 | #include <linux/inetdevice.h> | ||
41 | #include <linux/delay.h> | ||
42 | #include <linux/ethtool.h> | ||
43 | #include <linux/mii.h> | ||
44 | #include <linux/if_vlan.h> | ||
45 | #include <linux/crc32.h> | ||
46 | #include <linux/in.h> | ||
47 | #include <linux/ip.h> | ||
48 | #include <linux/tcp.h> | ||
49 | #include <linux/init.h> | ||
50 | #include <linux/dma-mapping.h> | ||
51 | #include <linux/if_arp.h> | ||
52 | |||
53 | #include <asm/io.h> | ||
54 | #include <asm/irq.h> | ||
55 | #include <asm/byteorder.h> | ||
56 | |||
57 | #include <rdma/ib_smi.h> | ||
58 | #include <rdma/ib_user_verbs.h> | ||
59 | #include "c2.h" | ||
60 | #include "c2_provider.h" | ||
61 | #include "c2_user.h" | ||
62 | |||
63 | static int c2_query_device(struct ib_device *ibdev, | ||
64 | struct ib_device_attr *props) | ||
65 | { | ||
66 | struct c2_dev *c2dev = to_c2dev(ibdev); | ||
67 | |||
68 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
69 | |||
70 | *props = c2dev->props; | ||
71 | return 0; | ||
72 | } | ||
73 | |||
74 | static int c2_query_port(struct ib_device *ibdev, | ||
75 | u8 port, struct ib_port_attr *props) | ||
76 | { | ||
77 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
78 | |||
79 | props->max_mtu = IB_MTU_4096; | ||
80 | props->lid = 0; | ||
81 | props->lmc = 0; | ||
82 | props->sm_lid = 0; | ||
83 | props->sm_sl = 0; | ||
84 | props->state = IB_PORT_ACTIVE; | ||
85 | props->phys_state = 0; | ||
86 | props->port_cap_flags = | ||
87 | IB_PORT_CM_SUP | | ||
88 | IB_PORT_REINIT_SUP | | ||
89 | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP; | ||
90 | props->gid_tbl_len = 1; | ||
91 | props->pkey_tbl_len = 1; | ||
92 | props->qkey_viol_cntr = 0; | ||
93 | props->active_width = 1; | ||
94 | props->active_speed = 1; | ||
95 | |||
96 | return 0; | ||
97 | } | ||
98 | |||
99 | static int c2_modify_port(struct ib_device *ibdev, | ||
100 | u8 port, int port_modify_mask, | ||
101 | struct ib_port_modify *props) | ||
102 | { | ||
103 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | static int c2_query_pkey(struct ib_device *ibdev, | ||
108 | u8 port, u16 index, u16 * pkey) | ||
109 | { | ||
110 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
111 | *pkey = 0; | ||
112 | return 0; | ||
113 | } | ||
114 | |||
115 | static int c2_query_gid(struct ib_device *ibdev, u8 port, | ||
116 | int index, union ib_gid *gid) | ||
117 | { | ||
118 | struct c2_dev *c2dev = to_c2dev(ibdev); | ||
119 | |||
120 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
121 | memset(&(gid->raw[0]), 0, sizeof(gid->raw)); | ||
122 | memcpy(&(gid->raw[0]), c2dev->pseudo_netdev->dev_addr, 6); | ||
123 | |||
124 | return 0; | ||
125 | } | ||
126 | |||
127 | /* Allocate the user context data structure. This keeps track | ||
128 | * of all objects associated with a particular user-mode client. | ||
129 | */ | ||
130 | static struct ib_ucontext *c2_alloc_ucontext(struct ib_device *ibdev, | ||
131 | struct ib_udata *udata) | ||
132 | { | ||
133 | struct c2_ucontext *context; | ||
134 | |||
135 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
136 | context = kmalloc(sizeof(*context), GFP_KERNEL); | ||
137 | if (!context) | ||
138 | return ERR_PTR(-ENOMEM); | ||
139 | |||
140 | return &context->ibucontext; | ||
141 | } | ||
142 | |||
143 | static int c2_dealloc_ucontext(struct ib_ucontext *context) | ||
144 | { | ||
145 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
146 | kfree(context); | ||
147 | return 0; | ||
148 | } | ||
149 | |||
150 | static int c2_mmap_uar(struct ib_ucontext *context, struct vm_area_struct *vma) | ||
151 | { | ||
152 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
153 | return -ENOSYS; | ||
154 | } | ||
155 | |||
156 | static struct ib_pd *c2_alloc_pd(struct ib_device *ibdev, | ||
157 | struct ib_ucontext *context, | ||
158 | struct ib_udata *udata) | ||
159 | { | ||
160 | struct c2_pd *pd; | ||
161 | int err; | ||
162 | |||
163 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
164 | |||
165 | pd = kmalloc(sizeof(*pd), GFP_KERNEL); | ||
166 | if (!pd) | ||
167 | return ERR_PTR(-ENOMEM); | ||
168 | |||
169 | err = c2_pd_alloc(to_c2dev(ibdev), !context, pd); | ||
170 | if (err) { | ||
171 | kfree(pd); | ||
172 | return ERR_PTR(err); | ||
173 | } | ||
174 | |||
175 | if (context) { | ||
176 | if (ib_copy_to_udata(udata, &pd->pd_id, sizeof(__u32))) { | ||
177 | c2_pd_free(to_c2dev(ibdev), pd); | ||
178 | kfree(pd); | ||
179 | return ERR_PTR(-EFAULT); | ||
180 | } | ||
181 | } | ||
182 | |||
183 | return &pd->ibpd; | ||
184 | } | ||
185 | |||
186 | static int c2_dealloc_pd(struct ib_pd *pd) | ||
187 | { | ||
188 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
189 | c2_pd_free(to_c2dev(pd->device), to_c2pd(pd)); | ||
190 | kfree(pd); | ||
191 | |||
192 | return 0; | ||
193 | } | ||
194 | |||
195 | static struct ib_ah *c2_ah_create(struct ib_pd *pd, struct ib_ah_attr *ah_attr) | ||
196 | { | ||
197 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
198 | return ERR_PTR(-ENOSYS); | ||
199 | } | ||
200 | |||
201 | static int c2_ah_destroy(struct ib_ah *ah) | ||
202 | { | ||
203 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
204 | return -ENOSYS; | ||
205 | } | ||
206 | |||
207 | static void c2_add_ref(struct ib_qp *ibqp) | ||
208 | { | ||
209 | struct c2_qp *qp; | ||
210 | BUG_ON(!ibqp); | ||
211 | qp = to_c2qp(ibqp); | ||
212 | atomic_inc(&qp->refcount); | ||
213 | } | ||
214 | |||
215 | static void c2_rem_ref(struct ib_qp *ibqp) | ||
216 | { | ||
217 | struct c2_qp *qp; | ||
218 | BUG_ON(!ibqp); | ||
219 | qp = to_c2qp(ibqp); | ||
220 | if (atomic_dec_and_test(&qp->refcount)) | ||
221 | wake_up(&qp->wait); | ||
222 | } | ||
223 | |||
224 | struct ib_qp *c2_get_qp(struct ib_device *device, int qpn) | ||
225 | { | ||
226 | struct c2_dev* c2dev = to_c2dev(device); | ||
227 | struct c2_qp *qp; | ||
228 | |||
229 | qp = c2_find_qpn(c2dev, qpn); | ||
230 | pr_debug("%s Returning QP=%p for QPN=%d, device=%p, refcount=%d\n", | ||
231 | __FUNCTION__, qp, qpn, device, | ||
232 | (qp ? atomic_read(&qp->refcount) : 0)); | ||
233 | |||
234 | return (qp ? &qp->ibqp : NULL); | ||
235 | } | ||
236 | |||
237 | static struct ib_qp *c2_create_qp(struct ib_pd *pd, | ||
238 | struct ib_qp_init_attr *init_attr, | ||
239 | struct ib_udata *udata) | ||
240 | { | ||
241 | struct c2_qp *qp; | ||
242 | int err; | ||
243 | |||
244 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
245 | |||
246 | switch (init_attr->qp_type) { | ||
247 | case IB_QPT_RC: | ||
248 | qp = kzalloc(sizeof(*qp), GFP_KERNEL); | ||
249 | if (!qp) { | ||
250 | pr_debug("%s: Unable to allocate QP\n", __FUNCTION__); | ||
251 | return ERR_PTR(-ENOMEM); | ||
252 | } | ||
253 | spin_lock_init(&qp->lock); | ||
254 | if (pd->uobject) { | ||
255 | /* userspace specific */ | ||
256 | } | ||
257 | |||
258 | err = c2_alloc_qp(to_c2dev(pd->device), | ||
259 | to_c2pd(pd), init_attr, qp); | ||
260 | |||
261 | if (err && pd->uobject) { | ||
262 | /* userspace specific */ | ||
263 | } | ||
264 | |||
265 | break; | ||
266 | default: | ||
267 | pr_debug("%s: Invalid QP type: %d\n", __FUNCTION__, | ||
268 | init_attr->qp_type); | ||
269 | return ERR_PTR(-EINVAL); | ||
271 | } | ||
272 | |||
273 | if (err) { | ||
274 | kfree(qp); | ||
275 | return ERR_PTR(err); | ||
276 | } | ||
277 | |||
278 | return &qp->ibqp; | ||
279 | } | ||
280 | |||
281 | static int c2_destroy_qp(struct ib_qp *ib_qp) | ||
282 | { | ||
283 | struct c2_qp *qp = to_c2qp(ib_qp); | ||
284 | |||
285 | pr_debug("%s:%u qp=%p,qp->state=%d\n", | ||
286 | __FUNCTION__, __LINE__,ib_qp,qp->state); | ||
287 | c2_free_qp(to_c2dev(ib_qp->device), qp); | ||
288 | kfree(qp); | ||
289 | return 0; | ||
290 | } | ||
291 | |||
292 | static struct ib_cq *c2_create_cq(struct ib_device *ibdev, int entries, | ||
293 | struct ib_ucontext *context, | ||
294 | struct ib_udata *udata) | ||
295 | { | ||
296 | struct c2_cq *cq; | ||
297 | int err; | ||
298 | |||
299 | cq = kmalloc(sizeof(*cq), GFP_KERNEL); | ||
300 | if (!cq) { | ||
301 | pr_debug("%s: Unable to allocate CQ\n", __FUNCTION__); | ||
302 | return ERR_PTR(-ENOMEM); | ||
303 | } | ||
304 | |||
305 | err = c2_init_cq(to_c2dev(ibdev), entries, NULL, cq); | ||
306 | if (err) { | ||
307 | pr_debug("%s: error initializing CQ\n", __FUNCTION__); | ||
308 | kfree(cq); | ||
309 | return ERR_PTR(err); | ||
310 | } | ||
311 | |||
312 | return &cq->ibcq; | ||
313 | } | ||
314 | |||
315 | static int c2_destroy_cq(struct ib_cq *ib_cq) | ||
316 | { | ||
317 | struct c2_cq *cq = to_c2cq(ib_cq); | ||
318 | |||
319 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
320 | |||
321 | c2_free_cq(to_c2dev(ib_cq->device), cq); | ||
322 | kfree(cq); | ||
323 | |||
324 | return 0; | ||
325 | } | ||
326 | |||
327 | static inline u32 c2_convert_access(int acc) | ||
328 | { | ||
329 | return (acc & IB_ACCESS_REMOTE_WRITE ? C2_ACF_REMOTE_WRITE : 0) | | ||
330 | (acc & IB_ACCESS_REMOTE_READ ? C2_ACF_REMOTE_READ : 0) | | ||
331 | (acc & IB_ACCESS_LOCAL_WRITE ? C2_ACF_LOCAL_WRITE : 0) | | ||
332 | C2_ACF_LOCAL_READ | C2_ACF_WINDOW_BIND; | ||
333 | } | ||
334 | |||
335 | static struct ib_mr *c2_reg_phys_mr(struct ib_pd *ib_pd, | ||
336 | struct ib_phys_buf *buffer_list, | ||
337 | int num_phys_buf, int acc, u64 * iova_start) | ||
338 | { | ||
339 | struct c2_mr *mr; | ||
340 | u64 *page_list; | ||
341 | u32 total_len; | ||
342 | int err, i, j, k, page_shift, pbl_depth; | ||
343 | |||
344 | pbl_depth = 0; | ||
345 | total_len = 0; | ||
346 | |||
347 | page_shift = PAGE_SHIFT; | ||
348 | /* | ||
349 | * If there is only 1 buffer we assume this could | ||
350 | * be a map of all physical memory; use a 32k page size. | ||
351 | */ | ||
352 | if (num_phys_buf == 1) | ||
353 | page_shift += 3; | ||
354 | |||
355 | for (i = 0; i < num_phys_buf; i++) { | ||
356 | |||
357 | if (buffer_list[i].addr & ~PAGE_MASK) { | ||
358 | pr_debug("Unaligned Memory Buffer: 0x%x\n", | ||
359 | (unsigned int) buffer_list[i].addr); | ||
360 | return ERR_PTR(-EINVAL); | ||
361 | } | ||
362 | |||
363 | if (!buffer_list[i].size) { | ||
364 | pr_debug("Invalid Buffer Size\n"); | ||
365 | return ERR_PTR(-EINVAL); | ||
366 | } | ||
367 | |||
368 | total_len += buffer_list[i].size; | ||
369 | pbl_depth += ALIGN(buffer_list[i].size, | ||
370 | (1 << page_shift)) >> page_shift; | ||
371 | } | ||
372 | |||
373 | page_list = vmalloc(sizeof(u64) * pbl_depth); | ||
374 | if (!page_list) { | ||
375 | pr_debug("couldn't vmalloc page_list of size %zd\n", | ||
376 | (sizeof(u64) * pbl_depth)); | ||
377 | return ERR_PTR(-ENOMEM); | ||
378 | } | ||
379 | |||
380 | for (i = 0, j = 0; i < num_phys_buf; i++) { | ||
381 | |||
382 | int naddrs; | ||
383 | |||
384 | naddrs = ALIGN(buffer_list[i].size, | ||
385 | (1 << page_shift)) >> page_shift; | ||
386 | for (k = 0; k < naddrs; k++) | ||
387 | page_list[j++] = (buffer_list[i].addr + | ||
388 | (k << page_shift)); | ||
389 | } | ||
390 | |||
391 | mr = kmalloc(sizeof(*mr), GFP_KERNEL); | ||
392 | if (!mr) | ||
393 | return ERR_PTR(-ENOMEM); | ||
394 | |||
395 | mr->pd = to_c2pd(ib_pd); | ||
396 | pr_debug("%s - page shift %d, pbl_depth %d, total_len %u, " | ||
397 | "*iova_start %llx, first pa %llx, last pa %llx\n", | ||
398 | __FUNCTION__, page_shift, pbl_depth, total_len, | ||
399 | *iova_start, page_list[0], page_list[pbl_depth-1]); | ||
400 | err = c2_nsmr_register_phys_kern(to_c2dev(ib_pd->device), page_list, | ||
401 | (1 << page_shift), pbl_depth, | ||
402 | total_len, 0, iova_start, | ||
403 | c2_convert_access(acc), mr); | ||
404 | vfree(page_list); | ||
405 | if (err) { | ||
406 | kfree(mr); | ||
407 | return ERR_PTR(err); | ||
408 | } | ||
409 | |||
410 | return &mr->ibmr; | ||
411 | } | ||
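[Editor's note] A worked example of the pbl_depth arithmetic above, with assumed numbers: a single 10000-byte buffer at page_shift == 12 contributes ALIGN(10000, 4096) >> 12 = 3 page addresses; because num_phys_buf == 1 bumps the shift to 15, it instead contributes ALIGN(10000, 32768) >> 15 = 1 entry describing one 32 KB page.

/* Hypothetical check of the per-buffer PBE count. */
static u64 example_pbe_count(u64 size, int page_shift)
{
	return ALIGN(size, 1ULL << page_shift) >> page_shift;
}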
412 | |||
413 | static struct ib_mr *c2_get_dma_mr(struct ib_pd *pd, int acc) | ||
414 | { | ||
415 | struct ib_phys_buf bl; | ||
416 | u64 kva = 0; | ||
417 | |||
418 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
419 | |||
420 | /* AMSO1100 limit */ | ||
421 | bl.size = 0xffffffff; | ||
422 | bl.addr = 0; | ||
423 | return c2_reg_phys_mr(pd, &bl, 1, acc, &kva); | ||
424 | } | ||
425 | |||
426 | static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, struct ib_umem *region, | ||
427 | int acc, struct ib_udata *udata) | ||
428 | { | ||
429 | u64 *pages; | ||
430 | u64 kva = 0; | ||
431 | int shift, n, len; | ||
432 | int i, j, k; | ||
433 | int err = 0; | ||
434 | struct ib_umem_chunk *chunk; | ||
435 | struct c2_pd *c2pd = to_c2pd(pd); | ||
436 | struct c2_mr *c2mr; | ||
437 | |||
438 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
439 | shift = ffs(region->page_size) - 1; | ||
440 | |||
441 | c2mr = kmalloc(sizeof(*c2mr), GFP_KERNEL); | ||
442 | if (!c2mr) | ||
443 | return ERR_PTR(-ENOMEM); | ||
444 | c2mr->pd = c2pd; | ||
445 | |||
446 | n = 0; | ||
447 | list_for_each_entry(chunk, ®ion->chunk_list, list) | ||
448 | n += chunk->nents; | ||
449 | |||
450 | pages = kmalloc(n * sizeof(u64), GFP_KERNEL); | ||
451 | if (!pages) { | ||
452 | err = -ENOMEM; | ||
453 | goto err; | ||
454 | } | ||
455 | |||
456 | i = 0; | ||
457 | list_for_each_entry(chunk, ®ion->chunk_list, list) { | ||
458 | for (j = 0; j < chunk->nmap; ++j) { | ||
459 | len = sg_dma_len(&chunk->page_list[j]) >> shift; | ||
460 | for (k = 0; k < len; ++k) { | ||
461 | pages[i++] = | ||
462 | sg_dma_address(&chunk->page_list[j]) + | ||
463 | (region->page_size * k); | ||
464 | } | ||
465 | } | ||
466 | } | ||
467 | |||
468 | kva = (u64)region->virt_base; | ||
469 | err = c2_nsmr_register_phys_kern(to_c2dev(pd->device), | ||
470 | pages, | ||
471 | region->page_size, | ||
472 | i, | ||
473 | region->length, | ||
474 | region->offset, | ||
475 | &kva, | ||
476 | c2_convert_access(acc), | ||
477 | c2mr); | ||
478 | kfree(pages); | ||
479 | if (err) { | ||
480 | kfree(c2mr); | ||
481 | return ERR_PTR(err); | ||
482 | } | ||
483 | return &c2mr->ibmr; | ||
484 | |||
485 | err: | ||
486 | kfree(c2mr); | ||
487 | return ERR_PTR(err); | ||
488 | } | ||
489 | |||
490 | static int c2_dereg_mr(struct ib_mr *ib_mr) | ||
491 | { | ||
492 | struct c2_mr *mr = to_c2mr(ib_mr); | ||
493 | int err; | ||
494 | |||
495 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
496 | |||
497 | err = c2_stag_dealloc(to_c2dev(ib_mr->device), ib_mr->lkey); | ||
498 | if (err) | ||
499 | pr_debug("c2_stag_dealloc failed: %d\n", err); | ||
500 | else | ||
501 | kfree(mr); | ||
502 | |||
503 | return err; | ||
504 | } | ||
505 | |||
506 | static ssize_t show_rev(struct class_device *cdev, char *buf) | ||
507 | { | ||
508 | struct c2_dev *dev = container_of(cdev, struct c2_dev, ibdev.class_dev); | ||
509 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
510 | return sprintf(buf, "%x\n", dev->props.hw_ver); | ||
511 | } | ||
512 | |||
513 | static ssize_t show_fw_ver(struct class_device *cdev, char *buf) | ||
514 | { | ||
515 | struct c2_dev *dev = container_of(cdev, struct c2_dev, ibdev.class_dev); | ||
516 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
517 | return sprintf(buf, "%x.%x.%x\n", | ||
518 | (int) (dev->props.fw_ver >> 32), | ||
519 | (int) (dev->props.fw_ver >> 16) & 0xffff, | ||
520 | (int) (dev->props.fw_ver & 0xffff)); | ||
521 | } | ||
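/*
 * The fw_ver packing assumed by show_fw_ver() above: major in bits 63:32,
 * minor in bits 31:16, patch in bits 15:0.  Packing 1.2.3 gives
 * 0x0000000100020003, which the sysfs handler prints back as "1.2.3".
 */
static u64 pack_fw_ver(u32 major, u16 minor, u16 patch)
{
	return ((u64) major << 32) | ((u64) minor << 16) | patch;
}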
522 | |||
523 | static ssize_t show_hca(struct class_device *cdev, char *buf) | ||
524 | { | ||
525 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
526 | return sprintf(buf, "AMSO1100\n"); | ||
527 | } | ||
528 | |||
529 | static ssize_t show_board(struct class_device *cdev, char *buf) | ||
530 | { | ||
531 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
532 | return sprintf(buf, "%.*s\n", 32, "AMSO1100 Board ID"); | ||
533 | } | ||
534 | |||
535 | static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); | ||
536 | static CLASS_DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL); | ||
537 | static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); | ||
538 | static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL); | ||
539 | |||
540 | static struct class_device_attribute *c2_class_attributes[] = { | ||
541 | &class_device_attr_hw_rev, | ||
542 | &class_device_attr_fw_ver, | ||
543 | &class_device_attr_hca_type, | ||
544 | &class_device_attr_board_id | ||
545 | }; | ||
546 | |||
547 | static int c2_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | ||
548 | int attr_mask, struct ib_udata *udata) | ||
549 | { | ||
550 | int err; | ||
551 | |||
552 | err = | ||
553 | c2_qp_modify(to_c2dev(ibqp->device), to_c2qp(ibqp), attr, | ||
554 | attr_mask); | ||
555 | |||
556 | return err; | ||
557 | } | ||
558 | |||
559 | static int c2_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) | ||
560 | { | ||
561 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
562 | return -ENOSYS; | ||
563 | } | ||
564 | |||
565 | static int c2_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) | ||
566 | { | ||
567 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
568 | return -ENOSYS; | ||
569 | } | ||
570 | |||
571 | static int c2_process_mad(struct ib_device *ibdev, | ||
572 | int mad_flags, | ||
573 | u8 port_num, | ||
574 | struct ib_wc *in_wc, | ||
575 | struct ib_grh *in_grh, | ||
576 | struct ib_mad *in_mad, struct ib_mad *out_mad) | ||
577 | { | ||
578 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
579 | return -ENOSYS; | ||
580 | } | ||
581 | |||
582 | static int c2_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param) | ||
583 | { | ||
584 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
585 | |||
586 | /* Request a connection */ | ||
587 | return c2_llp_connect(cm_id, iw_param); | ||
588 | } | ||
589 | |||
590 | static int c2_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param) | ||
591 | { | ||
592 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
593 | |||
594 | /* Accept the new connection */ | ||
595 | return c2_llp_accept(cm_id, iw_param); | ||
596 | } | ||
597 | |||
598 | static int c2_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) | ||
599 | { | ||
600 | int err; | ||
601 | |||
602 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
603 | |||
604 | err = c2_llp_reject(cm_id, pdata, pdata_len); | ||
605 | return err; | ||
606 | } | ||
607 | |||
608 | static int c2_service_create(struct iw_cm_id *cm_id, int backlog) | ||
609 | { | ||
610 | int err; | ||
611 | |||
612 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
613 | err = c2_llp_service_create(cm_id, backlog); | ||
614 | pr_debug("%s:%u err=%d\n", | ||
615 | __FUNCTION__, __LINE__, | ||
616 | err); | ||
617 | return err; | ||
618 | } | ||
619 | |||
620 | static int c2_service_destroy(struct iw_cm_id *cm_id) | ||
621 | { | ||
622 | int err; | ||
623 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
624 | |||
625 | err = c2_llp_service_destroy(cm_id); | ||
626 | |||
627 | return err; | ||
628 | } | ||
629 | |||
630 | static int c2_pseudo_up(struct net_device *netdev) | ||
631 | { | ||
632 | struct in_device *ind; | ||
633 | struct c2_dev *c2dev = netdev->priv; | ||
634 | |||
635 | ind = in_dev_get(netdev); | ||
636 | if (!ind) | ||
637 | return 0; | ||
638 | |||
639 | pr_debug("adding...\n"); | ||
640 | for_ifa(ind) { | ||
641 | #ifdef DEBUG | ||
642 | u8 *ip = (u8 *) &ifa->ifa_address; | ||
643 | |||
644 | pr_debug("%s: %d.%d.%d.%d\n", | ||
645 | ifa->ifa_label, ip[0], ip[1], ip[2], ip[3]); | ||
646 | #endif | ||
647 | c2_add_addr(c2dev, ifa->ifa_address, ifa->ifa_mask); | ||
648 | } | ||
649 | endfor_ifa(ind); | ||
650 | in_dev_put(ind); | ||
651 | |||
652 | return 0; | ||
653 | } | ||
654 | |||
655 | static int c2_pseudo_down(struct net_device *netdev) | ||
656 | { | ||
657 | struct in_device *ind; | ||
658 | struct c2_dev *c2dev = netdev->priv; | ||
659 | |||
660 | ind = in_dev_get(netdev); | ||
661 | if (!ind) | ||
662 | return 0; | ||
663 | |||
664 | pr_debug("deleting...\n"); | ||
665 | for_ifa(ind) { | ||
666 | #ifdef DEBUG | ||
667 | u8 *ip = (u8 *) &ifa->ifa_address; | ||
668 | |||
669 | pr_debug("%s: %d.%d.%d.%d\n", | ||
670 | ifa->ifa_label, ip[0], ip[1], ip[2], ip[3]); | ||
671 | #endif | ||
672 | c2_del_addr(c2dev, ifa->ifa_address, ifa->ifa_mask); | ||
673 | } | ||
674 | endfor_ifa(ind); | ||
675 | in_dev_put(ind); | ||
676 | |||
677 | return 0; | ||
678 | } | ||
679 | |||
680 | static int c2_pseudo_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | ||
681 | { | ||
682 | kfree_skb(skb); | ||
683 | return NETDEV_TX_OK; | ||
684 | } | ||
685 | |||
686 | static int c2_pseudo_change_mtu(struct net_device *netdev, int new_mtu) | ||
687 | { | ||
688 | int ret = 0; | ||
689 | |||
690 | if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) | ||
691 | return -EINVAL; | ||
692 | |||
693 | netdev->mtu = new_mtu; | ||
694 | |||
695 | /* TODO: Tell the RNIC about the new RDMA interface MTU */ | ||
696 | return ret; | ||
697 | } | ||
698 | |||
699 | static void setup(struct net_device *netdev) | ||
700 | { | ||
701 | SET_MODULE_OWNER(netdev); | ||
702 | netdev->open = c2_pseudo_up; | ||
703 | netdev->stop = c2_pseudo_down; | ||
704 | netdev->hard_start_xmit = c2_pseudo_xmit_frame; | ||
705 | netdev->get_stats = NULL; | ||
706 | netdev->tx_timeout = NULL; | ||
707 | netdev->set_mac_address = NULL; | ||
708 | netdev->change_mtu = c2_pseudo_change_mtu; | ||
709 | netdev->watchdog_timeo = 0; | ||
710 | netdev->type = ARPHRD_ETHER; | ||
711 | netdev->mtu = 1500; | ||
712 | netdev->hard_header_len = ETH_HLEN; | ||
713 | netdev->addr_len = ETH_ALEN; | ||
714 | netdev->tx_queue_len = 0; | ||
715 | netdev->flags |= IFF_NOARP; | ||
716 | return; | ||
717 | } | ||
718 | |||
719 | static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev) | ||
720 | { | ||
721 | char name[IFNAMSIZ]; | ||
722 | struct net_device *netdev; | ||
723 | |||
724 | /* change ethxxx to iwxxx */ | ||
725 | strcpy(name, "iw"); | ||
726 | strcat(name, &c2dev->netdev->name[3]); | ||
727 | netdev = alloc_netdev(sizeof(*netdev), name, setup); | ||
728 | if (!netdev) { | ||
729 | printk(KERN_ERR PFX "%s - netdev alloc failed\n", | ||
730 | __FUNCTION__); | ||
731 | return NULL; | ||
732 | } | ||
733 | |||
734 | netdev->priv = c2dev; | ||
735 | |||
736 | SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev); | ||
737 | |||
738 | memcpy_fromio(netdev->dev_addr, c2dev->kva + C2_REGS_RDMA_ENADDR, 6); | ||
739 | |||
740 | /* Print out the MAC address */ | ||
741 | pr_debug("%s: MAC %02X:%02X:%02X:%02X:%02X:%02X\n", | ||
742 | netdev->name, | ||
743 | netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], | ||
744 | netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]); | ||
745 | |||
746 | #if 0 | ||
747 | /* Disable network packets */ | ||
748 | netif_stop_queue(netdev); | ||
749 | #endif | ||
750 | return netdev; | ||
751 | } | ||
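/*
 * Name derivation in c2_pseudo_netdev_init() above, reduced to a helper
 * (illustrative only): the pseudo interface keeps the Ethernet device's
 * numeric suffix, so "eth2" becomes "iw2" -- taking ethname + 3 skips the
 * "eth" prefix.  This assumes the underlying netdev is named "ethN".
 */
static void derive_iw_name(const char *ethname, char *out)
{
	strcpy(out, "iw");		/* out must hold IFNAMSIZ bytes */
	strcat(out, ethname + 3);	/* copy the suffix after "eth" */
}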
752 | |||
753 | int c2_register_device(struct c2_dev *dev) | ||
754 | { | ||
755 | int ret; | ||
756 | int i; | ||
757 | |||
758 | /* Register pseudo network device */ | ||
759 | dev->pseudo_netdev = c2_pseudo_netdev_init(dev); | ||
760 | if (dev->pseudo_netdev) { | ||
761 | ret = register_netdev(dev->pseudo_netdev); | ||
762 | if (ret) { | ||
763 | printk(KERN_ERR PFX | ||
764 | "Unable to register netdev, ret = %d\n", ret); | ||
765 | free_netdev(dev->pseudo_netdev); | ||
766 | return ret; | ||
767 | } | ||
768 | } | ||
769 | |||
770 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
771 | strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX); | ||
772 | dev->ibdev.owner = THIS_MODULE; | ||
773 | dev->ibdev.uverbs_cmd_mask = | ||
774 | (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | | ||
775 | (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | | ||
776 | (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | | ||
777 | (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | | ||
778 | (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | | ||
779 | (1ull << IB_USER_VERBS_CMD_REG_MR) | | ||
780 | (1ull << IB_USER_VERBS_CMD_DEREG_MR) | | ||
781 | (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | | ||
782 | (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | | ||
783 | (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | | ||
784 | (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) | | ||
785 | (1ull << IB_USER_VERBS_CMD_CREATE_QP) | | ||
786 | (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | | ||
787 | (1ull << IB_USER_VERBS_CMD_POLL_CQ) | | ||
788 | (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | | ||
789 | (1ull << IB_USER_VERBS_CMD_POST_SEND) | | ||
790 | (1ull << IB_USER_VERBS_CMD_POST_RECV); | ||
791 | |||
792 | dev->ibdev.node_type = RDMA_NODE_RNIC; | ||
793 | memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid)); | ||
794 | memcpy(&dev->ibdev.node_guid, dev->pseudo_netdev->dev_addr, 6); | ||
795 | dev->ibdev.phys_port_cnt = 1; | ||
796 | dev->ibdev.dma_device = &dev->pcidev->dev; | ||
797 | dev->ibdev.class_dev.dev = &dev->pcidev->dev; | ||
798 | dev->ibdev.query_device = c2_query_device; | ||
799 | dev->ibdev.query_port = c2_query_port; | ||
800 | dev->ibdev.modify_port = c2_modify_port; | ||
801 | dev->ibdev.query_pkey = c2_query_pkey; | ||
802 | dev->ibdev.query_gid = c2_query_gid; | ||
803 | dev->ibdev.alloc_ucontext = c2_alloc_ucontext; | ||
804 | dev->ibdev.dealloc_ucontext = c2_dealloc_ucontext; | ||
805 | dev->ibdev.mmap = c2_mmap_uar; | ||
806 | dev->ibdev.alloc_pd = c2_alloc_pd; | ||
807 | dev->ibdev.dealloc_pd = c2_dealloc_pd; | ||
808 | dev->ibdev.create_ah = c2_ah_create; | ||
809 | dev->ibdev.destroy_ah = c2_ah_destroy; | ||
810 | dev->ibdev.create_qp = c2_create_qp; | ||
811 | dev->ibdev.modify_qp = c2_modify_qp; | ||
812 | dev->ibdev.destroy_qp = c2_destroy_qp; | ||
813 | dev->ibdev.create_cq = c2_create_cq; | ||
814 | dev->ibdev.destroy_cq = c2_destroy_cq; | ||
815 | dev->ibdev.poll_cq = c2_poll_cq; | ||
816 | dev->ibdev.get_dma_mr = c2_get_dma_mr; | ||
817 | dev->ibdev.reg_phys_mr = c2_reg_phys_mr; | ||
818 | dev->ibdev.reg_user_mr = c2_reg_user_mr; | ||
819 | dev->ibdev.dereg_mr = c2_dereg_mr; | ||
820 | |||
821 | dev->ibdev.alloc_fmr = NULL; | ||
822 | dev->ibdev.unmap_fmr = NULL; | ||
823 | dev->ibdev.dealloc_fmr = NULL; | ||
824 | dev->ibdev.map_phys_fmr = NULL; | ||
825 | |||
826 | dev->ibdev.attach_mcast = c2_multicast_attach; | ||
827 | dev->ibdev.detach_mcast = c2_multicast_detach; | ||
828 | dev->ibdev.process_mad = c2_process_mad; | ||
829 | |||
830 | dev->ibdev.req_notify_cq = c2_arm_cq; | ||
831 | dev->ibdev.post_send = c2_post_send; | ||
832 | dev->ibdev.post_recv = c2_post_receive; | ||
833 | |||
834 | dev->ibdev.iwcm = kmalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL); | ||
    | if (!dev->ibdev.iwcm) { | ||
    | unregister_netdev(dev->pseudo_netdev); | ||
    | free_netdev(dev->pseudo_netdev); | ||
    | return -ENOMEM; | ||
    | } | ||
835 | dev->ibdev.iwcm->add_ref = c2_add_ref; | ||
836 | dev->ibdev.iwcm->rem_ref = c2_rem_ref; | ||
837 | dev->ibdev.iwcm->get_qp = c2_get_qp; | ||
838 | dev->ibdev.iwcm->connect = c2_connect; | ||
839 | dev->ibdev.iwcm->accept = c2_accept; | ||
840 | dev->ibdev.iwcm->reject = c2_reject; | ||
841 | dev->ibdev.iwcm->create_listen = c2_service_create; | ||
842 | dev->ibdev.iwcm->destroy_listen = c2_service_destroy; | ||
843 | |||
844 | ret = ib_register_device(&dev->ibdev); | ||
845 | if (ret) | ||
846 | return ret; | ||
847 | |||
848 | for (i = 0; i < ARRAY_SIZE(c2_class_attributes); ++i) { | ||
849 | ret = class_device_create_file(&dev->ibdev.class_dev, | ||
850 | c2_class_attributes[i]); | ||
851 | if (ret) { | ||
852 | unregister_netdev(dev->pseudo_netdev); | ||
853 | free_netdev(dev->pseudo_netdev); | ||
854 | ib_unregister_device(&dev->ibdev); | ||
855 | return ret; | ||
856 | } | ||
857 | } | ||
858 | |||
859 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
860 | return 0; | ||
861 | } | ||
862 | |||
863 | void c2_unregister_device(struct c2_dev *dev) | ||
864 | { | ||
865 | pr_debug("%s:%u\n", __FUNCTION__, __LINE__); | ||
866 | unregister_netdev(dev->pseudo_netdev); | ||
867 | free_netdev(dev->pseudo_netdev); | ||
868 | ib_unregister_device(&dev->ibdev); | ||
869 | } | ||
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.h b/drivers/infiniband/hw/amso1100/c2_provider.h new file mode 100644 index 000000000000..fc906223220f --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_provider.h | |||
@@ -0,0 +1,181 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005 Ammasso, Inc. All rights reserved. | ||
3 | * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | * | ||
33 | */ | ||
34 | |||
35 | #ifndef C2_PROVIDER_H | ||
36 | #define C2_PROVIDER_H | ||
37 | #include <linux/inetdevice.h> | ||
38 | |||
39 | #include <rdma/ib_verbs.h> | ||
40 | #include <rdma/ib_pack.h> | ||
41 | |||
42 | #include "c2_mq.h" | ||
43 | #include <rdma/iw_cm.h> | ||
44 | |||
45 | #define C2_MPT_FLAG_ATOMIC (1 << 14) | ||
46 | #define C2_MPT_FLAG_REMOTE_WRITE (1 << 13) | ||
47 | #define C2_MPT_FLAG_REMOTE_READ (1 << 12) | ||
48 | #define C2_MPT_FLAG_LOCAL_WRITE (1 << 11) | ||
49 | #define C2_MPT_FLAG_LOCAL_READ (1 << 10) | ||
50 | |||
51 | struct c2_buf_list { | ||
52 | void *buf; | ||
53 | DECLARE_PCI_UNMAP_ADDR(mapping) | ||
54 | }; | ||
55 | |||
56 | |||
57 | /* The user context keeps track of objects allocated for a | ||
58 | * particular user-mode client. */ | ||
59 | struct c2_ucontext { | ||
60 | struct ib_ucontext ibucontext; | ||
61 | }; | ||
62 | |||
63 | struct c2_mtt; | ||
64 | |||
65 | /* All objects associated with a PD are kept in the | ||
66 | * associated user context if present. | ||
67 | */ | ||
68 | struct c2_pd { | ||
69 | struct ib_pd ibpd; | ||
70 | u32 pd_id; | ||
71 | }; | ||
72 | |||
73 | struct c2_mr { | ||
74 | struct ib_mr ibmr; | ||
75 | struct c2_pd *pd; | ||
76 | }; | ||
77 | |||
78 | struct c2_av; | ||
79 | |||
80 | enum c2_ah_type { | ||
81 | C2_AH_ON_HCA, | ||
82 | C2_AH_PCI_POOL, | ||
83 | C2_AH_KMALLOC | ||
84 | }; | ||
85 | |||
86 | struct c2_ah { | ||
87 | struct ib_ah ibah; | ||
88 | }; | ||
89 | |||
90 | struct c2_cq { | ||
91 | struct ib_cq ibcq; | ||
92 | spinlock_t lock; | ||
93 | atomic_t refcount; | ||
94 | int cqn; | ||
95 | int is_kernel; | ||
96 | wait_queue_head_t wait; | ||
97 | |||
98 | u32 adapter_handle; | ||
99 | struct c2_mq mq; | ||
100 | }; | ||
101 | |||
102 | struct c2_wq { | ||
103 | spinlock_t lock; | ||
104 | }; | ||
105 | struct iw_cm_id; | ||
106 | struct c2_qp { | ||
107 | struct ib_qp ibqp; | ||
108 | struct iw_cm_id *cm_id; | ||
109 | spinlock_t lock; | ||
110 | atomic_t refcount; | ||
111 | wait_queue_head_t wait; | ||
112 | int qpn; | ||
113 | |||
114 | u32 adapter_handle; | ||
115 | u32 send_sgl_depth; | ||
116 | u32 recv_sgl_depth; | ||
117 | u32 rdma_write_sgl_depth; | ||
118 | u8 state; | ||
119 | |||
120 | struct c2_mq sq_mq; | ||
121 | struct c2_mq rq_mq; | ||
122 | }; | ||
123 | |||
124 | struct c2_cr_query_attrs { | ||
125 | u32 local_addr; | ||
126 | u32 remote_addr; | ||
127 | u16 local_port; | ||
128 | u16 remote_port; | ||
129 | }; | ||
130 | |||
131 | static inline struct c2_pd *to_c2pd(struct ib_pd *ibpd) | ||
132 | { | ||
133 | return container_of(ibpd, struct c2_pd, ibpd); | ||
134 | } | ||
135 | |||
136 | static inline struct c2_ucontext *to_c2ucontext(struct ib_ucontext *ibucontext) | ||
137 | { | ||
138 | return container_of(ibucontext, struct c2_ucontext, ibucontext); | ||
139 | } | ||
140 | |||
141 | static inline struct c2_mr *to_c2mr(struct ib_mr *ibmr) | ||
142 | { | ||
143 | return container_of(ibmr, struct c2_mr, ibmr); | ||
144 | } | ||
145 | |||
146 | |||
147 | static inline struct c2_ah *to_c2ah(struct ib_ah *ibah) | ||
148 | { | ||
149 | return container_of(ibah, struct c2_ah, ibah); | ||
150 | } | ||
151 | |||
152 | static inline struct c2_cq *to_c2cq(struct ib_cq *ibcq) | ||
153 | { | ||
154 | return container_of(ibcq, struct c2_cq, ibcq); | ||
155 | } | ||
156 | |||
157 | static inline struct c2_qp *to_c2qp(struct ib_qp *ibqp) | ||
158 | { | ||
159 | return container_of(ibqp, struct c2_qp, ibqp); | ||
160 | } | ||
161 | |||
162 | static inline int is_rnic_addr(struct net_device *netdev, u32 addr) | ||
163 | { | ||
164 | struct in_device *ind; | ||
165 | int ret = 0; | ||
166 | |||
167 | ind = in_dev_get(netdev); | ||
168 | if (!ind) | ||
169 | return 0; | ||
170 | |||
171 | for_ifa(ind) { | ||
172 | if (ifa->ifa_address == addr) { | ||
173 | ret = 1; | ||
174 | break; | ||
175 | } | ||
176 | } | ||
177 | endfor_ifa(ind); | ||
178 | in_dev_put(ind); | ||
179 | return ret; | ||
180 | } | ||
181 | #endif /* C2_PROVIDER_H */ | ||
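/*
 * The to_c2*() helpers above all follow the embedded-struct pattern: each
 * c2_* object embeds its ib_* counterpart, the core verbs layer passes the
 * embedded struct around, and the driver recovers its private object with
 * container_of().  A generic sketch (example_qp is illustrative only):
 */
struct example_qp {
	struct ib_qp ibqp;	/* embedded, shared with the IB core */
	int private_state;	/* driver-private fields follow */
};

static inline struct example_qp *to_example_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct example_qp, ibqp);
}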
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c new file mode 100644 index 000000000000..12261132b077 --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_qp.c | |||
@@ -0,0 +1,975 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2004 Topspin Communications. All rights reserved. | ||
3 | * Copyright (c) 2005 Cisco Systems. All rights reserved. | ||
4 | * Copyright (c) 2005 Mellanox Technologies. All rights reserved. | ||
5 | * Copyright (c) 2004 Voltaire, Inc. All rights reserved. | ||
6 | * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. | ||
7 | * | ||
8 | * This software is available to you under a choice of one of two | ||
9 | * licenses. You may choose to be licensed under the terms of the GNU | ||
10 | * General Public License (GPL) Version 2, available from the file | ||
11 | * COPYING in the main directory of this source tree, or the | ||
12 | * OpenIB.org BSD license below: | ||
13 | * | ||
14 | * Redistribution and use in source and binary forms, with or | ||
15 | * without modification, are permitted provided that the following | ||
16 | * conditions are met: | ||
17 | * | ||
18 | * - Redistributions of source code must retain the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer. | ||
21 | * | ||
22 | * - Redistributions in binary form must reproduce the above | ||
23 | * copyright notice, this list of conditions and the following | ||
24 | * disclaimer in the documentation and/or other materials | ||
25 | * provided with the distribution. | ||
26 | * | ||
27 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
28 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
29 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
30 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
31 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
32 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
33 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
34 | * SOFTWARE. | ||
35 | * | ||
36 | */ | ||
37 | |||
38 | #include "c2.h" | ||
39 | #include "c2_vq.h" | ||
40 | #include "c2_status.h" | ||
41 | |||
42 | #define C2_MAX_ORD_PER_QP 128 | ||
43 | #define C2_MAX_IRD_PER_QP 128 | ||
44 | |||
45 | #define C2_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | hint_count) | ||
46 | #define C2_HINT_GET_INDEX(hint) (((hint) & 0x7FFF0000) >> 16) | ||
47 | #define C2_HINT_GET_COUNT(hint) ((hint) & 0x0000FFFF) | ||
48 | |||
49 | #define NO_SUPPORT -1 | ||
50 | static const u8 c2_opcode[] = { | ||
51 | [IB_WR_SEND] = C2_WR_TYPE_SEND, | ||
52 | [IB_WR_SEND_WITH_IMM] = NO_SUPPORT, | ||
53 | [IB_WR_RDMA_WRITE] = C2_WR_TYPE_RDMA_WRITE, | ||
54 | [IB_WR_RDMA_WRITE_WITH_IMM] = NO_SUPPORT, | ||
55 | [IB_WR_RDMA_READ] = C2_WR_TYPE_RDMA_READ, | ||
56 | [IB_WR_ATOMIC_CMP_AND_SWP] = NO_SUPPORT, | ||
57 | [IB_WR_ATOMIC_FETCH_AND_ADD] = NO_SUPPORT, | ||
58 | }; | ||
59 | |||
60 | static int to_c2_state(enum ib_qp_state ib_state) | ||
61 | { | ||
62 | switch (ib_state) { | ||
63 | case IB_QPS_RESET: | ||
64 | return C2_QP_STATE_IDLE; | ||
65 | case IB_QPS_RTS: | ||
66 | return C2_QP_STATE_RTS; | ||
67 | case IB_QPS_SQD: | ||
68 | return C2_QP_STATE_CLOSING; | ||
69 | case IB_QPS_SQE: | ||
70 | return C2_QP_STATE_CLOSING; | ||
71 | case IB_QPS_ERR: | ||
72 | return C2_QP_STATE_ERROR; | ||
73 | default: | ||
74 | return -1; | ||
75 | } | ||
76 | } | ||
77 | |||
78 | static int to_ib_state(enum c2_qp_state c2_state) | ||
79 | { | ||
80 | switch (c2_state) { | ||
81 | case C2_QP_STATE_IDLE: | ||
82 | return IB_QPS_RESET; | ||
83 | case C2_QP_STATE_CONNECTING: | ||
84 | return IB_QPS_RTR; | ||
85 | case C2_QP_STATE_RTS: | ||
86 | return IB_QPS_RTS; | ||
87 | case C2_QP_STATE_CLOSING: | ||
88 | return IB_QPS_SQD; | ||
89 | case C2_QP_STATE_ERROR: | ||
90 | return IB_QPS_ERR; | ||
91 | case C2_QP_STATE_TERMINATE: | ||
92 | return IB_QPS_SQE; | ||
93 | default: | ||
94 | return -1; | ||
95 | } | ||
96 | } | ||
97 | |||
98 | static const char *to_ib_state_str(int ib_state) | ||
99 | { | ||
100 | static const char *state_str[] = { | ||
101 | "IB_QPS_RESET", | ||
102 | "IB_QPS_INIT", | ||
103 | "IB_QPS_RTR", | ||
104 | "IB_QPS_RTS", | ||
105 | "IB_QPS_SQD", | ||
106 | "IB_QPS_SQE", | ||
107 | "IB_QPS_ERR" | ||
108 | }; | ||
109 | if (ib_state < IB_QPS_RESET || | ||
110 | ib_state > IB_QPS_ERR) | ||
111 | return "<invalid IB QP state>"; | ||
112 | |||
113 | ib_state -= IB_QPS_RESET; | ||
114 | return state_str[ib_state]; | ||
115 | } | ||
116 | |||
117 | void c2_set_qp_state(struct c2_qp *qp, int c2_state) | ||
118 | { | ||
119 | int new_state = to_ib_state(c2_state); | ||
120 | |||
121 | pr_debug("%s: qp[%p] state modify %s --> %s\n", | ||
122 | __FUNCTION__, | ||
123 | qp, | ||
124 | to_ib_state_str(qp->state), | ||
125 | to_ib_state_str(new_state)); | ||
126 | qp->state = new_state; | ||
127 | } | ||
128 | |||
129 | #define C2_QP_NO_ATTR_CHANGE 0xFFFFFFFF | ||
130 | |||
131 | int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp, | ||
132 | struct ib_qp_attr *attr, int attr_mask) | ||
133 | { | ||
134 | struct c2wr_qp_modify_req wr; | ||
135 | struct c2wr_qp_modify_rep *reply; | ||
136 | struct c2_vq_req *vq_req; | ||
137 | unsigned long flags; | ||
138 | u8 next_state; | ||
139 | int err; | ||
140 | |||
141 | pr_debug("%s:%d qp=%p, %s --> %s\n", | ||
142 | __FUNCTION__, __LINE__, | ||
143 | qp, | ||
144 | to_ib_state_str(qp->state), | ||
145 | to_ib_state_str(attr->qp_state)); | ||
146 | |||
147 | vq_req = vq_req_alloc(c2dev); | ||
148 | if (!vq_req) | ||
149 | return -ENOMEM; | ||
150 | |||
151 | c2_wr_set_id(&wr, CCWR_QP_MODIFY); | ||
152 | wr.hdr.context = (unsigned long) vq_req; | ||
153 | wr.rnic_handle = c2dev->adapter_handle; | ||
154 | wr.qp_handle = qp->adapter_handle; | ||
155 | wr.ord = cpu_to_be32(C2_QP_NO_ATTR_CHANGE); | ||
156 | wr.ird = cpu_to_be32(C2_QP_NO_ATTR_CHANGE); | ||
157 | wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE); | ||
158 | wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE); | ||
159 | |||
160 | if (attr_mask & IB_QP_STATE) { | ||
161 | /* Ensure the state is valid */ | ||
162 | if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR) { | ||
163 | err = -EINVAL; | ||
    | goto bail0; | ||
    | } | ||
164 | |||
165 | wr.next_qp_state = cpu_to_be32(to_c2_state(attr->qp_state)); | ||
166 | |||
167 | if (attr->qp_state == IB_QPS_ERR) { | ||
168 | spin_lock_irqsave(&qp->lock, flags); | ||
169 | if (qp->cm_id && qp->state == IB_QPS_RTS) { | ||
170 | pr_debug("Generating CLOSE event for QP-->ERR, " | ||
171 | "qp=%p, cm_id=%p\n",qp,qp->cm_id); | ||
172 | /* Generate an CLOSE event */ | ||
173 | vq_req->cm_id = qp->cm_id; | ||
174 | vq_req->event = IW_CM_EVENT_CLOSE; | ||
175 | } | ||
176 | spin_unlock_irqrestore(&qp->lock, flags); | ||
177 | } | ||
178 | next_state = attr->qp_state; | ||
179 | |||
180 | } else if (attr_mask & IB_QP_CUR_STATE) { | ||
181 | |||
182 | if (attr->cur_qp_state != IB_QPS_RTR && | ||
183 | attr->cur_qp_state != IB_QPS_RTS && | ||
184 | attr->cur_qp_state != IB_QPS_SQD && | ||
185 | attr->cur_qp_state != IB_QPS_SQE) { | ||
186 | err = -EINVAL; | ||
    | goto bail0; | ||
    | } | ||
188 | wr.next_qp_state = | ||
189 | cpu_to_be32(to_c2_state(attr->cur_qp_state)); | ||
190 | |||
191 | next_state = attr->cur_qp_state; | ||
192 | |||
193 | } else { | ||
194 | err = 0; | ||
195 | goto bail0; | ||
196 | } | ||
197 | |||
198 | /* reference the request struct */ | ||
199 | vq_req_get(c2dev, vq_req); | ||
200 | |||
201 | err = vq_send_wr(c2dev, (union c2wr *) & wr); | ||
202 | if (err) { | ||
203 | vq_req_put(c2dev, vq_req); | ||
204 | goto bail0; | ||
205 | } | ||
206 | |||
207 | err = vq_wait_for_reply(c2dev, vq_req); | ||
208 | if (err) | ||
209 | goto bail0; | ||
210 | |||
211 | reply = (struct c2wr_qp_modify_rep *) (unsigned long) vq_req->reply_msg; | ||
212 | if (!reply) { | ||
213 | err = -ENOMEM; | ||
214 | goto bail0; | ||
215 | } | ||
216 | |||
217 | err = c2_errno(reply); | ||
218 | if (!err) | ||
219 | qp->state = next_state; | ||
220 | #ifdef DEBUG | ||
221 | else | ||
222 | pr_debug("%s: c2_errno=%d\n", __FUNCTION__, err); | ||
223 | #endif | ||
224 | /* | ||
225 | * If we are moving the QP to error and generating the CLOSE | ||
226 | * event here, then we need to drop the cm_id reference now, | ||
227 | * because the adapter will not generate a close event. | ||
228 | */ | ||
229 | spin_lock_irqsave(&qp->lock, flags); | ||
230 | if (vq_req->event==IW_CM_EVENT_CLOSE && qp->cm_id) { | ||
231 | qp->cm_id->rem_ref(qp->cm_id); | ||
232 | qp->cm_id = NULL; | ||
233 | } | ||
234 | spin_unlock_irqrestore(&qp->lock, flags); | ||
235 | |||
236 | vq_repbuf_free(c2dev, reply); | ||
237 | bail0: | ||
238 | vq_req_free(c2dev, vq_req); | ||
239 | |||
240 | pr_debug("%s:%d qp=%p, cur_state=%s\n", | ||
241 | __FUNCTION__, __LINE__, | ||
242 | qp, | ||
243 | to_ib_state_str(qp->state)); | ||
244 | return err; | ||
245 | } | ||
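/*
 * The verbs-queue request/reply pattern used by c2_qp_modify() and the
 * other control paths in this file, reduced to its skeleton (error
 * handling elided; every call is the driver's own, visible in this patch):
 *
 *	vq_req = vq_req_alloc(c2dev);             allocate request state
 *	c2_wr_set_id(&wr, <opcode>);              build the work request
 *	wr.hdr.context = (unsigned long) vq_req;  reply routed by context
 *	vq_req_get(c2dev, vq_req);                ref held for the irq handler
 *	vq_send_wr(c2dev, (union c2wr *) &wr);    post to the adapter
 *	vq_wait_for_reply(c2dev, vq_req);         sleep until reply or timeout
 *	reply = (void *)(unsigned long) vq_req->reply_msg;
 *	err = c2_errno(reply);                    adapter status -> errno
 *	vq_repbuf_free(c2dev, reply);             return the reply buffer
 *	vq_req_free(c2dev, vq_req);               drop the request state
 */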
246 | |||
247 | int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp, | ||
248 | int ord, int ird) | ||
249 | { | ||
250 | struct c2wr_qp_modify_req wr; | ||
251 | struct c2wr_qp_modify_rep *reply; | ||
252 | struct c2_vq_req *vq_req; | ||
253 | int err; | ||
254 | |||
255 | vq_req = vq_req_alloc(c2dev); | ||
256 | if (!vq_req) | ||
257 | return -ENOMEM; | ||
258 | |||
259 | c2_wr_set_id(&wr, CCWR_QP_MODIFY); | ||
260 | wr.hdr.context = (unsigned long) vq_req; | ||
261 | wr.rnic_handle = c2dev->adapter_handle; | ||
262 | wr.qp_handle = qp->adapter_handle; | ||
263 | wr.ord = cpu_to_be32(ord); | ||
264 | wr.ird = cpu_to_be32(ird); | ||
265 | wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE); | ||
266 | wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE); | ||
267 | wr.next_qp_state = cpu_to_be32(C2_QP_NO_ATTR_CHANGE); | ||
268 | |||
269 | /* reference the request struct */ | ||
270 | vq_req_get(c2dev, vq_req); | ||
271 | |||
272 | err = vq_send_wr(c2dev, (union c2wr *) & wr); | ||
273 | if (err) { | ||
274 | vq_req_put(c2dev, vq_req); | ||
275 | goto bail0; | ||
276 | } | ||
277 | |||
278 | err = vq_wait_for_reply(c2dev, vq_req); | ||
279 | if (err) | ||
280 | goto bail0; | ||
281 | |||
282 | reply = (struct c2wr_qp_modify_rep *) (unsigned long) | ||
283 | vq_req->reply_msg; | ||
284 | if (!reply) { | ||
285 | err = -ENOMEM; | ||
286 | goto bail0; | ||
287 | } | ||
288 | |||
289 | err = c2_errno(reply); | ||
290 | vq_repbuf_free(c2dev, reply); | ||
291 | bail0: | ||
292 | vq_req_free(c2dev, vq_req); | ||
293 | return err; | ||
294 | } | ||
295 | |||
296 | static int destroy_qp(struct c2_dev *c2dev, struct c2_qp *qp) | ||
297 | { | ||
298 | struct c2_vq_req *vq_req; | ||
299 | struct c2wr_qp_destroy_req wr; | ||
300 | struct c2wr_qp_destroy_rep *reply; | ||
301 | unsigned long flags; | ||
302 | int err; | ||
303 | |||
304 | /* | ||
305 | * Allocate a verb request message | ||
306 | */ | ||
307 | vq_req = vq_req_alloc(c2dev); | ||
308 | if (!vq_req) { | ||
309 | return -ENOMEM; | ||
310 | } | ||
311 | |||
312 | /* | ||
313 | * Initialize the WR | ||
314 | */ | ||
315 | c2_wr_set_id(&wr, CCWR_QP_DESTROY); | ||
316 | wr.hdr.context = (unsigned long) vq_req; | ||
317 | wr.rnic_handle = c2dev->adapter_handle; | ||
318 | wr.qp_handle = qp->adapter_handle; | ||
319 | |||
320 | /* | ||
321 | * reference the request struct. dereferenced in the int handler. | ||
322 | */ | ||
323 | vq_req_get(c2dev, vq_req); | ||
324 | |||
325 | spin_lock_irqsave(&qp->lock, flags); | ||
326 | if (qp->cm_id && qp->state == IB_QPS_RTS) { | ||
327 | pr_debug("destroy_qp: generating CLOSE event for QP-->ERR, " | ||
328 | "qp=%p, cm_id=%p\n",qp,qp->cm_id); | ||
329 | /* Generate an CLOSE event */ | ||
330 | vq_req->qp = qp; | ||
331 | vq_req->cm_id = qp->cm_id; | ||
332 | vq_req->event = IW_CM_EVENT_CLOSE; | ||
333 | } | ||
334 | spin_unlock_irqrestore(&qp->lock, flags); | ||
335 | |||
336 | /* | ||
337 | * Send WR to adapter | ||
338 | */ | ||
339 | err = vq_send_wr(c2dev, (union c2wr *) & wr); | ||
340 | if (err) { | ||
341 | vq_req_put(c2dev, vq_req); | ||
342 | goto bail0; | ||
343 | } | ||
344 | |||
345 | /* | ||
346 | * Wait for reply from adapter | ||
347 | */ | ||
348 | err = vq_wait_for_reply(c2dev, vq_req); | ||
349 | if (err) { | ||
350 | goto bail0; | ||
351 | } | ||
352 | |||
353 | /* | ||
354 | * Process reply | ||
355 | */ | ||
356 | reply = (struct c2wr_qp_destroy_rep *) (unsigned long) (vq_req->reply_msg); | ||
357 | if (!reply) { | ||
358 | err = -ENOMEM; | ||
359 | goto bail0; | ||
360 | } | ||
361 | |||
362 | spin_lock_irqsave(&qp->lock, flags); | ||
363 | if (qp->cm_id) { | ||
364 | qp->cm_id->rem_ref(qp->cm_id); | ||
365 | qp->cm_id = NULL; | ||
366 | } | ||
367 | spin_unlock_irqrestore(&qp->lock, flags); | ||
368 | |||
369 | vq_repbuf_free(c2dev, reply); | ||
370 | bail0: | ||
371 | vq_req_free(c2dev, vq_req); | ||
372 | return err; | ||
373 | } | ||
374 | |||
375 | static int c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp) | ||
376 | { | ||
377 | int ret; | ||
378 | |||
379 | do { | ||
380 | spin_lock_irq(&c2dev->qp_table.lock); | ||
381 | ret = idr_get_new_above(&c2dev->qp_table.idr, qp, | ||
382 | c2dev->qp_table.last++, &qp->qpn); | ||
383 | spin_unlock_irq(&c2dev->qp_table.lock); | ||
384 | } while ((ret == -EAGAIN) && | ||
385 | idr_pre_get(&c2dev->qp_table.idr, GFP_KERNEL)); | ||
386 | return ret; | ||
387 | } | ||
388 | |||
389 | static void c2_free_qpn(struct c2_dev *c2dev, int qpn) | ||
390 | { | ||
391 | spin_lock_irq(&c2dev->qp_table.lock); | ||
392 | idr_remove(&c2dev->qp_table.idr, qpn); | ||
393 | spin_unlock_irq(&c2dev->qp_table.lock); | ||
394 | } | ||
395 | |||
396 | struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn) | ||
397 | { | ||
398 | unsigned long flags; | ||
399 | struct c2_qp *qp; | ||
400 | |||
401 | spin_lock_irqsave(&c2dev->qp_table.lock, flags); | ||
402 | qp = idr_find(&c2dev->qp_table.idr, qpn); | ||
403 | spin_unlock_irqrestore(&c2dev->qp_table.lock, flags); | ||
404 | return qp; | ||
405 | } | ||
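/*
 * The idr idiom in c2_alloc_qpn() above, isolated: with this era's idr
 * API, idr_get_new_above() may return -EAGAIN when the tree needs memory,
 * which must be preallocated outside the spinlock because
 * idr_pre_get(GFP_KERNEL) can sleep.  Generic sketch (names illustrative):
 */
static int example_alloc_id(struct idr *idr, spinlock_t *lock, void *obj,
			    int starting_id, int *id)
{
	int ret;

	do {
		spin_lock_irq(lock);
		ret = idr_get_new_above(idr, obj, starting_id, id);
		spin_unlock_irq(lock);
	} while (ret == -EAGAIN && idr_pre_get(idr, GFP_KERNEL));

	return ret;
}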
406 | |||
407 | int c2_alloc_qp(struct c2_dev *c2dev, | ||
408 | struct c2_pd *pd, | ||
409 | struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp) | ||
410 | { | ||
411 | struct c2wr_qp_create_req wr; | ||
412 | struct c2wr_qp_create_rep *reply; | ||
413 | struct c2_vq_req *vq_req; | ||
414 | struct c2_cq *send_cq = to_c2cq(qp_attrs->send_cq); | ||
415 | struct c2_cq *recv_cq = to_c2cq(qp_attrs->recv_cq); | ||
416 | unsigned long peer_pa; | ||
417 | u32 q_size, msg_size, mmap_size; | ||
418 | void __iomem *mmap; | ||
419 | int err; | ||
420 | |||
421 | err = c2_alloc_qpn(c2dev, qp); | ||
422 | if (err) | ||
423 | return err; | ||
424 | qp->ibqp.qp_num = qp->qpn; | ||
425 | qp->ibqp.qp_type = IB_QPT_RC; | ||
426 | |||
427 | /* Allocate the SQ and RQ shared pointers */ | ||
428 | qp->sq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool, | ||
429 | &qp->sq_mq.shared_dma, GFP_KERNEL); | ||
430 | if (!qp->sq_mq.shared) { | ||
431 | err = -ENOMEM; | ||
432 | goto bail0; | ||
433 | } | ||
434 | |||
435 | qp->rq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool, | ||
436 | &qp->rq_mq.shared_dma, GFP_KERNEL); | ||
437 | if (!qp->rq_mq.shared) { | ||
438 | err = -ENOMEM; | ||
439 | goto bail1; | ||
440 | } | ||
441 | |||
442 | /* Allocate the verbs request */ | ||
443 | vq_req = vq_req_alloc(c2dev); | ||
444 | if (vq_req == NULL) { | ||
445 | err = -ENOMEM; | ||
446 | goto bail2; | ||
447 | } | ||
448 | |||
449 | /* Initialize the work request */ | ||
450 | memset(&wr, 0, sizeof(wr)); | ||
451 | c2_wr_set_id(&wr, CCWR_QP_CREATE); | ||
452 | wr.hdr.context = (unsigned long) vq_req; | ||
453 | wr.rnic_handle = c2dev->adapter_handle; | ||
454 | wr.sq_cq_handle = send_cq->adapter_handle; | ||
455 | wr.rq_cq_handle = recv_cq->adapter_handle; | ||
456 | wr.sq_depth = cpu_to_be32(qp_attrs->cap.max_send_wr + 1); | ||
457 | wr.rq_depth = cpu_to_be32(qp_attrs->cap.max_recv_wr + 1); | ||
458 | wr.srq_handle = 0; | ||
459 | wr.flags = cpu_to_be32(QP_RDMA_READ | QP_RDMA_WRITE | QP_MW_BIND | | ||
460 | QP_ZERO_STAG | QP_RDMA_READ_RESPONSE); | ||
461 | wr.send_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge); | ||
462 | wr.recv_sgl_depth = cpu_to_be32(qp_attrs->cap.max_recv_sge); | ||
463 | wr.rdma_write_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge); | ||
464 | wr.shared_sq_ht = cpu_to_be64(qp->sq_mq.shared_dma); | ||
465 | wr.shared_rq_ht = cpu_to_be64(qp->rq_mq.shared_dma); | ||
466 | wr.ord = cpu_to_be32(C2_MAX_ORD_PER_QP); | ||
467 | wr.ird = cpu_to_be32(C2_MAX_IRD_PER_QP); | ||
468 | wr.pd_id = pd->pd_id; | ||
469 | wr.user_context = (unsigned long) qp; | ||
470 | |||
471 | vq_req_get(c2dev, vq_req); | ||
472 | |||
473 | /* Send the WR to the adapter */ | ||
474 | err = vq_send_wr(c2dev, (union c2wr *) & wr); | ||
475 | if (err) { | ||
476 | vq_req_put(c2dev, vq_req); | ||
477 | goto bail3; | ||
478 | } | ||
479 | |||
480 | /* Wait for the verb reply */ | ||
481 | err = vq_wait_for_reply(c2dev, vq_req); | ||
482 | if (err) { | ||
483 | goto bail3; | ||
484 | } | ||
485 | |||
486 | /* Process the reply */ | ||
487 | reply = (struct c2wr_qp_create_rep *) (unsigned long) (vq_req->reply_msg); | ||
488 | if (!reply) { | ||
489 | err = -ENOMEM; | ||
490 | goto bail3; | ||
491 | } | ||
492 | |||
493 | if ((err = c2_wr_get_result(reply)) != 0) { | ||
494 | goto bail4; | ||
495 | } | ||
496 | |||
497 | /* Fill in the kernel QP struct */ | ||
498 | atomic_set(&qp->refcount, 1); | ||
499 | qp->adapter_handle = reply->qp_handle; | ||
500 | qp->state = IB_QPS_RESET; | ||
501 | qp->send_sgl_depth = qp_attrs->cap.max_send_sge; | ||
502 | qp->rdma_write_sgl_depth = qp_attrs->cap.max_send_sge; | ||
503 | qp->recv_sgl_depth = qp_attrs->cap.max_recv_sge; | ||
504 | |||
505 | /* Initialize the SQ MQ */ | ||
506 | q_size = be32_to_cpu(reply->sq_depth); | ||
507 | msg_size = be32_to_cpu(reply->sq_msg_size); | ||
508 | peer_pa = c2dev->pa + be32_to_cpu(reply->sq_mq_start); | ||
509 | mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size); | ||
510 | mmap = ioremap_nocache(peer_pa, mmap_size); | ||
511 | if (!mmap) { | ||
512 | err = -ENOMEM; | ||
513 | goto bail5; | ||
514 | } | ||
515 | |||
516 | c2_mq_req_init(&qp->sq_mq, | ||
517 | be32_to_cpu(reply->sq_mq_index), | ||
518 | q_size, | ||
519 | msg_size, | ||
520 | mmap + sizeof(struct c2_mq_shared), /* pool start */ | ||
521 | mmap, /* peer */ | ||
522 | C2_MQ_ADAPTER_TARGET); | ||
523 | |||
524 | /* Initialize the RQ mq */ | ||
525 | q_size = be32_to_cpu(reply->rq_depth); | ||
526 | msg_size = be32_to_cpu(reply->rq_msg_size); | ||
527 | peer_pa = c2dev->pa + be32_to_cpu(reply->rq_mq_start); | ||
528 | mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size); | ||
529 | mmap = ioremap_nocache(peer_pa, mmap_size); | ||
530 | if (!mmap) { | ||
531 | err = -ENOMEM; | ||
532 | goto bail6; | ||
533 | } | ||
534 | |||
535 | c2_mq_req_init(&qp->rq_mq, | ||
536 | be32_to_cpu(reply->rq_mq_index), | ||
537 | q_size, | ||
538 | msg_size, | ||
539 | mmap + sizeof(struct c2_mq_shared), /* pool start */ | ||
540 | mmap, /* peer */ | ||
541 | C2_MQ_ADAPTER_TARGET); | ||
542 | |||
543 | vq_repbuf_free(c2dev, reply); | ||
544 | vq_req_free(c2dev, vq_req); | ||
545 | |||
546 | return 0; | ||
547 | |||
548 | bail6: | ||
549 | iounmap(qp->sq_mq.peer); | ||
550 | bail5: | ||
551 | destroy_qp(c2dev, qp); | ||
552 | bail4: | ||
553 | vq_repbuf_free(c2dev, reply); | ||
554 | bail3: | ||
555 | vq_req_free(c2dev, vq_req); | ||
556 | bail2: | ||
557 | c2_free_mqsp(qp->rq_mq.shared); | ||
558 | bail1: | ||
559 | c2_free_mqsp(qp->sq_mq.shared); | ||
560 | bail0: | ||
561 | c2_free_qpn(c2dev, qp->qpn); | ||
562 | return err; | ||
563 | } | ||
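/*
 * Worked example of the MQ mapping arithmetic in c2_alloc_qp() above:
 * with, say, sq_depth = 64 and sq_msg_size = 256 from the adapter's
 * reply, the queue body needs 64 * 256 = 16384 bytes after the
 * c2_mq_shared header, so mmap_size = PAGE_ALIGN(sizeof(struct
 * c2_mq_shared) + 16384) and the message pool starts at
 * mmap + sizeof(struct c2_mq_shared).
 */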
564 | |||
565 | void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp) | ||
566 | { | ||
567 | struct c2_cq *send_cq; | ||
568 | struct c2_cq *recv_cq; | ||
569 | |||
570 | send_cq = to_c2cq(qp->ibqp.send_cq); | ||
571 | recv_cq = to_c2cq(qp->ibqp.recv_cq); | ||
572 | |||
573 | /* | ||
574 | * Lock CQs here, so that CQ polling code can do QP lookup | ||
575 | * without taking a lock. | ||
576 | */ | ||
577 | spin_lock_irq(&send_cq->lock); | ||
578 | if (send_cq != recv_cq) | ||
579 | spin_lock(&recv_cq->lock); | ||
580 | |||
581 | c2_free_qpn(c2dev, qp->qpn); | ||
582 | |||
583 | if (send_cq != recv_cq) | ||
584 | spin_unlock(&recv_cq->lock); | ||
585 | spin_unlock_irq(&send_cq->lock); | ||
586 | |||
587 | /* | ||
588 | * Destroy the QP in the RNIC... | ||
589 | */ | ||
590 | destroy_qp(c2dev, qp); | ||
591 | |||
592 | /* | ||
593 | * Mark any unreaped CQEs as null and void. | ||
594 | */ | ||
595 | c2_cq_clean(c2dev, qp, send_cq->cqn); | ||
596 | if (send_cq != recv_cq) | ||
597 | c2_cq_clean(c2dev, qp, recv_cq->cqn); | ||
598 | /* | ||
599 | * Unmap the MQs and return the shared pointers | ||
600 | * to the message pool. | ||
601 | */ | ||
602 | iounmap(qp->sq_mq.peer); | ||
603 | iounmap(qp->rq_mq.peer); | ||
604 | c2_free_mqsp(qp->sq_mq.shared); | ||
605 | c2_free_mqsp(qp->rq_mq.shared); | ||
606 | |||
607 | atomic_dec(&qp->refcount); | ||
608 | wait_event(qp->wait, !atomic_read(&qp->refcount)); | ||
609 | } | ||
610 | |||
611 | /* | ||
612 | * Function: move_sgl | ||
613 | * | ||
614 | * Description: | ||
615 | * Move an SGL from the user's work request struct into a CCIL Work Request | ||
616 | * message, swapping to WR byte order and ensuring the total length doesn't | ||
617 | * overflow. | ||
618 | * | ||
619 | * IN: | ||
620 | * dst - ptr to CCIL Work Request message SGL memory. | ||
621 | * src - ptr to the consumer's SGL memory. | ||
622 | * | ||
623 | * OUT: none | ||
624 | * | ||
625 | * Return: | ||
626 | * CCIL status codes. | ||
627 | */ | ||
628 | static int | ||
629 | move_sgl(struct c2_data_addr * dst, struct ib_sge *src, int count, u32 * p_len, | ||
630 | u8 * actual_count) | ||
631 | { | ||
632 | u32 tot = 0; /* running total */ | ||
633 | u8 acount = 0; /* running total non-0 len sge's */ | ||
634 | |||
635 | while (count > 0) { | ||
636 | /* | ||
637 | * If the addition of this SGE causes the | ||
638 | * total SGL length to exceed 2^32-1, then | ||
639 | * fail-n-bail. | ||
640 | * | ||
641 | * If the current total plus the next element length | ||
642 | * wraps, then it will go negative and be less than the | ||
643 | * current total... | ||
644 | */ | ||
645 | if ((tot + src->length) < tot) { | ||
646 | return -EINVAL; | ||
647 | } | ||
648 | /* | ||
649 | * Bug: 1456 (as well as 1498 & 1643) | ||
650 | * Skip over any sge's supplied with len=0 | ||
651 | */ | ||
652 | if (src->length) { | ||
653 | tot += src->length; | ||
654 | dst->stag = cpu_to_be32(src->lkey); | ||
655 | dst->to = cpu_to_be64(src->addr); | ||
656 | dst->length = cpu_to_be32(src->length); | ||
657 | dst++; | ||
658 | acount++; | ||
659 | } | ||
660 | src++; | ||
661 | count--; | ||
662 | } | ||
663 | |||
664 | if (acount == 0) { | ||
665 | /* | ||
666 | * Bug: 1476 (as well as 1498, 1456 and 1643) | ||
667 | * Setup the SGL in the WR to make it easier for the RNIC. | ||
668 | * This way, the FW doesn't have to deal with special cases. | ||
669 | * Setting length=0 should be sufficient. | ||
670 | */ | ||
671 | dst->stag = 0; | ||
672 | dst->to = 0; | ||
673 | dst->length = 0; | ||
674 | } | ||
675 | |||
676 | *p_len = tot; | ||
677 | *actual_count = acount; | ||
678 | return 0; | ||
679 | } | ||
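/*
 * The unsigned-wrap test used in move_sgl() above, in isolation: for u32
 * tot and len, (tot + len) < tot holds exactly when the sum wrapped past
 * 2^32 - 1.  E.g. tot = 0xfffffff0, len = 0x20 sums to 0x10 < tot.
 */
static inline int would_wrap_u32(u32 tot, u32 len)
{
	return (tot + len) < tot;
}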
680 | |||
681 | /* | ||
682 | * Function: c2_activity (private function) | ||
683 | * | ||
684 | * Description: | ||
685 | * Post an mq index to the host->adapter activity fifo. | ||
686 | * | ||
687 | * IN: | ||
688 | * c2dev - ptr to c2dev structure | ||
689 | * mq_index - mq index to post | ||
690 | * shared - value most recently written to shared | ||
691 | * | ||
692 | * OUT: | ||
693 | * | ||
694 | * Return: | ||
695 | * none | ||
696 | */ | ||
697 | static inline void c2_activity(struct c2_dev *c2dev, u32 mq_index, u16 shared) | ||
698 | { | ||
699 | /* | ||
700 | * First read the register to see if the FIFO is full, and if so, | ||
701 | * spin until it's not. This isn't perfect -- there is no | ||
702 | * synchronization among the clients of the register, but in | ||
703 | * practice it prevents multiple CPUs from hammering the bus | ||
704 | * with PCI RETRY. Note that when this does happen, the card | ||
705 | * cannot get on the bus and the card and system hang in a | ||
706 | * deadlock -- thus the need for this code. [TOT] | ||
707 | */ | ||
708 | while (readl(c2dev->regs + PCI_BAR0_ADAPTER_HINT) & 0x80000000) { | ||
709 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
710 | schedule_timeout(0); | ||
711 | } | ||
712 | |||
713 | __raw_writel(C2_HINT_MAKE(mq_index, shared), | ||
714 | c2dev->regs + PCI_BAR0_ADAPTER_HINT); | ||
715 | } | ||
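/*
 * Worked example of the hint encoding consumed here, per the C2_HINT_*
 * macros at the top of this file (mq index in bits 30:16, count in bits
 * 15:0; bit 31 appears reserved for the FIFO-full flag polled above):
 *
 *	C2_HINT_MAKE(5, 12)            == 0x0005000c
 *	C2_HINT_GET_INDEX(0x0005000c)  == 5
 *	C2_HINT_GET_COUNT(0x0005000c)  == 12
 */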
716 | |||
717 | /* | ||
718 | * Function: qp_wr_post | ||
719 | * | ||
720 | * Description: | ||
721 | * This in-line function allocates a MQ msg, then moves the host-copy of | ||
722 | * the completed WR into msg. Then it posts the message. | ||
723 | * | ||
724 | * IN: | ||
725 | * q - ptr to user MQ. | ||
726 | * wr - ptr to host-copy of the WR. | ||
727 | * qp - ptr to user qp | ||
728 | * size - Number of bytes to post. Assumed to be divisible by 4. | ||
729 | * | ||
730 | * OUT: none | ||
731 | * | ||
732 | * Return: | ||
733 | * CCIL status codes. | ||
734 | */ | ||
735 | static int qp_wr_post(struct c2_mq *q, union c2wr * wr, struct c2_qp *qp, u32 size) | ||
736 | { | ||
737 | union c2wr *msg; | ||
738 | |||
739 | msg = c2_mq_alloc(q); | ||
740 | if (msg == NULL) { | ||
741 | return -EINVAL; | ||
742 | } | ||
743 | #ifdef CCMSGMAGIC | ||
744 | ((c2wr_hdr_t *) wr)->magic = cpu_to_be32(CCWR_MAGIC); | ||
745 | #endif | ||
746 | |||
747 | /* | ||
748 | * Since all header fields in the WR are the same as the | ||
749 | * CQE, set the following so the adapter need not. | ||
750 | */ | ||
751 | c2_wr_set_result(wr, CCERR_PENDING); | ||
752 | |||
753 | /* | ||
754 | * Copy the wr down to the adapter | ||
755 | */ | ||
756 | memcpy((void *) msg, (void *) wr, size); | ||
757 | |||
758 | c2_mq_produce(q); | ||
759 | return 0; | ||
760 | } | ||
761 | |||
762 | |||
763 | int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr, | ||
764 | struct ib_send_wr **bad_wr) | ||
765 | { | ||
766 | struct c2_dev *c2dev = to_c2dev(ibqp->device); | ||
767 | struct c2_qp *qp = to_c2qp(ibqp); | ||
768 | union c2wr wr; | ||
769 | int err = 0; | ||
770 | |||
771 | u32 flags; | ||
772 | u32 tot_len; | ||
773 | u8 actual_sge_count; | ||
774 | u32 msg_size; | ||
775 | |||
776 | if (qp->state > IB_QPS_RTS) | ||
777 | return -EINVAL; | ||
778 | |||
779 | while (ib_wr) { | ||
780 | |||
781 | flags = 0; | ||
782 | wr.sqwr.sq_hdr.user_hdr.hdr.context = ib_wr->wr_id; | ||
783 | if (ib_wr->send_flags & IB_SEND_SIGNALED) { | ||
784 | flags |= SQ_SIGNALED; | ||
785 | } | ||
786 | |||
787 | switch (ib_wr->opcode) { | ||
788 | case IB_WR_SEND: | ||
789 | if (ib_wr->send_flags & IB_SEND_SOLICITED) { | ||
790 | c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE); | ||
791 | msg_size = sizeof(struct c2wr_send_req); | ||
792 | } else { | ||
793 | c2_wr_set_id(&wr, C2_WR_TYPE_SEND); | ||
794 | msg_size = sizeof(struct c2wr_send_req); | ||
795 | } | ||
796 | |||
797 | wr.sqwr.send.remote_stag = 0; | ||
798 | msg_size += sizeof(struct c2_data_addr) * ib_wr->num_sge; | ||
799 | if (ib_wr->num_sge > qp->send_sgl_depth) { | ||
800 | err = -EINVAL; | ||
801 | break; | ||
802 | } | ||
803 | if (ib_wr->send_flags & IB_SEND_FENCE) { | ||
804 | flags |= SQ_READ_FENCE; | ||
805 | } | ||
806 | err = move_sgl((struct c2_data_addr *) & (wr.sqwr.send.data), | ||
807 | ib_wr->sg_list, | ||
808 | ib_wr->num_sge, | ||
809 | &tot_len, &actual_sge_count); | ||
810 | wr.sqwr.send.sge_len = cpu_to_be32(tot_len); | ||
811 | c2_wr_set_sge_count(&wr, actual_sge_count); | ||
812 | break; | ||
813 | case IB_WR_RDMA_WRITE: | ||
814 | c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_WRITE); | ||
815 | msg_size = sizeof(struct c2wr_rdma_write_req) + | ||
816 | (sizeof(struct c2_data_addr) * ib_wr->num_sge); | ||
817 | if (ib_wr->num_sge > qp->rdma_write_sgl_depth) { | ||
818 | err = -EINVAL; | ||
819 | break; | ||
820 | } | ||
821 | if (ib_wr->send_flags & IB_SEND_FENCE) { | ||
822 | flags |= SQ_READ_FENCE; | ||
823 | } | ||
824 | wr.sqwr.rdma_write.remote_stag = | ||
825 | cpu_to_be32(ib_wr->wr.rdma.rkey); | ||
826 | wr.sqwr.rdma_write.remote_to = | ||
827 | cpu_to_be64(ib_wr->wr.rdma.remote_addr); | ||
828 | err = move_sgl((struct c2_data_addr *) | ||
829 | & (wr.sqwr.rdma_write.data), | ||
830 | ib_wr->sg_list, | ||
831 | ib_wr->num_sge, | ||
832 | &tot_len, &actual_sge_count); | ||
833 | wr.sqwr.rdma_write.sge_len = cpu_to_be32(tot_len); | ||
834 | c2_wr_set_sge_count(&wr, actual_sge_count); | ||
835 | break; | ||
836 | case IB_WR_RDMA_READ: | ||
837 | c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_READ); | ||
838 | msg_size = sizeof(struct c2wr_rdma_read_req); | ||
839 | |||
840 | /* iWARP only supports 1 SGE for RDMA reads */ | ||
841 | if (ib_wr->num_sge > 1) { | ||
842 | err = -EINVAL; | ||
843 | break; | ||
844 | } | ||
845 | |||
846 | /* | ||
847 | * Move the local and remote stag/to/len into the WR. | ||
848 | */ | ||
849 | wr.sqwr.rdma_read.local_stag = | ||
850 | cpu_to_be32(ib_wr->sg_list->lkey); | ||
851 | wr.sqwr.rdma_read.local_to = | ||
852 | cpu_to_be64(ib_wr->sg_list->addr); | ||
853 | wr.sqwr.rdma_read.remote_stag = | ||
854 | cpu_to_be32(ib_wr->wr.rdma.rkey); | ||
855 | wr.sqwr.rdma_read.remote_to = | ||
856 | cpu_to_be64(ib_wr->wr.rdma.remote_addr); | ||
857 | wr.sqwr.rdma_read.length = | ||
858 | cpu_to_be32(ib_wr->sg_list->length); | ||
859 | break; | ||
860 | default: | ||
861 | /* error */ | ||
862 | msg_size = 0; | ||
863 | err = -EINVAL; | ||
864 | break; | ||
865 | } | ||
866 | |||
867 | /* | ||
868 | * If we had an error on the last wr build, then | ||
869 | * break out. Possible errors include bogus WR | ||
870 | * type, and a bogus SGL length... | ||
871 | */ | ||
872 | if (err) { | ||
873 | break; | ||
874 | } | ||
875 | |||
876 | /* | ||
877 | * Store flags | ||
878 | */ | ||
879 | c2_wr_set_flags(&wr, flags); | ||
880 | |||
881 | /* | ||
882 | * Post the puppy! | ||
883 | */ | ||
884 | err = qp_wr_post(&qp->sq_mq, &wr, qp, msg_size); | ||
885 | if (err) { | ||
886 | break; | ||
887 | } | ||
888 | |||
889 | /* | ||
890 | * Enqueue mq index to activity FIFO. | ||
891 | */ | ||
892 | c2_activity(c2dev, qp->sq_mq.index, qp->sq_mq.hint_count); | ||
893 | |||
894 | ib_wr = ib_wr->next; | ||
895 | } | ||
896 | |||
897 | if (err) | ||
898 | *bad_wr = ib_wr; | ||
899 | return err; | ||
900 | } | ||
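/*
 * Consumer-side sketch (not driver code) of posting one signaled SEND
 * through c2_post_send() above, via the core ib_post_send() wrapper of
 * this era.  dma_addr and lkey are assumed to come from an
 * already-registered MR covering the buffer.
 */
static int example_post_send(struct ib_qp *qp, u64 dma_addr, u32 len,
			     u32 lkey, u64 wr_id)
{
	struct ib_sge sge = {
		.addr	= dma_addr,
		.length	= len,
		.lkey	= lkey,
	};
	struct ib_send_wr wr = {
		.wr_id		= wr_id,
		.sg_list	= &sge,
		.num_sge	= 1,
		.opcode		= IB_WR_SEND,
		.send_flags	= IB_SEND_SIGNALED,
	};
	struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr, &bad_wr);
}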
901 | |||
902 | int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr, | ||
903 | struct ib_recv_wr **bad_wr) | ||
904 | { | ||
905 | struct c2_dev *c2dev = to_c2dev(ibqp->device); | ||
906 | struct c2_qp *qp = to_c2qp(ibqp); | ||
907 | union c2wr wr; | ||
908 | int err = 0; | ||
909 | |||
910 | if (qp->state > IB_QPS_RTS) | ||
911 | return -EINVAL; | ||
912 | |||
913 | /* | ||
914 | * Try to post each work request | ||
915 | */ | ||
916 | while (ib_wr) { | ||
917 | u32 tot_len; | ||
918 | u8 actual_sge_count; | ||
919 | |||
920 | if (ib_wr->num_sge > qp->recv_sgl_depth) { | ||
921 | err = -EINVAL; | ||
922 | break; | ||
923 | } | ||
924 | |||
925 | /* | ||
926 | * Create local host-copy of the WR | ||
927 | */ | ||
928 | wr.rqwr.rq_hdr.user_hdr.hdr.context = ib_wr->wr_id; | ||
929 | c2_wr_set_id(&wr, CCWR_RECV); | ||
930 | c2_wr_set_flags(&wr, 0); | ||
931 | |||
932 | /* sge_count is limited to eight bits. */ | ||
933 | BUG_ON(ib_wr->num_sge >= 256); | ||
934 | err = move_sgl((struct c2_data_addr *) & (wr.rqwr.data), | ||
935 | ib_wr->sg_list, | ||
936 | ib_wr->num_sge, &tot_len, &actual_sge_count); | ||
937 | c2_wr_set_sge_count(&wr, actual_sge_count); | ||
938 | |||
939 | /* | ||
940 | * If we had an error on the last wr build, then | ||
941 | * break out. Possible errors include bogus WR | ||
942 | * type, and a bogus SGL length... | ||
943 | */ | ||
944 | if (err) { | ||
945 | break; | ||
946 | } | ||
947 | |||
948 | err = qp_wr_post(&qp->rq_mq, &wr, qp, qp->rq_mq.msg_size); | ||
949 | if (err) { | ||
950 | break; | ||
951 | } | ||
952 | |||
953 | /* | ||
954 | * Enqueue mq index to activity FIFO | ||
955 | */ | ||
956 | c2_activity(c2dev, qp->rq_mq.index, qp->rq_mq.hint_count); | ||
957 | |||
958 | ib_wr = ib_wr->next; | ||
959 | } | ||
960 | |||
961 | if (err) | ||
962 | *bad_wr = ib_wr; | ||
963 | return err; | ||
964 | } | ||
965 | |||
966 | void __devinit c2_init_qp_table(struct c2_dev *c2dev) | ||
967 | { | ||
968 | spin_lock_init(&c2dev->qp_table.lock); | ||
969 | idr_init(&c2dev->qp_table.idr); | ||
970 | } | ||
971 | |||
972 | void __devexit c2_cleanup_qp_table(struct c2_dev *c2dev) | ||
973 | { | ||
974 | idr_destroy(&c2dev->qp_table.idr); | ||
975 | } | ||
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c new file mode 100644 index 000000000000..1c3c9d65ecea --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_rnic.c | |||
@@ -0,0 +1,663 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005 Ammasso, Inc. All rights reserved. | ||
3 | * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | * | ||
33 | */ | ||
34 | |||
35 | |||
36 | #include <linux/module.h> | ||
37 | #include <linux/moduleparam.h> | ||
38 | #include <linux/pci.h> | ||
39 | #include <linux/netdevice.h> | ||
40 | #include <linux/etherdevice.h> | ||
41 | #include <linux/delay.h> | ||
42 | #include <linux/ethtool.h> | ||
43 | #include <linux/mii.h> | ||
44 | #include <linux/if_vlan.h> | ||
45 | #include <linux/crc32.h> | ||
46 | #include <linux/in.h> | ||
47 | #include <linux/ip.h> | ||
48 | #include <linux/tcp.h> | ||
49 | #include <linux/init.h> | ||
50 | #include <linux/dma-mapping.h> | ||
51 | #include <linux/mm.h> | ||
52 | #include <linux/inet.h> | ||
53 | |||
54 | #include <linux/route.h> | ||
55 | |||
56 | #include <asm/io.h> | ||
57 | #include <asm/irq.h> | ||
58 | #include <asm/byteorder.h> | ||
59 | #include <rdma/ib_smi.h> | ||
60 | #include "c2.h" | ||
61 | #include "c2_vq.h" | ||
62 | |||
63 | /* Device capabilities */ | ||
64 | #define C2_MIN_PAGESIZE 1024 | ||
65 | |||
66 | #define C2_MAX_MRS 32768 | ||
67 | #define C2_MAX_QPS 16000 | ||
68 | #define C2_MAX_WQE_SZ 256 | ||
69 | #define C2_MAX_QP_WR ((128*1024)/C2_MAX_WQE_SZ) | ||
70 | #define C2_MAX_SGES 4 | ||
71 | #define C2_MAX_SGE_RD 1 | ||
72 | #define C2_MAX_CQS 32768 | ||
73 | #define C2_MAX_CQES 4096 | ||
74 | #define C2_MAX_PDS 16384 | ||
75 | |||
76 | /* | ||
77 | * Send the adapter INIT message to the amso1100 | ||
78 | */ | ||
79 | static int c2_adapter_init(struct c2_dev *c2dev) | ||
80 | { | ||
81 | struct c2wr_init_req wr; | ||
82 | int err; | ||
83 | |||
84 | memset(&wr, 0, sizeof(wr)); | ||
85 | c2_wr_set_id(&wr, CCWR_INIT); | ||
86 | wr.hdr.context = 0; | ||
87 | wr.hint_count = cpu_to_be64(c2dev->hint_count_dma); | ||
88 | wr.q0_host_shared = cpu_to_be64(c2dev->req_vq.shared_dma); | ||
89 | wr.q1_host_shared = cpu_to_be64(c2dev->rep_vq.shared_dma); | ||
90 | wr.q1_host_msg_pool = cpu_to_be64(c2dev->rep_vq.host_dma); | ||
91 | wr.q2_host_shared = cpu_to_be64(c2dev->aeq.shared_dma); | ||
92 | wr.q2_host_msg_pool = cpu_to_be64(c2dev->aeq.host_dma); | ||
93 | |||
94 | /* Post the init message */ | ||
95 | err = vq_send_wr(c2dev, (union c2wr *) & wr); | ||
96 | |||
97 | return err; | ||
98 | } | ||
99 | |||
100 | /* | ||
101 | * Send the adapter TERM message to the amso1100 | ||
102 | */ | ||
103 | static void c2_adapter_term(struct c2_dev *c2dev) | ||
104 | { | ||
105 | struct c2wr_init_req wr; | ||
106 | |||
107 | memset(&wr, 0, sizeof(wr)); | ||
108 | c2_wr_set_id(&wr, CCWR_TERM); | ||
109 | wr.hdr.context = 0; | ||
110 | |||
111 | /* Post the term message */ | ||
112 | vq_send_wr(c2dev, (union c2wr *) & wr); | ||
113 | c2dev->init = 0; | ||
114 | |||
115 | return; | ||
116 | } | ||
117 | |||
118 | /* | ||
119 | * Query the adapter | ||
120 | */ | ||
121 | static int c2_rnic_query(struct c2_dev *c2dev, struct ib_device_attr *props) | ||
122 | { | ||
123 | struct c2_vq_req *vq_req; | ||
124 | struct c2wr_rnic_query_req wr; | ||
125 | struct c2wr_rnic_query_rep *reply; | ||
126 | int err; | ||
127 | |||
128 | vq_req = vq_req_alloc(c2dev); | ||
129 | if (!vq_req) | ||
130 | return -ENOMEM; | ||
131 | |||
132 | c2_wr_set_id(&wr, CCWR_RNIC_QUERY); | ||
133 | wr.hdr.context = (unsigned long) vq_req; | ||
134 | wr.rnic_handle = c2dev->adapter_handle; | ||
135 | |||
136 | vq_req_get(c2dev, vq_req); | ||
137 | |||
138 | err = vq_send_wr(c2dev, (union c2wr *) &wr); | ||
139 | if (err) { | ||
140 | vq_req_put(c2dev, vq_req); | ||
141 | goto bail1; | ||
142 | } | ||
143 | |||
144 | err = vq_wait_for_reply(c2dev, vq_req); | ||
145 | if (err) | ||
146 | goto bail1; | ||
147 | |||
148 | reply = (struct c2wr_rnic_query_rep *) (unsigned long) (vq_req->reply_msg); | ||
149 | if (!reply) { | ||
150 | err = -ENOMEM; | ||
151 | goto bail1; | ||
152 | } | ||
153 | err = c2_errno(reply); | ||
154 | if (err) | ||
155 | goto bail2; | ||
156 | |||
157 | props->fw_ver = | ||
158 | ((u64)be32_to_cpu(reply->fw_ver_major) << 32) | | ||
159 | ((be32_to_cpu(reply->fw_ver_minor) & 0xFFFF) << 16) | | ||
160 | (be32_to_cpu(reply->fw_ver_patch) & 0xFFFF); | ||
161 | memcpy(&props->sys_image_guid, c2dev->netdev->dev_addr, 6); | ||
162 | props->max_mr_size = 0xFFFFFFFF; | ||
163 | props->page_size_cap = ~(C2_MIN_PAGESIZE-1); | ||
164 | props->vendor_id = be32_to_cpu(reply->vendor_id); | ||
165 | props->vendor_part_id = be32_to_cpu(reply->part_number); | ||
166 | props->hw_ver = be32_to_cpu(reply->hw_version); | ||
167 | props->max_qp = be32_to_cpu(reply->max_qps); | ||
168 | props->max_qp_wr = be32_to_cpu(reply->max_qp_depth); | ||
169 | props->device_cap_flags = c2dev->device_cap_flags; | ||
170 | props->max_sge = C2_MAX_SGES; | ||
171 | props->max_sge_rd = C2_MAX_SGE_RD; | ||
172 | props->max_cq = be32_to_cpu(reply->max_cqs); | ||
173 | props->max_cqe = be32_to_cpu(reply->max_cq_depth); | ||
174 | props->max_mr = be32_to_cpu(reply->max_mrs); | ||
175 | props->max_pd = be32_to_cpu(reply->max_pds); | ||
176 | props->max_qp_rd_atom = be32_to_cpu(reply->max_qp_ird); | ||
177 | props->max_ee_rd_atom = 0; | ||
178 | props->max_res_rd_atom = be32_to_cpu(reply->max_global_ird); | ||
179 | props->max_qp_init_rd_atom = be32_to_cpu(reply->max_qp_ord); | ||
180 | props->max_ee_init_rd_atom = 0; | ||
181 | props->atomic_cap = IB_ATOMIC_NONE; | ||
182 | props->max_ee = 0; | ||
183 | props->max_rdd = 0; | ||
184 | props->max_mw = be32_to_cpu(reply->max_mws); | ||
185 | props->max_raw_ipv6_qp = 0; | ||
186 | props->max_raw_ethy_qp = 0; | ||
187 | props->max_mcast_grp = 0; | ||
188 | props->max_mcast_qp_attach = 0; | ||
189 | props->max_total_mcast_qp_attach = 0; | ||
190 | props->max_ah = 0; | ||
191 | props->max_fmr = 0; | ||
192 | props->max_map_per_fmr = 0; | ||
193 | props->max_srq = 0; | ||
194 | props->max_srq_wr = 0; | ||
195 | props->max_srq_sge = 0; | ||
196 | props->max_pkeys = 0; | ||
197 | props->local_ca_ack_delay = 0; | ||
198 | |||
199 | bail2: | ||
200 | vq_repbuf_free(c2dev, reply); | ||
201 | |||
202 | bail1: | ||
203 | vq_req_free(c2dev, vq_req); | ||
204 | return err; | ||
205 | } | ||
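
For reference, props->fw_ver above packs the three firmware version components into one 64-bit word: major in bits 63:32, minor in bits 31:16, and patch in bits 15:0. A minimal decoder sketch, with a hypothetical helper name that is not part of this patch:

	/* Hypothetical helper: unpack the fw_ver value built by
	 * c2_rnic_query().
	 */
	static inline void c2_fw_ver_unpack(u64 fw_ver, u32 *major,
					    u32 *minor, u32 *patch)
	{
		*major = (u32) (fw_ver >> 32);
		*minor = ((u32) (fw_ver >> 16)) & 0xFFFF;
		*patch = (u32) (fw_ver & 0xFFFF);
	}
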
206 | |||
207 | /* | ||
208 | * Add an IP address to the RNIC interface | ||
209 | */ | ||
210 | int c2_add_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask) | ||
211 | { | ||
212 | struct c2_vq_req *vq_req; | ||
213 | struct c2wr_rnic_setconfig_req *wr; | ||
214 | struct c2wr_rnic_setconfig_rep *reply; | ||
215 | struct c2_netaddr netaddr; | ||
216 | int err, len; | ||
217 | |||
218 | vq_req = vq_req_alloc(c2dev); | ||
219 | if (!vq_req) | ||
220 | return -ENOMEM; | ||
221 | |||
222 | len = sizeof(struct c2_netaddr); | ||
223 | wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL); | ||
224 | if (!wr) { | ||
225 | err = -ENOMEM; | ||
226 | goto bail0; | ||
227 | } | ||
228 | |||
229 | c2_wr_set_id(wr, CCWR_RNIC_SETCONFIG); | ||
230 | wr->hdr.context = (unsigned long) vq_req; | ||
231 | wr->rnic_handle = c2dev->adapter_handle; | ||
232 | wr->option = cpu_to_be32(C2_CFG_ADD_ADDR); | ||
233 | |||
234 | netaddr.ip_addr = inaddr; | ||
235 | netaddr.netmask = inmask; | ||
236 | netaddr.mtu = 0; | ||
237 | |||
238 | memcpy(wr->data, &netaddr, len); | ||
239 | |||
240 | vq_req_get(c2dev, vq_req); | ||
241 | |||
242 | err = vq_send_wr(c2dev, (union c2wr *) wr); | ||
243 | if (err) { | ||
244 | vq_req_put(c2dev, vq_req); | ||
245 | goto bail1; | ||
246 | } | ||
247 | |||
248 | err = vq_wait_for_reply(c2dev, vq_req); | ||
249 | if (err) | ||
250 | goto bail1; | ||
251 | |||
252 | reply = | ||
253 | (struct c2wr_rnic_setconfig_rep *) (unsigned long) (vq_req->reply_msg); | ||
254 | if (!reply) { | ||
255 | err = -ENOMEM; | ||
256 | goto bail1; | ||
257 | } | ||
258 | |||
259 | err = c2_errno(reply); | ||
260 | vq_repbuf_free(c2dev, reply); | ||
261 | |||
262 | bail1: | ||
263 | kfree(wr); | ||
264 | bail0: | ||
265 | vq_req_free(c2dev, vq_req); | ||
266 | return err; | ||
267 | } | ||
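
c2_add_addr() copies the address and netmask verbatim into the c2_netaddr wire message, so the caller presumably supplies them in network byte order. A hedged caller-side sketch (the dotted-quad values are illustrative; in_aton() comes from <linux/inet.h>, which this file already includes):

	/* Illustrative only: tell the RNIC about 192.168.1.10/24. */
	u32 addr = in_aton("192.168.1.10");	/* network byte order */
	u32 mask = in_aton("255.255.255.0");
	int err = c2_add_addr(c2dev, addr, mask);
	if (err)
		pr_debug("c2_add_addr failed: %d\n", err);
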
268 | |||
269 | /* | ||
270 | * Delete an IP address from the RNIC interface | ||
271 | */ | ||
272 | int c2_del_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask) | ||
273 | { | ||
274 | struct c2_vq_req *vq_req; | ||
275 | struct c2wr_rnic_setconfig_req *wr; | ||
276 | struct c2wr_rnic_setconfig_rep *reply; | ||
277 | struct c2_netaddr netaddr; | ||
278 | int err, len; | ||
279 | |||
280 | vq_req = vq_req_alloc(c2dev); | ||
281 | if (!vq_req) | ||
282 | return -ENOMEM; | ||
283 | |||
284 | len = sizeof(struct c2_netaddr); | ||
285 | wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL); | ||
286 | if (!wr) { | ||
287 | err = -ENOMEM; | ||
288 | goto bail0; | ||
289 | } | ||
290 | |||
291 | c2_wr_set_id(wr, CCWR_RNIC_SETCONFIG); | ||
292 | wr->hdr.context = (unsigned long) vq_req; | ||
293 | wr->rnic_handle = c2dev->adapter_handle; | ||
294 | wr->option = cpu_to_be32(C2_CFG_DEL_ADDR); | ||
295 | |||
296 | netaddr.ip_addr = inaddr; | ||
297 | netaddr.netmask = inmask; | ||
298 | netaddr.mtu = 0; | ||
299 | |||
300 | memcpy(wr->data, &netaddr, len); | ||
301 | |||
302 | vq_req_get(c2dev, vq_req); | ||
303 | |||
304 | err = vq_send_wr(c2dev, (union c2wr *) wr); | ||
305 | if (err) { | ||
306 | vq_req_put(c2dev, vq_req); | ||
307 | goto bail1; | ||
308 | } | ||
309 | |||
310 | err = vq_wait_for_reply(c2dev, vq_req); | ||
311 | if (err) | ||
312 | goto bail1; | ||
313 | |||
314 | reply = | ||
315 | (struct c2wr_rnic_setconfig_rep *) (unsigned long) (vq_req->reply_msg); | ||
316 | if (!reply) { | ||
317 | err = -ENOMEM; | ||
318 | goto bail1; | ||
319 | } | ||
320 | |||
321 | err = c2_errno(reply); | ||
322 | vq_repbuf_free(c2dev, reply); | ||
323 | |||
324 | bail1: | ||
325 | kfree(wr); | ||
326 | bail0: | ||
327 | vq_req_free(c2dev, vq_req); | ||
328 | return err; | ||
329 | } | ||
330 | |||
331 | /* | ||
332 | * Open a single RNIC instance to use with all | ||
333 | * low level openib calls | ||
334 | */ | ||
335 | static int c2_rnic_open(struct c2_dev *c2dev) | ||
336 | { | ||
337 | struct c2_vq_req *vq_req; | ||
338 | union c2wr wr; | ||
339 | struct c2wr_rnic_open_rep *reply; | ||
340 | int err; | ||
341 | |||
342 | vq_req = vq_req_alloc(c2dev); | ||
343 | if (vq_req == NULL) { | ||
344 | return -ENOMEM; | ||
345 | } | ||
346 | |||
347 | memset(&wr, 0, sizeof(wr)); | ||
348 | c2_wr_set_id(&wr, CCWR_RNIC_OPEN); | ||
349 | wr.rnic_open.req.hdr.context = (unsigned long) (vq_req); | ||
350 | wr.rnic_open.req.flags = cpu_to_be16(RNIC_PRIV_MODE); | ||
351 | wr.rnic_open.req.port_num = cpu_to_be16(0); | ||
352 | wr.rnic_open.req.user_context = (unsigned long) c2dev; | ||
353 | |||
354 | vq_req_get(c2dev, vq_req); | ||
355 | |||
356 | err = vq_send_wr(c2dev, &wr); | ||
357 | if (err) { | ||
358 | vq_req_put(c2dev, vq_req); | ||
359 | goto bail0; | ||
360 | } | ||
361 | |||
362 | err = vq_wait_for_reply(c2dev, vq_req); | ||
363 | if (err) { | ||
364 | goto bail0; | ||
365 | } | ||
366 | |||
367 | reply = (struct c2wr_rnic_open_rep *) (unsigned long) (vq_req->reply_msg); | ||
368 | if (!reply) { | ||
369 | err = -ENOMEM; | ||
370 | goto bail0; | ||
371 | } | ||
372 | |||
373 | if ((err = c2_errno(reply)) != 0) { | ||
374 | goto bail1; | ||
375 | } | ||
376 | |||
377 | c2dev->adapter_handle = reply->rnic_handle; | ||
378 | |||
379 | bail1: | ||
380 | vq_repbuf_free(c2dev, reply); | ||
381 | bail0: | ||
382 | vq_req_free(c2dev, vq_req); | ||
383 | return err; | ||
384 | } | ||
385 | |||
386 | /* | ||
387 | * Close the RNIC instance | ||
388 | */ | ||
389 | static int c2_rnic_close(struct c2_dev *c2dev) | ||
390 | { | ||
391 | struct c2_vq_req *vq_req; | ||
392 | union c2wr wr; | ||
393 | struct c2wr_rnic_close_rep *reply; | ||
394 | int err; | ||
395 | |||
396 | vq_req = vq_req_alloc(c2dev); | ||
397 | if (vq_req == NULL) { | ||
398 | return -ENOMEM; | ||
399 | } | ||
400 | |||
401 | memset(&wr, 0, sizeof(wr)); | ||
402 | c2_wr_set_id(&wr, CCWR_RNIC_CLOSE); | ||
403 | wr.rnic_close.req.hdr.context = (unsigned long) vq_req; | ||
404 | wr.rnic_close.req.rnic_handle = c2dev->adapter_handle; | ||
405 | |||
406 | vq_req_get(c2dev, vq_req); | ||
407 | |||
408 | err = vq_send_wr(c2dev, &wr); | ||
409 | if (err) { | ||
410 | vq_req_put(c2dev, vq_req); | ||
411 | goto bail0; | ||
412 | } | ||
413 | |||
414 | err = vq_wait_for_reply(c2dev, vq_req); | ||
415 | if (err) { | ||
416 | goto bail0; | ||
417 | } | ||
418 | |||
419 | reply = (struct c2wr_rnic_close_rep *) (unsigned long) (vq_req->reply_msg); | ||
420 | if (!reply) { | ||
421 | err = -ENOMEM; | ||
422 | goto bail0; | ||
423 | } | ||
424 | |||
425 | if ((err = c2_errno(reply)) != 0) { | ||
426 | goto bail1; | ||
427 | } | ||
428 | |||
429 | c2dev->adapter_handle = 0; | ||
430 | |||
431 | bail1: | ||
432 | vq_repbuf_free(c2dev, reply); | ||
433 | bail0: | ||
434 | vq_req_free(c2dev, vq_req); | ||
435 | return err; | ||
436 | } | ||
437 | |||
438 | /* | ||
439 | * Called by c2_probe to initialize the RNIC. This principally | ||
440 | * involves initializing the various limits and resource pools that | ||
441 | * comprise the RNIC instance. | ||
442 | */ | ||
443 | int c2_rnic_init(struct c2_dev *c2dev) | ||
444 | { | ||
445 | int err; | ||
446 | u32 qsize, msgsize; | ||
447 | void *q1_pages; | ||
448 | void *q2_pages; | ||
449 | void __iomem *mmio_regs; | ||
450 | |||
451 | /* Device capabilities */ | ||
452 | c2dev->device_cap_flags = | ||
453 | (IB_DEVICE_RESIZE_MAX_WR | | ||
454 | IB_DEVICE_CURR_QP_STATE_MOD | | ||
455 | IB_DEVICE_SYS_IMAGE_GUID | | ||
456 | IB_DEVICE_ZERO_STAG | | ||
457 | IB_DEVICE_SEND_W_INV | IB_DEVICE_MEM_WINDOW); | ||
458 | |||
459 | /* Allocate the qptr_array */ | ||
460 | c2dev->qptr_array = vmalloc(C2_MAX_CQS * sizeof(void *)); | ||
461 | if (!c2dev->qptr_array) { | ||
462 | return -ENOMEM; | ||
463 | } | ||
464 | |||
465 | /* Initialize the qptr_array */ | ||
466 | memset(c2dev->qptr_array, 0, C2_MAX_CQS * sizeof(void *)); | ||
467 | c2dev->qptr_array[0] = (void *) &c2dev->req_vq; | ||
468 | c2dev->qptr_array[1] = (void *) &c2dev->rep_vq; | ||
469 | c2dev->qptr_array[2] = (void *) &c2dev->aeq; | ||
470 | |||
471 | /* Initialize data structures */ | ||
472 | init_waitqueue_head(&c2dev->req_vq_wo); | ||
473 | spin_lock_init(&c2dev->vqlock); | ||
474 | spin_lock_init(&c2dev->lock); | ||
475 | |||
476 | /* Allocate MQ shared pointer pool for kernel clients. User | ||
477 | * mode client pools are hung off the user context | ||
478 | */ | ||
479 | err = c2_init_mqsp_pool(c2dev, GFP_KERNEL, &c2dev->kern_mqsp_pool); | ||
480 | if (err) { | ||
481 | goto bail0; | ||
482 | } | ||
483 | |||
484 | /* Allocate shared pointers for Q0, Q1, and Q2 from | ||
485 | * the shared pointer pool. | ||
486 | */ | ||
487 | |||
488 | c2dev->hint_count = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool, | ||
489 | &c2dev->hint_count_dma, | ||
490 | GFP_KERNEL); | ||
491 | c2dev->req_vq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool, | ||
492 | &c2dev->req_vq.shared_dma, | ||
493 | GFP_KERNEL); | ||
494 | c2dev->rep_vq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool, | ||
495 | &c2dev->rep_vq.shared_dma, | ||
496 | GFP_KERNEL); | ||
497 | c2dev->aeq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool, | ||
498 | &c2dev->aeq.shared_dma, GFP_KERNEL); | ||
499 | if (!c2dev->hint_count || !c2dev->req_vq.shared || | ||
500 | !c2dev->rep_vq.shared || !c2dev->aeq.shared) { | ||
501 | err = -ENOMEM; | ||
502 | goto bail1; | ||
503 | } | ||
504 | |||
505 | mmio_regs = c2dev->kva; | ||
506 | /* Initialize the Verbs Request Queue */ | ||
507 | c2_mq_req_init(&c2dev->req_vq, 0, | ||
508 | be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_QSIZE)), | ||
509 | be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_MSGSIZE)), | ||
510 | mmio_regs + | ||
511 | be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_POOLSTART)), | ||
512 | mmio_regs + | ||
513 | be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_SHARED)), | ||
514 | C2_MQ_ADAPTER_TARGET); | ||
515 | |||
516 | /* Initialize the Verbs Reply Queue */ | ||
517 | qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_QSIZE)); | ||
518 | msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_MSGSIZE)); | ||
519 | q1_pages = kmalloc(qsize * msgsize, GFP_KERNEL); | ||
520 | if (!q1_pages) { | ||
521 | err = -ENOMEM; | ||
522 | goto bail1; | ||
523 | } | ||
524 | c2dev->rep_vq.host_dma = dma_map_single(c2dev->ibdev.dma_device, | ||
525 | (void *)q1_pages, qsize * msgsize, | ||
526 | DMA_FROM_DEVICE); | ||
527 | pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma); | ||
528 | pr_debug("%s rep_vq va %p dma %llx\n", __FUNCTION__, q1_pages, | ||
529 | (u64)c2dev->rep_vq.host_dma); | ||
530 | c2_mq_rep_init(&c2dev->rep_vq, | ||
531 | 1, | ||
532 | qsize, | ||
533 | msgsize, | ||
534 | q1_pages, | ||
535 | mmio_regs + | ||
536 | be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_SHARED)), | ||
537 | C2_MQ_HOST_TARGET); | ||
538 | |||
539 | /* Initialize the Asynchronous Event Queue */ | ||
540 | qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_QSIZE)); | ||
541 | msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_MSGSIZE)); | ||
542 | q2_pages = kmalloc(qsize * msgsize, GFP_KERNEL); | ||
543 | if (!q2_pages) { | ||
544 | err = -ENOMEM; | ||
545 | goto bail2; | ||
546 | } | ||
547 | c2dev->aeq.host_dma = dma_map_single(c2dev->ibdev.dma_device, | ||
548 | (void *)q2_pages, qsize * msgsize, | ||
549 | DMA_FROM_DEVICE); | ||
550 | pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma); | ||
551 | pr_debug("%s aeq va %p dma %llx\n", __FUNCTION__, q1_pages, | ||
552 | (u64)c2dev->rep_vq.host_dma); | ||
553 | c2_mq_rep_init(&c2dev->aeq, | ||
554 | 2, | ||
555 | qsize, | ||
556 | msgsize, | ||
557 | q2_pages, | ||
558 | mmio_regs + | ||
559 | be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_SHARED)), | ||
560 | C2_MQ_HOST_TARGET); | ||
561 | |||
562 | /* Initialize the verbs request allocator */ | ||
563 | err = vq_init(c2dev); | ||
564 | if (err) | ||
565 | goto bail3; | ||
566 | |||
567 | /* Enable interrupts on the adapter */ | ||
568 | writel(0, c2dev->regs + C2_IDIS); | ||
569 | |||
570 | /* create the WR init message */ | ||
571 | err = c2_adapter_init(c2dev); | ||
572 | if (err) | ||
573 | goto bail4; | ||
574 | c2dev->init++; | ||
575 | |||
576 | /* open an adapter instance */ | ||
577 | err = c2_rnic_open(c2dev); | ||
578 | if (err) | ||
579 | goto bail4; | ||
580 | |||
581 | /* Cache the adapter limits */ | ||
582 | if ((err = c2_rnic_query(c2dev, &c2dev->props)) != 0) | ||
583 | goto bail5; | ||
584 | |||
585 | /* Initialize the PD pool */ | ||
586 | err = c2_init_pd_table(c2dev); | ||
587 | if (err) | ||
588 | goto bail5; | ||
589 | |||
590 | /* Initialize the QP pool */ | ||
591 | c2_init_qp_table(c2dev); | ||
592 | return 0; | ||
593 | |||
594 | bail5: | ||
595 | c2_rnic_close(c2dev); | ||
596 | bail4: | ||
597 | vq_term(c2dev); | ||
598 | bail3: | ||
599 | dma_unmap_single(c2dev->ibdev.dma_device, | ||
600 | pci_unmap_addr(&c2dev->aeq, mapping), | ||
601 | c2dev->aeq.q_size * c2dev->aeq.msg_size, | ||
602 | DMA_FROM_DEVICE); | ||
603 | kfree(q2_pages); | ||
604 | bail2: | ||
605 | dma_unmap_single(c2dev->ibdev.dma_device, | ||
606 | pci_unmap_addr(&c2dev->rep_vq, mapping), | ||
607 | c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size, | ||
608 | DMA_FROM_DEVICE); | ||
609 | kfree(q1_pages); | ||
610 | bail1: | ||
611 | c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool); | ||
612 | bail0: | ||
613 | vfree(c2dev->qptr_array); | ||
614 | |||
615 | return err; | ||
616 | } | ||
617 | |||
618 | /* | ||
619 | * Called by c2_remove to cleanup the RNIC resources. | ||
620 | */ | ||
621 | void c2_rnic_term(struct c2_dev *c2dev) | ||
622 | { | ||
623 | |||
624 | /* Close the open adapter instance */ | ||
625 | c2_rnic_close(c2dev); | ||
626 | |||
627 | /* Send the TERM message to the adapter */ | ||
628 | c2_adapter_term(c2dev); | ||
629 | |||
630 | /* Disable interrupts on the adapter */ | ||
631 | writel(1, c2dev->regs + C2_IDIS); | ||
632 | |||
633 | /* Free the QP pool */ | ||
634 | c2_cleanup_qp_table(c2dev); | ||
635 | |||
636 | /* Free the PD pool */ | ||
637 | c2_cleanup_pd_table(c2dev); | ||
638 | |||
639 | /* Free the verbs request allocator */ | ||
640 | vq_term(c2dev); | ||
641 | |||
642 | /* Unmap and free the asynchronous event queue */ | ||
643 | dma_unmap_single(c2dev->ibdev.dma_device, | ||
644 | pci_unmap_addr(&c2dev->aeq, mapping), | ||
645 | c2dev->aeq.q_size * c2dev->aeq.msg_size, | ||
646 | DMA_FROM_DEVICE); | ||
647 | kfree(c2dev->aeq.msg_pool.host); | ||
648 | |||
649 | /* Unmap and free the verbs reply queue */ | ||
650 | dma_unmap_single(c2dev->ibdev.dma_device, | ||
651 | pci_unmap_addr(&c2dev->rep_vq, mapping), | ||
652 | c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size, | ||
653 | DMA_FROM_DEVICE); | ||
654 | kfree(c2dev->rep_vq.msg_pool.host); | ||
655 | |||
656 | /* Free the MQ shared pointer pool */ | ||
657 | c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool); | ||
658 | |||
659 | /* Free the qptr_array */ | ||
660 | vfree(c2dev->qptr_array); | ||
661 | |||
662 | return; | ||
663 | } | ||
diff --git a/drivers/infiniband/hw/amso1100/c2_status.h b/drivers/infiniband/hw/amso1100/c2_status.h new file mode 100644 index 000000000000..6ee4aa92d875 --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_status.h | |||
@@ -0,0 +1,158 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005 Ammasso, Inc. All rights reserved. | ||
3 | * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | #ifndef _C2_STATUS_H_ | ||
34 | #define _C2_STATUS_H_ | ||
35 | |||
36 | /* | ||
37 | * Verbs Status Codes | ||
38 | */ | ||
39 | enum c2_status { | ||
40 | C2_OK = 0, /* This must be zero */ | ||
41 | CCERR_INSUFFICIENT_RESOURCES = 1, | ||
42 | CCERR_INVALID_MODIFIER = 2, | ||
43 | CCERR_INVALID_MODE = 3, | ||
44 | CCERR_IN_USE = 4, | ||
45 | CCERR_INVALID_RNIC = 5, | ||
46 | CCERR_INTERRUPTED_OPERATION = 6, | ||
47 | CCERR_INVALID_EH = 7, | ||
48 | CCERR_INVALID_CQ = 8, | ||
49 | CCERR_CQ_EMPTY = 9, | ||
50 | CCERR_NOT_IMPLEMENTED = 10, | ||
51 | CCERR_CQ_DEPTH_TOO_SMALL = 11, | ||
52 | CCERR_PD_IN_USE = 12, | ||
53 | CCERR_INVALID_PD = 13, | ||
54 | CCERR_INVALID_SRQ = 14, | ||
55 | CCERR_INVALID_ADDRESS = 15, | ||
56 | CCERR_INVALID_NETMASK = 16, | ||
57 | CCERR_INVALID_QP = 17, | ||
58 | CCERR_INVALID_QP_STATE = 18, | ||
59 | CCERR_TOO_MANY_WRS_POSTED = 19, | ||
60 | CCERR_INVALID_WR_TYPE = 20, | ||
61 | CCERR_INVALID_SGL_LENGTH = 21, | ||
62 | CCERR_INVALID_SQ_DEPTH = 22, | ||
63 | CCERR_INVALID_RQ_DEPTH = 23, | ||
64 | CCERR_INVALID_ORD = 24, | ||
65 | CCERR_INVALID_IRD = 25, | ||
66 | CCERR_QP_ATTR_CANNOT_CHANGE = 26, | ||
67 | CCERR_INVALID_STAG = 27, | ||
68 | CCERR_QP_IN_USE = 28, | ||
69 | CCERR_OUTSTANDING_WRS = 29, | ||
70 | CCERR_STAG_IN_USE = 30, | ||
71 | CCERR_INVALID_STAG_INDEX = 31, | ||
72 | CCERR_INVALID_SGL_FORMAT = 32, | ||
73 | CCERR_ADAPTER_TIMEOUT = 33, | ||
74 | CCERR_INVALID_CQ_DEPTH = 34, | ||
75 | CCERR_INVALID_PRIVATE_DATA_LENGTH = 35, | ||
76 | CCERR_INVALID_EP = 36, | ||
77 | CCERR_MR_IN_USE = CCERR_STAG_IN_USE, | ||
78 | CCERR_FLUSHED = 38, | ||
79 | CCERR_INVALID_WQE = 39, | ||
80 | CCERR_LOCAL_QP_CATASTROPHIC_ERROR = 40, | ||
81 | CCERR_REMOTE_TERMINATION_ERROR = 41, | ||
82 | CCERR_BASE_AND_BOUNDS_VIOLATION = 42, | ||
83 | CCERR_ACCESS_VIOLATION = 43, | ||
84 | CCERR_INVALID_PD_ID = 44, | ||
85 | CCERR_WRAP_ERROR = 45, | ||
86 | CCERR_INV_STAG_ACCESS_ERROR = 46, | ||
87 | CCERR_ZERO_RDMA_READ_RESOURCES = 47, | ||
88 | CCERR_QP_NOT_PRIVILEGED = 48, | ||
89 | CCERR_STAG_STATE_NOT_INVALID = 49, | ||
90 | CCERR_INVALID_PAGE_SIZE = 50, | ||
91 | CCERR_INVALID_BUFFER_SIZE = 51, | ||
92 | CCERR_INVALID_PBE = 52, | ||
93 | CCERR_INVALID_FBO = 53, | ||
94 | CCERR_INVALID_LENGTH = 54, | ||
95 | CCERR_INVALID_ACCESS_RIGHTS = 55, | ||
96 | CCERR_PBL_TOO_BIG = 56, | ||
97 | CCERR_INVALID_VA = 57, | ||
98 | CCERR_INVALID_REGION = 58, | ||
99 | CCERR_INVALID_WINDOW = 59, | ||
100 | CCERR_TOTAL_LENGTH_TOO_BIG = 60, | ||
101 | CCERR_INVALID_QP_ID = 61, | ||
102 | CCERR_ADDR_IN_USE = 62, | ||
103 | CCERR_ADDR_NOT_AVAIL = 63, | ||
104 | CCERR_NET_DOWN = 64, | ||
105 | CCERR_NET_UNREACHABLE = 65, | ||
106 | CCERR_CONN_ABORTED = 66, | ||
107 | CCERR_CONN_RESET = 67, | ||
108 | CCERR_NO_BUFS = 68, | ||
109 | CCERR_CONN_TIMEDOUT = 69, | ||
110 | CCERR_CONN_REFUSED = 70, | ||
111 | CCERR_HOST_UNREACHABLE = 71, | ||
112 | CCERR_INVALID_SEND_SGL_DEPTH = 72, | ||
113 | CCERR_INVALID_RECV_SGL_DEPTH = 73, | ||
114 | CCERR_INVALID_RDMA_WRITE_SGL_DEPTH = 74, | ||
115 | CCERR_INSUFFICIENT_PRIVILEGES = 75, | ||
116 | CCERR_STACK_ERROR = 76, | ||
117 | CCERR_INVALID_VERSION = 77, | ||
118 | CCERR_INVALID_MTU = 78, | ||
119 | CCERR_INVALID_IMAGE = 79, | ||
120 | CCERR_PENDING = 98, /* not an error; used internally by adapter */ | ||
121 | CCERR_DEFER = 99, /* not an error; used internally by adapter */ | ||
122 | CCERR_FAILED_WRITE = 100, | ||
123 | CCERR_FAILED_ERASE = 101, | ||
124 | CCERR_FAILED_VERIFICATION = 102, | ||
125 | CCERR_NOT_FOUND = 103, | ||
126 | |||
127 | }; | ||
128 | |||
129 | /* | ||
130 | * CCAE_ACTIVE_CONNECT_RESULTS status result codes. | ||
131 | */ | ||
132 | enum c2_connect_status { | ||
133 | C2_CONN_STATUS_SUCCESS = C2_OK, | ||
134 | C2_CONN_STATUS_NO_MEM = CCERR_INSUFFICIENT_RESOURCES, | ||
135 | C2_CONN_STATUS_TIMEDOUT = CCERR_CONN_TIMEDOUT, | ||
136 | C2_CONN_STATUS_REFUSED = CCERR_CONN_REFUSED, | ||
137 | C2_CONN_STATUS_NETUNREACH = CCERR_NET_UNREACHABLE, | ||
138 | C2_CONN_STATUS_HOSTUNREACH = CCERR_HOST_UNREACHABLE, | ||
139 | C2_CONN_STATUS_INVALID_RNIC = CCERR_INVALID_RNIC, | ||
140 | C2_CONN_STATUS_INVALID_QP = CCERR_INVALID_QP, | ||
141 | C2_CONN_STATUS_INVALID_QP_STATE = CCERR_INVALID_QP_STATE, | ||
142 | C2_CONN_STATUS_REJECTED = CCERR_CONN_RESET, | ||
143 | C2_CONN_STATUS_ADDR_NOT_AVAIL = CCERR_ADDR_NOT_AVAIL, | ||
144 | }; | ||
145 | |||
146 | /* | ||
147 | * Flash programming status codes. | ||
148 | */ | ||
149 | enum c2_flash_status { | ||
150 | C2_FLASH_STATUS_SUCCESS = 0x0000, | ||
151 | C2_FLASH_STATUS_VERIFY_ERR = 0x0002, | ||
152 | C2_FLASH_STATUS_IMAGE_ERR = 0x0004, | ||
153 | C2_FLASH_STATUS_ECLBS = 0x0400, | ||
154 | C2_FLASH_STATUS_PSLBS = 0x0800, | ||
155 | C2_FLASH_STATUS_VPENS = 0x1000, | ||
156 | }; | ||
157 | |||
158 | #endif /* _C2_STATUS_H_ */ | ||
diff --git a/drivers/infiniband/hw/amso1100/c2_user.h b/drivers/infiniband/hw/amso1100/c2_user.h new file mode 100644 index 000000000000..7e9e7ad65467 --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_user.h | |||
@@ -0,0 +1,82 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005 Topspin Communications. All rights reserved. | ||
3 | * Copyright (c) 2005 Cisco Systems. All rights reserved. | ||
4 | * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | * | ||
34 | */ | ||
35 | |||
36 | #ifndef C2_USER_H | ||
37 | #define C2_USER_H | ||
38 | |||
39 | #include <linux/types.h> | ||
40 | |||
41 | /* | ||
42 | * Make sure that all structs defined in this file remain laid out so | ||
43 | * that they pack the same way on 32-bit and 64-bit architectures (to | ||
44 | * avoid incompatibility between 32-bit userspace and 64-bit kernels). | ||
45 | * In particular do not use pointer types -- pass pointers in __u64 | ||
46 | * instead. | ||
47 | */ | ||
48 | |||
49 | struct c2_alloc_ucontext_resp { | ||
50 | __u32 qp_tab_size; | ||
51 | __u32 uarc_size; | ||
52 | }; | ||
53 | |||
54 | struct c2_alloc_pd_resp { | ||
55 | __u32 pdn; | ||
56 | __u32 reserved; | ||
57 | }; | ||
58 | |||
59 | struct c2_create_cq { | ||
60 | __u32 lkey; | ||
61 | __u32 pdn; | ||
62 | __u64 arm_db_page; | ||
63 | __u64 set_db_page; | ||
64 | __u32 arm_db_index; | ||
65 | __u32 set_db_index; | ||
66 | }; | ||
67 | |||
68 | struct c2_create_cq_resp { | ||
69 | __u32 cqn; | ||
70 | __u32 reserved; | ||
71 | }; | ||
72 | |||
73 | struct c2_create_qp { | ||
74 | __u32 lkey; | ||
75 | __u32 reserved; | ||
76 | __u64 sq_db_page; | ||
77 | __u64 rq_db_page; | ||
78 | __u32 sq_db_index; | ||
79 | __u32 rq_db_index; | ||
80 | }; | ||
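
Following the layout rule at the top of this file, the db_page fields above are __u64 precisely so that userspace can hand the kernel a pointer without the struct layout varying between 32- and 64-bit processes. A hedged userspace-side sketch (the helper name is hypothetical):

	#include <stdint.h>

	/* Hypothetical helper: stuff pointers into the __u64 fields.
	 * Casting through uintptr_t zero-extends cleanly on 32-bit.
	 */
	static void c2_fill_create_cq(struct c2_create_cq *cmd,
				      void *arm_db, void *set_db)
	{
		cmd->arm_db_page = (uint64_t) (uintptr_t) arm_db;
		cmd->set_db_page = (uint64_t) (uintptr_t) set_db;
	}
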
81 | |||
82 | #endif /* C2_USER_H */ | ||
diff --git a/drivers/infiniband/hw/amso1100/c2_vq.c b/drivers/infiniband/hw/amso1100/c2_vq.c new file mode 100644 index 000000000000..40caeb5f41b4 --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_vq.c | |||
@@ -0,0 +1,260 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005 Ammasso, Inc. All rights reserved. | ||
3 | * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | #include <linux/slab.h> | ||
34 | #include <linux/spinlock.h> | ||
35 | |||
36 | #include "c2_vq.h" | ||
37 | #include "c2_provider.h" | ||
38 | |||
39 | /* | ||
40 | * Verbs Request Objects: | ||
41 | * | ||
42 | * VQ Request Objects are allocated by the kernel verbs handlers. | ||
43 | * They contain a wait object, a refcnt, an atomic bool indicating that the | ||
44 | * adapter has replied, and a copy of the verb reply work request. | ||
45 | * A pointer to the VQ Request Object is passed down in the context | ||
46 | * field of the work request message, and reflected back by the adapter | ||
47 | * in the verbs reply message. The function handle_vq() in the interrupt | ||
48 | * path will use this pointer to: | ||
49 | * 1) append a copy of the verbs reply message | ||
50 | * 2) mark that the reply is ready | ||
51 | * 3) wake up the kernel verbs handler blocked awaiting the reply. | ||
52 | * | ||
53 | * | ||
54 | * The kernel verbs handlers do a "get" to put a 2nd reference on the | ||
55 | * VQ Request object. If the kernel verbs handler exits before the adapter | ||
56 | * can respond, this extra reference will keep the VQ Request object around | ||
57 | * until the adapter's reply can be processed. The reason we need this is | ||
58 | * because a pointer to this object is stuffed into the context field of | ||
59 | * the verbs work request message, and reflected back in the reply message. | ||
60 | * It is used in the interrupt handler (handle_vq()) to wake up the appropriate | ||
61 | * kernel verb handler that is blocked awaiting the verb reply. | ||
62 | * So handle_vq() will do a "put" on the object when it's done accessing it. | ||
63 | * NOTE: If we guarantee that the kernel verb handler will never bail before | ||
64 | * getting the reply, then we don't need these refcnts. | ||
65 | * | ||
66 | * | ||
67 | * VQ Request objects are freed by the kernel verbs handlers only | ||
68 | * after the verb has been processed, or when the adapter fails and | ||
69 | * does not reply. | ||
70 | * | ||
71 | * | ||
72 | * Verbs Reply Buffers: | ||
73 | * | ||
74 | * VQ Reply bufs are local host memory copies of an | ||
75 | * outstanding Verb Request reply | ||
76 | * message. They are always allocated by the kernel verbs handlers, and _may_ be | ||
77 | * freed by either the kernel verbs handler -or- the interrupt handler. The | ||
78 | * kernel verbs handler _must_ free the repbuf, then free the vq request object, | ||
79 | * in that order. | ||
80 | */ | ||
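
Taken together, the comment above implies a fixed call sequence for every kernel verbs handler. A condensed sketch of that lifecycle, mirroring c2_rnic_query() in c2_rnic.c (error unwinding trimmed for brevity):

	vq_req = vq_req_alloc(c2dev);		/* refcnt = 1 */
	wr.hdr.context = (unsigned long) vq_req; /* reflected back in reply */
	vq_req_get(c2dev, vq_req);		/* 2nd ref for the reply path */
	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err)
		vq_req_put(c2dev, vq_req);	/* send failed: drop 2nd ref */
	else
		err = vq_wait_for_reply(c2dev, vq_req);
	/* ... process reply ... */
	vq_repbuf_free(c2dev, reply);		/* free the reply buffer first */
	vq_req_free(c2dev, vq_req);		/* then the request object */
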
81 | |||
82 | int vq_init(struct c2_dev *c2dev) | ||
83 | { | ||
84 | sprintf(c2dev->vq_cache_name, "c2-vq:dev%c", | ||
85 | (char) ('0' + c2dev->devnum)); | ||
86 | c2dev->host_msg_cache = | ||
87 | kmem_cache_create(c2dev->vq_cache_name, c2dev->rep_vq.msg_size, 0, | ||
88 | SLAB_HWCACHE_ALIGN, NULL, NULL); | ||
89 | if (c2dev->host_msg_cache == NULL) { | ||
90 | return -ENOMEM; | ||
91 | } | ||
92 | return 0; | ||
93 | } | ||
94 | |||
95 | void vq_term(struct c2_dev *c2dev) | ||
96 | { | ||
97 | kmem_cache_destroy(c2dev->host_msg_cache); | ||
98 | } | ||
99 | |||
100 | /* vq_req_alloc - allocate a VQ Request Object and initialize it. | ||
101 | * The refcnt is set to 1. | ||
102 | */ | ||
103 | struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev) | ||
104 | { | ||
105 | struct c2_vq_req *r; | ||
106 | |||
107 | r = kmalloc(sizeof(struct c2_vq_req), GFP_KERNEL); | ||
108 | if (r) { | ||
109 | init_waitqueue_head(&r->wait_object); | ||
110 | r->reply_msg = (u64) NULL; | ||
111 | r->event = 0; | ||
112 | r->cm_id = NULL; | ||
113 | r->qp = NULL; | ||
114 | atomic_set(&r->refcnt, 1); | ||
115 | atomic_set(&r->reply_ready, 0); | ||
116 | } | ||
117 | return r; | ||
118 | } | ||
119 | |||
120 | |||
121 | /* vq_req_free - free the VQ Request Object. It is assumed the verbs handler | ||
122 | * has already freed the VQ Reply Buffer if it existed. | ||
123 | */ | ||
124 | void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *r) | ||
125 | { | ||
126 | r->reply_msg = (u64) NULL; | ||
127 | if (atomic_dec_and_test(&r->refcnt)) { | ||
128 | kfree(r); | ||
129 | } | ||
130 | } | ||
131 | |||
132 | /* vq_req_get - reference a VQ Request Object. Done | ||
133 | * only in the kernel verbs handlers. | ||
134 | */ | ||
135 | void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *r) | ||
136 | { | ||
137 | atomic_inc(&r->refcnt); | ||
138 | } | ||
139 | |||
140 | |||
141 | /* vq_req_put - dereference and potentially free a VQ Request Object. | ||
142 | * | ||
143 | * This is only called by handle_vq() on the | ||
144 | * interrupt when it is done processing | ||
145 | * a verb reply message. If the associated | ||
146 | * kernel verbs handler has already bailed, | ||
147 | * then this put will actually free the VQ | ||
148 | * Request object _and_ the VQ Reply Buffer | ||
149 | * if it exists. | ||
150 | */ | ||
151 | void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *r) | ||
152 | { | ||
153 | if (atomic_dec_and_test(&r->refcnt)) { | ||
154 | if (r->reply_msg != (u64) NULL) | ||
155 | vq_repbuf_free(c2dev, | ||
156 | (void *) (unsigned long) r->reply_msg); | ||
157 | kfree(r); | ||
158 | } | ||
159 | } | ||
160 | |||
161 | |||
162 | /* | ||
163 | * vq_repbuf_alloc - allocate a VQ Reply Buffer. | ||
164 | */ | ||
165 | void *vq_repbuf_alloc(struct c2_dev *c2dev) | ||
166 | { | ||
167 | return kmem_cache_alloc(c2dev->host_msg_cache, SLAB_ATOMIC); | ||
168 | } | ||
169 | |||
170 | /* | ||
171 | * vq_send_wr - post a verbs request message to the Verbs Request Queue. | ||
172 | * If a message is not available in the MQ, then block until one is available. | ||
173 | * NOTE: handle_mq() in interrupt context will wake up threads blocked here. | ||
174 | * When the adapter drains the Verbs Request Queue, | ||
175 | * it inserts MQ index 0 into the | ||
176 | * adapter->host activity fifo and interrupts the host. | ||
177 | */ | ||
178 | int vq_send_wr(struct c2_dev *c2dev, union c2wr *wr) | ||
179 | { | ||
180 | void *msg; | ||
181 | wait_queue_t __wait; | ||
182 | |||
183 | /* | ||
184 | * grab adapter vq lock | ||
185 | */ | ||
186 | spin_lock(&c2dev->vqlock); | ||
187 | |||
188 | /* | ||
189 | * allocate msg | ||
190 | */ | ||
191 | msg = c2_mq_alloc(&c2dev->req_vq); | ||
192 | |||
193 | /* | ||
194 | * If we cannot get a msg, then we'll wait. | ||
195 | * When messages become available, the int handler will wake_up() | ||
196 | * any waiters. | ||
197 | */ | ||
198 | while (msg == NULL) { | ||
199 | pr_debug("%s:%d no available msg in VQ, waiting...\n", | ||
200 | __FUNCTION__, __LINE__); | ||
201 | init_waitqueue_entry(&__wait, current); | ||
202 | add_wait_queue(&c2dev->req_vq_wo, &__wait); | ||
203 | spin_unlock(&c2dev->vqlock); | ||
204 | for (;;) { | ||
205 | set_current_state(TASK_INTERRUPTIBLE); | ||
206 | if (!c2_mq_full(&c2dev->req_vq)) { | ||
207 | break; | ||
208 | } | ||
209 | if (!signal_pending(current)) { | ||
210 | schedule_timeout(1 * HZ); /* 1 second... */ | ||
211 | continue; | ||
212 | } | ||
213 | set_current_state(TASK_RUNNING); | ||
214 | remove_wait_queue(&c2dev->req_vq_wo, &__wait); | ||
215 | return -EINTR; | ||
216 | } | ||
217 | set_current_state(TASK_RUNNING); | ||
218 | remove_wait_queue(&c2dev->req_vq_wo, &__wait); | ||
219 | spin_lock(&c2dev->vqlock); | ||
220 | msg = c2_mq_alloc(&c2dev->req_vq); | ||
221 | } | ||
222 | |||
223 | /* | ||
224 | * copy wr into adapter msg | ||
225 | */ | ||
226 | memcpy(msg, wr, c2dev->req_vq.msg_size); | ||
227 | |||
228 | /* | ||
229 | * post msg | ||
230 | */ | ||
231 | c2_mq_produce(&c2dev->req_vq); | ||
232 | |||
233 | /* | ||
234 | * release adapter vq lock | ||
235 | */ | ||
236 | spin_unlock(&c2dev->vqlock); | ||
237 | return 0; | ||
238 | } | ||
239 | |||
240 | |||
241 | /* | ||
242 | * vq_wait_for_reply - block until the adapter posts a Verb Reply Message. | ||
243 | */ | ||
244 | int vq_wait_for_reply(struct c2_dev *c2dev, struct c2_vq_req *req) | ||
245 | { | ||
246 | if (!wait_event_timeout(req->wait_object, | ||
247 | atomic_read(&req->reply_ready), | ||
248 | 60*HZ)) | ||
249 | return -ETIMEDOUT; | ||
250 | |||
251 | return 0; | ||
252 | } | ||
253 | |||
254 | /* | ||
255 | * vq_repbuf_free - Free a Verbs Reply Buffer. | ||
256 | */ | ||
257 | void vq_repbuf_free(struct c2_dev *c2dev, void *reply) | ||
258 | { | ||
259 | kmem_cache_free(c2dev->host_msg_cache, reply); | ||
260 | } | ||
diff --git a/drivers/infiniband/hw/amso1100/c2_vq.h b/drivers/infiniband/hw/amso1100/c2_vq.h new file mode 100644 index 000000000000..33805627a607 --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_vq.h | |||
@@ -0,0 +1,63 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005 Ammasso, Inc. All rights reserved. | ||
3 | * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | #ifndef _C2_VQ_H_ | ||
34 | #define _C2_VQ_H_ | ||
35 | #include <linux/sched.h> | ||
36 | #include "c2.h" | ||
37 | #include "c2_wr.h" | ||
38 | #include "c2_provider.h" | ||
39 | |||
40 | struct c2_vq_req { | ||
41 | u64 reply_msg; /* ptr to reply msg */ | ||
42 | wait_queue_head_t wait_object; /* wait object for vq reqs */ | ||
43 | atomic_t reply_ready; /* set when reply is ready */ | ||
44 | atomic_t refcnt; /* used to cancel WRs... */ | ||
45 | int event; | ||
46 | struct iw_cm_id *cm_id; | ||
47 | struct c2_qp *qp; | ||
48 | }; | ||
49 | |||
50 | extern int vq_init(struct c2_dev *c2dev); | ||
51 | extern void vq_term(struct c2_dev *c2dev); | ||
52 | |||
53 | extern struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev); | ||
54 | extern void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *req); | ||
55 | extern void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *req); | ||
56 | extern void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *req); | ||
57 | extern int vq_send_wr(struct c2_dev *c2dev, union c2wr * wr); | ||
58 | |||
59 | extern void *vq_repbuf_alloc(struct c2_dev *c2dev); | ||
60 | extern void vq_repbuf_free(struct c2_dev *c2dev, void *reply); | ||
61 | |||
62 | extern int vq_wait_for_reply(struct c2_dev *c2dev, struct c2_vq_req *req); | ||
63 | #endif /* _C2_VQ_H_ */ | ||
diff --git a/drivers/infiniband/hw/amso1100/c2_wr.h b/drivers/infiniband/hw/amso1100/c2_wr.h new file mode 100644 index 000000000000..3ec6c43bb0ef --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_wr.h | |||
@@ -0,0 +1,1520 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005 Ammasso, Inc. All rights reserved. | ||
3 | * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | #ifndef _C2_WR_H_ | ||
34 | #define _C2_WR_H_ | ||
35 | |||
36 | #ifdef CCDEBUG | ||
37 | #define CCWR_MAGIC 0xb07700b0 | ||
38 | #endif | ||
39 | |||
40 | #define C2_QP_NO_ATTR_CHANGE 0xFFFFFFFF | ||
41 | |||
42 | /* Maximum allowed size in bytes of private_data exchange | ||
43 | * on connect. | ||
44 | */ | ||
45 | #define C2_MAX_PRIVATE_DATA_SIZE 200 | ||
46 | |||
47 | /* | ||
48 | * These types are shared among the adapter, host, and CCIL consumer. | ||
49 | */ | ||
50 | enum c2_cq_notification_type { | ||
51 | C2_CQ_NOTIFICATION_TYPE_NONE = 1, | ||
52 | C2_CQ_NOTIFICATION_TYPE_NEXT, | ||
53 | C2_CQ_NOTIFICATION_TYPE_NEXT_SE | ||
54 | }; | ||
55 | |||
56 | enum c2_setconfig_cmd { | ||
57 | C2_CFG_ADD_ADDR = 1, | ||
58 | C2_CFG_DEL_ADDR = 2, | ||
59 | C2_CFG_ADD_ROUTE = 3, | ||
60 | C2_CFG_DEL_ROUTE = 4 | ||
61 | }; | ||
62 | |||
63 | enum c2_getconfig_cmd { | ||
64 | C2_GETCONFIG_ROUTES = 1, | ||
65 | C2_GETCONFIG_ADDRS | ||
66 | }; | ||
67 | |||
68 | /* | ||
69 | * CCIL Work Request Identifiers | ||
70 | */ | ||
71 | enum c2wr_ids { | ||
72 | CCWR_RNIC_OPEN = 1, | ||
73 | CCWR_RNIC_QUERY, | ||
74 | CCWR_RNIC_SETCONFIG, | ||
75 | CCWR_RNIC_GETCONFIG, | ||
76 | CCWR_RNIC_CLOSE, | ||
77 | CCWR_CQ_CREATE, | ||
78 | CCWR_CQ_QUERY, | ||
79 | CCWR_CQ_MODIFY, | ||
80 | CCWR_CQ_DESTROY, | ||
81 | CCWR_QP_CONNECT, | ||
82 | CCWR_PD_ALLOC, | ||
83 | CCWR_PD_DEALLOC, | ||
84 | CCWR_SRQ_CREATE, | ||
85 | CCWR_SRQ_QUERY, | ||
86 | CCWR_SRQ_MODIFY, | ||
87 | CCWR_SRQ_DESTROY, | ||
88 | CCWR_QP_CREATE, | ||
89 | CCWR_QP_QUERY, | ||
90 | CCWR_QP_MODIFY, | ||
91 | CCWR_QP_DESTROY, | ||
92 | CCWR_NSMR_STAG_ALLOC, | ||
93 | CCWR_NSMR_REGISTER, | ||
94 | CCWR_NSMR_PBL, | ||
95 | CCWR_STAG_DEALLOC, | ||
96 | CCWR_NSMR_REREGISTER, | ||
97 | CCWR_SMR_REGISTER, | ||
98 | CCWR_MR_QUERY, | ||
99 | CCWR_MW_ALLOC, | ||
100 | CCWR_MW_QUERY, | ||
101 | CCWR_EP_CREATE, | ||
102 | CCWR_EP_GETOPT, | ||
103 | CCWR_EP_SETOPT, | ||
104 | CCWR_EP_DESTROY, | ||
105 | CCWR_EP_BIND, | ||
106 | CCWR_EP_CONNECT, | ||
107 | CCWR_EP_LISTEN, | ||
108 | CCWR_EP_SHUTDOWN, | ||
109 | CCWR_EP_LISTEN_CREATE, | ||
110 | CCWR_EP_LISTEN_DESTROY, | ||
111 | CCWR_EP_QUERY, | ||
112 | CCWR_CR_ACCEPT, | ||
113 | CCWR_CR_REJECT, | ||
114 | CCWR_CONSOLE, | ||
115 | CCWR_TERM, | ||
116 | CCWR_FLASH_INIT, | ||
117 | CCWR_FLASH, | ||
118 | CCWR_BUF_ALLOC, | ||
119 | CCWR_BUF_FREE, | ||
120 | CCWR_FLASH_WRITE, | ||
121 | CCWR_INIT, /* WARNING: Don't move this ever again! */ | ||
122 | |||
123 | |||
124 | |||
125 | /* Add new IDs here */ | ||
126 | |||
127 | |||
128 | |||
129 | /* | ||
130 | * WARNING: CCWR_LAST must always be the last verbs id defined! | ||
131 | * All the preceding IDs are fixed, and must not change. | ||
132 | * You can add new IDs, but must not remove or reorder | ||
133 | * any IDs. If you do, YOU will ruin any hope of | ||
134 | * compatibility between versions. | ||
135 | */ | ||
136 | CCWR_LAST, | ||
137 | |||
138 | /* | ||
139 | * Start over at 1 so that arrays indexed by user wr id's | ||
140 | * begin at 1. This is OK since the verbs and user wr id's | ||
141 | * are always used on disjoint sets of queues. | ||
142 | */ | ||
143 | /* | ||
144 | * The order of the CCWR_SEND_XX verbs must | ||
145 | * match the order of the RDMA_OPs | ||
146 | */ | ||
147 | CCWR_SEND = 1, | ||
148 | CCWR_SEND_INV, | ||
149 | CCWR_SEND_SE, | ||
150 | CCWR_SEND_SE_INV, | ||
151 | CCWR_RDMA_WRITE, | ||
152 | CCWR_RDMA_READ, | ||
153 | CCWR_RDMA_READ_INV, | ||
154 | CCWR_MW_BIND, | ||
155 | CCWR_NSMR_FASTREG, | ||
156 | CCWR_STAG_INVALIDATE, | ||
157 | CCWR_RECV, | ||
158 | CCWR_NOP, | ||
159 | CCWR_UNIMPL, | ||
160 | /* WARNING: This must always be the last user wr id defined! */ | ||
161 | }; | ||
162 | #define RDMA_SEND_OPCODE_FROM_WR_ID(x) ((x)+2) | ||
163 | |||
164 | /* | ||
165 | * SQ/RQ Work Request Types | ||
166 | */ | ||
167 | enum c2_wr_type { | ||
168 | C2_WR_TYPE_SEND = CCWR_SEND, | ||
169 | C2_WR_TYPE_SEND_SE = CCWR_SEND_SE, | ||
170 | C2_WR_TYPE_SEND_INV = CCWR_SEND_INV, | ||
171 | C2_WR_TYPE_SEND_SE_INV = CCWR_SEND_SE_INV, | ||
172 | C2_WR_TYPE_RDMA_WRITE = CCWR_RDMA_WRITE, | ||
173 | C2_WR_TYPE_RDMA_READ = CCWR_RDMA_READ, | ||
174 | C2_WR_TYPE_RDMA_READ_INV_STAG = CCWR_RDMA_READ_INV, | ||
175 | C2_WR_TYPE_BIND_MW = CCWR_MW_BIND, | ||
176 | C2_WR_TYPE_FASTREG_NSMR = CCWR_NSMR_FASTREG, | ||
177 | C2_WR_TYPE_INV_STAG = CCWR_STAG_INVALIDATE, | ||
178 | C2_WR_TYPE_RECV = CCWR_RECV, | ||
179 | C2_WR_TYPE_NOP = CCWR_NOP, | ||
180 | }; | ||
181 | |||
182 | struct c2_netaddr { | ||
183 | u32 ip_addr; | ||
184 | u32 netmask; | ||
185 | u32 mtu; | ||
186 | }; | ||
187 | |||
188 | struct c2_route { | ||
189 | u32 ip_addr; /* 0 indicates the default route */ | ||
190 | u32 netmask; /* netmask associated with dst */ | ||
191 | u32 flags; | ||
192 | union { | ||
193 | u32 ipaddr; /* address of the nexthop interface */ | ||
194 | u8 enaddr[6]; | ||
195 | } nexthop; | ||
196 | }; | ||
197 | |||
198 | /* | ||
199 | * A Scatter Gather Entry. | ||
200 | */ | ||
201 | struct c2_data_addr { | ||
202 | u32 stag; | ||
203 | u32 length; | ||
204 | u64 to; | ||
205 | }; | ||
206 | |||
207 | /* | ||
208 | * MR and MW flags used by the consumer, RI, and RNIC. | ||
209 | */ | ||
210 | enum c2_mm_flags { | ||
211 | MEM_REMOTE = 0x0001, /* allow mw binds with remote access. */ | ||
212 | MEM_VA_BASED = 0x0002, /* Not Zero-based */ | ||
213 | MEM_PBL_COMPLETE = 0x0004, /* PBL array is complete in this msg */ | ||
214 | MEM_LOCAL_READ = 0x0008, /* allow local reads */ | ||
215 | MEM_LOCAL_WRITE = 0x0010, /* allow local writes */ | ||
216 | MEM_REMOTE_READ = 0x0020, /* allow remote reads */ | ||
217 | MEM_REMOTE_WRITE = 0x0040, /* allow remote writes */ | ||
218 | MEM_WINDOW_BIND = 0x0080, /* binds allowed */ | ||
219 | MEM_SHARED = 0x0100, /* set if MR is shared */ | ||
220 | MEM_STAG_VALID = 0x0200 /* set if STAG is in valid state */ | ||
221 | }; | ||
222 | |||
223 | /* | ||
224 | * CCIL API ACF flags defined in terms of the low level mem flags. | ||
225 | * This minimizes translation needed in the user API | ||
226 | */ | ||
227 | enum c2_acf { | ||
228 | C2_ACF_LOCAL_READ = MEM_LOCAL_READ, | ||
229 | C2_ACF_LOCAL_WRITE = MEM_LOCAL_WRITE, | ||
230 | C2_ACF_REMOTE_READ = MEM_REMOTE_READ, | ||
231 | C2_ACF_REMOTE_WRITE = MEM_REMOTE_WRITE, | ||
232 | C2_ACF_WINDOW_BIND = MEM_WINDOW_BIND | ||
233 | }; | ||
234 | |||
235 | /* | ||
236 | * Image types of objects written to flash | ||
237 | */ | ||
238 | #define C2_FLASH_IMG_BITFILE 1 | ||
239 | #define C2_FLASH_IMG_OPTION_ROM 2 | ||
240 | #define C2_FLASH_IMG_VPD 3 | ||
241 | |||
242 | /* | ||
243 | * To fix bug 1815 we define the max allowable size of the | ||
244 | * terminate message (per the IETF spec). Refer to the IETF | ||
245 | * protocol specification, section 12.1.6, page 64. | ||
246 | * The message is prefixed by 20 bytes of DDP info. | ||
247 | * | ||
248 | * Then the message has 6 bytes for the terminate control | ||
249 | * and DDP segment length info plus a DDP header (either | ||
250 | * 14 or 18 bytes) plus 28 bytes for the RDMA header. | ||
251 | * Thus the max size is: | ||
252 | * 20 + (6 + 18 + 28) = 72 | ||
253 | */ | ||
254 | #define C2_MAX_TERMINATE_MESSAGE_SIZE (72) | ||
255 | |||
256 | /* | ||
257 | * Build String Length. It must be the same as C2_BUILD_STR_LEN in ccil_api.h | ||
258 | */ | ||
259 | #define WR_BUILD_STR_LEN 64 | ||
260 | |||
261 | /* | ||
262 | * WARNING: All of these structs need to align any 64bit types on | ||
263 | * 64 bit boundaries! 64bit types include u64 and u64. | ||
264 | */ | ||
265 | |||
266 | /* | ||
267 | * Clustercore Work Request Header. Be sensitive to field layout | ||
268 | * and alignment. | ||
269 | */ | ||
270 | struct c2wr_hdr { | ||
271 | /* wqe_count is part of the cqe. It is put here so the | ||
272 | * adapter can write to it while the wr is pending without | ||
273 | * clobbering part of the wr. This word need not be dma'd | ||
274 | * from the host to adapter by libccil, but we copy it anyway | ||
275 | * to make the memcpy to the adapter better aligned. | ||
276 | */ | ||
277 | u32 wqe_count; | ||
278 | |||
279 | /* Put these fields next so that later 32- and 64-bit | ||
280 | * quantities are naturally aligned. | ||
281 | */ | ||
282 | u8 id; | ||
283 | u8 result; /* adapter -> host */ | ||
284 | u8 sge_count; /* host -> adapter */ | ||
285 | u8 flags; /* host -> adapter */ | ||
286 | |||
287 | u64 context; | ||
288 | #ifdef CCMSGMAGIC | ||
289 | u32 magic; | ||
290 | u32 pad; | ||
291 | #endif | ||
292 | } __attribute__((packed)); | ||
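
The packed attribute plus the field ordering above is what keeps the 64-bit context member naturally aligned: wqe_count (4 bytes) followed by the four u8 fields (4 bytes) places context at offset 8. A hedged build-time sanity check, illustrative and not part of the patch (BUILD_BUG_ON() is from <linux/kernel.h>, offsetof() from <linux/stddef.h>):

	/* Illustrative layout check for struct c2wr_hdr; the CCMSGMAGIC
	 * fields, if enabled, follow context and do not change these
	 * offsets.
	 */
	static inline void c2wr_hdr_layout_check(void)
	{
		BUILD_BUG_ON(offsetof(struct c2wr_hdr, id) != 4);
		BUILD_BUG_ON(offsetof(struct c2wr_hdr, context) != 8);
	}
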
293 | |||
294 | /* | ||
295 | *------------------------ RNIC ------------------------ | ||
296 | */ | ||
297 | |||
298 | /* | ||
299 | * WR_RNIC_OPEN | ||
300 | */ | ||
301 | |||
302 | /* | ||
303 | * Flags for the RNIC WRs | ||
304 | */ | ||
305 | enum c2_rnic_flags { | ||
306 | RNIC_IRD_STATIC = 0x0001, | ||
307 | RNIC_ORD_STATIC = 0x0002, | ||
308 | RNIC_QP_STATIC = 0x0004, | ||
309 | RNIC_SRQ_SUPPORTED = 0x0008, | ||
310 | RNIC_PBL_BLOCK_MODE = 0x0010, | ||
311 | RNIC_SRQ_MODEL_ARRIVAL = 0x0020, | ||
312 | RNIC_CQ_OVF_DETECTED = 0x0040, | ||
313 | RNIC_PRIV_MODE = 0x0080 | ||
314 | }; | ||
315 | |||
316 | struct c2wr_rnic_open_req { | ||
317 | struct c2wr_hdr hdr; | ||
318 | u64 user_context; | ||
319 | u16 flags; /* See enum c2_rnic_flags */ | ||
320 | u16 port_num; | ||
321 | } __attribute__((packed)); | ||
322 | |||
323 | struct c2wr_rnic_open_rep { | ||
324 | struct c2wr_hdr hdr; | ||
325 | u32 rnic_handle; | ||
326 | } __attribute__((packed)); | ||
327 | |||
328 | union c2wr_rnic_open { | ||
329 | struct c2wr_rnic_open_req req; | ||
330 | struct c2wr_rnic_open_rep rep; | ||
331 | } __attribute__((packed)); | ||
332 | |||
333 | /* | ||
334 | * WR_RNIC_QUERY | ||
335 | */ | ||
336 | struct c2wr_rnic_query_req { | ||
337 | struct c2wr_hdr hdr; | ||
338 | u32 rnic_handle; | ||
339 | } __attribute__((packed)); | ||
340 | 	|||
341 | struct c2wr_rnic_query_rep { | ||
342 | struct c2wr_hdr hdr; | ||
343 | u64 user_context; | ||
344 | u32 vendor_id; | ||
345 | u32 part_number; | ||
346 | u32 hw_version; | ||
347 | u32 fw_ver_major; | ||
348 | u32 fw_ver_minor; | ||
349 | u32 fw_ver_patch; | ||
350 | char fw_ver_build_str[WR_BUILD_STR_LEN]; | ||
351 | u32 max_qps; | ||
352 | u32 max_qp_depth; | ||
353 | u32 max_srq_depth; | ||
354 | u32 max_send_sgl_depth; | ||
355 | u32 max_rdma_sgl_depth; | ||
356 | u32 max_cqs; | ||
357 | u32 max_cq_depth; | ||
358 | u32 max_cq_event_handlers; | ||
359 | u32 max_mrs; | ||
360 | u32 max_pbl_depth; | ||
361 | u32 max_pds; | ||
362 | u32 max_global_ird; | ||
363 | u32 max_global_ord; | ||
364 | u32 max_qp_ird; | ||
365 | u32 max_qp_ord; | ||
366 | u32 flags; | ||
367 | u32 max_mws; | ||
368 | u32 pbe_range_low; | ||
369 | u32 pbe_range_high; | ||
370 | u32 max_srqs; | ||
371 | u32 page_size; | ||
372 | } __attribute__((packed)); | ||
373 | |||
374 | union c2wr_rnic_query { | ||
375 | struct c2wr_rnic_query_req req; | ||
376 | struct c2wr_rnic_query_rep rep; | ||
377 | } __attribute__((packed)); | ||
378 | |||
379 | /* | ||
380 | * WR_RNIC_GETCONFIG | ||
381 | */ | ||
382 | |||
383 | struct c2wr_rnic_getconfig_req { | ||
384 | struct c2wr_hdr hdr; | ||
385 | u32 rnic_handle; | ||
386 | u32 option; /* see c2_getconfig_cmd_t */ | ||
387 | u64 reply_buf; | ||
388 | u32 reply_buf_len; | ||
389 | } __attribute__((packed)) ; | ||
390 | |||
391 | struct c2wr_rnic_getconfig_rep { | ||
392 | struct c2wr_hdr hdr; | ||
393 | u32 option; /* see c2_getconfig_cmd_t */ | ||
394 | u32 count_len; /* length of the number of addresses configured */ | ||
395 | } __attribute__((packed)) ; | ||
396 | |||
397 | union c2wr_rnic_getconfig { | ||
398 | struct c2wr_rnic_getconfig_req req; | ||
399 | struct c2wr_rnic_getconfig_rep rep; | ||
400 | } __attribute__((packed)) ; | ||
401 | |||
402 | /* | ||
403 | * WR_RNIC_SETCONFIG | ||
404 | */ | ||
405 | struct c2wr_rnic_setconfig_req { | ||
406 | struct c2wr_hdr hdr; | ||
407 | u32 rnic_handle; | ||
408 | u32 option; /* See c2_setconfig_cmd_t */ | ||
409 | /* variable data and pad. See c2_netaddr and c2_route */ | ||
410 | u8 data[0]; | ||
411 | } __attribute__((packed)) ; | ||
412 | |||
413 | struct c2wr_rnic_setconfig_rep { | ||
414 | struct c2wr_hdr hdr; | ||
415 | } __attribute__((packed)) ; | ||
416 | |||
417 | union c2wr_rnic_setconfig { | ||
418 | struct c2wr_rnic_setconfig_req req; | ||
419 | struct c2wr_rnic_setconfig_rep rep; | ||
420 | } __attribute__((packed)) ; | ||
421 | |||
422 | /* | ||
423 | * WR_RNIC_CLOSE | ||
424 | */ | ||
425 | struct c2wr_rnic_close_req { | ||
426 | struct c2wr_hdr hdr; | ||
427 | u32 rnic_handle; | ||
428 | } __attribute__((packed)) ; | ||
429 | |||
430 | struct c2wr_rnic_close_rep { | ||
431 | struct c2wr_hdr hdr; | ||
432 | } __attribute__((packed)) ; | ||
433 | |||
434 | union c2wr_rnic_close { | ||
435 | struct c2wr_rnic_close_req req; | ||
436 | struct c2wr_rnic_close_rep rep; | ||
437 | } __attribute__((packed)) ; | ||
438 | |||
439 | /* | ||
440 | *------------------------ CQ ------------------------ | ||
441 | */ | ||
442 | struct c2wr_cq_create_req { | ||
443 | struct c2wr_hdr hdr; | ||
444 | u64 shared_ht; | ||
445 | u64 user_context; | ||
446 | u64 msg_pool; | ||
447 | u32 rnic_handle; | ||
448 | u32 msg_size; | ||
449 | u32 depth; | ||
450 | } __attribute__((packed)) ; | ||
451 | |||
452 | struct c2wr_cq_create_rep { | ||
453 | struct c2wr_hdr hdr; | ||
454 | u32 mq_index; | ||
455 | u32 adapter_shared; | ||
456 | u32 cq_handle; | ||
457 | } __attribute__((packed)) ; | ||
458 | |||
459 | union c2wr_cq_create { | ||
460 | struct c2wr_cq_create_req req; | ||
461 | struct c2wr_cq_create_rep rep; | ||
462 | } __attribute__((packed)) ; | ||
463 | |||
464 | struct c2wr_cq_modify_req { | ||
465 | struct c2wr_hdr hdr; | ||
466 | u32 rnic_handle; | ||
467 | u32 cq_handle; | ||
468 | u32 new_depth; | ||
469 | u64 new_msg_pool; | ||
470 | } __attribute__((packed)) ; | ||
471 | |||
472 | struct c2wr_cq_modify_rep { | ||
473 | struct c2wr_hdr hdr; | ||
474 | } __attribute__((packed)) ; | ||
475 | |||
476 | union c2wr_cq_modify { | ||
477 | struct c2wr_cq_modify_req req; | ||
478 | struct c2wr_cq_modify_rep rep; | ||
479 | } __attribute__((packed)) ; | ||
480 | |||
481 | struct c2wr_cq_destroy_req { | ||
482 | struct c2wr_hdr hdr; | ||
483 | u32 rnic_handle; | ||
484 | u32 cq_handle; | ||
485 | } __attribute__((packed)) ; | ||
486 | |||
487 | struct c2wr_cq_destroy_rep { | ||
488 | struct c2wr_hdr hdr; | ||
489 | } __attribute__((packed)) ; | ||
490 | |||
491 | union c2wr_cq_destroy { | ||
492 | struct c2wr_cq_destroy_req req; | ||
493 | struct c2wr_cq_destroy_rep rep; | ||
494 | } __attribute__((packed)) ; | ||
495 | |||
496 | /* | ||
497 | *------------------------ PD ------------------------ | ||
498 | */ | ||
499 | struct c2wr_pd_alloc_req { | ||
500 | struct c2wr_hdr hdr; | ||
501 | u32 rnic_handle; | ||
502 | u32 pd_id; | ||
503 | } __attribute__((packed)) ; | ||
504 | |||
505 | struct c2wr_pd_alloc_rep { | ||
506 | struct c2wr_hdr hdr; | ||
507 | } __attribute__((packed)) ; | ||
508 | |||
509 | union c2wr_pd_alloc { | ||
510 | struct c2wr_pd_alloc_req req; | ||
511 | struct c2wr_pd_alloc_rep rep; | ||
512 | } __attribute__((packed)) ; | ||
513 | |||
514 | struct c2wr_pd_dealloc_req { | ||
515 | struct c2wr_hdr hdr; | ||
516 | u32 rnic_handle; | ||
517 | u32 pd_id; | ||
518 | } __attribute__((packed)) ; | ||
519 | |||
520 | struct c2wr_pd_dealloc_rep { | ||
521 | struct c2wr_hdr hdr; | ||
522 | } __attribute__((packed)) ; | ||
523 | |||
524 | union c2wr_pd_dealloc { | ||
525 | struct c2wr_pd_dealloc_req req; | ||
526 | struct c2wr_pd_dealloc_rep rep; | ||
527 | } __attribute__((packed)) ; | ||
528 | |||
529 | /* | ||
530 | *------------------------ SRQ ------------------------ | ||
531 | */ | ||
532 | struct c2wr_srq_create_req { | ||
533 | struct c2wr_hdr hdr; | ||
534 | u64 shared_ht; | ||
535 | u64 user_context; | ||
536 | u32 rnic_handle; | ||
537 | u32 srq_depth; | ||
538 | u32 srq_limit; | ||
539 | u32 sgl_depth; | ||
540 | u32 pd_id; | ||
541 | } __attribute__((packed)) ; | ||
542 | |||
543 | struct c2wr_srq_create_rep { | ||
544 | struct c2wr_hdr hdr; | ||
545 | u32 srq_depth; | ||
546 | u32 sgl_depth; | ||
547 | u32 msg_size; | ||
548 | u32 mq_index; | ||
549 | u32 mq_start; | ||
550 | u32 srq_handle; | ||
551 | } __attribute__((packed)) ; | ||
552 | |||
553 | union c2wr_srq_create { | ||
554 | struct c2wr_srq_create_req req; | ||
555 | struct c2wr_srq_create_rep rep; | ||
556 | } __attribute__((packed)) ; | ||
557 | |||
558 | struct c2wr_srq_destroy_req { | ||
559 | struct c2wr_hdr hdr; | ||
560 | u32 rnic_handle; | ||
561 | u32 srq_handle; | ||
562 | } __attribute__((packed)) ; | ||
563 | |||
564 | struct c2wr_srq_destroy_rep { | ||
565 | struct c2wr_hdr hdr; | ||
566 | } __attribute__((packed)) ; | ||
567 | |||
568 | union c2wr_srq_destroy { | ||
569 | struct c2wr_srq_destroy_req req; | ||
570 | struct c2wr_srq_destroy_rep rep; | ||
571 | } __attribute__((packed)) ; | ||
572 | |||
573 | /* | ||
574 | *------------------------ QP ------------------------ | ||
575 | */ | ||
576 | enum c2wr_qp_flags { | ||
577 | QP_RDMA_READ = 0x00000001, /* RDMA read enabled? */ | ||
578 | QP_RDMA_WRITE = 0x00000002, /* RDMA write enabled? */ | ||
579 | QP_MW_BIND = 0x00000004, /* MWs enabled */ | ||
580 | QP_ZERO_STAG = 0x00000008, /* zero STag enabled? */ | ||
581 | QP_REMOTE_TERMINATION = 0x00000010, /* remote end terminated */ | ||
582 | QP_RDMA_READ_RESPONSE = 0x00000020 /* Remote RDMA read */ | ||
583 | /* enabled? */ | ||
584 | }; | ||
585 | |||
586 | struct c2wr_qp_create_req { | ||
587 | struct c2wr_hdr hdr; | ||
588 | u64 shared_sq_ht; | ||
589 | u64 shared_rq_ht; | ||
590 | u64 user_context; | ||
591 | u32 rnic_handle; | ||
592 | u32 sq_cq_handle; | ||
593 | u32 rq_cq_handle; | ||
594 | u32 sq_depth; | ||
595 | u32 rq_depth; | ||
596 | u32 srq_handle; | ||
597 | u32 srq_limit; | ||
598 | u32 flags; /* see enum c2wr_qp_flags */ | ||
599 | u32 send_sgl_depth; | ||
600 | u32 recv_sgl_depth; | ||
601 | u32 rdma_write_sgl_depth; | ||
602 | u32 ord; | ||
603 | u32 ird; | ||
604 | u32 pd_id; | ||
605 | } __attribute__((packed)) ; | ||
606 | |||
607 | struct c2wr_qp_create_rep { | ||
608 | struct c2wr_hdr hdr; | ||
609 | u32 sq_depth; | ||
610 | u32 rq_depth; | ||
611 | u32 send_sgl_depth; | ||
612 | u32 recv_sgl_depth; | ||
613 | u32 rdma_write_sgl_depth; | ||
614 | u32 ord; | ||
615 | u32 ird; | ||
616 | u32 sq_msg_size; | ||
617 | u32 sq_mq_index; | ||
618 | u32 sq_mq_start; | ||
619 | u32 rq_msg_size; | ||
620 | u32 rq_mq_index; | ||
621 | u32 rq_mq_start; | ||
622 | u32 qp_handle; | ||
623 | } __attribute__((packed)) ; | ||
624 | |||
625 | union c2wr_qp_create { | ||
626 | struct c2wr_qp_create_req req; | ||
627 | struct c2wr_qp_create_rep rep; | ||
628 | } __attribute__((packed)) ; | ||
629 | |||
630 | struct c2wr_qp_query_req { | ||
631 | struct c2wr_hdr hdr; | ||
632 | u32 rnic_handle; | ||
633 | u32 qp_handle; | ||
634 | } __attribute__((packed)) ; | ||
635 | |||
636 | struct c2wr_qp_query_rep { | ||
637 | struct c2wr_hdr hdr; | ||
638 | u64 user_context; | ||
639 | u32 rnic_handle; | ||
640 | u32 sq_depth; | ||
641 | u32 rq_depth; | ||
642 | u32 send_sgl_depth; | ||
643 | u32 rdma_write_sgl_depth; | ||
644 | u32 recv_sgl_depth; | ||
645 | u32 ord; | ||
646 | u32 ird; | ||
647 | u16 qp_state; | ||
648 | u16 flags; /* see enum c2wr_qp_flags */ | ||
649 | u32 qp_id; | ||
650 | u32 local_addr; | ||
651 | u32 remote_addr; | ||
652 | u16 local_port; | ||
653 | u16 remote_port; | ||
654 | u32 terminate_msg_length; /* 0 if not present */ | ||
655 | u8 data[0]; | ||
656 | /* Terminate Message in-line here. */ | ||
657 | } __attribute__((packed)) ; | ||
658 | |||
659 | union c2wr_qp_query { | ||
660 | struct c2wr_qp_query_req req; | ||
661 | struct c2wr_qp_query_rep rep; | ||
662 | } __attribute__((packed)) ; | ||
663 | |||
664 | struct c2wr_qp_modify_req { | ||
665 | struct c2wr_hdr hdr; | ||
666 | u64 stream_msg; | ||
667 | u32 stream_msg_length; | ||
668 | u32 rnic_handle; | ||
669 | u32 qp_handle; | ||
670 | u32 next_qp_state; | ||
671 | u32 ord; | ||
672 | u32 ird; | ||
673 | u32 sq_depth; | ||
674 | u32 rq_depth; | ||
675 | u32 llp_ep_handle; | ||
676 | } __attribute__((packed)) ; | ||
677 | |||
678 | struct c2wr_qp_modify_rep { | ||
679 | struct c2wr_hdr hdr; | ||
680 | u32 ord; | ||
681 | u32 ird; | ||
682 | u32 sq_depth; | ||
683 | u32 rq_depth; | ||
684 | u32 sq_msg_size; | ||
685 | u32 sq_mq_index; | ||
686 | u32 sq_mq_start; | ||
687 | u32 rq_msg_size; | ||
688 | u32 rq_mq_index; | ||
689 | u32 rq_mq_start; | ||
690 | } __attribute__((packed)) ; | ||
691 | |||
692 | union c2wr_qp_modify { | ||
693 | struct c2wr_qp_modify_req req; | ||
694 | struct c2wr_qp_modify_rep rep; | ||
695 | } __attribute__((packed)) ; | ||
696 | |||
697 | struct c2wr_qp_destroy_req { | ||
698 | struct c2wr_hdr hdr; | ||
699 | u32 rnic_handle; | ||
700 | u32 qp_handle; | ||
701 | } __attribute__((packed)) ; | ||
702 | |||
703 | struct c2wr_qp_destroy_rep { | ||
704 | struct c2wr_hdr hdr; | ||
705 | } __attribute__((packed)) ; | ||
706 | |||
707 | union c2wr_qp_destroy { | ||
708 | struct c2wr_qp_destroy_req req; | ||
709 | struct c2wr_qp_destroy_rep rep; | ||
710 | } __attribute__((packed)) ; | ||
711 | |||
712 | /* | ||
713 | * The CCWR_QP_CONNECT msg is posted on the verbs request queue. It can | ||
714 | * only be posted when a QP is in IDLE state. After the connect request is | ||
715 | * submitted to the LLP, the adapter moves the QP to CONNECT_PENDING state. | ||
716 | * No synchronous reply from adapter to this WR. The results of | ||
717 | * connection are passed back in an async event CCAE_ACTIVE_CONNECT_RESULTS. | ||
718 | * See struct c2wr_ae_active_connect_results. | ||
719 | */ | ||
720 | struct c2wr_qp_connect_req { | ||
721 | struct c2wr_hdr hdr; | ||
722 | u32 rnic_handle; | ||
723 | u32 qp_handle; | ||
724 | u32 remote_addr; | ||
725 | u16 remote_port; | ||
726 | u16 pad; | ||
727 | u32 private_data_length; | ||
728 | u8 private_data[0]; /* Private data in-line. */ | ||
729 | } __attribute__((packed)) ; | ||
730 | |||
731 | struct c2wr_qp_connect { | ||
732 | struct c2wr_qp_connect_req req; | ||
733 | /* no synchronous reply. */ | ||
734 | } __attribute__((packed)) ; | ||
735 | |||
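To make the asynchronous contract above concrete, here is a hedged sketch of building an active-connect WR; byte ordering and the posting path (the hypothetical vq_send_wr() again) are elided:

    /* Sketch: connect a QP that is currently in IDLE state. */
    static int post_connect(struct c2_dev *c2dev, u32 rnic_handle,
                            u32 qp_handle, u32 remote_addr, u16 remote_port,
                            const void *pdata, u32 pdata_len)
    {
            struct c2wr_qp_connect_req *wr;

            wr = kmalloc(sizeof(*wr) + pdata_len, GFP_KERNEL);
            if (!wr)
                    return -ENOMEM;
            c2_wr_set_id(wr, CCWR_QP_CONNECT);
            wr->rnic_handle = rnic_handle;
            wr->qp_handle = qp_handle;
            wr->remote_addr = remote_addr;   /* network byte order */
            wr->remote_port = remote_port;
            wr->private_data_length = pdata_len;
            memcpy(wr->private_data, pdata, pdata_len);
            /* No synchronous reply: the outcome arrives later as a
             * CCAE_ACTIVE_CONNECT_RESULTS event on the AE queue. */
            /* ... post with vq_send_wr() ... */
            kfree(wr);
            return 0;
    }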
736 | |||
737 | /* | ||
738 | *------------------------ MM ------------------------ | ||
739 | */ | ||
740 | |||
741 | struct c2wr_nsmr_stag_alloc_req { | ||
742 | struct c2wr_hdr hdr; | ||
743 | u32 rnic_handle; | ||
744 | u32 pbl_depth; | ||
745 | u32 pd_id; | ||
746 | u32 flags; | ||
747 | } __attribute__((packed)) ; | ||
748 | |||
749 | struct c2wr_nsmr_stag_alloc_rep { | ||
750 | struct c2wr_hdr hdr; | ||
751 | u32 pbl_depth; | ||
752 | u32 stag_index; | ||
753 | } __attribute__((packed)) ; | ||
754 | |||
755 | union c2wr_nsmr_stag_alloc { | ||
756 | struct c2wr_nsmr_stag_alloc_req req; | ||
757 | struct c2wr_nsmr_stag_alloc_rep rep; | ||
758 | } __attribute__((packed)) ; | ||
759 | |||
760 | struct c2wr_nsmr_register_req { | ||
761 | struct c2wr_hdr hdr; | ||
762 | u64 va; | ||
763 | u32 rnic_handle; | ||
764 | u16 flags; | ||
765 | u8 stag_key; | ||
766 | u8 pad; | ||
767 | u32 pd_id; | ||
768 | u32 pbl_depth; | ||
769 | u32 pbe_size; | ||
770 | u32 fbo; | ||
771 | u32 length; | ||
772 | u32 addrs_length; | ||
773 | /* array of paddrs (must be aligned on a 64bit boundary) */ | ||
774 | u64 paddrs[0]; | ||
775 | } __attribute__((packed)) ; | ||
776 | |||
777 | struct c2wr_nsmr_register_rep { | ||
778 | struct c2wr_hdr hdr; | ||
779 | u32 pbl_depth; | ||
780 | u32 stag_index; | ||
781 | } __attribute__((packed)) ; | ||
782 | |||
783 | union c2wr_nsmr_register { | ||
784 | struct c2wr_nsmr_register_req req; | ||
785 | struct c2wr_nsmr_register_rep rep; | ||
786 | } __attribute__((packed)) ; | ||
787 | |||
788 | struct c2wr_nsmr_pbl_req { | ||
789 | struct c2wr_hdr hdr; | ||
790 | u32 rnic_handle; | ||
791 | u32 flags; | ||
792 | u32 stag_index; | ||
793 | u32 addrs_length; | ||
794 | /* array of paddrs (must be aligned on a 64bit boundary) */ | ||
795 | u64 paddrs[0]; | ||
796 | } __attribute__((packed)) ; | ||
797 | |||
798 | struct c2wr_nsmr_pbl_rep { | ||
799 | struct c2wr_hdr hdr; | ||
800 | } __attribute__((packed)) ; | ||
801 | |||
802 | union c2wr_nsmr_pbl { | ||
803 | struct c2wr_nsmr_pbl_req req; | ||
804 | struct c2wr_nsmr_pbl_rep rep; | ||
805 | } __attribute__((packed)) ; | ||
806 | |||
807 | struct c2wr_mr_query_req { | ||
808 | struct c2wr_hdr hdr; | ||
809 | u32 rnic_handle; | ||
810 | u32 stag_index; | ||
811 | } __attribute__((packed)) ; | ||
812 | |||
813 | struct c2wr_mr_query_rep { | ||
814 | struct c2wr_hdr hdr; | ||
815 | u8 stag_key; | ||
816 | u8 pad[3]; | ||
817 | u32 pd_id; | ||
818 | u32 flags; | ||
819 | u32 pbl_depth; | ||
820 | } __attribute__((packed)) ; | ||
821 | |||
822 | union c2wr_mr_query { | ||
823 | struct c2wr_mr_query_req req; | ||
824 | struct c2wr_mr_query_rep rep; | ||
825 | } __attribute__((packed)) ; | ||
826 | |||
827 | struct c2wr_mw_query_req { | ||
828 | struct c2wr_hdr hdr; | ||
829 | u32 rnic_handle; | ||
830 | u32 stag_index; | ||
831 | } __attribute__((packed)) ; | ||
832 | |||
833 | struct c2wr_mw_query_rep { | ||
834 | struct c2wr_hdr hdr; | ||
835 | u8 stag_key; | ||
836 | u8 pad[3]; | ||
837 | u32 pd_id; | ||
838 | u32 flags; | ||
839 | } __attribute__((packed)) ; | ||
840 | |||
841 | union c2wr_mw_query { | ||
842 | struct c2wr_mw_query_req req; | ||
843 | struct c2wr_mw_query_rep rep; | ||
844 | } __attribute__((packed)) ; | ||
845 | |||
846 | |||
847 | struct c2wr_stag_dealloc_req { | ||
848 | struct c2wr_hdr hdr; | ||
849 | u32 rnic_handle; | ||
850 | u32 stag_index; | ||
851 | } __attribute__((packed)) ; | ||
852 | |||
853 | struct c2wr_stag_dealloc_rep { | ||
854 | struct c2wr_hdr hdr; | ||
855 | } __attribute__((packed)) ; | ||
856 | |||
857 | union c2wr_stag_dealloc { | ||
858 | struct c2wr_stag_dealloc_req req; | ||
859 | struct c2wr_stag_dealloc_rep rep; | ||
860 | } __attribute__((packed)) ; | ||
861 | |||
862 | struct c2wr_nsmr_reregister_req { | ||
863 | struct c2wr_hdr hdr; | ||
864 | u64 va; | ||
865 | u32 rnic_handle; | ||
866 | u16 flags; | ||
867 | u8 stag_key; | ||
868 | u8 pad; | ||
869 | u32 stag_index; | ||
870 | u32 pd_id; | ||
871 | u32 pbl_depth; | ||
872 | u32 pbe_size; | ||
873 | u32 fbo; | ||
874 | u32 length; | ||
875 | u32 addrs_length; | ||
876 | u32 pad1; | ||
877 | /* array of paddrs (must be aligned on a 64bit boundary) */ | ||
878 | u64 paddrs[0]; | ||
879 | } __attribute__((packed)) ; | ||
880 | |||
881 | struct c2wr_nsmr_reregister_rep { | ||
882 | struct c2wr_hdr hdr; | ||
883 | u32 pbl_depth; | ||
884 | u32 stag_index; | ||
885 | } __attribute__((packed)) ; | ||
886 | |||
887 | union c2wr_nsmr_reregister { | ||
888 | struct c2wr_nsmr_reregister_req req; | ||
889 | struct c2wr_nsmr_reregister_rep rep; | ||
890 | } __attribute__((packed)) ; | ||
891 | |||
892 | struct c2wr_smr_register_req { | ||
893 | struct c2wr_hdr hdr; | ||
894 | u64 va; | ||
895 | u32 rnic_handle; | ||
896 | u16 flags; | ||
897 | u8 stag_key; | ||
898 | u8 pad; | ||
899 | u32 stag_index; | ||
900 | u32 pd_id; | ||
901 | } __attribute__((packed)) ; | ||
902 | |||
903 | struct c2wr_smr_register_rep { | ||
904 | struct c2wr_hdr hdr; | ||
905 | u32 stag_index; | ||
906 | } __attribute__((packed)) ; | ||
907 | |||
908 | union c2wr_smr_register { | ||
909 | struct c2wr_smr_register_req req; | ||
910 | struct c2wr_smr_register_rep rep; | ||
911 | } __attribute__((packed)) ; | ||
912 | |||
913 | struct c2wr_mw_alloc_req { | ||
914 | struct c2wr_hdr hdr; | ||
915 | u32 rnic_handle; | ||
916 | u32 pd_id; | ||
917 | } __attribute__((packed)) ; | ||
918 | |||
919 | struct c2wr_mw_alloc_rep { | ||
920 | struct c2wr_hdr hdr; | ||
921 | u32 stag_index; | ||
922 | } __attribute__((packed)) ; | ||
923 | |||
924 | union c2wr_mw_alloc { | ||
925 | struct c2wr_mw_alloc_req req; | ||
926 | struct c2wr_mw_alloc_rep rep; | ||
927 | } __attribute__((packed)) ; | ||
928 | |||
929 | /* | ||
930 | *------------------------ WRs ----------------------- | ||
931 | */ | ||
932 | |||
933 | struct c2wr_user_hdr { | ||
934 | struct c2wr_hdr hdr; /* Has status and WR Type */ | ||
935 | } __attribute__((packed)) ; | ||
936 | |||
937 | enum c2_qp_state { | ||
938 | C2_QP_STATE_IDLE = 0x01, | ||
939 | C2_QP_STATE_CONNECTING = 0x02, | ||
940 | C2_QP_STATE_RTS = 0x04, | ||
941 | C2_QP_STATE_CLOSING = 0x08, | ||
942 | C2_QP_STATE_TERMINATE = 0x10, | ||
943 | C2_QP_STATE_ERROR = 0x20, | ||
944 | }; | ||
945 | |||
946 | /* Completion queue entry. */ | ||
947 | struct c2wr_ce { | ||
948 | struct c2wr_hdr hdr; /* Has status and WR Type */ | ||
949 | u64 qp_user_context; /* c2_user_qp_t * */ | ||
950 | u32 qp_state; /* Current QP State */ | ||
951 | u32 handle; /* QPID or EP Handle */ | ||
952 | u32 bytes_rcvd; /* valid for RECV WCs */ | ||
953 | u32 stag; | ||
954 | } __attribute__((packed)) ; | ||
955 | |||
956 | |||
957 | /* | ||
958 | * Flags used for all post-sq WRs. These must fit in the flags | ||
959 | * field of the struct c2wr_hdr (eight bits). | ||
960 | */ | ||
961 | enum { | ||
962 | SQ_SIGNALED = 0x01, | ||
963 | SQ_READ_FENCE = 0x02, | ||
964 | SQ_FENCE = 0x04, | ||
965 | }; | ||
966 | |||
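These flags are OR'ed together and stored through c2_wr_set_flags(), defined at the end of this header; a tiny sketch:

    /* Sketch: request a signaled completion plus a read fence; the
     * combined value still fits in the u8 flags field of c2wr_hdr. */
    static void mark_signaled_fenced(union c2wr_sqwr *wr)
    {
            c2_wr_set_flags(wr, SQ_SIGNALED | SQ_READ_FENCE);
    }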
967 | /* | ||
968 | * Common fields for all post-sq WRs. Namely the standard header and a | ||
969 | * secondary header with fields common to all post-sq WRs. | ||
970 | */ | ||
971 | struct c2_sq_hdr { | ||
972 | struct c2wr_user_hdr user_hdr; | ||
973 | } __attribute__((packed)); | ||
974 | |||
975 | /* | ||
976 | * Same as above but for post-rq WRs. | ||
977 | */ | ||
978 | struct c2_rq_hdr { | ||
979 | struct c2wr_user_hdr user_hdr; | ||
980 | } __attribute__((packed)); | ||
981 | |||
982 | /* | ||
983 | * use the same struct for all sends. | ||
984 | */ | ||
985 | struct c2wr_send_req { | ||
986 | struct c2_sq_hdr sq_hdr; | ||
987 | u32 sge_len; | ||
988 | u32 remote_stag; | ||
989 | u8 data[0]; /* SGE array */ | ||
990 | } __attribute__((packed)); | ||
991 | |||
992 | union c2wr_send { | ||
993 | struct c2wr_send_req req; | ||
994 | struct c2wr_ce rep; | ||
995 | } __attribute__((packed)); | ||
996 | |||
997 | struct c2wr_rdma_write_req { | ||
998 | struct c2_sq_hdr sq_hdr; | ||
999 | u64 remote_to; | ||
1000 | u32 remote_stag; | ||
1001 | u32 sge_len; | ||
1002 | u8 data[0]; /* SGE array */ | ||
1003 | } __attribute__((packed)); | ||
1004 | |||
1005 | union c2wr_rdma_write { | ||
1006 | struct c2wr_rdma_write_req req; | ||
1007 | struct c2wr_ce rep; | ||
1008 | } __attribute__((packed)); | ||
1009 | |||
1010 | struct c2wr_rdma_read_req { | ||
1011 | struct c2_sq_hdr sq_hdr; | ||
1012 | u64 local_to; | ||
1013 | u64 remote_to; | ||
1014 | u32 local_stag; | ||
1015 | u32 remote_stag; | ||
1016 | u32 length; | ||
1017 | } __attribute__((packed)); | ||
1018 | |||
1019 | union c2wr_rdma_read { | ||
1020 | struct c2wr_rdma_read_req req; | ||
1021 | struct c2wr_ce rep; | ||
1022 | } __attribute__((packed)); | ||
1023 | |||
1024 | struct c2wr_mw_bind_req { | ||
1025 | struct c2_sq_hdr sq_hdr; | ||
1026 | u64 va; | ||
1027 | u8 stag_key; | ||
1028 | u8 pad[3]; | ||
1029 | u32 mw_stag_index; | ||
1030 | u32 mr_stag_index; | ||
1031 | u32 length; | ||
1032 | u32 flags; | ||
1033 | } __attribute__((packed)); | ||
1034 | |||
1035 | union c2wr_mw_bind { | ||
1036 | struct c2wr_mw_bind_req req; | ||
1037 | struct c2wr_ce rep; | ||
1038 | } __attribute__((packed)); | ||
1039 | |||
1040 | struct c2wr_nsmr_fastreg_req { | ||
1041 | struct c2_sq_hdr sq_hdr; | ||
1042 | u64 va; | ||
1043 | u8 stag_key; | ||
1044 | u8 pad[3]; | ||
1045 | u32 stag_index; | ||
1046 | u32 pbe_size; | ||
1047 | u32 fbo; | ||
1048 | u32 length; | ||
1049 | u32 addrs_length; | ||
1050 | /* array of paddrs (must be aligned on a 64bit boundary) */ | ||
1051 | u64 paddrs[0]; | ||
1052 | } __attribute__((packed)); | ||
1053 | |||
1054 | union c2wr_nsmr_fastreg { | ||
1055 | struct c2wr_nsmr_fastreg_req req; | ||
1056 | struct c2wr_ce rep; | ||
1057 | } __attribute__((packed)); | ||
1058 | |||
1059 | struct c2wr_stag_invalidate_req { | ||
1060 | struct c2_sq_hdr sq_hdr; | ||
1061 | u8 stag_key; | ||
1062 | u8 pad[3]; | ||
1063 | u32 stag_index; | ||
1064 | } __attribute__((packed)); | ||
1065 | |||
1066 | union c2wr_stag_invalidate { | ||
1067 | struct c2wr_stag_invalidate_req req; | ||
1068 | struct c2wr_ce rep; | ||
1069 | } __attribute__((packed)); | ||
1070 | |||
1071 | union c2wr_sqwr { | ||
1072 | struct c2_sq_hdr sq_hdr; | ||
1073 | struct c2wr_send_req send; | ||
1074 | struct c2wr_send_req send_se; | ||
1075 | struct c2wr_send_req send_inv; | ||
1076 | struct c2wr_send_req send_se_inv; | ||
1077 | struct c2wr_rdma_write_req rdma_write; | ||
1078 | struct c2wr_rdma_read_req rdma_read; | ||
1079 | struct c2wr_mw_bind_req mw_bind; | ||
1080 | struct c2wr_nsmr_fastreg_req nsmr_fastreg; | ||
1081 | struct c2wr_stag_invalidate_req stag_inv; | ||
1082 | } __attribute__((packed)); | ||
1083 | |||
1084 | |||
1085 | /* | ||
1086 | * RQ WRs | ||
1087 | */ | ||
1088 | struct c2wr_rqwr { | ||
1089 | struct c2_rq_hdr rq_hdr; | ||
1090 | u8 data[0]; /* array of SGEs */ | ||
1091 | } __attribute__((packed)); | ||
1092 | |||
1093 | union c2wr_recv { | ||
1094 | struct c2wr_rqwr req; | ||
1095 | struct c2wr_ce rep; | ||
1096 | } __attribute__((packed)); | ||
1097 | |||
1098 | /* | ||
1099 | * All AEs start with this header. Most AEs only need to convey the | ||
1100 | * information in the header. Some, like LLP connection events, need | ||
1101 | * more info. The union c2wr_ae has all the possible AEs. | ||
1102 | * | ||
1103 | * hdr.context is the user_context from the rnic_open WR. NULL if this | ||
1104 | * is not affiliated with an RNIC. | ||
1105 | * | ||
1106 | * hdr.id is the AE identifier (e.g. CCAE_REMOTE_SHUTDOWN, | ||
1107 | * CCAE_LLP_CLOSE_COMPLETE) | ||
1108 | * | ||
1109 | * resource_type is one of: C2_RES_IND_QP, C2_RES_IND_CQ, C2_RES_IND_SRQ | ||
1110 | * | ||
1111 | * user_context is the context passed down when the host created the resource. | ||
1112 | */ | ||
1113 | struct c2wr_ae_hdr { | ||
1114 | struct c2wr_hdr hdr; | ||
1115 | u64 user_context; /* user context for this res. */ | ||
1116 | u32 resource_type; /* see enum c2_resource_indicator */ | ||
1117 | u32 resource; /* handle for resource */ | ||
1118 | u32 qp_state; /* current QP State */ | ||
1119 | } __attribute__((packed)); | ||
1120 | |||
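A consumer of the AE queue typically dispatches on the header fields just described. A hedged sketch (the CCAE_* ids are assumed from elsewhere in this driver, and the handler is hypothetical):

    /* Sketch: minimal AE dispatch using struct c2wr_ae_hdr. */
    static void handle_ae(union c2wr_ae *ae)
    {
            struct c2wr_ae_hdr *hdr = &ae->ae_generic;

            switch (c2_wr_get_id(hdr)) {
            case CCAE_ACTIVE_CONNECT_RESULTS:
                    /* QP moved to RTS; addresses, ports, and private
                     * data are in ae->ae_active_connect_results. */
                    break;
            case CCAE_CONNECTION_REQUEST:
                    /* Passive side: save cr_handle for a later
                     * accept or reject WR. */
                    break;
            default:
                    /* hdr->user_context names the affected resource;
                     * hdr->qp_state is its current state. */
                    break;
            }
    }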
1121 | /* | ||
1122 | * After submitting the CCAE_ACTIVE_CONNECT_RESULTS message on the AEQ, | ||
1123 | * the adapter moves the QP into RTS state. | ||
1124 | */ | ||
1125 | struct c2wr_ae_active_connect_results { | ||
1126 | struct c2wr_ae_hdr ae_hdr; | ||
1127 | u32 laddr; | ||
1128 | u32 raddr; | ||
1129 | u16 lport; | ||
1130 | u16 rport; | ||
1131 | u32 private_data_length; | ||
1132 | u8 private_data[0]; /* data is in-line in the msg. */ | ||
1133 | } __attribute__((packed)); | ||
1134 | |||
1135 | /* | ||
1136 | * When connections are established by the stack (and the private data | ||
1137 | * MPA frame is received), the adapter will generate an event to the host. | ||
1138 | * The details of the connection, any private data, and the new connection | ||
1139 | * request handle is passed up via the CCAE_CONNECTION_REQUEST msg on the | ||
1140 | * AE queue: | ||
1141 | */ | ||
1142 | struct c2wr_ae_connection_request { | ||
1143 | struct c2wr_ae_hdr ae_hdr; | ||
1144 | u32 cr_handle; /* connreq handle (sock ptr) */ | ||
1145 | u32 laddr; | ||
1146 | u32 raddr; | ||
1147 | u16 lport; | ||
1148 | u16 rport; | ||
1149 | u32 private_data_length; | ||
1150 | u8 private_data[0]; /* data is in-line in the msg. */ | ||
1151 | } __attribute__((packed)); | ||
1152 | |||
1153 | union c2wr_ae { | ||
1154 | struct c2wr_ae_hdr ae_generic; | ||
1155 | struct c2wr_ae_active_connect_results ae_active_connect_results; | ||
1156 | struct c2wr_ae_connection_request ae_connection_request; | ||
1157 | } __attribute__((packed)); | ||
1158 | |||
1159 | struct c2wr_init_req { | ||
1160 | struct c2wr_hdr hdr; | ||
1161 | u64 hint_count; | ||
1162 | u64 q0_host_shared; | ||
1163 | u64 q1_host_shared; | ||
1164 | u64 q1_host_msg_pool; | ||
1165 | u64 q2_host_shared; | ||
1166 | u64 q2_host_msg_pool; | ||
1167 | } __attribute__((packed)); | ||
1168 | |||
1169 | struct c2wr_init_rep { | ||
1170 | struct c2wr_hdr hdr; | ||
1171 | } __attribute__((packed)); | ||
1172 | |||
1173 | union c2wr_init { | ||
1174 | struct c2wr_init_req req; | ||
1175 | struct c2wr_init_rep rep; | ||
1176 | } __attribute__((packed)); | ||
1177 | |||
1178 | /* | ||
1179 | * For upgrading flash. | ||
1180 | */ | ||
1181 | |||
1182 | struct c2wr_flash_init_req { | ||
1183 | struct c2wr_hdr hdr; | ||
1184 | u32 rnic_handle; | ||
1185 | } __attribute__((packed)); | ||
1186 | |||
1187 | struct c2wr_flash_init_rep { | ||
1188 | struct c2wr_hdr hdr; | ||
1189 | u32 adapter_flash_buf_offset; | ||
1190 | u32 adapter_flash_len; | ||
1191 | } __attribute__((packed)); | ||
1192 | |||
1193 | union c2wr_flash_init { | ||
1194 | struct c2wr_flash_init_req req; | ||
1195 | struct c2wr_flash_init_rep rep; | ||
1196 | } __attribute__((packed)); | ||
1197 | |||
1198 | struct c2wr_flash_req { | ||
1199 | struct c2wr_hdr hdr; | ||
1200 | u32 rnic_handle; | ||
1201 | u32 len; | ||
1202 | } __attribute__((packed)); | ||
1203 | |||
1204 | struct c2wr_flash_rep { | ||
1205 | struct c2wr_hdr hdr; | ||
1206 | u32 status; | ||
1207 | } __attribute__((packed)); | ||
1208 | |||
1209 | union c2wr_flash { | ||
1210 | struct c2wr_flash_req req; | ||
1211 | struct c2wr_flash_rep rep; | ||
1212 | } __attribute__((packed)); | ||
1213 | |||
1214 | struct c2wr_buf_alloc_req { | ||
1215 | struct c2wr_hdr hdr; | ||
1216 | u32 rnic_handle; | ||
1217 | u32 size; | ||
1218 | } __attribute__((packed)); | ||
1219 | |||
1220 | struct c2wr_buf_alloc_rep { | ||
1221 | struct c2wr_hdr hdr; | ||
1222 | u32 offset; /* 0 if mem not available */ | ||
1223 | u32 size; /* 0 if mem not available */ | ||
1224 | } __attribute__((packed)); | ||
1225 | |||
1226 | union c2wr_buf_alloc { | ||
1227 | struct c2wr_buf_alloc_req req; | ||
1228 | struct c2wr_buf_alloc_rep rep; | ||
1229 | } __attribute__((packed)); | ||
1230 | |||
1231 | struct c2wr_buf_free_req { | ||
1232 | struct c2wr_hdr hdr; | ||
1233 | u32 rnic_handle; | ||
1234 | u32 offset; /* Must match value from alloc */ | ||
1235 | u32 size; /* Must match value from alloc */ | ||
1236 | } __attribute__((packed)); | ||
1237 | |||
1238 | struct c2wr_buf_free_rep { | ||
1239 | struct c2wr_hdr hdr; | ||
1240 | } __attribute__((packed)); | ||
1241 | |||
1242 | union c2wr_buf_free { | ||
1243 | struct c2wr_buf_free_req req; | ||
1244 | struct c2wr_ce rep; | ||
1245 | } __attribute__((packed)); | ||
1246 | |||
1247 | struct c2wr_flash_write_req { | ||
1248 | struct c2wr_hdr hdr; | ||
1249 | u32 rnic_handle; | ||
1250 | u32 offset; | ||
1251 | u32 size; | ||
1252 | u32 type; | ||
1253 | u32 flags; | ||
1254 | } __attribute__((packed)); | ||
1255 | |||
1256 | struct c2wr_flash_write_rep { | ||
1257 | struct c2wr_hdr hdr; | ||
1258 | u32 status; | ||
1259 | } __attribute__((packed)); | ||
1260 | |||
1261 | union c2wr_flash_write { | ||
1262 | struct c2wr_flash_write_req req; | ||
1263 | struct c2wr_flash_write_rep rep; | ||
1264 | } __attribute__((packed)); | ||
1265 | |||
1266 | /* | ||
1267 | * Messages for LLP connection setup. | ||
1268 | */ | ||
1269 | |||
1270 | /* | ||
1271 | * Listen Request. This allocates a listening endpoint to allow passive | ||
1272 | * connection setup. Newly established LLP connections are passed up | ||
1273 | * via an AE. See struct c2wr_ae_connection_request. | ||
1274 | */ | ||
1275 | struct c2wr_ep_listen_create_req { | ||
1276 | struct c2wr_hdr hdr; | ||
1277 | u64 user_context; /* returned in AEs. */ | ||
1278 | u32 rnic_handle; | ||
1279 | u32 local_addr; /* local addr, or 0 */ | ||
1280 | u16 local_port; /* 0 means "pick one" */ | ||
1281 | u16 pad; | ||
1282 | u32 backlog; /* traditional TCP listen backlog */ | ||
1283 | } __attribute__((packed)); | ||
1284 | |||
1285 | struct c2wr_ep_listen_create_rep { | ||
1286 | struct c2wr_hdr hdr; | ||
1287 | u32 ep_handle; /* handle to new listening ep */ | ||
1288 | u16 local_port; /* resulting port... */ | ||
1289 | u16 pad; | ||
1290 | } __attribute__((packed)); | ||
1291 | |||
1292 | union c2wr_ep_listen_create { | ||
1293 | struct c2wr_ep_listen_create_req req; | ||
1294 | struct c2wr_ep_listen_create_rep rep; | ||
1295 | } __attribute__((packed)); | ||
1296 | |||
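Passive setup therefore amounts to posting a listen-create WR and then waiting for CCAE_CONNECTION_REQUEST events. A sketch under the same assumptions as the earlier examples:

    /* Sketch: listen on any local address and let the adapter pick
     * the port (0 means "pick one", per the comment above). */
    static void build_listen(struct c2wr_ep_listen_create_req *wr,
                             u32 rnic_handle, u64 ctx)
    {
            memset(wr, 0, sizeof(*wr));
            c2_wr_set_id(wr, CCWR_EP_LISTEN_CREATE);
            wr->rnic_handle = rnic_handle;
            wr->user_context = ctx;  /* echoed back in AEs */
            wr->local_addr = 0;
            wr->local_port = 0;
            wr->backlog = 8;         /* like the TCP listen backlog */
            /* The reply carries ep_handle and the port actually bound. */
    }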
1297 | struct c2wr_ep_listen_destroy_req { | ||
1298 | struct c2wr_hdr hdr; | ||
1299 | u32 rnic_handle; | ||
1300 | u32 ep_handle; | ||
1301 | } __attribute__((packed)); | ||
1302 | |||
1303 | struct c2wr_ep_listen_destroy_rep { | ||
1304 | struct c2wr_hdr hdr; | ||
1305 | } __attribute__((packed)); | ||
1306 | |||
1307 | union c2wr_ep_listen_destroy { | ||
1308 | struct c2wr_ep_listen_destroy_req req; | ||
1309 | struct c2wr_ep_listen_destroy_rep rep; | ||
1310 | } __attribute__((packed)); | ||
1311 | |||
1312 | struct c2wr_ep_query_req { | ||
1313 | struct c2wr_hdr hdr; | ||
1314 | u32 rnic_handle; | ||
1315 | u32 ep_handle; | ||
1316 | } __attribute__((packed)); | ||
1317 | |||
1318 | struct c2wr_ep_query_rep { | ||
1319 | struct c2wr_hdr hdr; | ||
1320 | u32 rnic_handle; | ||
1321 | u32 local_addr; | ||
1322 | u32 remote_addr; | ||
1323 | u16 local_port; | ||
1324 | u16 remote_port; | ||
1325 | } __attribute__((packed)); | ||
1326 | |||
1327 | union c2wr_ep_query { | ||
1328 | struct c2wr_ep_query_req req; | ||
1329 | struct c2wr_ep_query_rep rep; | ||
1330 | } __attribute__((packed)); | ||
1331 | |||
1332 | |||
1333 | /* | ||
1334 | * The host passes this down to indicate acceptance of a pending iWARP | ||
1335 | * connection. The cr_handle was obtained from the CONNECTION_REQUEST | ||
1336 | * AE passed up by the adapter. See struct c2wr_ae_connection_request. | ||
1337 | */ | ||
1338 | struct c2wr_cr_accept_req { | ||
1339 | struct c2wr_hdr hdr; | ||
1340 | u32 rnic_handle; | ||
1341 | u32 qp_handle; /* QP to bind to this LLP conn */ | ||
1342 | u32 ep_handle; /* LLP handle to accept */ | ||
1343 | u32 private_data_length; | ||
1344 | u8 private_data[0]; /* data in-line in msg. */ | ||
1345 | } __attribute__((packed)); | ||
1346 | |||
1347 | /* | ||
1348 | * The adapter sends a reply when the private data has been successfully | ||
1349 | * submitted to the LLP. | ||
1350 | */ | ||
1351 | struct c2wr_cr_accept_rep { | ||
1352 | struct c2wr_hdr hdr; | ||
1353 | } __attribute__((packed)); | ||
1354 | |||
1355 | union c2wr_cr_accept { | ||
1356 | struct c2wr_cr_accept_req req; | ||
1357 | struct c2wr_cr_accept_rep rep; | ||
1358 | } __attribute__((packed)); | ||
1359 | |||
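Acceptance thus echoes the cr_handle from the connection-request AE back as ep_handle, binds a QP, and may attach private data in-line. A hedged sketch:

    /* Sketch: accept a pending iWARP connection on a bound QP. */
    static int build_accept(struct c2wr_cr_accept_req **out, u32 rnic_handle,
                            u32 qp_handle, u32 cr_handle,
                            const void *pdata, u32 pdata_len)
    {
            struct c2wr_cr_accept_req *wr;

            wr = kmalloc(sizeof(*wr) + pdata_len, GFP_KERNEL);
            if (!wr)
                    return -ENOMEM;
            c2_wr_set_id(wr, CCWR_CR_ACCEPT);
            wr->rnic_handle = rnic_handle;
            wr->qp_handle = qp_handle;  /* QP to bind to this connection */
            wr->ep_handle = cr_handle;  /* from CCAE_CONNECTION_REQUEST */
            wr->private_data_length = pdata_len;
            memcpy(wr->private_data, pdata, pdata_len);
            *out = wr;
            return 0;
    }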
1360 | /* | ||
1361 | * The host sends this down if a given iWARP connection request was | ||
1362 | * rejected by the consumer. The cr_handle was obtained from a | ||
1363 | * previous struct c2wr_ae_connection_request AE sent by the adapter. | ||
1364 | */ | ||
1365 | struct c2wr_cr_reject_req { | ||
1366 | struct c2wr_hdr hdr; | ||
1367 | u32 rnic_handle; | ||
1368 | u32 ep_handle; /* LLP handle to reject */ | ||
1369 | } __attribute__((packed)); | ||
1370 | |||
1371 | /* | ||
1372 | * It is unclear whether this is needed, but we'll add it for now. The adapter will | ||
1373 | * send the reject_reply after the LLP endpoint has been destroyed. | ||
1374 | */ | ||
1375 | struct c2wr_cr_reject_rep { | ||
1376 | struct c2wr_hdr hdr; | ||
1377 | } __attribute__((packed)); | ||
1378 | |||
1379 | union c2wr_cr_reject { | ||
1380 | struct c2wr_cr_reject_req req; | ||
1381 | struct c2wr_cr_reject_rep rep; | ||
1382 | } __attribute__((packed)); | ||
1383 | |||
1384 | /* | ||
1385 | * Console command. Used to implement a debug console over the verbs | ||
1386 | * request and reply queues. | ||
1387 | */ | ||
1388 | |||
1389 | /* | ||
1390 | * Console request message. It contains: | ||
1391 | * - message hdr with id = CCWR_CONSOLE | ||
1392 | * - the physaddr/len of host memory to be used for the reply. | ||
1393 | * - the command string, e.g. "netstat -s" or "zoneinfo" | ||
1394 | */ | ||
1395 | struct c2wr_console_req { | ||
1396 | struct c2wr_hdr hdr; /* id = CCWR_CONSOLE */ | ||
1397 | u64 reply_buf; /* pinned host buf for reply */ | ||
1398 | u32 reply_buf_len; /* length of reply buffer */ | ||
1399 | u8 command[0]; /* NUL-terminated ASCII string */ | ||
1400 | /* containing the command req */ | ||
1401 | } __attribute__((packed)); | ||
1402 | |||
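A sketch of building such a request (DMA mapping of the pinned reply buffer is elided; CCWR_CONSOLE is the id named in the comment above):

    /* Sketch: issue "netstat -s" to the adapter's debug console. */
    static int build_console_req(struct c2wr_console_req **out,
                                 u64 reply_buf_dma, u32 reply_buf_len,
                                 const char *cmd)
    {
            struct c2wr_console_req *wr;

            wr = kmalloc(sizeof(*wr) + strlen(cmd) + 1, GFP_KERNEL);
            if (!wr)
                    return -ENOMEM;
            c2_wr_set_id(wr, CCWR_CONSOLE);
            wr->reply_buf = reply_buf_dma;      /* pinned host memory */
            wr->reply_buf_len = reply_buf_len;
            strcpy((char *) wr->command, cmd);  /* NUL-terminated */
            /* On completion, hdr.result says whether a reply was
             * generated; CONS_REPLY_TRUNCATED flags truncation. */
            *out = wr;
            return 0;
    }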
1403 | /* | ||
1404 | * flags used in the console reply. | ||
1405 | */ | ||
1406 | enum c2_console_flags { | ||
1407 | CONS_REPLY_TRUNCATED = 0x00000001 /* reply was truncated */ | ||
1408 | } __attribute__((packed)); | ||
1409 | |||
1410 | /* | ||
1411 | * Console reply message. | ||
1412 | * hdr.result contains the c2_status_t error if the reply was _not_ generated, | ||
1413 | * or C2_OK if the reply was generated. | ||
1414 | */ | ||
1415 | struct c2wr_console_rep { | ||
1416 | struct c2wr_hdr hdr; /* id = CCWR_CONSOLE */ | ||
1417 | u32 flags; | ||
1418 | } __attribute__((packed)); | ||
1419 | |||
1420 | union c2wr_console { | ||
1421 | struct c2wr_console_req req; | ||
1422 | struct c2wr_console_rep rep; | ||
1423 | } __attribute__((packed)); | ||
1424 | |||
1425 | |||
1426 | /* | ||
1427 | * Giant union with all WRs. Makes life easier... | ||
1428 | */ | ||
1429 | union c2wr { | ||
1430 | struct c2wr_hdr hdr; | ||
1431 | struct c2wr_user_hdr user_hdr; | ||
1432 | union c2wr_rnic_open rnic_open; | ||
1433 | union c2wr_rnic_query rnic_query; | ||
1434 | union c2wr_rnic_getconfig rnic_getconfig; | ||
1435 | union c2wr_rnic_setconfig rnic_setconfig; | ||
1436 | union c2wr_rnic_close rnic_close; | ||
1437 | union c2wr_cq_create cq_create; | ||
1438 | union c2wr_cq_modify cq_modify; | ||
1439 | union c2wr_cq_destroy cq_destroy; | ||
1440 | union c2wr_pd_alloc pd_alloc; | ||
1441 | union c2wr_pd_dealloc pd_dealloc; | ||
1442 | union c2wr_srq_create srq_create; | ||
1443 | union c2wr_srq_destroy srq_destroy; | ||
1444 | union c2wr_qp_create qp_create; | ||
1445 | union c2wr_qp_query qp_query; | ||
1446 | union c2wr_qp_modify qp_modify; | ||
1447 | union c2wr_qp_destroy qp_destroy; | ||
1448 | struct c2wr_qp_connect qp_connect; | ||
1449 | union c2wr_nsmr_stag_alloc nsmr_stag_alloc; | ||
1450 | union c2wr_nsmr_register nsmr_register; | ||
1451 | union c2wr_nsmr_pbl nsmr_pbl; | ||
1452 | union c2wr_mr_query mr_query; | ||
1453 | union c2wr_mw_query mw_query; | ||
1454 | union c2wr_stag_dealloc stag_dealloc; | ||
1455 | union c2wr_sqwr sqwr; | ||
1456 | struct c2wr_rqwr rqwr; | ||
1457 | struct c2wr_ce ce; | ||
1458 | union c2wr_ae ae; | ||
1459 | union c2wr_init init; | ||
1460 | union c2wr_ep_listen_create ep_listen_create; | ||
1461 | union c2wr_ep_listen_destroy ep_listen_destroy; | ||
1462 | union c2wr_cr_accept cr_accept; | ||
1463 | union c2wr_cr_reject cr_reject; | ||
1464 | union c2wr_console console; | ||
1465 | union c2wr_flash_init flash_init; | ||
1466 | union c2wr_flash flash; | ||
1467 | union c2wr_buf_alloc buf_alloc; | ||
1468 | union c2wr_buf_free buf_free; | ||
1469 | union c2wr_flash_write flash_write; | ||
1470 | } __attribute__((packed)); | ||
1471 | |||
1472 | |||
1473 | /* | ||
1474 | * Accessors for the wr fields that are packed together tightly to | ||
1475 | * reduce the wr message size. The wr arguments are void* so that | ||
1476 | * either a union c2wr *, a struct c2wr_hdr *, or a pointer to any of the types | ||
1477 | * in the union c2wr can be passed in. | ||
1478 | */ | ||
1479 | static __inline__ u8 c2_wr_get_id(void *wr) | ||
1480 | { | ||
1481 | return ((struct c2wr_hdr *) wr)->id; | ||
1482 | } | ||
1483 | static __inline__ void c2_wr_set_id(void *wr, u8 id) | ||
1484 | { | ||
1485 | ((struct c2wr_hdr *) wr)->id = id; | ||
1486 | } | ||
1487 | static __inline__ u8 c2_wr_get_result(void *wr) | ||
1488 | { | ||
1489 | return ((struct c2wr_hdr *) wr)->result; | ||
1490 | } | ||
1491 | static __inline__ void c2_wr_set_result(void *wr, u8 result) | ||
1492 | { | ||
1493 | ((struct c2wr_hdr *) wr)->result = result; | ||
1494 | } | ||
1495 | static __inline__ u8 c2_wr_get_flags(void *wr) | ||
1496 | { | ||
1497 | return ((struct c2wr_hdr *) wr)->flags; | ||
1498 | } | ||
1499 | static __inline__ void c2_wr_set_flags(void *wr, u8 flags) | ||
1500 | { | ||
1501 | ((struct c2wr_hdr *) wr)->flags = flags; | ||
1502 | } | ||
1503 | static __inline__ u8 c2_wr_get_sge_count(void *wr) | ||
1504 | { | ||
1505 | return ((struct c2wr_hdr *) wr)->sge_count; | ||
1506 | } | ||
1507 | static __inline__ void c2_wr_set_sge_count(void *wr, u8 sge_count) | ||
1508 | { | ||
1509 | ((struct c2wr_hdr *) wr)->sge_count = sge_count; | ||
1510 | } | ||
1511 | static __inline__ u32 c2_wr_get_wqe_count(void *wr) | ||
1512 | { | ||
1513 | return ((struct c2wr_hdr *) wr)->wqe_count; | ||
1514 | } | ||
1515 | static __inline__ void c2_wr_set_wqe_count(void *wr, u32 wqe_count) | ||
1516 | { | ||
1517 | ((struct c2wr_hdr *) wr)->wqe_count = wqe_count; | ||
1518 | } | ||
1519 | |||
1520 | #endif /* _C2_WR_H_ */ | ||
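Because every message begins with struct c2wr_hdr, the same accessors apply to any WR type; a short usage sketch (CCWR_SEND is assumed from the id enum earlier in this header):

    /* Sketch: stamp an SQ WR and an RQ WR through the void * accessors. */
    static void stamp_wrs(union c2wr_sqwr *sq, struct c2wr_rqwr *rq)
    {
            c2_wr_set_id(sq, CCWR_SEND);      /* writes the embedded hdr */
            c2_wr_set_flags(sq, SQ_SIGNALED);
            c2_wr_set_sge_count(rq, 1);       /* same accessor, RQ WR */
    }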