Diffstat (limited to 'drivers/ntb/ntb_hw.c')
-rw-r--r-- | drivers/ntb/ntb_hw.c | 1141 |
1 files changed, 1141 insertions, 0 deletions
diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c
new file mode 100644
index 000000000000..f802e7c92356
--- /dev/null
+++ b/drivers/ntb/ntb_hw.c
@@ -0,0 +1,1141 @@ | |||
1 | /* | ||
2 | * This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | * redistributing this file, you may do so under either license. | ||
4 | * | ||
5 | * GPL LICENSE SUMMARY | ||
6 | * | ||
7 | * Copyright(c) 2012 Intel Corporation. All rights reserved. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of version 2 of the GNU General Public License as | ||
11 | * published by the Free Software Foundation. | ||
12 | * | ||
13 | * BSD LICENSE | ||
14 | * | ||
15 | * Copyright(c) 2012 Intel Corporation. All rights reserved. | ||
16 | * | ||
17 | * Redistribution and use in source and binary forms, with or without | ||
18 | * modification, are permitted provided that the following conditions | ||
19 | * are met: | ||
20 | * | ||
21 | * * Redistributions of source code must retain the above copyright | ||
22 | * notice, this list of conditions and the following disclaimer. | ||
23 | * * Redistributions in binary form must reproduce the above copyright | ||
24 | * notice, this list of conditions and the following disclaimer in | ||
25 | * the documentation and/or other materials provided with the | ||
26 | * distribution. | ||
27 | * * Neither the name of Intel Corporation nor the names of its | ||
28 | * contributors may be used to endorse or promote products derived | ||
29 | * from this software without specific prior written permission. | ||
30 | * | ||
31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
32 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
33 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
34 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
35 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
36 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
37 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
38 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
39 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
40 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
41 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
42 | * | ||
43 | * Intel PCIe NTB Linux driver | ||
44 | * | ||
45 | * Contact Information: | ||
46 | * Jon Mason <jon.mason@intel.com> | ||
47 | */ | ||
48 | #include <linux/debugfs.h> | ||
49 | #include <linux/init.h> | ||
50 | #include <linux/interrupt.h> | ||
51 | #include <linux/module.h> | ||
52 | #include <linux/pci.h> | ||
53 | #include <linux/slab.h> | ||
54 | #include "ntb_hw.h" | ||
55 | #include "ntb_regs.h" | ||
56 | |||
57 | #define NTB_NAME "Intel(R) PCI-E Non-Transparent Bridge Driver" | ||
58 | #define NTB_VER "0.25" | ||
59 | |||
60 | MODULE_DESCRIPTION(NTB_NAME); | ||
61 | MODULE_VERSION(NTB_VER); | ||
62 | MODULE_LICENSE("Dual BSD/GPL"); | ||
63 | MODULE_AUTHOR("Intel Corporation"); | ||
64 | |||
65 | enum { | ||
66 | NTB_CONN_CLASSIC = 0, | ||
67 | NTB_CONN_B2B, | ||
68 | NTB_CONN_RP, | ||
69 | }; | ||
70 | |||
71 | enum { | ||
72 | NTB_DEV_USD = 0, | ||
73 | NTB_DEV_DSD, | ||
74 | }; | ||
75 | |||
76 | enum { | ||
77 | SNB_HW = 0, | ||
78 | BWD_HW, | ||
79 | }; | ||
80 | |||
81 | /* Translate memory window 0,1 to BAR 2,4 */ | ||
82 | #define MW_TO_BAR(mw) ((mw) * 2 + 2) | ||
83 | |||
84 | static DEFINE_PCI_DEVICE_TABLE(ntb_pci_tbl) = { | ||
85 | {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)}, | ||
86 | {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)}, | ||
87 | {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_CLASSIC_JSF)}, | ||
88 | {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_RP_JSF)}, | ||
89 | {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_RP_SNB)}, | ||
90 | {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)}, | ||
91 | {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_CLASSIC_SNB)}, | ||
92 | {0} | ||
93 | }; | ||
94 | MODULE_DEVICE_TABLE(pci, ntb_pci_tbl); | ||
95 | |||
96 | /** | ||
97 | * ntb_register_event_callback() - register event callback | ||
98 | * @ndev: pointer to ntb_device instance | ||
99 | * @func: callback function to register | ||
100 | * | ||
101 | * This function registers a callback for any HW driver events such as link | ||
102 | * up/down, power management notices, etc. | ||
103 | * | ||
104 | * RETURNS: An appropriate -ERRNO error value on error, or zero for success. | ||
105 | */ | ||
106 | int ntb_register_event_callback(struct ntb_device *ndev, | ||
107 | void (*func)(void *handle, enum ntb_hw_event event)) | ||
108 | { | ||
109 | if (ndev->event_cb) | ||
110 | return -EINVAL; | ||
111 | |||
112 | ndev->event_cb = func; | ||
113 | |||
114 | return 0; | ||
115 | } | ||
116 | |||
117 | /** | ||
118 | * ntb_unregister_event_callback() - unregisters the event callback | ||
119 | * @ndev: pointer to ntb_device instance | ||
120 | * | ||
121 | * This function unregisters the existing callback from transport | ||
122 | */ | ||
123 | void ntb_unregister_event_callback(struct ntb_device *ndev) | ||
124 | { | ||
125 | ndev->event_cb = NULL; | ||
126 | } | ||
127 | |||
128 | /** | ||
129 | * ntb_register_db_callback() - register a callback for doorbell interrupt | ||
130 | * @ndev: pointer to ntb_device instance | ||
131 | * @idx: doorbell index to register callback, zero based | ||
132 | * @func: callback function to register | ||
133 | * | ||
134 | * This function registers a callback function for the doorbell interrupt | ||
135 | * on the primary side. The function will also unmask the doorbell to | ||
136 | * allow the interrupt to fire. | ||
137 | * | ||
138 | * RETURNS: An appropriate -ERRNO error value on error, or zero for success. | ||
139 | */ | ||
140 | int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx, | ||
141 | void *data, void (*func)(void *data, int db_num)) | ||
142 | { | ||
143 | unsigned long mask; | ||
144 | |||
145 | if (idx >= ndev->max_cbs || ndev->db_cb[idx].callback) { | ||
146 | dev_warn(&ndev->pdev->dev, "Invalid Index.\n"); | ||
147 | return -EINVAL; | ||
148 | } | ||
149 | |||
150 | ndev->db_cb[idx].callback = func; | ||
151 | ndev->db_cb[idx].data = data; | ||
152 | |||
153 | /* unmask interrupt */ | ||
154 | mask = readw(ndev->reg_ofs.pdb_mask); | ||
155 | clear_bit(idx * ndev->bits_per_vector, &mask); | ||
156 | writew(mask, ndev->reg_ofs.pdb_mask); | ||
157 | |||
158 | return 0; | ||
159 | } | ||
160 | |||
161 | /** | ||
162 | * ntb_unregister_db_callback() - unregister a callback for doorbell interrupt | ||
163 | * @ndev: pointer to ntb_device instance | ||
164 | * @idx: doorbell index to unregister, zero based | ||
165 | * | ||
166 | * This function unregisters a callback function for the doorbell interrupt | ||
167 | * on the primary side. The function will also mask the said doorbell. | ||
168 | */ | ||
169 | void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx) | ||
170 | { | ||
171 | unsigned long mask; | ||
172 | |||
173 | if (idx >= ndev->max_cbs || !ndev->db_cb[idx].callback) | ||
174 | return; | ||
175 | |||
176 | mask = readw(ndev->reg_ofs.pdb_mask); | ||
177 | set_bit(idx * ndev->bits_per_vector, &mask); | ||
178 | writew(mask, ndev->reg_ofs.pdb_mask); | ||
179 | |||
180 | ndev->db_cb[idx].callback = NULL; | ||
181 | } | ||
182 | |||
183 | /** | ||
184 | * ntb_find_transport() - find the transport pointer | ||
186 | * @pdev: pointer to pci device | ||
186 | * | ||
188 | * Given the pci device pointer, return the transport pointer that was passed | ||
189 | * in when the transport was registered. | ||
189 | * | ||
190 | * RETURNS: pointer to transport. | ||
191 | */ | ||
192 | void *ntb_find_transport(struct pci_dev *pdev) | ||
193 | { | ||
194 | struct ntb_device *ndev = pci_get_drvdata(pdev); | ||
195 | return ndev->ntb_transport; | ||
196 | } | ||
197 | |||
198 | /** | ||
199 | * ntb_register_transport() - Register NTB transport with NTB HW driver | ||
200 | * @transport: transport identifier | ||
201 | * | ||
202 | * This function allows a transport to reserve the hardware driver for | ||
203 | * NTB usage. | ||
204 | * | ||
205 | * RETURNS: pointer to ntb_device, NULL on error. | ||
206 | */ | ||
207 | struct ntb_device *ntb_register_transport(struct pci_dev *pdev, void *transport) | ||
208 | { | ||
209 | struct ntb_device *ndev = pci_get_drvdata(pdev); | ||
210 | |||
211 | if (ndev->ntb_transport) | ||
212 | return NULL; | ||
213 | |||
214 | ndev->ntb_transport = transport; | ||
215 | return ndev; | ||
216 | } | ||
217 | |||
218 | /** | ||
219 | * ntb_unregister_transport() - Unregister the transport with the NTB HW driver | ||
220 | * @ndev: ntb_device of the transport to be freed | ||
221 | * | ||
222 | * This function unregisters the transport from the HW driver and performs any | ||
223 | * necessary cleanups. | ||
224 | */ | ||
225 | void ntb_unregister_transport(struct ntb_device *ndev) | ||
226 | { | ||
227 | int i; | ||
228 | |||
229 | if (!ndev->ntb_transport) | ||
230 | return; | ||
231 | |||
232 | for (i = 0; i < ndev->max_cbs; i++) | ||
233 | ntb_unregister_db_callback(ndev, i); | ||
234 | |||
235 | ntb_unregister_event_callback(ndev); | ||
236 | ndev->ntb_transport = NULL; | ||
237 | } | ||
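
For orientation, a hedged sketch of how a transport client might bind to the hooks above; the my_* names are hypothetical, and only the declarations assumed to be in ntb_hw.h are relied on:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include "ntb_hw.h"

static void my_db_handler(void *data, int db_num)
{
        /* "data" is the pointer passed to ntb_register_db_callback() */
        pr_debug("doorbell %d rang\n", db_num);
}

static void my_event_handler(void *handle, enum ntb_hw_event event)
{
        /* "handle" is the transport pointer given to ntb_register_transport() */
        if (event == NTB_EVENT_HW_LINK_UP)
                pr_info("NTB hardware link is up\n");
}

static int my_transport_attach(struct pci_dev *pdev, void *transport)
{
        struct ntb_device *ndev;
        int rc;

        /* Reserve the hardware driver for this transport */
        ndev = ntb_register_transport(pdev, transport);
        if (!ndev)
                return -EBUSY;

        rc = ntb_register_event_callback(ndev, my_event_handler);
        if (rc)
                goto err;

        /* Also unmasks doorbell 0 */
        rc = ntb_register_db_callback(ndev, 0, transport, my_db_handler);
        if (rc)
                goto err;

        return 0;

err:
        /* Unregisters any callbacks and releases the hardware driver */
        ntb_unregister_transport(ndev);
        return rc;
}
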
238 | |||
239 | /** | ||
240 | * ntb_write_local_spad() - write to the local scratchpad register | ||
241 | * @ndev: pointer to ntb_device instance | ||
242 | * @idx: index to the scratchpad register, 0 based | ||
243 | * @val: the data value to put into the register | ||
244 | * | ||
245 | * This function allows writing of a 32bit value to the indexed scratchpad | ||
246 | * register. This writes over the data mirrored to the local scratchpad register | ||
247 | * by the remote system. | ||
248 | * | ||
249 | * RETURNS: An appropriate -ERRNO error value on error, or zero for success. | ||
250 | */ | ||
251 | int ntb_write_local_spad(struct ntb_device *ndev, unsigned int idx, u32 val) | ||
252 | { | ||
253 | if (idx >= ndev->limits.max_spads) | ||
254 | return -EINVAL; | ||
255 | |||
256 | dev_dbg(&ndev->pdev->dev, "Writing %x to local scratch pad index %d\n", | ||
257 | val, idx); | ||
258 | writel(val, ndev->reg_ofs.spad_read + idx * 4); | ||
259 | |||
260 | return 0; | ||
261 | } | ||
262 | |||
263 | /** | ||
264 | * ntb_read_local_spad() - read from the primary scratchpad register | ||
265 | * @ndev: pointer to ntb_device instance | ||
266 | * @idx: index to scratchpad register, 0 based | ||
267 | * @val: pointer to 32bit integer for storing the register value | ||
268 | * | ||
269 | * This function allows reading of the 32bit scratchpad register on | ||
270 | * the primary (internal) side. This allows the local system to read data | ||
271 | * written and mirrored to the scratchpad register by the remote system. | ||
272 | * | ||
273 | * RETURNS: An appropriate -ERRNO error value on error, or zero for success. | ||
274 | */ | ||
275 | int ntb_read_local_spad(struct ntb_device *ndev, unsigned int idx, u32 *val) | ||
276 | { | ||
277 | if (idx >= ndev->limits.max_spads) | ||
278 | return -EINVAL; | ||
279 | |||
280 | *val = readl(ndev->reg_ofs.spad_write + idx * 4); | ||
281 | dev_dbg(&ndev->pdev->dev, | ||
282 | "Reading %x from local scratch pad index %d\n", *val, idx); | ||
283 | |||
284 | return 0; | ||
285 | } | ||
286 | |||
287 | /** | ||
288 | * ntb_write_remote_spad() - write to the secondary scratchpad register | ||
289 | * @ndev: pointer to ntb_device instance | ||
290 | * @idx: index to the scratchpad register, 0 based | ||
291 | * @val: the data value to put into the register | ||
292 | * | ||
293 | * This function allows writing of a 32bit value to the indexed scratchpad | ||
294 | * register. The register resides on the secondary (external) side. This allows | ||
295 | * the local system to write data to be mirrored to the remote system's | ||
296 | * scratchpad register. | ||
297 | * | ||
298 | * RETURNS: An appropriate -ERRNO error value on error, or zero for success. | ||
299 | */ | ||
300 | int ntb_write_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 val) | ||
301 | { | ||
302 | if (idx >= ndev->limits.max_spads) | ||
303 | return -EINVAL; | ||
304 | |||
305 | dev_dbg(&ndev->pdev->dev, "Writing %x to remote scratch pad index %d\n", | ||
306 | val, idx); | ||
307 | writel(val, ndev->reg_ofs.spad_write + idx * 4); | ||
308 | |||
309 | return 0; | ||
310 | } | ||
311 | |||
312 | /** | ||
313 | * ntb_read_remote_spad() - read from the primary scratchpad register | ||
314 | * @ndev: pointer to ntb_device instance | ||
315 | * @idx: index to scratchpad register, 0 based | ||
316 | * @val: pointer to 32bit integer for storing the register value | ||
317 | * | ||
318 | * This function allows reading of the 32bit scratchpad register on | ||
319 | * the primary (internal) side. This allows the local system to read the data | ||
320 | * it wrote to be mirrored on the remote system. | ||
321 | * | ||
322 | * RETURNS: An appropriate -ERRNO error value on error, or zero for success. | ||
323 | */ | ||
324 | int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val) | ||
325 | { | ||
326 | if (idx >= ndev->limits.max_spads) | ||
327 | return -EINVAL; | ||
328 | |||
329 | *val = readl(ndev->reg_ofs.spad_read + idx * 4); | ||
330 | dev_dbg(&ndev->pdev->dev, | ||
331 | "Reading %x from remote scratch pad index %d\n", *val, idx); | ||
332 | |||
333 | return 0; | ||
334 | } | ||
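
As an illustration of the scratchpad API above, a transport-style version handshake might look roughly like this; MY_VERSION_SPAD and MY_VERSION are hypothetical values, not defined by this driver:

#include <linux/errno.h>
#include <linux/types.h>
#include "ntb_hw.h"

#define MY_VERSION_SPAD 0
#define MY_VERSION      1

static int my_spad_handshake(struct ntb_device *ndev)
{
        u32 peer_ver;
        int rc;

        /* Publish our version; it is mirrored into the peer's scratchpad */
        rc = ntb_write_remote_spad(ndev, MY_VERSION_SPAD, MY_VERSION);
        if (rc)
                return rc;

        /* Read the version the peer mirrored into our scratchpad */
        rc = ntb_read_remote_spad(ndev, MY_VERSION_SPAD, &peer_ver);
        if (rc)
                return rc;

        return peer_ver == MY_VERSION ? 0 : -ENODEV;
}
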
335 | |||
336 | /** | ||
337 | * ntb_get_mw_vbase() - get virtual addr for the NTB memory window | ||
338 | * @ndev: pointer to ntb_device instance | ||
339 | * @mw: memory window number | ||
340 | * | ||
341 | * This function provides the base virtual address of the memory window | ||
342 | * specified. | ||
343 | * | ||
344 | * RETURNS: pointer to virtual address, or NULL on error. | ||
345 | */ | ||
346 | void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw) | ||
347 | { | ||
348 | if (mw >= NTB_NUM_MW) | ||
349 | return NULL; | ||
350 | |||
351 | return ndev->mw[mw].vbase; | ||
352 | } | ||
353 | |||
354 | /** | ||
355 | * ntb_get_mw_size() - return size of NTB memory window | ||
356 | * @ndev: pointer to ntb_device instance | ||
357 | * @mw: memory window number | ||
358 | * | ||
359 | * This function provides the physical size of the memory window specified | ||
360 | * | ||
361 | * RETURNS: the size of the memory window or zero on error | ||
362 | */ | ||
363 | resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw) | ||
364 | { | ||
365 | if (mw >= NTB_NUM_MW) | ||
366 | return 0; | ||
367 | |||
368 | return ndev->mw[mw].bar_sz; | ||
369 | } | ||
370 | |||
371 | /** | ||
372 | * ntb_set_mw_addr - set the memory window address | ||
373 | * @ndev: pointer to ntb_device instance | ||
374 | * @mw: memory window number | ||
375 | * @addr: base address for data | ||
376 | * | ||
377 | * This function sets the base physical address of the memory window. This | ||
378 | * memory address is where data from the remote system will be transferred into | ||
379 | * or out of depending on how the transport is configured. | ||
380 | */ | ||
381 | void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr) | ||
382 | { | ||
383 | if (mw >= NTB_NUM_MW) | ||
384 | return; | ||
385 | |||
386 | dev_dbg(&ndev->pdev->dev, "Writing addr %Lx to BAR %d\n", addr, | ||
387 | MW_TO_BAR(mw)); | ||
388 | |||
389 | ndev->mw[mw].phys_addr = addr; | ||
390 | |||
391 | switch (MW_TO_BAR(mw)) { | ||
392 | case NTB_BAR_23: | ||
393 | writeq(addr, ndev->reg_ofs.sbar2_xlat); | ||
394 | break; | ||
395 | case NTB_BAR_45: | ||
396 | writeq(addr, ndev->reg_ofs.sbar4_xlat); | ||
397 | break; | ||
398 | } | ||
399 | } | ||
400 | |||
401 | /** | ||
402 | * ntb_ring_sdb() - Set the doorbell on the secondary/external side | ||
403 | * @ndev: pointer to ntb_device instance | ||
404 | * @db: doorbell to ring | ||
405 | * | ||
406 | * This function allows triggering of a doorbell on the secondary/external | ||
407 | * side that will initiate an interrupt on the remote host. | ||
410 | */ | ||
411 | void ntb_ring_sdb(struct ntb_device *ndev, unsigned int db) | ||
412 | { | ||
413 | dev_dbg(&ndev->pdev->dev, "%s: ringing doorbell %d\n", __func__, db); | ||
414 | |||
415 | if (ndev->hw_type == BWD_HW) | ||
416 | writeq((u64) 1 << db, ndev->reg_ofs.sdb); | ||
417 | else | ||
418 | writew(((1 << ndev->bits_per_vector) - 1) << | ||
419 | (db * ndev->bits_per_vector), ndev->reg_ofs.sdb); | ||
420 | } | ||
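
A hedged sketch of how the memory-window and doorbell calls above might be combined by a client: point a window at a local DMA buffer, then ring a remote doorbell to announce it. The buffer, window number and doorbell number are hypothetical:

#include <linux/errno.h>
#include <linux/types.h>
#include "ntb_hw.h"

static int my_publish_rx_buffer(struct ntb_device *ndev, unsigned int mw,
                                u64 buf_dma)
{
        /* Incoming peer writes through its matching window land in buf_dma */
        ntb_set_mw_addr(ndev, mw, buf_dma);

        /* Sanity check that we can also reach the peer through our window */
        if (!ntb_get_mw_vbase(ndev, mw) || !ntb_get_mw_size(ndev, mw))
                return -EIO;

        /* Ring doorbell 0 on the remote side to announce the buffer */
        ntb_ring_sdb(ndev, 0);

        return 0;
}
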
421 | |||
422 | static void ntb_link_event(struct ntb_device *ndev, int link_state) | ||
423 | { | ||
424 | unsigned int event; | ||
425 | |||
426 | if (ndev->link_status == link_state) | ||
427 | return; | ||
428 | |||
429 | if (link_state == NTB_LINK_UP) { | ||
430 | u16 status; | ||
431 | |||
432 | dev_info(&ndev->pdev->dev, "Link Up\n"); | ||
433 | ndev->link_status = NTB_LINK_UP; | ||
434 | event = NTB_EVENT_HW_LINK_UP; | ||
435 | |||
436 | if (ndev->hw_type == BWD_HW) | ||
437 | status = readw(ndev->reg_ofs.lnk_stat); | ||
438 | else { | ||
439 | int rc = pci_read_config_word(ndev->pdev, | ||
440 | SNB_LINK_STATUS_OFFSET, | ||
441 | &status); | ||
442 | if (rc) | ||
443 | return; | ||
444 | } | ||
445 | dev_info(&ndev->pdev->dev, "Link Width %d, Link Speed %d\n", | ||
446 | (status & NTB_LINK_WIDTH_MASK) >> 4, | ||
447 | (status & NTB_LINK_SPEED_MASK)); | ||
448 | } else { | ||
449 | dev_info(&ndev->pdev->dev, "Link Down\n"); | ||
450 | ndev->link_status = NTB_LINK_DOWN; | ||
451 | event = NTB_EVENT_HW_LINK_DOWN; | ||
452 | } | ||
453 | |||
454 | /* notify the upper layer if we have an event change */ | ||
455 | if (ndev->event_cb) | ||
456 | ndev->event_cb(ndev->ntb_transport, event); | ||
457 | } | ||
458 | |||
459 | static int ntb_link_status(struct ntb_device *ndev) | ||
460 | { | ||
461 | int link_state; | ||
462 | |||
463 | if (ndev->hw_type == BWD_HW) { | ||
464 | u32 ntb_cntl; | ||
465 | |||
466 | ntb_cntl = readl(ndev->reg_ofs.lnk_cntl); | ||
467 | if (ntb_cntl & BWD_CNTL_LINK_DOWN) | ||
468 | link_state = NTB_LINK_DOWN; | ||
469 | else | ||
470 | link_state = NTB_LINK_UP; | ||
471 | } else { | ||
472 | u16 status; | ||
473 | int rc; | ||
474 | |||
475 | rc = pci_read_config_word(ndev->pdev, SNB_LINK_STATUS_OFFSET, | ||
476 | &status); | ||
477 | if (rc) | ||
478 | return rc; | ||
479 | |||
480 | if (status & NTB_LINK_STATUS_ACTIVE) | ||
481 | link_state = NTB_LINK_UP; | ||
482 | else | ||
483 | link_state = NTB_LINK_DOWN; | ||
484 | } | ||
485 | |||
486 | ntb_link_event(ndev, link_state); | ||
487 | |||
488 | return 0; | ||
489 | } | ||
490 | |||
491 | /* BWD doesn't have a link status interrupt, so poll on that platform */ | ||
492 | static void bwd_link_poll(struct work_struct *work) | ||
493 | { | ||
494 | struct ntb_device *ndev = container_of(work, struct ntb_device, | ||
495 | hb_timer.work); | ||
496 | unsigned long ts = jiffies; | ||
497 | |||
498 | /* If we haven't gotten an interrupt in a while, check the BWD link | ||
499 | * status bit | ||
500 | */ | ||
501 | if (ts > ndev->last_ts + NTB_HB_TIMEOUT) { | ||
502 | int rc = ntb_link_status(ndev); | ||
503 | if (rc) | ||
504 | dev_err(&ndev->pdev->dev, | ||
505 | "Error determining link status\n"); | ||
506 | } | ||
507 | |||
508 | schedule_delayed_work(&ndev->hb_timer, NTB_HB_TIMEOUT); | ||
509 | } | ||
510 | |||
511 | static int ntb_xeon_setup(struct ntb_device *ndev) | ||
512 | { | ||
513 | int rc; | ||
514 | u8 val; | ||
515 | |||
516 | ndev->hw_type = SNB_HW; | ||
517 | |||
518 | rc = pci_read_config_byte(ndev->pdev, NTB_PPD_OFFSET, &val); | ||
519 | if (rc) | ||
520 | return rc; | ||
521 | |||
522 | switch (val & SNB_PPD_CONN_TYPE) { | ||
523 | case NTB_CONN_B2B: | ||
524 | ndev->conn_type = NTB_CONN_B2B; | ||
525 | break; | ||
526 | case NTB_CONN_CLASSIC: | ||
527 | case NTB_CONN_RP: | ||
528 | default: | ||
529 | dev_err(&ndev->pdev->dev, "Only B2B supported at this time\n"); | ||
530 | return -EINVAL; | ||
531 | } | ||
532 | |||
533 | if (val & SNB_PPD_DEV_TYPE) | ||
534 | ndev->dev_type = NTB_DEV_DSD; | ||
535 | else | ||
536 | ndev->dev_type = NTB_DEV_USD; | ||
537 | |||
538 | ndev->reg_ofs.pdb = ndev->reg_base + SNB_PDOORBELL_OFFSET; | ||
539 | ndev->reg_ofs.pdb_mask = ndev->reg_base + SNB_PDBMSK_OFFSET; | ||
540 | ndev->reg_ofs.sbar2_xlat = ndev->reg_base + SNB_SBAR2XLAT_OFFSET; | ||
541 | ndev->reg_ofs.sbar4_xlat = ndev->reg_base + SNB_SBAR4XLAT_OFFSET; | ||
542 | ndev->reg_ofs.lnk_cntl = ndev->reg_base + SNB_NTBCNTL_OFFSET; | ||
543 | ndev->reg_ofs.lnk_stat = ndev->reg_base + SNB_LINK_STATUS_OFFSET; | ||
544 | ndev->reg_ofs.spad_read = ndev->reg_base + SNB_SPAD_OFFSET; | ||
545 | ndev->reg_ofs.spci_cmd = ndev->reg_base + SNB_PCICMD_OFFSET; | ||
546 | |||
547 | if (ndev->conn_type == NTB_CONN_B2B) { | ||
548 | ndev->reg_ofs.sdb = ndev->reg_base + SNB_B2B_DOORBELL_OFFSET; | ||
549 | ndev->reg_ofs.spad_write = ndev->reg_base + SNB_B2B_SPAD_OFFSET; | ||
550 | ndev->limits.max_spads = SNB_MAX_SPADS; | ||
551 | } else { | ||
552 | ndev->reg_ofs.sdb = ndev->reg_base + SNB_SDOORBELL_OFFSET; | ||
553 | ndev->reg_ofs.spad_write = ndev->reg_base + SNB_SPAD_OFFSET; | ||
554 | ndev->limits.max_spads = SNB_MAX_COMPAT_SPADS; | ||
555 | } | ||
556 | |||
557 | ndev->limits.max_db_bits = SNB_MAX_DB_BITS; | ||
558 | ndev->limits.msix_cnt = SNB_MSIX_CNT; | ||
559 | ndev->bits_per_vector = SNB_DB_BITS_PER_VEC; | ||
560 | |||
561 | return 0; | ||
562 | } | ||
563 | |||
564 | static int ntb_bwd_setup(struct ntb_device *ndev) | ||
565 | { | ||
566 | int rc; | ||
567 | u32 val; | ||
568 | |||
569 | ndev->hw_type = BWD_HW; | ||
570 | |||
571 | rc = pci_read_config_dword(ndev->pdev, NTB_PPD_OFFSET, &val); | ||
572 | if (rc) | ||
573 | return rc; | ||
574 | |||
575 | switch ((val & BWD_PPD_CONN_TYPE) >> 8) { | ||
576 | case NTB_CONN_B2B: | ||
577 | ndev->conn_type = NTB_CONN_B2B; | ||
578 | break; | ||
579 | case NTB_CONN_RP: | ||
580 | default: | ||
581 | dev_err(&ndev->pdev->dev, "Only B2B supported at this time\n"); | ||
582 | return -EINVAL; | ||
583 | } | ||
584 | |||
585 | if (val & BWD_PPD_DEV_TYPE) | ||
586 | ndev->dev_type = NTB_DEV_DSD; | ||
587 | else | ||
588 | ndev->dev_type = NTB_DEV_USD; | ||
589 | |||
590 | /* Initiate PCI-E link training */ | ||
591 | rc = pci_write_config_dword(ndev->pdev, NTB_PPD_OFFSET, | ||
592 | val | BWD_PPD_INIT_LINK); | ||
593 | if (rc) | ||
594 | return rc; | ||
595 | |||
596 | ndev->reg_ofs.pdb = ndev->reg_base + BWD_PDOORBELL_OFFSET; | ||
597 | ndev->reg_ofs.pdb_mask = ndev->reg_base + BWD_PDBMSK_OFFSET; | ||
598 | ndev->reg_ofs.sbar2_xlat = ndev->reg_base + BWD_SBAR2XLAT_OFFSET; | ||
599 | ndev->reg_ofs.sbar4_xlat = ndev->reg_base + BWD_SBAR4XLAT_OFFSET; | ||
600 | ndev->reg_ofs.lnk_cntl = ndev->reg_base + BWD_NTBCNTL_OFFSET; | ||
601 | ndev->reg_ofs.lnk_stat = ndev->reg_base + BWD_LINK_STATUS_OFFSET; | ||
602 | ndev->reg_ofs.spad_read = ndev->reg_base + BWD_SPAD_OFFSET; | ||
603 | ndev->reg_ofs.spci_cmd = ndev->reg_base + BWD_PCICMD_OFFSET; | ||
604 | |||
605 | if (ndev->conn_type == NTB_CONN_B2B) { | ||
606 | ndev->reg_ofs.sdb = ndev->reg_base + BWD_B2B_DOORBELL_OFFSET; | ||
607 | ndev->reg_ofs.spad_write = ndev->reg_base + BWD_B2B_SPAD_OFFSET; | ||
608 | ndev->limits.max_spads = BWD_MAX_SPADS; | ||
609 | } else { | ||
610 | ndev->reg_ofs.sdb = ndev->reg_base + BWD_PDOORBELL_OFFSET; | ||
611 | ndev->reg_ofs.spad_write = ndev->reg_base + BWD_SPAD_OFFSET; | ||
612 | ndev->limits.max_spads = BWD_MAX_COMPAT_SPADS; | ||
613 | } | ||
614 | |||
615 | ndev->limits.max_db_bits = BWD_MAX_DB_BITS; | ||
616 | ndev->limits.msix_cnt = BWD_MSIX_CNT; | ||
617 | ndev->bits_per_vector = BWD_DB_BITS_PER_VEC; | ||
618 | |||
619 | /* Since bwd doesn't have a link interrupt, set up a poll timer */ | ||
620 | INIT_DELAYED_WORK(&ndev->hb_timer, bwd_link_poll); | ||
621 | schedule_delayed_work(&ndev->hb_timer, NTB_HB_TIMEOUT); | ||
622 | |||
623 | return 0; | ||
624 | } | ||
625 | |||
626 | static int ntb_device_setup(struct ntb_device *ndev) | ||
627 | { | ||
628 | int rc; | ||
629 | |||
630 | switch (ndev->pdev->device) { | ||
631 | case PCI_DEVICE_ID_INTEL_NTB_2ND_SNB: | ||
632 | case PCI_DEVICE_ID_INTEL_NTB_RP_JSF: | ||
633 | case PCI_DEVICE_ID_INTEL_NTB_RP_SNB: | ||
634 | case PCI_DEVICE_ID_INTEL_NTB_CLASSIC_JSF: | ||
635 | case PCI_DEVICE_ID_INTEL_NTB_CLASSIC_SNB: | ||
636 | case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF: | ||
637 | case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB: | ||
638 | rc = ntb_xeon_setup(ndev); | ||
639 | break; | ||
640 | case PCI_DEVICE_ID_INTEL_NTB_B2B_BWD: | ||
641 | rc = ntb_bwd_setup(ndev); | ||
642 | break; | ||
643 | default: | ||
644 | rc = -ENODEV; | ||
645 | } | ||
646 | |||
647 | /* Enable Bus Master and Memory Space on the secondary side */ | ||
648 | writew(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER, ndev->reg_ofs.spci_cmd); | ||
649 | |||
650 | return rc; | ||
651 | } | ||
652 | |||
653 | static void ntb_device_free(struct ntb_device *ndev) | ||
654 | { | ||
655 | if (ndev->hw_type == BWD_HW) | ||
656 | cancel_delayed_work_sync(&ndev->hb_timer); | ||
657 | } | ||
658 | |||
659 | static irqreturn_t bwd_callback_msix_irq(int irq, void *data) | ||
660 | { | ||
661 | struct ntb_db_cb *db_cb = data; | ||
662 | struct ntb_device *ndev = db_cb->ndev; | ||
663 | |||
664 | dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq, | ||
665 | db_cb->db_num); | ||
666 | |||
667 | if (db_cb->callback) | ||
668 | db_cb->callback(db_cb->data, db_cb->db_num); | ||
669 | |||
670 | /* No need to check for the specific HB irq, any interrupt means | ||
671 | * we're connected. | ||
672 | */ | ||
673 | ndev->last_ts = jiffies; | ||
674 | |||
675 | writeq((u64) 1 << db_cb->db_num, ndev->reg_ofs.pdb); | ||
676 | |||
677 | return IRQ_HANDLED; | ||
678 | } | ||
679 | |||
680 | static irqreturn_t xeon_callback_msix_irq(int irq, void *data) | ||
681 | { | ||
682 | struct ntb_db_cb *db_cb = data; | ||
683 | struct ntb_device *ndev = db_cb->ndev; | ||
684 | |||
685 | dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq, | ||
686 | db_cb->db_num); | ||
687 | |||
688 | if (db_cb->callback) | ||
689 | db_cb->callback(db_cb->data, db_cb->db_num); | ||
690 | |||
691 | /* On Sandybridge, there are 16 bits in the interrupt register | ||
692 | * but only 4 vectors. So, 5 bits are assigned to the first 3 | ||
693 | * vectors, with the 4th having a single bit for link | ||
694 | * interrupts. | ||
695 | */ | ||
696 | writew(((1 << ndev->bits_per_vector) - 1) << | ||
697 | (db_cb->db_num * ndev->bits_per_vector), ndev->reg_ofs.pdb); | ||
698 | |||
699 | return IRQ_HANDLED; | ||
700 | } | ||
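
The ack mask written back above follows directly from that layout (with bits_per_vector == 5, vector 0 acks doorbell bits 0-4, vector 1 bits 5-9, vector 2 bits 10-14, and the 4th vector owns bit 15 for link events). Expressed as a hypothetical helper, equivalent to the inline writew() expression, it would be:

static u16 snb_db_vector_mask(struct ntb_device *ndev, int db_num)
{
        /* Same expression as the writew() in xeon_callback_msix_irq() */
        return ((1 << ndev->bits_per_vector) - 1) <<
               (db_num * ndev->bits_per_vector);
}
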
701 | |||
702 | /* Since we do not have a HW doorbell in BWD, this is only used in JF/JT */ | ||
703 | static irqreturn_t xeon_event_msix_irq(int irq, void *dev) | ||
704 | { | ||
705 | struct ntb_device *ndev = dev; | ||
706 | int rc; | ||
707 | |||
708 | dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for Events\n", irq); | ||
709 | |||
710 | rc = ntb_link_status(ndev); | ||
711 | if (rc) | ||
712 | dev_err(&ndev->pdev->dev, "Error determining link status\n"); | ||
713 | |||
714 | /* bit 15 is always the link bit */ | ||
715 | writew(1 << ndev->limits.max_db_bits, ndev->reg_ofs.pdb); | ||
716 | |||
717 | return IRQ_HANDLED; | ||
718 | } | ||
719 | |||
720 | static irqreturn_t ntb_interrupt(int irq, void *dev) | ||
721 | { | ||
722 | struct ntb_device *ndev = dev; | ||
723 | unsigned int i = 0; | ||
724 | |||
725 | if (ndev->hw_type == BWD_HW) { | ||
726 | u64 pdb = readq(ndev->reg_ofs.pdb); | ||
727 | |||
728 | dev_dbg(&ndev->pdev->dev, "irq %d - pdb = %Lx\n", irq, pdb); | ||
729 | |||
730 | while (pdb) { | ||
731 | i = __ffs(pdb); | ||
732 | pdb &= pdb - 1; | ||
733 | bwd_callback_msix_irq(irq, &ndev->db_cb[i]); | ||
734 | } | ||
735 | } else { | ||
736 | u16 pdb = readw(ndev->reg_ofs.pdb); | ||
737 | |||
738 | dev_dbg(&ndev->pdev->dev, "irq %d - pdb = %x sdb %x\n", irq, | ||
739 | pdb, readw(ndev->reg_ofs.sdb)); | ||
740 | |||
741 | if (pdb & SNB_DB_HW_LINK) { | ||
742 | xeon_event_msix_irq(irq, dev); | ||
743 | pdb &= ~SNB_DB_HW_LINK; | ||
744 | } | ||
745 | |||
746 | while (pdb) { | ||
747 | i = __ffs(pdb); | ||
748 | pdb &= pdb - 1; | ||
749 | xeon_callback_msix_irq(irq, &ndev->db_cb[i]); | ||
750 | } | ||
751 | } | ||
752 | |||
753 | return IRQ_HANDLED; | ||
754 | } | ||
755 | |||
756 | static int ntb_setup_msix(struct ntb_device *ndev) | ||
757 | { | ||
758 | struct pci_dev *pdev = ndev->pdev; | ||
759 | struct msix_entry *msix; | ||
760 | int msix_entries; | ||
761 | int rc, i, pos; | ||
762 | u16 val; | ||
763 | |||
764 | pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); | ||
765 | if (!pos) { | ||
766 | rc = -EIO; | ||
767 | goto err; | ||
768 | } | ||
769 | |||
770 | rc = pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &val); | ||
771 | if (rc) | ||
772 | goto err; | ||
773 | |||
774 | msix_entries = msix_table_size(val); | ||
775 | if (msix_entries > ndev->limits.msix_cnt) { | ||
776 | rc = -EINVAL; | ||
777 | goto err; | ||
778 | } | ||
779 | |||
780 | ndev->msix_entries = kmalloc(sizeof(struct msix_entry) * msix_entries, | ||
781 | GFP_KERNEL); | ||
782 | if (!ndev->msix_entries) { | ||
783 | rc = -ENOMEM; | ||
784 | goto err; | ||
785 | } | ||
786 | |||
787 | for (i = 0; i < msix_entries; i++) | ||
788 | ndev->msix_entries[i].entry = i; | ||
789 | |||
790 | rc = pci_enable_msix(pdev, ndev->msix_entries, msix_entries); | ||
791 | if (rc < 0) | ||
792 | goto err1; | ||
793 | if (rc > 0) { | ||
794 | /* On SNB, the link interrupt is always tied to the 4th vector. If | ||
795 | * we can't get all 4, then we can't use MSI-X. | ||
796 | */ | ||
797 | if (ndev->hw_type != BWD_HW) { | ||
798 | rc = -EIO; | ||
799 | goto err1; | ||
800 | } | ||
801 | |||
802 | dev_warn(&pdev->dev, | ||
803 | "Only %d MSI-X vectors. Limiting the number of queues to that number.\n", | ||
804 | rc); | ||
805 | msix_entries = rc; | ||
806 | } | ||
807 | |||
808 | for (i = 0; i < msix_entries; i++) { | ||
809 | msix = &ndev->msix_entries[i]; | ||
810 | WARN_ON(!msix->vector); | ||
811 | |||
812 | /* Use the last MSI-X vector for Link status */ | ||
813 | if (ndev->hw_type == BWD_HW) { | ||
814 | rc = request_irq(msix->vector, bwd_callback_msix_irq, 0, | ||
815 | "ntb-callback-msix", &ndev->db_cb[i]); | ||
816 | if (rc) | ||
817 | goto err2; | ||
818 | } else { | ||
819 | if (i == msix_entries - 1) { | ||
820 | rc = request_irq(msix->vector, | ||
821 | xeon_event_msix_irq, 0, | ||
822 | "ntb-event-msix", ndev); | ||
823 | if (rc) | ||
824 | goto err2; | ||
825 | } else { | ||
826 | rc = request_irq(msix->vector, | ||
827 | xeon_callback_msix_irq, 0, | ||
828 | "ntb-callback-msix", | ||
829 | &ndev->db_cb[i]); | ||
830 | if (rc) | ||
831 | goto err2; | ||
832 | } | ||
833 | } | ||
834 | } | ||
835 | |||
836 | ndev->num_msix = msix_entries; | ||
837 | if (ndev->hw_type == BWD_HW) | ||
838 | ndev->max_cbs = msix_entries; | ||
839 | else | ||
840 | ndev->max_cbs = msix_entries - 1; | ||
841 | |||
842 | return 0; | ||
843 | |||
844 | err2: | ||
845 | while (--i >= 0) { | ||
846 | msix = &ndev->msix_entries[i]; | ||
847 | if (ndev->hw_type != BWD_HW && i == ndev->num_msix - 1) | ||
848 | free_irq(msix->vector, ndev); | ||
849 | else | ||
850 | free_irq(msix->vector, &ndev->db_cb[i]); | ||
851 | } | ||
852 | pci_disable_msix(pdev); | ||
853 | err1: | ||
854 | kfree(ndev->msix_entries); | ||
855 | dev_err(&pdev->dev, "Error allocating MSI-X interrupt\n"); | ||
856 | err: | ||
857 | ndev->num_msix = 0; | ||
858 | return rc; | ||
859 | } | ||
860 | |||
861 | static int ntb_setup_msi(struct ntb_device *ndev) | ||
862 | { | ||
863 | struct pci_dev *pdev = ndev->pdev; | ||
864 | int rc; | ||
865 | |||
866 | rc = pci_enable_msi(pdev); | ||
867 | if (rc) | ||
868 | return rc; | ||
869 | |||
870 | rc = request_irq(pdev->irq, ntb_interrupt, 0, "ntb-msi", ndev); | ||
871 | if (rc) { | ||
872 | pci_disable_msi(pdev); | ||
873 | dev_err(&pdev->dev, "Error allocating MSI interrupt\n"); | ||
874 | return rc; | ||
875 | } | ||
876 | |||
877 | return 0; | ||
878 | } | ||
879 | |||
880 | static int ntb_setup_intx(struct ntb_device *ndev) | ||
881 | { | ||
882 | struct pci_dev *pdev = ndev->pdev; | ||
883 | int rc; | ||
884 | |||
885 | pci_msi_off(pdev); | ||
886 | |||
887 | /* Verify intx is enabled */ | ||
888 | pci_intx(pdev, 1); | ||
889 | |||
890 | rc = request_irq(pdev->irq, ntb_interrupt, IRQF_SHARED, "ntb-intx", | ||
891 | ndev); | ||
892 | if (rc) | ||
893 | return rc; | ||
894 | |||
895 | return 0; | ||
896 | } | ||
897 | |||
898 | static int ntb_setup_interrupts(struct ntb_device *ndev) | ||
899 | { | ||
900 | int rc; | ||
901 | |||
902 | /* On BWD, disable all interrupts. On SNB, disable all but Link | ||
903 | * Interrupt. The rest will be unmasked as callbacks are registered. | ||
904 | */ | ||
905 | if (ndev->hw_type == BWD_HW) | ||
906 | writeq(~0, ndev->reg_ofs.pdb_mask); | ||
907 | else | ||
908 | writew(~(1 << ndev->limits.max_db_bits), | ||
909 | ndev->reg_ofs.pdb_mask); | ||
910 | |||
911 | rc = ntb_setup_msix(ndev); | ||
912 | if (!rc) | ||
913 | goto done; | ||
914 | |||
915 | ndev->bits_per_vector = 1; | ||
916 | ndev->max_cbs = ndev->limits.max_db_bits; | ||
917 | |||
918 | rc = ntb_setup_msi(ndev); | ||
919 | if (!rc) | ||
920 | goto done; | ||
921 | |||
922 | rc = ntb_setup_intx(ndev); | ||
923 | if (rc) { | ||
924 | dev_err(&ndev->pdev->dev, "no usable interrupts\n"); | ||
925 | return rc; | ||
926 | } | ||
927 | |||
928 | done: | ||
929 | return 0; | ||
930 | } | ||
931 | |||
932 | static void ntb_free_interrupts(struct ntb_device *ndev) | ||
933 | { | ||
934 | struct pci_dev *pdev = ndev->pdev; | ||
935 | |||
936 | /* mask interrupts */ | ||
937 | if (ndev->hw_type == BWD_HW) | ||
938 | writeq(~0, ndev->reg_ofs.pdb_mask); | ||
939 | else | ||
940 | writew(~0, ndev->reg_ofs.pdb_mask); | ||
941 | |||
942 | if (ndev->num_msix) { | ||
943 | struct msix_entry *msix; | ||
944 | u32 i; | ||
945 | |||
946 | for (i = 0; i < ndev->num_msix; i++) { | ||
947 | msix = &ndev->msix_entries[i]; | ||
948 | if (ndev->hw_type != BWD_HW && i == ndev->num_msix - 1) | ||
949 | free_irq(msix->vector, ndev); | ||
950 | else | ||
951 | free_irq(msix->vector, &ndev->db_cb[i]); | ||
952 | } | ||
953 | pci_disable_msix(pdev); | ||
954 | } else { | ||
955 | free_irq(pdev->irq, ndev); | ||
956 | |||
957 | if (pci_dev_msi_enabled(pdev)) | ||
958 | pci_disable_msi(pdev); | ||
959 | } | ||
960 | } | ||
961 | |||
962 | static int ntb_create_callbacks(struct ntb_device *ndev) | ||
963 | { | ||
964 | int i; | ||
965 | |||
966 | /* Chicken-and-egg issue. We won't know how many callbacks are necessary | ||
967 | * until we see how many MSI-X vectors we get, but these pointers need | ||
968 | * to be passed into the MSI-X registration function. So, we allocate the | ||
969 | * max, knowing that they might not all be used, to work around this. | ||
970 | */ | ||
971 | ndev->db_cb = kcalloc(ndev->limits.max_db_bits, | ||
972 | sizeof(struct ntb_db_cb), | ||
973 | GFP_KERNEL); | ||
974 | if (!ndev->db_cb) | ||
975 | return -ENOMEM; | ||
976 | |||
977 | for (i = 0; i < ndev->limits.max_db_bits; i++) { | ||
978 | ndev->db_cb[i].db_num = i; | ||
979 | ndev->db_cb[i].ndev = ndev; | ||
980 | } | ||
981 | |||
982 | return 0; | ||
983 | } | ||
984 | |||
985 | static void ntb_free_callbacks(struct ntb_device *ndev) | ||
986 | { | ||
987 | int i; | ||
988 | |||
989 | for (i = 0; i < ndev->limits.max_db_bits; i++) | ||
990 | ntb_unregister_db_callback(ndev, i); | ||
991 | |||
992 | kfree(ndev->db_cb); | ||
993 | } | ||
994 | |||
995 | static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | ||
996 | { | ||
997 | struct ntb_device *ndev; | ||
998 | int rc, i; | ||
999 | |||
1000 | ndev = kzalloc(sizeof(struct ntb_device), GFP_KERNEL); | ||
1001 | if (!ndev) | ||
1002 | return -ENOMEM; | ||
1003 | |||
1004 | ndev->pdev = pdev; | ||
1005 | ndev->link_status = NTB_LINK_DOWN; | ||
1006 | pci_set_drvdata(pdev, ndev); | ||
1007 | |||
1008 | rc = pci_enable_device(pdev); | ||
1009 | if (rc) | ||
1010 | goto err; | ||
1011 | |||
1012 | pci_set_master(ndev->pdev); | ||
1013 | |||
1014 | rc = pci_request_selected_regions(pdev, NTB_BAR_MASK, KBUILD_MODNAME); | ||
1015 | if (rc) | ||
1016 | goto err1; | ||
1017 | |||
1018 | ndev->reg_base = pci_ioremap_bar(pdev, NTB_BAR_MMIO); | ||
1019 | if (!ndev->reg_base) { | ||
1020 | dev_warn(&pdev->dev, "Cannot remap BAR 0\n"); | ||
1021 | rc = -EIO; | ||
1022 | goto err2; | ||
1023 | } | ||
1024 | |||
1025 | for (i = 0; i < NTB_NUM_MW; i++) { | ||
1026 | ndev->mw[i].bar_sz = pci_resource_len(pdev, MW_TO_BAR(i)); | ||
1027 | ndev->mw[i].vbase = | ||
1028 | ioremap_wc(pci_resource_start(pdev, MW_TO_BAR(i)), | ||
1029 | ndev->mw[i].bar_sz); | ||
1030 | dev_info(&pdev->dev, "MW %d size %d\n", i, | ||
1031 | (u32) pci_resource_len(pdev, MW_TO_BAR(i))); | ||
1032 | if (!ndev->mw[i].vbase) { | ||
1033 | dev_warn(&pdev->dev, "Cannot remap BAR %d\n", | ||
1034 | MW_TO_BAR(i)); | ||
1035 | rc = -EIO; | ||
1036 | goto err3; | ||
1037 | } | ||
1038 | } | ||
1039 | |||
1040 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
1041 | if (rc) { | ||
1042 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
1043 | if (rc) | ||
1044 | goto err3; | ||
1045 | |||
1046 | dev_warn(&pdev->dev, "Cannot DMA highmem\n"); | ||
1047 | } | ||
1048 | |||
1049 | rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
1050 | if (rc) { | ||
1051 | rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
1052 | if (rc) | ||
1053 | goto err3; | ||
1054 | |||
1055 | dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n"); | ||
1056 | } | ||
1057 | |||
1058 | rc = ntb_device_setup(ndev); | ||
1059 | if (rc) | ||
1060 | goto err3; | ||
1061 | |||
1062 | rc = ntb_create_callbacks(ndev); | ||
1063 | if (rc) | ||
1064 | goto err4; | ||
1065 | |||
1066 | rc = ntb_setup_interrupts(ndev); | ||
1067 | if (rc) | ||
1068 | goto err5; | ||
1069 | |||
1070 | /* The scratchpad registers keep the values between rmmod/insmod, | ||
1071 | * blast them now | ||
1072 | */ | ||
1073 | for (i = 0; i < ndev->limits.max_spads; i++) { | ||
1074 | ntb_write_local_spad(ndev, i, 0); | ||
1075 | ntb_write_remote_spad(ndev, i, 0); | ||
1076 | } | ||
1077 | |||
1078 | rc = ntb_transport_init(pdev); | ||
1079 | if (rc) | ||
1080 | goto err6; | ||
1081 | |||
1082 | /* Let's bring the NTB link up */ | ||
1083 | writel(NTB_CNTL_BAR23_SNOOP | NTB_CNTL_BAR45_SNOOP, | ||
1084 | ndev->reg_ofs.lnk_cntl); | ||
1085 | |||
1086 | return 0; | ||
1087 | |||
1088 | err6: | ||
1089 | ntb_free_interrupts(ndev); | ||
1090 | err5: | ||
1091 | ntb_free_callbacks(ndev); | ||
1092 | err4: | ||
1093 | ntb_device_free(ndev); | ||
1094 | err3: | ||
1095 | for (i--; i >= 0; i--) | ||
1096 | iounmap(ndev->mw[i].vbase); | ||
1097 | iounmap(ndev->reg_base); | ||
1098 | err2: | ||
1099 | pci_release_selected_regions(pdev, NTB_BAR_MASK); | ||
1100 | err1: | ||
1101 | pci_disable_device(pdev); | ||
1102 | err: | ||
1103 | kfree(ndev); | ||
1104 | |||
1105 | dev_err(&pdev->dev, "Error loading %s module\n", KBUILD_MODNAME); | ||
1106 | return rc; | ||
1107 | } | ||
1108 | |||
1109 | static void ntb_pci_remove(struct pci_dev *pdev) | ||
1110 | { | ||
1111 | struct ntb_device *ndev = pci_get_drvdata(pdev); | ||
1112 | int i; | ||
1113 | u32 ntb_cntl; | ||
1114 | |||
1115 | /* Bring NTB link down */ | ||
1116 | ntb_cntl = readl(ndev->reg_ofs.lnk_cntl); | ||
1117 | ntb_cntl |= NTB_LINK_DISABLE; | ||
1118 | writel(ntb_cntl, ndev->reg_ofs.lnk_cntl); | ||
1119 | |||
1120 | ntb_transport_free(ndev->ntb_transport); | ||
1121 | |||
1122 | ntb_free_interrupts(ndev); | ||
1123 | ntb_free_callbacks(ndev); | ||
1124 | ntb_device_free(ndev); | ||
1125 | |||
1126 | for (i = 0; i < NTB_NUM_MW; i++) | ||
1127 | iounmap(ndev->mw[i].vbase); | ||
1128 | |||
1129 | iounmap(ndev->reg_base); | ||
1130 | pci_release_selected_regions(pdev, NTB_BAR_MASK); | ||
1131 | pci_disable_device(pdev); | ||
1132 | kfree(ndev); | ||
1133 | } | ||
1134 | |||
1135 | static struct pci_driver ntb_pci_driver = { | ||
1136 | .name = KBUILD_MODNAME, | ||
1137 | .id_table = ntb_pci_tbl, | ||
1138 | .probe = ntb_pci_probe, | ||
1139 | .remove = ntb_pci_remove, | ||
1140 | }; | ||
1141 | module_pci_driver(ntb_pci_driver); | ||