author | George Zhang <georgezhang@vmware.com> | 2013-01-08 18:55:32 -0500 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2013-01-08 19:15:56 -0500 |
commit | 1f166439917b69d3046e2e49fe923579d9181212 (patch) | |
tree | c0d8c4eba0d387e31a8f3ba8a5c9aa8513865acc /drivers/misc/vmw_vmci | |
parent | e76ffea3216bfea2b46bbc40f322b43430ec3367 (diff) |
VMCI: guest side driver implementation.
VMCI guest side driver code implementation.
Signed-off-by: George Zhang <georgezhang@vmware.com>
Acked-by: Andy King <acking@vmware.com>
Acked-by: Dmitry Torokhov <dtor@vmware.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/misc/vmw_vmci')
-rw-r--r-- | drivers/misc/vmw_vmci/vmci_guest.c | 759 |
1 file changed, 759 insertions, 0 deletions
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
new file mode 100644
index 000000000000..d302c89d2bfa
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -0,0 +1,759 @@ | |||
1 | /* | ||
2 | * VMware VMCI Driver | ||
3 | * | ||
4 | * Copyright (C) 2012 VMware, Inc. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation version 2 and no later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
12 | * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
13 | * for more details. | ||
14 | */ | ||
15 | |||
16 | #include <linux/vmw_vmci_defs.h> | ||
17 | #include <linux/vmw_vmci_api.h> | ||
18 | #include <linux/moduleparam.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/highmem.h> | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/sched.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/pci.h> | ||
26 | #include <linux/smp.h> | ||
27 | #include <linux/io.h> | ||
28 | |||
29 | #include "vmci_datagram.h" | ||
30 | #include "vmci_doorbell.h" | ||
31 | #include "vmci_context.h" | ||
32 | #include "vmci_driver.h" | ||
33 | #include "vmci_event.h" | ||
34 | |||
35 | #define PCI_VENDOR_ID_VMWARE 0x15AD | ||
36 | #define PCI_DEVICE_ID_VMWARE_VMCI 0x0740 | ||
37 | |||
38 | #define VMCI_UTIL_NUM_RESOURCES 1 | ||
39 | |||
40 | static bool vmci_disable_msi; | ||
41 | module_param_named(disable_msi, vmci_disable_msi, bool, 0); | ||
42 | MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)"); | ||
43 | |||
44 | static bool vmci_disable_msix; | ||
45 | module_param_named(disable_msix, vmci_disable_msix, bool, 0); | ||
46 | MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)"); | ||
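/*
 * Usage note: both parameters are read at module load time. Assuming the
 * driver is built as the vmw_vmci module, a load such as
 * "modprobe vmw_vmci disable_msi=1 disable_msix=1" (illustrative invocation,
 * not taken from this commit) forces the fallback to legacy INTx interrupts.
 */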
47 | |||
48 | static u32 ctx_update_sub_id = VMCI_INVALID_ID; | ||
49 | static u32 vm_context_id = VMCI_INVALID_ID; | ||
50 | |||
51 | struct vmci_guest_device { | ||
52 | struct device *dev; /* PCI device we are attached to */ | ||
53 | void __iomem *iobase; | ||
54 | |||
55 | unsigned int irq; | ||
56 | unsigned int intr_type; | ||
57 | bool exclusive_vectors; | ||
58 | struct msix_entry msix_entries[VMCI_MAX_INTRS]; | ||
59 | |||
60 | struct tasklet_struct datagram_tasklet; | ||
61 | struct tasklet_struct bm_tasklet; | ||
62 | |||
63 | void *data_buffer; | ||
64 | void *notification_bitmap; | ||
65 | }; | ||
66 | |||
67 | /* vmci_dev singleton device and supporting data */ | ||
68 | static struct vmci_guest_device *vmci_dev_g; | ||
69 | static DEFINE_SPINLOCK(vmci_dev_spinlock); | ||
70 | |||
71 | static atomic_t vmci_num_guest_devices = ATOMIC_INIT(0); | ||
72 | |||
73 | bool vmci_guest_code_active(void) | ||
74 | { | ||
75 | return atomic_read(&vmci_num_guest_devices) != 0; | ||
76 | } | ||
77 | |||
78 | u32 vmci_get_vm_context_id(void) | ||
79 | { | ||
80 | if (vm_context_id == VMCI_INVALID_ID) { | ||
81 | u32 result; | ||
82 | struct vmci_datagram get_cid_msg; | ||
83 | get_cid_msg.dst = | ||
84 | vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, | ||
85 | VMCI_GET_CONTEXT_ID); | ||
86 | get_cid_msg.src = VMCI_ANON_SRC_HANDLE; | ||
87 | get_cid_msg.payload_size = 0; | ||
88 | result = vmci_send_datagram(&get_cid_msg); | ||
89 | if (result >= 0) | ||
90 | vm_context_id = result; | ||
91 | } | ||
92 | return vm_context_id; | ||
93 | } | ||
94 | |||
95 | /* | ||
96 | * VM to hypervisor call mechanism. We use the standard VMware naming | ||
97 | * convention since shared code is calling this function as well. | ||
98 | */ | ||
99 | int vmci_send_datagram(struct vmci_datagram *dg) | ||
100 | { | ||
101 | unsigned long flags; | ||
102 | int result; | ||
103 | |||
104 | /* Check args. */ | ||
105 | if (dg == NULL) | ||
106 | return VMCI_ERROR_INVALID_ARGS; | ||
107 | |||
108 | /* | ||
109 | * Need to acquire spinlock on the device because the datagram | ||
110 | * data may be spread over multiple pages and the monitor may | ||
111 | * interleave device user rpc calls from multiple | ||
112 | * VCPUs. Acquiring the spinlock precludes that | ||
113 | * possibility. Interrupts are disabled to keep an incoming | ||
114 | * datagram from arriving during a "rep out" and ending up | ||
115 | * back in this function. | ||
116 | */ | ||
117 | spin_lock_irqsave(&vmci_dev_spinlock, flags); | ||
118 | |||
119 | if (vmci_dev_g) { | ||
120 | iowrite8_rep(vmci_dev_g->iobase + VMCI_DATA_OUT_ADDR, | ||
121 | dg, VMCI_DG_SIZE(dg)); | ||
122 | result = ioread32(vmci_dev_g->iobase + VMCI_RESULT_LOW_ADDR); | ||
123 | } else { | ||
124 | result = VMCI_ERROR_UNAVAILABLE; | ||
125 | } | ||
126 | |||
127 | spin_unlock_irqrestore(&vmci_dev_spinlock, flags); | ||
128 | |||
129 | return result; | ||
130 | } | ||
131 | EXPORT_SYMBOL_GPL(vmci_send_datagram); | ||
132 | |||
133 | /* | ||
134 | * Called with the new context id when the context is updated | ||
135 | * or the VM is resumed. | ||
136 | */ | ||
137 | static void vmci_guest_cid_update(u32 sub_id, | ||
138 | const struct vmci_event_data *event_data, | ||
139 | void *client_data) | ||
140 | { | ||
141 | const struct vmci_event_payld_ctx *ev_payload = | ||
142 | vmci_event_data_const_payload(event_data); | ||
143 | |||
144 | if (sub_id != ctx_update_sub_id) { | ||
145 | pr_devel("Invalid subscriber (ID=0x%x)\n", sub_id); | ||
146 | return; | ||
147 | } | ||
148 | |||
149 | if (!event_data || ev_payload->context_id == VMCI_INVALID_ID) { | ||
150 | pr_devel("Invalid event data\n"); | ||
151 | return; | ||
152 | } | ||
153 | |||
154 | pr_devel("Updating context from (ID=0x%x) to (ID=0x%x) on event (type=%d)\n", | ||
155 | vm_context_id, ev_payload->context_id, event_data->event); | ||
156 | |||
157 | vm_context_id = ev_payload->context_id; | ||
158 | } | ||
159 | |||
160 | /* | ||
161 | * Verify that the host supports the hypercalls we need. If it does not, | ||
162 | * try to find fallback hypercalls and use those instead. Returns | ||
163 | * true if required hypercalls (or fallback hypercalls) are | ||
164 | * supported by the host, false otherwise. | ||
165 | */ | ||
166 | static bool vmci_check_host_caps(struct pci_dev *pdev) | ||
167 | { | ||
168 | bool result; | ||
169 | struct vmci_resource_query_msg *msg; | ||
170 | u32 msg_size = sizeof(struct vmci_resource_query_hdr) + | ||
171 | VMCI_UTIL_NUM_RESOURCES * sizeof(u32); | ||
172 | struct vmci_datagram *check_msg; | ||
173 | |||
174 | check_msg = kmalloc(msg_size, GFP_KERNEL); | ||
175 | if (!check_msg) { | ||
176 | dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__); | ||
177 | return false; | ||
178 | } | ||
179 | |||
180 | check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, | ||
181 | VMCI_RESOURCES_QUERY); | ||
182 | check_msg->src = VMCI_ANON_SRC_HANDLE; | ||
183 | check_msg->payload_size = msg_size - VMCI_DG_HEADERSIZE; | ||
184 | msg = (struct vmci_resource_query_msg *)VMCI_DG_PAYLOAD(check_msg); | ||
185 | |||
186 | msg->num_resources = VMCI_UTIL_NUM_RESOURCES; | ||
187 | msg->resources[0] = VMCI_GET_CONTEXT_ID; | ||
188 | |||
189 | /* Check that hypercalls are supported */ | ||
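/*
 * The reply is presumably a bit mask over the queried resources, so 0x01
 * (bit 0) indicates that the single resource we asked about,
 * VMCI_GET_CONTEXT_ID, is supported.
 */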
190 | result = vmci_send_datagram(check_msg) == 0x01; | ||
191 | kfree(check_msg); | ||
192 | |||
193 | dev_dbg(&pdev->dev, "%s: Host capability check: %s\n", | ||
194 | __func__, result ? "PASSED" : "FAILED"); | ||
195 | |||
196 | /* We need this resource. There are no fallbacks. */ | ||
197 | return result; | ||
198 | } | ||
199 | |||
200 | /* | ||
201 | * Reads datagrams from the data in port and dispatches them. We | ||
202 | * always start reading datagrams into only the first page of the | ||
203 | * datagram buffer. If the datagrams don't fit into one page, we | ||
204 | * use the maximum datagram buffer size for the remainder of the | ||
205 | * invocation. This is a simple heuristic for not penalizing | ||
206 | * small datagrams. | ||
207 | * | ||
208 | * This function assumes that it has exclusive access to the data | ||
209 | * in port for the duration of the call. | ||
210 | */ | ||
211 | static void vmci_dispatch_dgs(unsigned long data) | ||
212 | { | ||
213 | struct vmci_guest_device *vmci_dev = (struct vmci_guest_device *)data; | ||
214 | u8 *dg_in_buffer = vmci_dev->data_buffer; | ||
215 | struct vmci_datagram *dg; | ||
216 | size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE; | ||
217 | size_t current_dg_in_buffer_size = PAGE_SIZE; | ||
218 | size_t remaining_bytes; | ||
219 | |||
220 | BUILD_BUG_ON(VMCI_MAX_DG_SIZE < PAGE_SIZE); | ||
221 | |||
222 | ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR, | ||
223 | vmci_dev->data_buffer, current_dg_in_buffer_size); | ||
224 | dg = (struct vmci_datagram *)dg_in_buffer; | ||
225 | remaining_bytes = current_dg_in_buffer_size; | ||
226 | |||
227 | while (dg->dst.resource != VMCI_INVALID_ID || | ||
228 | remaining_bytes > PAGE_SIZE) { | ||
229 | unsigned dg_in_size; | ||
230 | |||
231 | /* | ||
232 | * When the input buffer spans multiple pages, a datagram can | ||
233 | * start on any page boundary in the buffer. | ||
234 | */ | ||
235 | if (dg->dst.resource == VMCI_INVALID_ID) { | ||
236 | dg = (struct vmci_datagram *)roundup( | ||
237 | (uintptr_t)dg + 1, PAGE_SIZE); | ||
238 | remaining_bytes = | ||
239 | (size_t)(dg_in_buffer + | ||
240 | current_dg_in_buffer_size - | ||
241 | (u8 *)dg); | ||
242 | continue; | ||
243 | } | ||
244 | |||
245 | dg_in_size = VMCI_DG_SIZE_ALIGNED(dg); | ||
246 | |||
247 | if (dg_in_size <= dg_in_buffer_size) { | ||
248 | int result; | ||
249 | |||
250 | /* | ||
251 | * If the remaining bytes in the datagram | ||
252 | * buffer don't contain the complete | ||
253 | * datagram, we first make sure we have enough | ||
254 | * room for it and then we read the remainder | ||
255 | * of the datagram and possibly any following | ||
256 | * datagrams. | ||
257 | */ | ||
258 | if (dg_in_size > remaining_bytes) { | ||
259 | if (remaining_bytes != | ||
260 | current_dg_in_buffer_size) { | ||
261 | |||
262 | /* | ||
263 | * We move the partial | ||
264 | * datagram to the front and | ||
264 | * read the remainder of the | ||
265 | * datagram, and possibly any | ||
266 | * following datagrams, into | ||
267 | * the bytes after it. | ||
269 | */ | ||
270 | memmove(dg_in_buffer, dg_in_buffer + | ||
271 | current_dg_in_buffer_size - | ||
272 | remaining_bytes, | ||
273 | remaining_bytes); | ||
274 | dg = (struct vmci_datagram *) | ||
275 | dg_in_buffer; | ||
276 | } | ||
277 | |||
278 | if (current_dg_in_buffer_size != | ||
279 | dg_in_buffer_size) | ||
280 | current_dg_in_buffer_size = | ||
281 | dg_in_buffer_size; | ||
282 | |||
283 | ioread8_rep(vmci_dev->iobase + | ||
284 | VMCI_DATA_IN_ADDR, | ||
285 | vmci_dev->data_buffer + | ||
286 | remaining_bytes, | ||
287 | current_dg_in_buffer_size - | ||
288 | remaining_bytes); | ||
289 | } | ||
290 | |||
291 | /* | ||
292 | * We special case event datagrams from the | ||
293 | * hypervisor. | ||
294 | */ | ||
295 | if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID && | ||
296 | dg->dst.resource == VMCI_EVENT_HANDLER) { | ||
297 | result = vmci_event_dispatch(dg); | ||
298 | } else { | ||
299 | result = vmci_datagram_invoke_guest_handler(dg); | ||
300 | } | ||
301 | if (result < VMCI_SUCCESS) | ||
302 | dev_dbg(vmci_dev->dev, | ||
303 | "Datagram with resource (ID=0x%x) failed (err=%d)\n", | ||
304 | dg->dst.resource, result); | ||
305 | |||
306 | /* On to the next datagram. */ | ||
307 | dg = (struct vmci_datagram *)((u8 *)dg + | ||
308 | dg_in_size); | ||
309 | } else { | ||
310 | size_t bytes_to_skip; | ||
311 | |||
312 | /* | ||
313 | * The datagram doesn't fit even in a datagram buffer of | ||
314 | * maximal size, so we drop it. | ||
315 | */ | ||
316 | dev_dbg(vmci_dev->dev, | ||
317 | "Failed to receive datagram (size=%u bytes)\n", | ||
318 | dg_in_size); | ||
319 | |||
320 | bytes_to_skip = dg_in_size - remaining_bytes; | ||
321 | if (current_dg_in_buffer_size != dg_in_buffer_size) | ||
322 | current_dg_in_buffer_size = dg_in_buffer_size; | ||
323 | |||
324 | for (;;) { | ||
325 | ioread8_rep(vmci_dev->iobase + | ||
326 | VMCI_DATA_IN_ADDR, | ||
327 | vmci_dev->data_buffer, | ||
328 | current_dg_in_buffer_size); | ||
329 | if (bytes_to_skip <= current_dg_in_buffer_size) | ||
330 | break; | ||
331 | |||
332 | bytes_to_skip -= current_dg_in_buffer_size; | ||
333 | } | ||
334 | dg = (struct vmci_datagram *)(dg_in_buffer + | ||
335 | bytes_to_skip); | ||
336 | } | ||
337 | |||
338 | remaining_bytes = | ||
339 | (size_t) (dg_in_buffer + current_dg_in_buffer_size - | ||
340 | (u8 *)dg); | ||
341 | |||
342 | if (remaining_bytes < VMCI_DG_HEADERSIZE) { | ||
343 | /* Get the next batch of datagrams. */ | ||
344 | |||
345 | ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR, | ||
346 | vmci_dev->data_buffer, | ||
347 | current_dg_in_buffer_size); | ||
348 | dg = (struct vmci_datagram *)dg_in_buffer; | ||
349 | remaining_bytes = current_dg_in_buffer_size; | ||
350 | } | ||
351 | } | ||
352 | } | ||
353 | |||
354 | /* | ||
355 | * Scans the notification bitmap for raised flags, clears them | ||
356 | * and handles the notifications. | ||
357 | */ | ||
358 | static void vmci_process_bitmap(unsigned long data) | ||
359 | { | ||
360 | struct vmci_guest_device *dev = (struct vmci_guest_device *)data; | ||
361 | |||
362 | if (!dev->notification_bitmap) { | ||
363 | dev_dbg(dev->dev, "No bitmap present in %s\n", __func__); | ||
364 | return; | ||
365 | } | ||
366 | |||
367 | vmci_dbell_scan_notification_entries(dev->notification_bitmap); | ||
368 | } | ||
369 | |||
370 | /* | ||
371 | * Enable MSI-X. Try exclusive vectors first, then shared vectors. | ||
372 | */ | ||
373 | static int vmci_enable_msix(struct pci_dev *pdev, | ||
374 | struct vmci_guest_device *vmci_dev) | ||
375 | { | ||
376 | int i; | ||
377 | int result; | ||
378 | |||
379 | for (i = 0; i < VMCI_MAX_INTRS; ++i) { | ||
380 | vmci_dev->msix_entries[i].entry = i; | ||
381 | vmci_dev->msix_entries[i].vector = i; | ||
382 | } | ||
383 | |||
384 | result = pci_enable_msix(pdev, vmci_dev->msix_entries, VMCI_MAX_INTRS); | ||
385 | if (result == 0) | ||
386 | vmci_dev->exclusive_vectors = true; | ||
387 | else if (result > 0) | ||
388 | result = pci_enable_msix(pdev, vmci_dev->msix_entries, 1); | ||
389 | |||
390 | return result; | ||
391 | } | ||
392 | |||
393 | /* | ||
394 | * Interrupt handler for legacy or MSI interrupt, or for first MSI-X | ||
395 | * interrupt (vector VMCI_INTR_DATAGRAM). | ||
396 | */ | ||
397 | static irqreturn_t vmci_interrupt(int irq, void *_dev) | ||
398 | { | ||
399 | struct vmci_guest_device *dev = _dev; | ||
400 | |||
401 | /* | ||
402 | * If we are using MSI-X with exclusive vectors then we simply schedule | ||
403 | * the datagram tasklet, since we know the interrupt was meant for us. | ||
404 | * Otherwise we must read the ICR to determine what to do. | ||
405 | */ | ||
406 | |||
407 | if (dev->intr_type == VMCI_INTR_TYPE_MSIX && dev->exclusive_vectors) { | ||
408 | tasklet_schedule(&dev->datagram_tasklet); | ||
409 | } else { | ||
410 | unsigned int icr; | ||
411 | |||
412 | /* Acknowledge interrupt and determine what needs doing. */ | ||
413 | icr = ioread32(dev->iobase + VMCI_ICR_ADDR); | ||
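/*
 * Zero means the interrupt was not ours (the line may be shared); all ones
 * typically means the device is no longer present.
 */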
414 | if (icr == 0 || icr == ~0) | ||
415 | return IRQ_NONE; | ||
416 | |||
417 | if (icr & VMCI_ICR_DATAGRAM) { | ||
418 | tasklet_schedule(&dev->datagram_tasklet); | ||
419 | icr &= ~VMCI_ICR_DATAGRAM; | ||
420 | } | ||
421 | |||
422 | if (icr & VMCI_ICR_NOTIFICATION) { | ||
423 | tasklet_schedule(&dev->bm_tasklet); | ||
424 | icr &= ~VMCI_ICR_NOTIFICATION; | ||
425 | } | ||
426 | |||
427 | if (icr != 0) | ||
428 | dev_warn(dev->dev, | ||
429 | "Ignoring unknown interrupt cause (%d)\n", | ||
430 | icr); | ||
431 | } | ||
432 | |||
433 | return IRQ_HANDLED; | ||
434 | } | ||
435 | |||
436 | /* | ||
437 | * Interrupt handler for MSI-X interrupt vector VMCI_INTR_NOTIFICATION, | ||
438 | * which is for the notification bitmap. Will only get called if we are | ||
439 | * using MSI-X with exclusive vectors. | ||
440 | */ | ||
441 | static irqreturn_t vmci_interrupt_bm(int irq, void *_dev) | ||
442 | { | ||
443 | struct vmci_guest_device *dev = _dev; | ||
444 | |||
445 | /* For MSI-X we can just assume it was meant for us. */ | ||
446 | tasklet_schedule(&dev->bm_tasklet); | ||
447 | |||
448 | return IRQ_HANDLED; | ||
449 | } | ||
450 | |||
451 | /* | ||
452 | * Most of the initialization at module load time is done here. | ||
453 | */ | ||
454 | static int vmci_guest_probe_device(struct pci_dev *pdev, | ||
455 | const struct pci_device_id *id) | ||
456 | { | ||
457 | struct vmci_guest_device *vmci_dev; | ||
458 | void __iomem *iobase; | ||
459 | unsigned int capabilities; | ||
460 | unsigned long cmd; | ||
461 | int vmci_err; | ||
462 | int error; | ||
463 | |||
464 | dev_dbg(&pdev->dev, "Probing for vmci/PCI guest device\n"); | ||
465 | |||
466 | error = pcim_enable_device(pdev); | ||
467 | if (error) { | ||
468 | dev_err(&pdev->dev, | ||
469 | "Failed to enable VMCI device: %d\n", error); | ||
470 | return error; | ||
471 | } | ||
472 | |||
473 | error = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME); | ||
474 | if (error) { | ||
475 | dev_err(&pdev->dev, "Failed to reserve/map IO regions\n"); | ||
476 | return error; | ||
477 | } | ||
478 | |||
479 | iobase = pcim_iomap_table(pdev)[0]; | ||
480 | |||
481 | dev_info(&pdev->dev, "Found VMCI PCI device at %#lx, irq %u\n", | ||
482 | (unsigned long)iobase, pdev->irq); | ||
483 | |||
484 | vmci_dev = devm_kzalloc(&pdev->dev, sizeof(*vmci_dev), GFP_KERNEL); | ||
485 | if (!vmci_dev) { | ||
486 | dev_err(&pdev->dev, | ||
487 | "Can't allocate memory for VMCI device\n"); | ||
488 | return -ENOMEM; | ||
489 | } | ||
490 | |||
491 | vmci_dev->dev = &pdev->dev; | ||
492 | vmci_dev->intr_type = VMCI_INTR_TYPE_INTX; | ||
493 | vmci_dev->exclusive_vectors = false; | ||
494 | vmci_dev->iobase = iobase; | ||
495 | |||
496 | tasklet_init(&vmci_dev->datagram_tasklet, | ||
497 | vmci_dispatch_dgs, (unsigned long)vmci_dev); | ||
498 | tasklet_init(&vmci_dev->bm_tasklet, | ||
499 | vmci_process_bitmap, (unsigned long)vmci_dev); | ||
500 | |||
501 | vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE); | ||
502 | if (!vmci_dev->data_buffer) { | ||
503 | dev_err(&pdev->dev, | ||
504 | "Can't allocate memory for datagram buffer\n"); | ||
505 | return -ENOMEM; | ||
506 | } | ||
507 | |||
508 | pci_set_master(pdev); /* To enable queue_pair functionality. */ | ||
509 | |||
510 | /* | ||
511 | * Verify that the VMCI Device supports the capabilities that | ||
512 | * we need. If the device is missing capabilities that we would | ||
513 | * like to use, check for fallback capabilities and use those | ||
514 | * instead (so we can run a new VM on old hosts). Fail the load if | ||
515 | * a required capability is missing and there is no fallback. | ||
516 | * | ||
517 | * Right now, we need datagrams. There are no fallbacks. | ||
518 | */ | ||
519 | capabilities = ioread32(vmci_dev->iobase + VMCI_CAPS_ADDR); | ||
520 | if (!(capabilities & VMCI_CAPS_DATAGRAM)) { | ||
521 | dev_err(&pdev->dev, "Device does not support datagrams\n"); | ||
522 | error = -ENXIO; | ||
523 | goto err_free_data_buffer; | ||
524 | } | ||
525 | |||
526 | /* | ||
527 | * If the hardware supports notifications, we will use that as | ||
528 | * well. | ||
529 | */ | ||
530 | if (capabilities & VMCI_CAPS_NOTIFICATIONS) { | ||
531 | vmci_dev->notification_bitmap = vmalloc(PAGE_SIZE); | ||
532 | if (!vmci_dev->notification_bitmap) { | ||
533 | dev_warn(&pdev->dev, | ||
534 | "Unable to allocate notification bitmap\n"); | ||
535 | } else { | ||
536 | memset(vmci_dev->notification_bitmap, 0, PAGE_SIZE); | ||
537 | capabilities |= VMCI_CAPS_NOTIFICATIONS; | ||
538 | } | ||
539 | } | ||
540 | |||
541 | dev_info(&pdev->dev, "Using capabilities 0x%x\n", capabilities); | ||
542 | |||
543 | /* Let the host know which capabilities we intend to use. */ | ||
544 | iowrite32(capabilities, vmci_dev->iobase + VMCI_CAPS_ADDR); | ||
545 | |||
546 | /* Set up global device so that we can start sending datagrams */ | ||
547 | spin_lock_irq(&vmci_dev_spinlock); | ||
548 | vmci_dev_g = vmci_dev; | ||
549 | spin_unlock_irq(&vmci_dev_spinlock); | ||
550 | |||
551 | /* | ||
552 | * Register notification bitmap with device if that capability is | ||
553 | * used. | ||
554 | */ | ||
555 | if (capabilities & VMCI_CAPS_NOTIFICATIONS) { | ||
556 | struct page *page = | ||
557 | vmalloc_to_page(vmci_dev->notification_bitmap); | ||
558 | unsigned long bitmap_ppn = page_to_pfn(page); | ||
559 | if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) { | ||
560 | dev_warn(&pdev->dev, | ||
561 | "VMCI device unable to register notification bitmap with PPN 0x%x\n", | ||
562 | (u32) bitmap_ppn); | ||
563 | goto err_remove_vmci_dev_g; | ||
564 | } | ||
565 | } | ||
566 | |||
567 | /* Check host capabilities. */ | ||
568 | if (!vmci_check_host_caps(pdev)) | ||
569 | goto err_remove_bitmap; | ||
570 | |||
571 | /* Enable device. */ | ||
572 | |||
573 | /* | ||
574 | * We subscribe to the VMCI_EVENT_CTX_ID_UPDATE here so we can | ||
575 | * update the internal context id when needed. | ||
576 | */ | ||
577 | vmci_err = vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE, | ||
578 | vmci_guest_cid_update, NULL, | ||
579 | &ctx_update_sub_id); | ||
580 | if (vmci_err < VMCI_SUCCESS) | ||
581 | dev_warn(&pdev->dev, | ||
582 | "Failed to subscribe to event (type=%d): %d\n", | ||
583 | VMCI_EVENT_CTX_ID_UPDATE, vmci_err); | ||
584 | |||
585 | /* | ||
586 | * Enable interrupts. Try MSI-X first, then MSI, and then fall back on | ||
587 | * legacy interrupts. | ||
588 | */ | ||
589 | if (!vmci_disable_msix && !vmci_enable_msix(pdev, vmci_dev)) { | ||
590 | vmci_dev->intr_type = VMCI_INTR_TYPE_MSIX; | ||
591 | vmci_dev->irq = vmci_dev->msix_entries[0].vector; | ||
592 | } else if (!vmci_disable_msi && !pci_enable_msi(pdev)) { | ||
593 | vmci_dev->intr_type = VMCI_INTR_TYPE_MSI; | ||
594 | vmci_dev->irq = pdev->irq; | ||
595 | } else { | ||
596 | vmci_dev->intr_type = VMCI_INTR_TYPE_INTX; | ||
597 | vmci_dev->irq = pdev->irq; | ||
598 | } | ||
599 | |||
600 | /* | ||
601 | * Request IRQ for legacy or MSI interrupts, or for first | ||
602 | * MSI-X vector. | ||
603 | */ | ||
604 | error = request_irq(vmci_dev->irq, vmci_interrupt, IRQF_SHARED, | ||
605 | KBUILD_MODNAME, vmci_dev); | ||
606 | if (error) { | ||
607 | dev_err(&pdev->dev, "Irq %u in use: %d\n", | ||
608 | vmci_dev->irq, error); | ||
609 | goto err_disable_msi; | ||
610 | } | ||
611 | |||
612 | /* | ||
613 | * For MSI-X with exclusive vectors we need to request an | ||
614 | * interrupt for each vector so that we get a separate | ||
615 | * interrupt handler routine. This allows us to distinguish | ||
616 | * between the vectors. | ||
617 | */ | ||
618 | if (vmci_dev->exclusive_vectors) { | ||
619 | error = request_irq(vmci_dev->msix_entries[1].vector, | ||
620 | vmci_interrupt_bm, 0, KBUILD_MODNAME, | ||
621 | vmci_dev); | ||
622 | if (error) { | ||
623 | dev_err(&pdev->dev, | ||
624 | "Failed to allocate irq %u: %d\n", | ||
625 | vmci_dev->msix_entries[1].vector, error); | ||
626 | goto err_free_irq; | ||
627 | } | ||
628 | } | ||
629 | |||
630 | dev_dbg(&pdev->dev, "Registered device\n"); | ||
631 | |||
632 | atomic_inc(&vmci_num_guest_devices); | ||
633 | |||
634 | /* Enable specific interrupt bits. */ | ||
635 | cmd = VMCI_IMR_DATAGRAM; | ||
636 | if (capabilities & VMCI_CAPS_NOTIFICATIONS) | ||
637 | cmd |= VMCI_IMR_NOTIFICATION; | ||
638 | iowrite32(cmd, vmci_dev->iobase + VMCI_IMR_ADDR); | ||
639 | |||
640 | /* Enable interrupts. */ | ||
641 | iowrite32(VMCI_CONTROL_INT_ENABLE, | ||
642 | vmci_dev->iobase + VMCI_CONTROL_ADDR); | ||
643 | |||
644 | pci_set_drvdata(pdev, vmci_dev); | ||
645 | return 0; | ||
646 | |||
647 | err_free_irq: | ||
648 | free_irq(vmci_dev->irq, &vmci_dev); | ||
649 | tasklet_kill(&vmci_dev->datagram_tasklet); | ||
650 | tasklet_kill(&vmci_dev->bm_tasklet); | ||
651 | |||
652 | err_disable_msi: | ||
653 | if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX) | ||
654 | pci_disable_msix(pdev); | ||
655 | else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI) | ||
656 | pci_disable_msi(pdev); | ||
657 | |||
658 | vmci_err = vmci_event_unsubscribe(ctx_update_sub_id); | ||
659 | if (vmci_err < VMCI_SUCCESS) | ||
660 | dev_warn(&pdev->dev, | ||
661 | "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n", | ||
662 | VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err); | ||
663 | |||
664 | err_remove_bitmap: | ||
665 | if (vmci_dev->notification_bitmap) { | ||
666 | iowrite32(VMCI_CONTROL_RESET, | ||
667 | vmci_dev->iobase + VMCI_CONTROL_ADDR); | ||
668 | vfree(vmci_dev->notification_bitmap); | ||
669 | } | ||
670 | |||
671 | err_remove_vmci_dev_g: | ||
672 | spin_lock_irq(&vmci_dev_spinlock); | ||
673 | vmci_dev_g = NULL; | ||
674 | spin_unlock_irq(&vmci_dev_spinlock); | ||
675 | |||
676 | err_free_data_buffer: | ||
677 | vfree(vmci_dev->data_buffer); | ||
678 | |||
679 | /* The rest are managed resources and will be freed by PCI core */ | ||
680 | return error; | ||
681 | } | ||
682 | |||
683 | static void vmci_guest_remove_device(struct pci_dev *pdev) | ||
684 | { | ||
685 | struct vmci_guest_device *vmci_dev = pci_get_drvdata(pdev); | ||
686 | int vmci_err; | ||
687 | |||
688 | dev_dbg(&pdev->dev, "Removing device\n"); | ||
689 | |||
690 | atomic_dec(&vmci_num_guest_devices); | ||
691 | |||
692 | vmci_qp_guest_endpoints_exit(); | ||
693 | |||
694 | vmci_err = vmci_event_unsubscribe(ctx_update_sub_id); | ||
695 | if (vmci_err < VMCI_SUCCESS) | ||
696 | dev_warn(&pdev->dev, | ||
697 | "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n", | ||
698 | VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err); | ||
699 | |||
700 | spin_lock_irq(&vmci_dev_spinlock); | ||
701 | vmci_dev_g = NULL; | ||
702 | spin_unlock_irq(&vmci_dev_spinlock); | ||
703 | |||
704 | dev_dbg(&pdev->dev, "Resetting vmci device\n"); | ||
705 | iowrite32(VMCI_CONTROL_RESET, vmci_dev->iobase + VMCI_CONTROL_ADDR); | ||
706 | |||
707 | /* | ||
708 | * Free IRQ and then disable MSI/MSI-X as appropriate. For | ||
709 | * MSI-X, we might have multiple vectors, each with its own | ||
710 | * IRQ, which we must free too. | ||
711 | */ | ||
712 | free_irq(vmci_dev->irq, vmci_dev); | ||
713 | if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX) { | ||
714 | if (vmci_dev->exclusive_vectors) | ||
715 | free_irq(vmci_dev->msix_entries[1].vector, vmci_dev); | ||
716 | pci_disable_msix(pdev); | ||
717 | } else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI) { | ||
718 | pci_disable_msi(pdev); | ||
719 | } | ||
720 | |||
721 | tasklet_kill(&vmci_dev->datagram_tasklet); | ||
722 | tasklet_kill(&vmci_dev->bm_tasklet); | ||
723 | |||
724 | if (vmci_dev->notification_bitmap) { | ||
725 | /* | ||
726 | * The device reset above cleared the bitmap state of the | ||
727 | * device, so we can safely free it here. | ||
728 | */ | ||
729 | |||
730 | vfree(vmci_dev->notification_bitmap); | ||
731 | } | ||
732 | |||
733 | vfree(vmci_dev->data_buffer); | ||
734 | |||
735 | /* The rest are managed resources and will be freed by PCI core */ | ||
736 | } | ||
737 | |||
738 | static DEFINE_PCI_DEVICE_TABLE(vmci_ids) = { | ||
739 | { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), }, | ||
740 | { 0 }, | ||
741 | }; | ||
742 | MODULE_DEVICE_TABLE(pci, vmci_ids); | ||
743 | |||
744 | static struct pci_driver vmci_guest_driver = { | ||
745 | .name = KBUILD_MODNAME, | ||
746 | .id_table = vmci_ids, | ||
747 | .probe = vmci_guest_probe_device, | ||
748 | .remove = vmci_guest_remove_device, | ||
749 | }; | ||
750 | |||
751 | int __init vmci_guest_init(void) | ||
752 | { | ||
753 | return pci_register_driver(&vmci_guest_driver); | ||
754 | } | ||
755 | |||
756 | void __exit vmci_guest_exit(void) | ||
757 | { | ||
758 | pci_unregister_driver(&vmci_guest_driver); | ||
759 | } | ||