aboutsummaryrefslogtreecommitdiffstats
path: root/samples
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-12-13 12:23:56 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2016-12-13 12:23:56 -0500
commitedc5f445a681a6f2522c36a4860f10ad457ab00e (patch)
treeadf689fe05902b358f25579dc583ab0bf773aa83 /samples
parent22d8262c33e52b10a4c442b04a2388b4bc589ee4 (diff)
parent2b8bb1d771f736b8b34bf160115aee1b12d29f83 (diff)
Merge tag 'vfio-v4.10-rc1' of git://github.com/awilliam/linux-vfio
Pull VFIO updates from Alex Williamson: - VFIO updates for v4.10 primarily include a new Mediated Device interface, which essentially allows software defined devices to be exposed to users through VFIO. The host vendor driver providing this virtual device polices, or mediates user access to the device. These devices often incorporate portions of real devices, for instance the primary initial users of this interface expose vGPUs which allow the user to map mediated devices, or mdevs, to a portion of a physical GPU. QEMU composes these mdevs into PCI representations using the existing VFIO user API. This enables both Intel KVM-GT support, which is also expected to arrive into Linux mainline during the v4.10 merge window, as well as NVIDIA vGPU, and also Channel I/O devices (aka CCW devices) for s390 virtualization support. (Kirti Wankhede, Neo Jia) - Drop unnecessary uses of pcibios_err_to_errno() (Cao Jin) - Fixes to VFIO capability chain handling (Eric Auger) - Error handling fixes for fallout from mdev (Christophe JAILLET) - Notifiers to expose struct kvm to mdev vendor drivers (Jike Song) - type1 IOMMU model search fixes (Kirti Wankhede, Neo Jia) * tag 'vfio-v4.10-rc1' of git://github.com/awilliam/linux-vfio: (30 commits) vfio iommu type1: Fix size argument to vfio_find_dma() in pin_pages/unpin_pages vfio iommu type1: Fix size argument to vfio_find_dma() during DMA UNMAP. vfio iommu type1: WARN_ON if notifier block is not unregistered kvm: set/clear kvm to/from vfio_group when group add/delete vfio: support notifier chain in vfio_group vfio: vfio_register_notifier: classify iommu notifier vfio: Fix handling of error returned by 'vfio_group_get_from_dev()' vfio: fix vfio_info_cap_add/shift vfio/pci: Drop unnecessary pcibios_err_to_errno() MAINTAINERS: Add entry VFIO based Mediated device drivers docs: Sample driver to demonstrate how to use Mediated device framework. 
docs: Sysfs ABI for mediated device framework docs: Add Documentation for Mediated devices vfio: Define device_api strings vfio_platform: Updated to use vfio_set_irqs_validate_and_prepare() vfio_pci: Updated to use vfio_set_irqs_validate_and_prepare() vfio: Introduce vfio_set_irqs_validate_and_prepare() vfio_pci: Update vfio_pci to use vfio_info_add_capability() vfio: Introduce common function to add capabilities vfio iommu: Add blocking notifier to notify DMA_UNMAP ...
Diffstat (limited to 'samples')
-rw-r--r--samples/vfio-mdev/Makefile13
-rw-r--r--samples/vfio-mdev/mtty.c1503
2 files changed, 1516 insertions, 0 deletions
diff --git a/samples/vfio-mdev/Makefile b/samples/vfio-mdev/Makefile
new file mode 100644
index 000000000000..a932edbe38eb
--- /dev/null
+++ b/samples/vfio-mdev/Makefile
@@ -0,0 +1,13 @@
#
# Makefile for mtty.c file
#
# Out-of-tree build against the running kernel's build directory.
# KERNEL_DIR may be overridden on the command line to cross-build.
KERNEL_DIR ?= /lib/modules/$(shell uname -r)/build

obj-m := mtty.o

# Delegate to kbuild. Use M= (the supported external-module variable);
# SUBDIRS= has been deprecated in kbuild for a long time.
modules clean modules_install:
	$(MAKE) -C $(KERNEL_DIR) M=$(PWD) $@

default: modules

# Convenience alias.
module: modules
diff --git a/samples/vfio-mdev/mtty.c b/samples/vfio-mdev/mtty.c
new file mode 100644
index 000000000000..6b633a4ea333
--- /dev/null
+++ b/samples/vfio-mdev/mtty.c
@@ -0,0 +1,1503 @@
1/*
2 * Mediated virtual PCI serial host device driver
3 *
4 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
5 * Author: Neo Jia <cjia@nvidia.com>
6 * Kirti Wankhede <kwankhede@nvidia.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * Sample driver that creates mdev device that simulates serial port over PCI
13 * card.
14 *
15 */
16
17#include <linux/init.h>
18#include <linux/module.h>
19#include <linux/device.h>
20#include <linux/kernel.h>
21#include <linux/fs.h>
22#include <linux/poll.h>
23#include <linux/slab.h>
24#include <linux/cdev.h>
25#include <linux/sched.h>
26#include <linux/wait.h>
27#include <linux/uuid.h>
28#include <linux/vfio.h>
29#include <linux/iommu.h>
30#include <linux/sysfs.h>
31#include <linux/ctype.h>
32#include <linux/file.h>
33#include <linux/mdev.h>
34#include <linux/pci.h>
35#include <linux/serial.h>
36#include <uapi/linux/serial_reg.h>
37#include <linux/eventfd.h>
38/*
39 * #defines
40 */
41
42#define VERSION_STRING "0.1"
43#define DRIVER_AUTHOR "NVIDIA Corporation"
44
45#define MTTY_CLASS_NAME "mtty"
46
47#define MTTY_NAME "mtty"
48
49#define MTTY_STRING_LEN 16
50
51#define MTTY_CONFIG_SPACE_SIZE 0xff
52#define MTTY_IO_BAR_SIZE 0x8
53#define MTTY_MMIO_BAR_SIZE 0x100000
54
55#define STORE_LE16(addr, val) (*(u16 *)addr = val)
56#define STORE_LE32(addr, val) (*(u32 *)addr = val)
57
58#define MAX_FIFO_SIZE 16
59
60#define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
61
62#define MTTY_VFIO_PCI_OFFSET_SHIFT 40
63
64#define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
65#define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
66 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
67#define MTTY_VFIO_PCI_OFFSET_MASK \
68 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
69#define MAX_MTTYS 24
70
/*
 * Global Structures
 */

/* Driver-wide state: chardev region, class and parent device for mdevs. */
struct mtty_dev {
	dev_t		vd_devt;	/* allocated char-dev region */
	struct class	*vd_class;	/* "mtty" device class */
	struct cdev	vd_cdev;
	struct idr	vd_idr;
	struct device	dev;		/* parent device mdevs hang off */
} mtty_dev;

/* Per-BAR bookkeeping mirrored from vfio_region_info. */
struct mdev_region_info {
	u64 start;		/* guest-programmed base address */
	u64 phys_start;
	u32 size;		/* 0 means the region is not implemented */
	u64 vfio_offset;	/* region offset within the vfio device fd */
};

#if defined(DEBUG_REGS)
/* Register names indexed by UART register offset, for write tracing. */
const char *wr_reg[] = {
	"TX",
	"IER",
	"FCR",
	"LCR",
	"MCR",
	"LSR",
	"MSR",
	"SCR"
};

/* Register names indexed by UART register offset, for read tracing. */
const char *rd_reg[] = {
	"RX",
	"IER",
	"IIR",
	"LCR",
	"MCR",
	"LSR",
	"MSR",
	"SCR"
};
#endif

/* loop back buffer: TX data is queued here and read back as RX. */
struct rxtx {
	u8 fifo[MAX_FIFO_SIZE];
	u8 head, tail;		/* circular indices, wrap at MAX_FIFO_SIZE */
	u8 count;		/* number of bytes currently queued */
};

/* Emulated 16550-style serial port state. */
struct serial_port {
	u8 uart_reg[8];		/* 8 registers */
	struct rxtx rxtx;	/* loop back buffer */
	bool dlab;		/* Divisor Latch Access Bit from LCR */
	bool overrun;		/* FIFO overflow occurred */
	u16 divisor;		/* baud divisor (written via DLAB) */
	u8 fcr;			/* FIFO control register */
	u8 max_fifo_size;	/* 1 when FIFOs disabled, else MAX_FIFO_SIZE */
	u8 intr_trigger_level;	/* interrupt trigger level */
};

/* State of each mdev device */
struct mdev_state {
	int irq_fd;
	struct eventfd_ctx *intx_evtfd;	/* INTx signalling eventfd */
	struct eventfd_ctx *msi_evtfd;	/* MSI signalling eventfd */
	int irq_index;			/* currently enabled VFIO irq index */
	u8 *vconfig;			/* virtual PCI config space */
	struct mutex ops_lock;		/* serializes vfio ops on this mdev */
	struct mdev_device *mdev;
	struct mdev_region_info region_info[VFIO_PCI_NUM_REGIONS];
	u32 bar_mask[VFIO_PCI_NUM_REGIONS];	/* BAR sizing masks */
	struct list_head next;		/* link in mdev_devices_list */
	struct serial_port s[2];	/* one or two emulated ports */
	struct mutex rxtx_lock;		/* protects s[].rxtx and irq state */
	struct vfio_device_info dev_info; /* cached from GET_INFO ioctl */
	int nr_ports;			/* 1 or 2, chosen by mdev type */
};

/* Protects mdev_devices_list. */
struct mutex mdev_list_lock;
/* All live mdev_state instances, linked via mdev_state.next. */
struct list_head mdev_devices_list;

/* The char dev carries no operations of its own in this sample. */
static const struct file_operations vd_fops = {
	.owner          = THIS_MODULE,
};

/* function prototypes */

static int mtty_trigger_interrupt(uuid_le uuid);
160
/* Helper functions */

/*
 * Look up the mdev_state whose mdev carries @uuid.
 * Returns NULL if no matching device is registered.
 *
 * NOTE(review): walks mdev_devices_list without taking mdev_list_lock;
 * mtty_trigger_interrupt() calls this outside the lock — confirm callers
 * cannot race with mtty_create()/mtty_remove().
 */
static struct mdev_state *find_mdev_state_by_uuid(uuid_le uuid)
{
	struct mdev_state *mds;

	list_for_each_entry(mds, &mdev_devices_list, next) {
		if (uuid_le_cmp(mds->mdev->uuid, uuid) == 0)
			return mds;
	}

	return NULL;
}
173
174void dump_buffer(char *buf, uint32_t count)
175{
176#if defined(DEBUG)
177 int i;
178
179 pr_info("Buffer:\n");
180 for (i = 0; i < count; i++) {
181 pr_info("%2x ", *(buf + i));
182 if ((i + 1) % 16 == 0)
183 pr_info("\n");
184 }
185#endif
186}
187
/*
 * Populate the virtual PCI configuration space for a freshly created mdev.
 * Presents a 16550-compatible serial controller with one IO BAR per port.
 */
static void mtty_create_config_space(struct mdev_state *mdev_state)
{
	/* PCI dev ID: LE32 0x32534348 = vendor 0x4348 "HC", device 0x3253 "2S" */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x0], 0x32534348);

	/* Control: I/O+, Mem-, BusMaster- */
	STORE_LE16((u16 *) &mdev_state->vconfig[0x4], 0x0001);

	/* Status: capabilities list absent */
	STORE_LE16((u16 *) &mdev_state->vconfig[0x6], 0x0200);

	/* Rev ID */
	mdev_state->vconfig[0x8] = 0x10;

	/* programming interface class : 16550-compatible serial controller */
	mdev_state->vconfig[0x9] = 0x02;

	/* Sub class : 00 */
	mdev_state->vconfig[0xa] = 0x00;

	/* Base class : Simple Communication controllers */
	mdev_state->vconfig[0xb] = 0x07;

	/* base address registers */
	/* BAR0: IO space (bit 0 set marks an I/O BAR) */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x10], 0x000001);
	/* sizing mask: two's complement of the BAR size */
	mdev_state->bar_mask[0] = ~(MTTY_IO_BAR_SIZE) + 1;

	if (mdev_state->nr_ports == 2) {
		/* BAR1: IO space, only for the 2-port mdev type */
		STORE_LE32((u32 *) &mdev_state->vconfig[0x14], 0x000001);
		mdev_state->bar_mask[1] = ~(MTTY_IO_BAR_SIZE) + 1;
	}

	/* Subsystem ID */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x2c], 0x32534348);

	mdev_state->vconfig[0x34] = 0x00;   /* Cap Ptr */
	mdev_state->vconfig[0x3d] = 0x01;   /* interrupt pin (INTA#) */

	/* Vendor specific data */
	mdev_state->vconfig[0x40] = 0x23;
	mdev_state->vconfig[0x43] = 0x80;
	mdev_state->vconfig[0x44] = 0x23;
	mdev_state->vconfig[0x48] = 0x23;
	mdev_state->vconfig[0x4c] = 0x23;

	/* 0x60..0x6e: ASCII "PCI Serial/UART" */
	mdev_state->vconfig[0x60] = 0x50;
	mdev_state->vconfig[0x61] = 0x43;
	mdev_state->vconfig[0x62] = 0x49;
	mdev_state->vconfig[0x63] = 0x20;
	mdev_state->vconfig[0x64] = 0x53;
	mdev_state->vconfig[0x65] = 0x65;
	mdev_state->vconfig[0x66] = 0x72;
	mdev_state->vconfig[0x67] = 0x69;
	mdev_state->vconfig[0x68] = 0x61;
	mdev_state->vconfig[0x69] = 0x6c;
	mdev_state->vconfig[0x6a] = 0x2f;
	mdev_state->vconfig[0x6b] = 0x55;
	mdev_state->vconfig[0x6c] = 0x41;
	mdev_state->vconfig[0x6d] = 0x52;
	mdev_state->vconfig[0x6e] = 0x54;
}
251
/*
 * Emulate a guest write to PCI config space at @offset.
 * Only the registers a 16550 serial function needs are writable; BARs
 * implement the standard sizing protocol (write all-ones, read back mask).
 * @count is currently only used for logging.
 */
static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
				 char *buf, u32 count)
{
	u32 cfg_addr, bar_mask, bar_index = 0;

	switch (offset) {
	case 0x04: /* device control */
	case 0x06: /* device status */
		/* do nothing */
		break;
	case 0x3c: /* interrupt line */
		mdev_state->vconfig[0x3c] = buf[0];
		break;
	case 0x3d:
		/*
		 * Interrupt Pin is hardwired to INTA.
		 * This field is write protected by hardware
		 */
		break;
	case 0x10: /* BAR0 */
	case 0x14: /* BAR1 */
		if (offset == 0x10)
			bar_index = 0;
		else if (offset == 0x14)
			bar_index = 1;

		/* A single-port mtty has no BAR1: reads-as-zero. */
		if ((mdev_state->nr_ports == 1) && (bar_index == 1)) {
			STORE_LE32(&mdev_state->vconfig[offset], 0);
			break;
		}

		cfg_addr = *(u32 *)buf;
		pr_info("BAR%d addr 0x%x\n", bar_index, cfg_addr);

		/*
		 * BAR sizing probe: all-ones write reads back the size mask.
		 * NOTE(review): real hardware masks the low address bits on
		 * every write, not just the 0xffffffff probe — confirm this
		 * simplification is intended for the sample.
		 */
		if (cfg_addr == 0xffffffff) {
			bar_mask = mdev_state->bar_mask[bar_index];
			cfg_addr = (cfg_addr & bar_mask);
		}

		/* Preserve the read-only type bits (I/O space indicator). */
		cfg_addr |= (mdev_state->vconfig[offset] & 0x3ul);
		STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
		break;
	case 0x18: /* BAR2 */
	case 0x1c: /* BAR3 */
	case 0x20: /* BAR4 */
		/* Unimplemented BARs read as zero. */
		STORE_LE32(&mdev_state->vconfig[offset], 0);
		break;
	default:
		pr_info("PCI config write @0x%x of %d bytes not handled\n",
			offset, count);
		break;
	}
}
305
/*
 * Emulate a guest write to UART register @offset of port @index.
 * TX data is looped back into the port's rx/tx FIFO; interrupt-enable and
 * FIFO-control writes may raise a virtual interrupt via the configured
 * eventfd. Only the first byte of @buf is consumed.
 */
static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
			     u16 offset, char *buf, u32 count)
{
	u8 data = *buf;

	/* Handle data written by guest */
	switch (offset) {
	case UART_TX:
		/* if DLAB set, data is LSB of divisor */
		if (mdev_state->s[index].dlab) {
			mdev_state->s[index].divisor |= data;
			break;
		}

		mutex_lock(&mdev_state->rxtx_lock);

		/* save in TX buffer */
		if (mdev_state->s[index].rxtx.count <
				mdev_state->s[index].max_fifo_size) {
			mdev_state->s[index].rxtx.fifo[
					mdev_state->s[index].rxtx.head] = data;
			mdev_state->s[index].rxtx.count++;
			CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.head);
			mdev_state->s[index].overrun = false;

			/*
			 * Trigger interrupt if receive data interrupt is
			 * enabled and fifo reached trigger level
			 */
			if ((mdev_state->s[index].uart_reg[UART_IER] &
						UART_IER_RDI) &&
			    (mdev_state->s[index].rxtx.count ==
					mdev_state->s[index].intr_trigger_level)) {
				/* trigger interrupt */
#if defined(DEBUG_INTR)
				pr_err("Serial port %d: Fifo level trigger\n",
					index);
#endif
				mtty_trigger_interrupt(mdev_state->mdev->uuid);
			}
		} else {
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: Buffer Overflow\n", index);
#endif
			mdev_state->s[index].overrun = true;

			/*
			 * Trigger interrupt if receiver line status interrupt
			 * is enabled
			 */
			if (mdev_state->s[index].uart_reg[UART_IER] &
						UART_IER_RLSI)
				mtty_trigger_interrupt(mdev_state->mdev->uuid);
		}
		mutex_unlock(&mdev_state->rxtx_lock);
		break;

	case UART_IER:
		/* if DLAB set, data is MSB of divisor */
		if (mdev_state->s[index].dlab)
			mdev_state->s[index].divisor |= (u16)data << 8;
		else {
			mdev_state->s[index].uart_reg[offset] = data;
			mutex_lock(&mdev_state->rxtx_lock);
			/*
			 * Enabling THRI while the FIFO is empty must fire the
			 * "transmit holding register empty" interrupt at once.
			 */
			if ((data & UART_IER_THRI) &&
			    (mdev_state->s[index].rxtx.head ==
					mdev_state->s[index].rxtx.tail)) {
#if defined(DEBUG_INTR)
				pr_err("Serial port %d: IER_THRI write\n",
					index);
#endif
				mtty_trigger_interrupt(mdev_state->mdev->uuid);
			}

			mutex_unlock(&mdev_state->rxtx_lock);
		}

		break;

	case UART_FCR:
		mdev_state->s[index].fcr = data;

		mutex_lock(&mdev_state->rxtx_lock);
		if (data & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT)) {
			/* clear loop back FIFO */
			mdev_state->s[index].rxtx.count = 0;
			mdev_state->s[index].rxtx.head = 0;
			mdev_state->s[index].rxtx.tail = 0;
		}
		mutex_unlock(&mdev_state->rxtx_lock);

		switch (data & UART_FCR_TRIGGER_MASK) {
		case UART_FCR_TRIGGER_1:
			mdev_state->s[index].intr_trigger_level = 1;
			break;

		case UART_FCR_TRIGGER_4:
			mdev_state->s[index].intr_trigger_level = 4;
			break;

		case UART_FCR_TRIGGER_8:
			mdev_state->s[index].intr_trigger_level = 8;
			break;

		case UART_FCR_TRIGGER_14:
			mdev_state->s[index].intr_trigger_level = 14;
			break;
		}

		/*
		 * Set trigger level to 1 otherwise or implement timer with
		 * timeout of 4 characters and on expiring that timer set
		 * Receive data timeout in IIR register
		 *
		 * NOTE(review): this unconditionally overrides the trigger
		 * level just selected by the switch above — confirm intended.
		 */
		mdev_state->s[index].intr_trigger_level = 1;
		if (data & UART_FCR_ENABLE_FIFO)
			mdev_state->s[index].max_fifo_size = MAX_FIFO_SIZE;
		else {
			mdev_state->s[index].max_fifo_size = 1;
			mdev_state->s[index].intr_trigger_level = 1;
		}

		break;

	case UART_LCR:
		/* Track DLAB so subsequent TX/IER writes load the divisor. */
		if (data & UART_LCR_DLAB) {
			mdev_state->s[index].dlab = true;
			mdev_state->s[index].divisor = 0;
		} else
			mdev_state->s[index].dlab = false;

		mdev_state->s[index].uart_reg[offset] = data;
		break;

	case UART_MCR:
		mdev_state->s[index].uart_reg[offset] = data;

		/* Modem-status interrupt on OUT2 when MSI is enabled. */
		if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
				(data & UART_MCR_OUT2)) {
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: MCR_OUT2 write\n", index);
#endif
			mtty_trigger_interrupt(mdev_state->mdev->uuid);
		}

		if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
				(data & (UART_MCR_RTS | UART_MCR_DTR))) {
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: MCR RTS/DTR write\n", index);
#endif
			mtty_trigger_interrupt(mdev_state->mdev->uuid);
		}
		break;

	case UART_LSR:
	case UART_MSR:
		/* do nothing: both are read-only status registers */
		break;

	case UART_SCR:
		mdev_state->s[index].uart_reg[offset] = data;
		break;

	default:
		break;
	}
}
473
/*
 * Emulate a guest read from UART register @offset of port @index.
 * RX data comes from the loop-back FIFO filled by handle_bar_write();
 * status registers (IIR/LSR/MSR) are synthesized from the port state.
 * The single result byte is stored in *buf.
 */
static void handle_bar_read(unsigned int index, struct mdev_state *mdev_state,
			    u16 offset, char *buf, u32 count)
{
	/* Handle read requests by guest */
	switch (offset) {
	case UART_RX:
		/* if DLAB set, data is LSB of divisor */
		if (mdev_state->s[index].dlab) {
			*buf = (u8)mdev_state->s[index].divisor;
			break;
		}

		mutex_lock(&mdev_state->rxtx_lock);
		/* return data in tx buffer */
		if (mdev_state->s[index].rxtx.head !=
				mdev_state->s[index].rxtx.tail) {
			*buf = mdev_state->s[index].rxtx.fifo[
						mdev_state->s[index].rxtx.tail];
			mdev_state->s[index].rxtx.count--;
			CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.tail);
		}

		if (mdev_state->s[index].rxtx.head ==
				mdev_state->s[index].rxtx.tail) {
			/*
			 * Trigger interrupt if tx buffer empty interrupt is
			 * enabled and fifo is empty
			 */
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: Buffer Empty\n", index);
#endif
			if (mdev_state->s[index].uart_reg[UART_IER] &
							 UART_IER_THRI)
				mtty_trigger_interrupt(mdev_state->mdev->uuid);
		}
		mutex_unlock(&mdev_state->rxtx_lock);

		break;

	case UART_IER:
		/* if DLAB set, reads return the MSB of the divisor */
		if (mdev_state->s[index].dlab) {
			*buf = (u8)(mdev_state->s[index].divisor >> 8);
			break;
		}
		*buf = mdev_state->s[index].uart_reg[offset] & 0x0f;
		break;

	case UART_IIR:
	{
		u8 ier = mdev_state->s[index].uart_reg[UART_IER];
		*buf = 0;

		mutex_lock(&mdev_state->rxtx_lock);
		/* Interrupt priority 1: Parity, overrun, framing or break */
		if ((ier & UART_IER_RLSI) && mdev_state->s[index].overrun)
			*buf |= UART_IIR_RLSI;

		/* Interrupt priority 2: Fifo trigger level reached */
		if ((ier & UART_IER_RDI) &&
		    (mdev_state->s[index].rxtx.count ==
		      mdev_state->s[index].intr_trigger_level))
			*buf |= UART_IIR_RDI;

		/* Interrupt priority 3: transmitter holding register empty */
		if ((ier & UART_IER_THRI) &&
		    (mdev_state->s[index].rxtx.head ==
				mdev_state->s[index].rxtx.tail))
			*buf |= UART_IIR_THRI;

		/* Interrupt priority 4: Modem status: CTS, DSR, RI or DCD */
		if ((ier & UART_IER_MSI) &&
		    (mdev_state->s[index].uart_reg[UART_MCR] &
				(UART_MCR_RTS | UART_MCR_DTR)))
			*buf |= UART_IIR_MSI;

		/* bit0: 0=> interrupt pending, 1=> no interrupt is pending */
		if (*buf == 0)
			*buf = UART_IIR_NO_INT;

		/* set bit 6 & 7 to be 16550 compatible */
		*buf |= 0xC0;
		mutex_unlock(&mdev_state->rxtx_lock);
	}
	break;

	case UART_LCR:
	case UART_MCR:
		*buf = mdev_state->s[index].uart_reg[offset];
		break;

	case UART_LSR:
	{
		u8 lsr = 0;

		mutex_lock(&mdev_state->rxtx_lock);
		/* at least one char in FIFO */
		if (mdev_state->s[index].rxtx.head !=
				 mdev_state->s[index].rxtx.tail)
			lsr |= UART_LSR_DR;

		/* if FIFO overrun */
		if (mdev_state->s[index].overrun)
			lsr |= UART_LSR_OE;

		/* transmit FIFO empty and transmitter empty */
		if (mdev_state->s[index].rxtx.head ==
				 mdev_state->s[index].rxtx.tail)
			lsr |= UART_LSR_TEMT | UART_LSR_THRE;

		mutex_unlock(&mdev_state->rxtx_lock);
		*buf = lsr;
		break;
	}
	case UART_MSR:
		*buf = UART_MSR_DSR | UART_MSR_DDSR | UART_MSR_DCD;

		mutex_lock(&mdev_state->rxtx_lock);
		/* if AFE is 1 and FIFO have space, set CTS bit */
		if (mdev_state->s[index].uart_reg[UART_MCR] &
						 UART_MCR_AFE) {
			if (mdev_state->s[index].rxtx.count <
					mdev_state->s[index].max_fifo_size)
				*buf |= UART_MSR_CTS | UART_MSR_DCTS;
		} else
			*buf |= UART_MSR_CTS | UART_MSR_DCTS;
		mutex_unlock(&mdev_state->rxtx_lock);

		break;

	case UART_SCR:
		*buf = mdev_state->s[index].uart_reg[offset];
		break;

	default:
		break;
	}
}
611
/*
 * Decode the guest-programmed BAR base addresses out of virtual config
 * space into region_info[].start. A 64-bit memory BAR consumes two
 * consecutive dwords (low then high); everything else is treated as 32-bit.
 */
static void mdev_read_base(struct mdev_state *mdev_state)
{
	int index, pos;
	u32 start_lo, start_hi;
	u32 mem_type;

	pos = PCI_BASE_ADDRESS_0;

	for (index = 0; index <= VFIO_PCI_BAR5_REGION_INDEX; index++) {

		/* Skip BARs this device does not implement. */
		if (!mdev_state->region_info[index].size)
			continue;

		start_lo = (*(u32 *)(mdev_state->vconfig + pos)) &
			PCI_BASE_ADDRESS_MEM_MASK;
		mem_type = (*(u32 *)(mdev_state->vconfig + pos)) &
			PCI_BASE_ADDRESS_MEM_TYPE_MASK;

		switch (mem_type) {
		case PCI_BASE_ADDRESS_MEM_TYPE_64:
			/* Upper half lives in the next dword. */
			start_hi = (*(u32 *)(mdev_state->vconfig + pos + 4));
			pos += 4;
			break;
		case PCI_BASE_ADDRESS_MEM_TYPE_32:
		case PCI_BASE_ADDRESS_MEM_TYPE_1M:
			/* 1M mem BAR treated as 32-bit BAR */
		default:
			/* mem unknown type treated as 32-bit BAR */
			start_hi = 0;
			break;
		}
		pos += 4;
		mdev_state->region_info[index].start = ((u64)start_hi << 32) |
							start_lo;
	}
}
648
/*
 * Core access dispatcher for the mdev's vfio device fd.
 * @pos packs the region index (upper bits, see MTTY_VFIO_PCI_OFFSET_SHIFT)
 * and the offset within that region; routes to config-space or BAR
 * emulation. Returns @count on success, negative errno on bad arguments,
 * or -1 for an unsupported region index.
 */
static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count,
			   loff_t pos, bool is_write)
{
	struct mdev_state *mdev_state;
	unsigned int index;
	loff_t offset;
	int ret = 0;

	if (!mdev || !buf)
		return -EINVAL;

	mdev_state = mdev_get_drvdata(mdev);
	if (!mdev_state) {
		pr_err("%s mdev_state not found\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&mdev_state->ops_lock);

	/* Split the packed position into region index + in-region offset. */
	index = MTTY_VFIO_PCI_OFFSET_TO_INDEX(pos);
	offset = pos & MTTY_VFIO_PCI_OFFSET_MASK;
	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:

#if defined(DEBUG)
		pr_info("%s: PCI config space %s at offset 0x%llx\n",
			 __func__, is_write ? "write" : "read", offset);
#endif
		if (is_write) {
			dump_buffer(buf, count);
			handle_pci_cfg_write(mdev_state, offset, buf, count);
		} else {
			/* Config reads come straight from the shadow space. */
			memcpy(buf, (mdev_state->vconfig + offset), count);
			dump_buffer(buf, count);
		}

		break;

	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
		/* Lazily decode BAR bases on first access after programming. */
		if (!mdev_state->region_info[index].start)
			mdev_read_base(mdev_state);

		if (is_write) {
			dump_buffer(buf, count);

#if defined(DEBUG_REGS)
			pr_info("%s: BAR%d  WR @0x%llx %s val:0x%02x dlab:%d\n",
				__func__, index, offset, wr_reg[offset],
				(u8)*buf, mdev_state->s[index].dlab);
#endif
			handle_bar_write(index, mdev_state, offset, buf, count);
		} else {
			handle_bar_read(index, mdev_state, offset, buf, count);
			dump_buffer(buf, count);

#if defined(DEBUG_REGS)
			pr_info("%s: BAR%d  RD @0x%llx %s val:0x%02x dlab:%d\n",
				__func__, index, offset, rd_reg[offset],
				(u8)*buf, mdev_state->s[index].dlab);
#endif
		}
		break;

	default:
		/* NOTE(review): -1 is not an errno; callers only test <= 0. */
		ret = -1;
		goto accessfailed;
	}

	ret = count;


accessfailed:
	mutex_unlock(&mdev_state->ops_lock);

	return ret;
}
725
/*
 * mdev "create" callback: allocate and initialize per-device state.
 * The number of ports (1 or 2) is derived from which mdev type sysfs
 * directory (@kobj, named "<driver>-1" / "<driver>-2") the create came from.
 * Returns 0 on success, -EINVAL for an unknown type, -ENOMEM on allocation
 * failure.
 */
int mtty_create(struct kobject *kobj, struct mdev_device *mdev)
{
	struct mdev_state *mdev_state;
	char name[MTTY_STRING_LEN];
	int nr_ports = 0, i;

	if (!mdev)
		return -EINVAL;

	/* Match the type kobject name against "<driver>-1" / "<driver>-2". */
	for (i = 0; i < 2; i++) {
		snprintf(name, MTTY_STRING_LEN, "%s-%d",
			dev_driver_string(mdev->parent->dev), i + 1);
		if (!strcmp(kobj->name, name)) {
			nr_ports = i + 1;
			break;
		}
	}

	if (!nr_ports)
		return -EINVAL;

	mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL);
	if (mdev_state == NULL)
		return -ENOMEM;

	mdev_state->nr_ports = nr_ports;
	mdev_state->irq_index = -1;	/* no interrupt configured yet */
	mdev_state->s[0].max_fifo_size = MAX_FIFO_SIZE;
	mdev_state->s[1].max_fifo_size = MAX_FIFO_SIZE;
	mutex_init(&mdev_state->rxtx_lock);
	mdev_state->vconfig = kzalloc(MTTY_CONFIG_SPACE_SIZE, GFP_KERNEL);

	if (mdev_state->vconfig == NULL) {
		kfree(mdev_state);
		return -ENOMEM;
	}

	mutex_init(&mdev_state->ops_lock);
	mdev_state->mdev = mdev;
	mdev_set_drvdata(mdev, mdev_state);

	mtty_create_config_space(mdev_state);

	mutex_lock(&mdev_list_lock);
	list_add(&mdev_state->next, &mdev_devices_list);
	mutex_unlock(&mdev_list_lock);

	return 0;
}
775
776int mtty_remove(struct mdev_device *mdev)
777{
778 struct mdev_state *mds, *tmp_mds;
779 struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
780 int ret = -EINVAL;
781
782 mutex_lock(&mdev_list_lock);
783 list_for_each_entry_safe(mds, tmp_mds, &mdev_devices_list, next) {
784 if (mdev_state == mds) {
785 list_del(&mdev_state->next);
786 mdev_set_drvdata(mdev, NULL);
787 kfree(mdev_state->vconfig);
788 kfree(mdev_state);
789 ret = 0;
790 break;
791 }
792 }
793 mutex_unlock(&mdev_list_lock);
794
795 return ret;
796}
797
798int mtty_reset(struct mdev_device *mdev)
799{
800 struct mdev_state *mdev_state;
801
802 if (!mdev)
803 return -EINVAL;
804
805 mdev_state = mdev_get_drvdata(mdev);
806 if (!mdev_state)
807 return -EINVAL;
808
809 pr_info("%s: called\n", __func__);
810
811 return 0;
812}
813
/*
 * vfio "read" callback: copy device data to userspace.
 * Splits the request into naturally-aligned 4/2/1-byte accesses, each
 * serviced by mdev_access(). Returns the number of bytes read, or
 * -EFAULT on any access/copy failure (partial progress is discarded).
 */
ssize_t mtty_read(struct mdev_device *mdev, char __user *buf, size_t count,
		  loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		/* Prefer the widest access the alignment/size allows. */
		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	/* NOTE(review): mdev_access errors are also reported as -EFAULT. */
	return -EFAULT;
}
872
/*
 * vfio "write" callback: copy userspace data into the device.
 * Mirror of mtty_read(): naturally-aligned 4/2/1-byte chunks, each copied
 * from userspace then handed to mdev_access() with is_write=true.
 * Returns bytes written, or -EFAULT on any failure.
 */
ssize_t mtty_write(struct mdev_device *mdev, const char __user *buf,
		   size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		/* Prefer the widest access the alignment/size allows. */
		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 1;
		}
		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;
write_err:
	return -EFAULT;
}
929
/*
 * VFIO_DEVICE_SET_IRQS handler: wire up or tear down the INTx/MSI eventfds
 * the host uses to inject virtual interrupts. MASK/UNMASK actions and the
 * MSIX/ERR/REQ indexes are accepted but not implemented.
 * @data carries an int eventfd when VFIO_IRQ_SET_DATA_EVENTFD is set.
 */
static int mtty_set_irqs(struct mdev_device *mdev, uint32_t flags,
			 unsigned int index, unsigned int start,
			 unsigned int count, void *data)
{
	int ret = 0;
	struct mdev_state *mdev_state;

	if (!mdev)
		return -EINVAL;

	mdev_state = mdev_get_drvdata(mdev);
	if (!mdev_state)
		return -EINVAL;

	mutex_lock(&mdev_state->ops_lock);
	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* mask/unmask not emulated by this sample */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
		{
			if (flags & VFIO_IRQ_SET_DATA_NONE) {
				pr_info("%s: disable INTx\n", __func__);
				/*
				 * NOTE(review): intx_evtfd is put but not
				 * cleared to NULL — a later trigger could
				 * use a stale context; confirm.
				 */
				if (mdev_state->intx_evtfd)
					eventfd_ctx_put(mdev_state->intx_evtfd);
				break;
			}

			if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
				int fd = *(int *)data;

				/*
				 * NOTE(review): fd 0 is a valid descriptor
				 * but is rejected here — confirm intended.
				 */
				if (fd > 0) {
					struct eventfd_ctx *evt;

					evt = eventfd_ctx_fdget(fd);
					if (IS_ERR(evt)) {
						ret = PTR_ERR(evt);
						break;
					}
					mdev_state->intx_evtfd = evt;
					mdev_state->irq_fd = fd;
					mdev_state->irq_index = index;
					break;
				}
			}
			break;
		}
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (flags & VFIO_IRQ_SET_DATA_NONE) {
				if (mdev_state->msi_evtfd)
					eventfd_ctx_put(mdev_state->msi_evtfd);
				pr_info("%s: disable MSI\n", __func__);
				/* fall back to INTx as the active index */
				mdev_state->irq_index = VFIO_PCI_INTX_IRQ_INDEX;
				break;
			}
			if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
				int fd = *(int *)data;
				struct eventfd_ctx *evt;

				if (fd <= 0)
					break;

				/* already configured: ignore a second fd */
				if (mdev_state->msi_evtfd)
					break;

				evt = eventfd_ctx_fdget(fd);
				if (IS_ERR(evt)) {
					ret = PTR_ERR(evt);
					break;
				}
				mdev_state->msi_evtfd = evt;
				mdev_state->irq_fd = fd;
				mdev_state->irq_index = index;
			}
			break;
		}
		break;
	case VFIO_PCI_MSIX_IRQ_INDEX:
		pr_info("%s: MSIX_IRQ\n", __func__);
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		pr_info("%s: ERR_IRQ\n", __func__);
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		pr_info("%s: REQ_IRQ\n", __func__);
		break;
	}

	mutex_unlock(&mdev_state->ops_lock);
	return ret;
}
1030
/*
 * Inject a virtual interrupt for the mdev identified by @uuid, using
 * whichever eventfd (MSI preferred over INTx) was configured through
 * mtty_set_irqs(). Returns the eventfd_signal() result on success,
 * -EINVAL when the device or its eventfd is missing.
 */
static int mtty_trigger_interrupt(uuid_le uuid)
{
	int ret = -1;
	struct mdev_state *mdev_state;

	mdev_state = find_mdev_state_by_uuid(uuid);

	if (!mdev_state) {
		pr_info("%s: mdev not found\n", __func__);
		return -EINVAL;
	}

	/* The active index (set by mtty_set_irqs) must have a live eventfd. */
	if ((mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX) &&
	    (!mdev_state->msi_evtfd))
		return -EINVAL;
	else if ((mdev_state->irq_index == VFIO_PCI_INTX_IRQ_INDEX) &&
		 (!mdev_state->intx_evtfd)) {
		pr_info("%s: Intr eventfd not found\n", __func__);
		return -EINVAL;
	}

	if (mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX)
		ret = eventfd_signal(mdev_state->msi_evtfd, 1);
	else
		ret = eventfd_signal(mdev_state->intx_evtfd, 1);

#if defined(DEBUG_INTR)
	pr_info("Intx triggered\n");
#endif
	if (ret != 1)
		pr_err("%s: eventfd signal failed (%d)\n", __func__, ret);

	return ret;
}
1065
/*
 * VFIO_DEVICE_GET_REGION_INFO handler: report size/offset/flags for the
 * requested region and cache them in mdev_state->region_info[].
 * Unimplemented regions report size 0. @cap_type_id/@cap_type are unused.
 *
 * NOTE(review): region_info->index comes from userspace and indexes
 * region_info[] without a range check against VFIO_PCI_NUM_REGIONS —
 * confirm the caller validates it.
 */
int mtty_get_region_info(struct mdev_device *mdev,
			 struct vfio_region_info *region_info,
			 u16 *cap_type_id, void **cap_type)
{
	unsigned int size = 0;
	struct mdev_state *mdev_state;
	int bar_index;

	if (!mdev)
		return -EINVAL;

	mdev_state = mdev_get_drvdata(mdev);
	if (!mdev_state)
		return -EINVAL;

	mutex_lock(&mdev_state->ops_lock);
	bar_index = region_info->index;

	switch (bar_index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		size = MTTY_CONFIG_SPACE_SIZE;
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
		size = MTTY_IO_BAR_SIZE;
		break;
	case VFIO_PCI_BAR1_REGION_INDEX:
		/* BAR1 exists only on the 2-port mdev type. */
		if (mdev_state->nr_ports == 2)
			size = MTTY_IO_BAR_SIZE;
		break;
	default:
		size = 0;
		break;
	}

	mdev_state->region_info[bar_index].size = size;
	mdev_state->region_info[bar_index].vfio_offset =
		MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);

	region_info->size = size;
	region_info->offset = MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);
	region_info->flags = VFIO_REGION_INFO_FLAG_READ |
		VFIO_REGION_INFO_FLAG_WRITE;
	mutex_unlock(&mdev_state->ops_lock);
	return 0;
}
1111
1112int mtty_get_irq_info(struct mdev_device *mdev, struct vfio_irq_info *irq_info)
1113{
1114 switch (irq_info->index) {
1115 case VFIO_PCI_INTX_IRQ_INDEX:
1116 case VFIO_PCI_MSI_IRQ_INDEX:
1117 case VFIO_PCI_REQ_IRQ_INDEX:
1118 break;
1119
1120 default:
1121 return -EINVAL;
1122 }
1123
1124 irq_info->flags = VFIO_IRQ_INFO_EVENTFD;
1125 irq_info->count = 1;
1126
1127 if (irq_info->index == VFIO_PCI_INTX_IRQ_INDEX)
1128 irq_info->flags |= (VFIO_IRQ_INFO_MASKABLE |
1129 VFIO_IRQ_INFO_AUTOMASKED);
1130 else
1131 irq_info->flags |= VFIO_IRQ_INFO_NORESIZE;
1132
1133 return 0;
1134}
1135
1136int mtty_get_device_info(struct mdev_device *mdev,
1137 struct vfio_device_info *dev_info)
1138{
1139 dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
1140 dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
1141 dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
1142
1143 return 0;
1144}
1145
1146static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
1147 unsigned long arg)
1148{
1149 int ret = 0;
1150 unsigned long minsz;
1151 struct mdev_state *mdev_state;
1152
1153 if (!mdev)
1154 return -EINVAL;
1155
1156 mdev_state = mdev_get_drvdata(mdev);
1157 if (!mdev_state)
1158 return -ENODEV;
1159
1160 switch (cmd) {
1161 case VFIO_DEVICE_GET_INFO:
1162 {
1163 struct vfio_device_info info;
1164
1165 minsz = offsetofend(struct vfio_device_info, num_irqs);
1166
1167 if (copy_from_user(&info, (void __user *)arg, minsz))
1168 return -EFAULT;
1169
1170 if (info.argsz < minsz)
1171 return -EINVAL;
1172
1173 ret = mtty_get_device_info(mdev, &info);
1174 if (ret)
1175 return ret;
1176
1177 memcpy(&mdev_state->dev_info, &info, sizeof(info));
1178
1179 return copy_to_user((void __user *)arg, &info, minsz);
1180 }
1181 case VFIO_DEVICE_GET_REGION_INFO:
1182 {
1183 struct vfio_region_info info;
1184 u16 cap_type_id = 0;
1185 void *cap_type = NULL;
1186
1187 minsz = offsetofend(struct vfio_region_info, offset);
1188
1189 if (copy_from_user(&info, (void __user *)arg, minsz))
1190 return -EFAULT;
1191
1192 if (info.argsz < minsz)
1193 return -EINVAL;
1194
1195 ret = mtty_get_region_info(mdev, &info, &cap_type_id,
1196 &cap_type);
1197 if (ret)
1198 return ret;
1199
1200 return copy_to_user((void __user *)arg, &info, minsz);
1201 }
1202
1203 case VFIO_DEVICE_GET_IRQ_INFO:
1204 {
1205 struct vfio_irq_info info;
1206
1207 minsz = offsetofend(struct vfio_irq_info, count);
1208
1209 if (copy_from_user(&info, (void __user *)arg, minsz))
1210 return -EFAULT;
1211
1212 if ((info.argsz < minsz) ||
1213 (info.index >= mdev_state->dev_info.num_irqs))
1214 return -EINVAL;
1215
1216 ret = mtty_get_irq_info(mdev, &info);
1217 if (ret)
1218 return ret;
1219
1220 if (info.count == -1)
1221 return -EINVAL;
1222
1223 return copy_to_user((void __user *)arg, &info, minsz);
1224 }
1225 case VFIO_DEVICE_SET_IRQS:
1226 {
1227 struct vfio_irq_set hdr;
1228 u8 *data = NULL, *ptr = NULL;
1229 size_t data_size = 0;
1230
1231 minsz = offsetofend(struct vfio_irq_set, count);
1232
1233 if (copy_from_user(&hdr, (void __user *)arg, minsz))
1234 return -EFAULT;
1235
1236 ret = vfio_set_irqs_validate_and_prepare(&hdr,
1237 mdev_state->dev_info.num_irqs,
1238 VFIO_PCI_NUM_IRQS,
1239 &data_size);
1240 if (ret)
1241 return ret;
1242
1243 if (data_size) {
1244 ptr = data = memdup_user((void __user *)(arg + minsz),
1245 data_size);
1246 if (IS_ERR(data))
1247 return PTR_ERR(data);
1248 }
1249
1250 ret = mtty_set_irqs(mdev, hdr.flags, hdr.index, hdr.start,
1251 hdr.count, data);
1252
1253 kfree(ptr);
1254 return ret;
1255 }
1256 case VFIO_DEVICE_RESET:
1257 return mtty_reset(mdev);
1258 }
1259 return -ENOTTY;
1260}
1261
/*
 * mdev open callback: per-device state was set up at create time,
 * so there is nothing to do here beyond tracing the call.
 */
int mtty_open(struct mdev_device *mdev)
{
	pr_info("%s\n", __func__);
	return 0;
}
1267
/*
 * mdev release callback — mirror of mtty_open(); only traces the call.
 * Cleanup happens in mtty_remove() when the mdev itself is destroyed.
 */
void mtty_close(struct mdev_device *mdev)
{
	pr_info("%s\n", __func__);
}
1272
1273static ssize_t
1274sample_mtty_dev_show(struct device *dev, struct device_attribute *attr,
1275 char *buf)
1276{
1277 return sprintf(buf, "This is phy device\n");
1278}
1279
1280static DEVICE_ATTR_RO(sample_mtty_dev);
1281
/* Attributes shown on the physical parent device. */
static struct attribute *mtty_dev_attrs[] = {
	&dev_attr_sample_mtty_dev.attr,
	NULL,
};

/* Exposed under <parent>/mtty_dev/ in sysfs. */
static const struct attribute_group mtty_dev_group = {
	.name  = "mtty_dev",
	.attrs = mtty_dev_attrs,
};

/* NULL-terminated list wired into parent_ops.dev_attr_groups below. */
const struct attribute_group *mtty_dev_groups[] = {
	&mtty_dev_group,
	NULL,
};
1296
1297static ssize_t
1298sample_mdev_dev_show(struct device *dev, struct device_attribute *attr,
1299 char *buf)
1300{
1301 struct mdev_device *mdev = to_mdev_device(dev);
1302
1303 if (mdev)
1304 return sprintf(buf, "This is MDEV %s\n", dev_name(&mdev->dev));
1305
1306 return sprintf(buf, "\n");
1307}
1308
1309static DEVICE_ATTR_RO(sample_mdev_dev);
1310
/* Attributes shown on each mediated device. */
static struct attribute *mdev_dev_attrs[] = {
	&dev_attr_sample_mdev_dev.attr,
	NULL,
};

/* Exposed under <mdev>/vendor/ in sysfs. */
static const struct attribute_group mdev_dev_group = {
	.name  = "vendor",
	.attrs = mdev_dev_attrs,
};

/* NULL-terminated list wired into parent_ops.mdev_attr_groups below. */
const struct attribute_group *mdev_dev_groups[] = {
	&mdev_dev_group,
	NULL,
};
1325
1326static ssize_t
1327name_show(struct kobject *kobj, struct device *dev, char *buf)
1328{
1329 char name[MTTY_STRING_LEN];
1330 int i;
1331 const char *name_str[2] = {"Single port serial", "Dual port serial"};
1332
1333 for (i = 0; i < 2; i++) {
1334 snprintf(name, MTTY_STRING_LEN, "%s-%d",
1335 dev_driver_string(dev), i + 1);
1336 if (!strcmp(kobj->name, name))
1337 return sprintf(buf, "%s\n", name_str[i]);
1338 }
1339
1340 return -EINVAL;
1341}
1342
1343MDEV_TYPE_ATTR_RO(name);
1344
1345static ssize_t
1346available_instances_show(struct kobject *kobj, struct device *dev, char *buf)
1347{
1348 char name[MTTY_STRING_LEN];
1349 int i;
1350 struct mdev_state *mds;
1351 int ports = 0, used = 0;
1352
1353 for (i = 0; i < 2; i++) {
1354 snprintf(name, MTTY_STRING_LEN, "%s-%d",
1355 dev_driver_string(dev), i + 1);
1356 if (!strcmp(kobj->name, name)) {
1357 ports = i + 1;
1358 break;
1359 }
1360 }
1361
1362 if (!ports)
1363 return -EINVAL;
1364
1365 list_for_each_entry(mds, &mdev_devices_list, next)
1366 used += mds->nr_ports;
1367
1368 return sprintf(buf, "%d\n", (MAX_MTTYS - used)/ports);
1369}
1370
1371MDEV_TYPE_ATTR_RO(available_instances);
1372
1373
1374static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
1375 char *buf)
1376{
1377 return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
1378}
1379
1380MDEV_TYPE_ATTR_RO(device_api);
1381
/* Per-type attributes (name, device_api, available_instances),
 * shared by both supported types.
 */
static struct attribute *mdev_types_attrs[] = {
	&mdev_type_attr_name.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_available_instances.attr,
	NULL,
};

/* Type "1": single-port serial device. */
static struct attribute_group mdev_type_group1 = {
	.name  = "1",
	.attrs = mdev_types_attrs,
};

/* Type "2": dual-port serial device. */
static struct attribute_group mdev_type_group2 = {
	.name  = "2",
	.attrs = mdev_types_attrs,
};

/* NULL-terminated list wired into parent_ops.supported_type_groups. */
struct attribute_group *mdev_type_groups[] = {
	&mdev_type_group1,
	&mdev_type_group2,
	NULL,
};
1404
/*
 * Callbacks handed to the mediated-device core via mdev_register_device();
 * the mdev framework invokes these on behalf of userspace (VFIO) requests.
 */
struct parent_ops mdev_fops = {
	.owner                  = THIS_MODULE,
	.dev_attr_groups        = mtty_dev_groups,
	.mdev_attr_groups       = mdev_dev_groups,
	.supported_type_groups  = mdev_type_groups,
	.create                 = mtty_create,
	.remove			= mtty_remove,
	.open                   = mtty_open,
	.release                = mtty_close,
	.read                   = mtty_read,
	.write                  = mtty_write,
	.ioctl		        = mtty_ioctl,
};
1418
/*
 * release() for the statically allocated mtty_dev.dev; nothing is
 * heap-allocated, so just emit a debug trace.
 */
static void mtty_device_release(struct device *dev)
{
	dev_dbg(dev, "mtty: released\n");
}
1423
1424static int __init mtty_dev_init(void)
1425{
1426 int ret = 0;
1427
1428 pr_info("mtty_dev: %s\n", __func__);
1429
1430 memset(&mtty_dev, 0, sizeof(mtty_dev));
1431
1432 idr_init(&mtty_dev.vd_idr);
1433
1434 ret = alloc_chrdev_region(&mtty_dev.vd_devt, 0, MINORMASK, MTTY_NAME);
1435
1436 if (ret < 0) {
1437 pr_err("Error: failed to register mtty_dev, err:%d\n", ret);
1438 return ret;
1439 }
1440
1441 cdev_init(&mtty_dev.vd_cdev, &vd_fops);
1442 cdev_add(&mtty_dev.vd_cdev, mtty_dev.vd_devt, MINORMASK);
1443
1444 pr_info("major_number:%d\n", MAJOR(mtty_dev.vd_devt));
1445
1446 mtty_dev.vd_class = class_create(THIS_MODULE, MTTY_CLASS_NAME);
1447
1448 if (IS_ERR(mtty_dev.vd_class)) {
1449 pr_err("Error: failed to register mtty_dev class\n");
1450 goto failed1;
1451 }
1452
1453 mtty_dev.dev.class = mtty_dev.vd_class;
1454 mtty_dev.dev.release = mtty_device_release;
1455 dev_set_name(&mtty_dev.dev, "%s", MTTY_NAME);
1456
1457 ret = device_register(&mtty_dev.dev);
1458 if (ret)
1459 goto failed2;
1460
1461 if (mdev_register_device(&mtty_dev.dev, &mdev_fops) != 0)
1462 goto failed3;
1463
1464 mutex_init(&mdev_list_lock);
1465 INIT_LIST_HEAD(&mdev_devices_list);
1466
1467 goto all_done;
1468
1469failed3:
1470
1471 device_unregister(&mtty_dev.dev);
1472failed2:
1473 class_destroy(mtty_dev.vd_class);
1474
1475failed1:
1476 cdev_del(&mtty_dev.vd_cdev);
1477 unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK);
1478
1479all_done:
1480 return ret;
1481}
1482
/*
 * Module exit: tear everything down in reverse order of mtty_dev_init().
 */
static void __exit mtty_dev_exit(void)
{
	/* NOTE(review): dev.bus is cleared before unregistering — presumably
	 * to keep bus-level teardown out of mdev_unregister_device(); confirm
	 * against the mdev core's expectations.
	 */
	mtty_dev.dev.bus = NULL;
	mdev_unregister_device(&mtty_dev.dev);

	device_unregister(&mtty_dev.dev);
	idr_destroy(&mtty_dev.vd_idr);
	cdev_del(&mtty_dev.vd_cdev);
	unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK);
	class_destroy(mtty_dev.vd_class);
	mtty_dev.vd_class = NULL;
	pr_info("mtty_dev: Unloaded!\n");
}
1496
module_init(mtty_dev_init)
module_exit(mtty_dev_exit)

/* Module metadata; VERSION_STRING and DRIVER_AUTHOR are defined earlier
 * in this file.
 */
MODULE_LICENSE("GPL v2");
MODULE_INFO(supported, "Test driver that simulate serial port over PCI");
MODULE_VERSION(VERSION_STRING);
MODULE_AUTHOR(DRIVER_AUTHOR);